hexsha (string, 40 chars) | size (int64, 6–14.9M) | ext (string, 1 class) | lang (string, 1 class) | max_stars_repo_path (string, 6–260 chars) | max_stars_repo_name (string, 6–119 chars) | max_stars_repo_head_hexsha (string, 40–41 chars) | max_stars_repo_licenses (list) | max_stars_count (int64, 1–191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24 chars, nullable) | max_stars_repo_stars_event_max_datetime (string, 24 chars, nullable) | max_issues_repo_path (string, 6–260 chars) | max_issues_repo_name (string, 6–119 chars) | max_issues_repo_head_hexsha (string, 40–41 chars) | max_issues_repo_licenses (list) | max_issues_count (int64, 1–67k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24 chars, nullable) | max_issues_repo_issues_event_max_datetime (string, 24 chars, nullable) | max_forks_repo_path (string, 6–260 chars) | max_forks_repo_name (string, 6–119 chars) | max_forks_repo_head_hexsha (string, 40–41 chars) | max_forks_repo_licenses (list) | max_forks_count (int64, 1–105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24 chars, nullable) | max_forks_repo_forks_event_max_datetime (string, 24 chars, nullable) | avg_line_length (float64, 2–1.04M) | max_line_length (int64, 2–11.2M) | alphanum_fraction (float64, 0–1) | cells (list) | cell_types (list) | cell_type_groups (list) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a22e9ee022907a904c9136e5e29dfe4e63eecbd
| 242,814 |
ipynb
|
Jupyter Notebook
|
python_case_studies/whiskies/Whiskies.ipynb
|
clementpoiret/PH526x-Harvard-Python-Research
|
214ad883a656ef57eea63b5bc250afc96ec9b83a
|
[
"Apache-2.0"
] | 2 |
2019-11-02T21:32:29.000Z
|
2020-07-14T03:09:42.000Z
|
python_case_studies/whiskies/Whiskies.ipynb
|
clementpoiret/PH526x-Harvard-Python-Research
|
214ad883a656ef57eea63b5bc250afc96ec9b83a
|
[
"Apache-2.0"
] | null | null | null |
python_case_studies/whiskies/Whiskies.ipynb
|
clementpoiret/PH526x-Harvard-Python-Research
|
214ad883a656ef57eea63b5bc250afc96ec9b83a
|
[
"Apache-2.0"
] | 6 |
2019-08-31T15:54:39.000Z
|
2020-12-17T01:22:42.000Z
| 116.234562 | 100,052 | 0.777879 |
[
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom bokeh.plotting import *\nfrom sklearn.cluster.bicluster import SpectralCoclustering\nfrom bokeh.models import HoverTool, ColumnDataSource\nfrom itertools import product",
"_____no_output_____"
],
[
"whisky = pd.read_csv('whiskies.txt')\nwhisky[\"Region\"] = pd.read_csv('regions.txt')",
"_____no_output_____"
],
[
"whisky.head()",
"_____no_output_____"
],
[
"whisky.tail()",
"_____no_output_____"
],
[
"flavors = whisky.iloc[:, 2:14]\nflavors",
"_____no_output_____"
],
[
"corr_flavors = pd.DataFrame.corr(flavors)",
"_____no_output_____"
],
[
"print(corr_flavors)",
" Body Sweetness Smoky Medicinal Tobacco Honey \\\nBody 1.000000 -0.136518 0.524032 0.354050 0.168718 0.082031 \nSweetness -0.136518 1.000000 -0.405897 -0.392017 -0.147871 0.132558 \nSmoky 0.524032 -0.405897 1.000000 0.686071 0.365501 -0.195318 \nMedicinal 0.354050 -0.392017 0.686071 1.000000 0.425106 -0.396629 \nTobacco 0.168718 -0.147871 0.365501 0.425106 1.000000 -0.275490 \nHoney 0.082031 0.132558 -0.195318 -0.396629 -0.275490 1.000000 \nSpicy 0.188500 -0.054200 0.231745 0.044903 0.054068 0.139563 \nWiney 0.408576 0.115727 -0.028190 -0.202651 0.009097 0.362021 \nNutty 0.126323 -0.032493 -0.023132 -0.113671 -0.117717 0.188492 \nMalty -0.116859 -0.001516 -0.192875 -0.258959 -0.059347 0.310184 \nFruity -0.013205 0.019820 -0.312970 -0.330975 -0.235145 0.108822 \nFloral -0.461203 0.144987 -0.431663 -0.511323 -0.212375 0.183029 \n\n Spicy Winey Nutty Malty Fruity Floral \nBody 0.188500 0.408576 0.126323 -0.116859 -0.013205 -0.461203 \nSweetness -0.054200 0.115727 -0.032493 -0.001516 0.019820 0.144987 \nSmoky 0.231745 -0.028190 -0.023132 -0.192875 -0.312970 -0.431663 \nMedicinal 0.044903 -0.202651 -0.113671 -0.258959 -0.330975 -0.511323 \nTobacco 0.054068 0.009097 -0.117717 -0.059347 -0.235145 -0.212375 \nHoney 0.139563 0.362021 0.188492 0.310184 0.108822 0.183029 \nSpicy 1.000000 0.092704 -0.042856 0.036303 0.144714 0.034663 \nWiney 0.092704 1.000000 0.198467 0.112368 0.090694 -0.126932 \nNutty -0.042856 0.198467 1.000000 0.066157 0.071765 0.018302 \nMalty 0.036303 0.112368 0.066157 1.000000 0.207288 0.106309 \nFruity 0.144714 0.090694 0.071765 0.207288 1.000000 0.262336 \nFloral 0.034663 -0.126932 0.018302 0.106309 0.262336 1.000000 \n"
],
[
"plt.figure(figsize=(10,10))\nplt.pcolor(corr_flavors)\nplt.colorbar()\nplt.savefig(\"corr_flavors.pdf\")",
"_____no_output_____"
],
[
"corr_whisky = pd.DataFrame.corr(flavors.transpose())\nplt.figure(figsize=(10,10))\nplt.pcolor(corr_whisky)\nplt.colorbar()\nplt.savefig(\"corr_whisky.pdf\")",
"_____no_output_____"
],
[
"model = SpectralCoclustering(n_clusters=6, random_state=0)",
"_____no_output_____"
],
[
"model.fit(corr_whisky)",
"_____no_output_____"
],
[
"model.rows_",
"_____no_output_____"
],
[
"np.sum(model.rows_, axis=0)",
"_____no_output_____"
],
[
"model.row_labels_",
"_____no_output_____"
],
[
"whisky['Group'] = pd.Series(model.row_labels_, index=whisky.index)\nwhisky = whisky.iloc[np.argsort(model.row_labels_)]\nwhisky = whisky.reset_index(drop=True)",
"_____no_output_____"
],
[
"correlations = pd.DataFrame.corr(whisky.iloc[:,2:14].transpose())",
"_____no_output_____"
],
[
"correlations = np.array(correlations)",
"_____no_output_____"
],
[
"plt.figure(figsize = (14,7))\nplt.subplot(121)\nplt.pcolor(corr_whisky)\nplt.title('Original')\nplt.axis('tight')\nplt.subplot(122)\nplt.pcolor(correlations)\nplt.title('Rearranged')\nplt.axis('tight')\nplt.savefig('correlations.pdf')",
"_____no_output_____"
],
[
"# Let's plot a simple 5x5 grid of squares, alternating in color as red and blue.\n\nplot_values = [1, 2, 3, 4, 5]\nplot_colors = [\"red\", \"blue\"]\n\n# How do we tell Bokeh to plot each point in a grid? Let's use a function that\n# finds each combination of values from 1-5.\nfrom itertools import product\n\ngrid = list(product(plot_values, plot_values))\nprint(grid)\n\n# The first value is the x coordinate, and the second value is the y coordinate.\n# Let's store these in separate lists.\n\nxs, ys = zip(*grid)\nprint(xs)\nprint(ys)\n\n# Now we will make a list of colors, alternating between red and blue.\n\ncolors = [plot_colors[i % 2] for i in range(len(grid))]\nprint(colors)\n\n# Finally, let's determine the strength of transparency (alpha) for each point,\n# where 0 is completely transparent.\n\nalphas = np.linspace(0, 1, len(grid))\n\n# Bokeh likes each of these to be stored in a special dataframe, called\n# ColumnDataSource. Let's store our coordinates, colors, and alpha values.\n\nsource = ColumnDataSource(\n data={\n \"x\": xs,\n \"y\": ys,\n \"colors\": colors,\n \"alphas\": alphas,\n }\n)\n# We are ready to make our interactive Bokeh plot!\n\noutput_file(\"Basic_Example.html\", title=\"Basic Example\")\nfig = figure(tools=\"hover, save\")\nfig.rect(\"x\", \"y\", 0.9, 0.9, source=source, color=\"colors\", alpha=\"alphas\")\nhover = fig.select(dict(type=HoverTool))\nhover.tooltips = {\n \"Value\": \"@x, @y\",\n}\nshow(fig)",
"[(1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (2, 1), (2, 2), (2, 3), (2, 4), (2, 5), (3, 1), (3, 2), (3, 3), (3, 4), (3, 5), (4, 1), (4, 2), (4, 3), (4, 4), (4, 5), (5, 1), (5, 2), (5, 3), (5, 4), (5, 5)]\n(1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5)\n(1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5, 1, 2, 3, 4, 5)\n['red', 'blue', 'red', 'blue', 'red', 'blue', 'red', 'blue', 'red', 'blue', 'red', 'blue', 'red', 'blue', 'red', 'blue', 'red', 'blue', 'red', 'blue', 'red', 'blue', 'red', 'blue', 'red']\n"
],
[
"cluster_colors = [\"red\", \"orange\", \"green\", \"blue\", \"purple\", \"gray\"]\nregions = [\"Speyside\", \"Highlands\", \"Lowlands\", \"Islands\", \"Campbelltown\", \"Islay\"]\n\nregion_colors = dict(zip(regions, cluster_colors))\nregion_colors",
"_____no_output_____"
],
[
"distilleries = list(whisky.Distillery)\ncorrelation_colors = []\nfor i in range(len(distilleries)):\n for j in range(len(distilleries)):\n if correlations[i, j] < 0.70: # if low correlation,\n correlation_colors.append('white') # just use white.\n else: # otherwise,\n if whisky.Group[i] == whisky.Group[j]: # if the groups match,\n correlation_colors.append(cluster_colors[whisky.Group[i]]) # color them by their mutual group.\n else: # otherwise\n correlation_colors.append('lightgray') # color them lightgray.",
"_____no_output_____"
],
[
"source = ColumnDataSource(\n data = {\n \"x\": np.repeat(distilleries,len(distilleries)),\n \"y\": list(distilleries)*len(distilleries),\n \"colors\": correlation_colors,\n \"alphas\": correlations.flatten(),\n \"correlations\": correlations.flatten(),\n }\n)\n\noutput_file(\"Whisky Correlations.html\", title=\"Whisky Correlations\")\nfig = figure(title=\"Whisky Correlations\",\n x_axis_location=\"above\", tools=\"hover,save\",\n x_range=list(reversed(distilleries)), y_range=distilleries)\nfig.grid.grid_line_color = None\nfig.axis.axis_line_color = None\nfig.axis.major_tick_line_color = None\nfig.axis.major_label_text_font_size = \"5pt\"\nfig.xaxis.major_label_orientation = np.pi / 3\n\nfig.rect('x', 'y', .9, .9, source=source,\n color='colors', alpha='correlations')\nhover = fig.select(dict(type=HoverTool))\nhover.tooltips = {\n \"Whiskies\": \"@x, @y\",\n \"Correlation\": \"@correlations\",\n}\nshow(fig)",
"_____no_output_____"
],
[
"points = [(0,0), (1,2), (3,1)]\nxs, ys = zip(*points)\ncolors = [\"red\", \"blue\", \"green\"]\n\noutput_file(\"Spatial_Example.html\", title=\"Regional Example\")\nlocation_source = ColumnDataSource(\n data={\n \"x\": xs,\n \"y\": ys,\n \"colors\": colors,\n }\n)\n\nfig = figure(title = \"Title\",\n x_axis_location = \"above\", tools=\"hover, save\")\nfig.plot_width = 300\nfig.plot_height = 380\nfig.circle(\"x\", \"y\",size=10, source=location_source, color='colors', line_color=None)\n\nhover = fig.select(dict(type = HoverTool))\nhover.tooltips = {\n \"Location\": \"(@x, @y)\"\n}\nshow(fig)",
"_____no_output_____"
],
[
"def location_plot(title, colors):\n output_file(title + \".html\")\n location_source = ColumnDataSource(\n data={\n \"x\": whisky[\" Latitude\"],\n \"y\": whisky[\" Longitude\"],\n \"colors\": colors,\n \"regions\": whisky.Region,\n \"distilleries\": whisky.Distillery\n }\n )\n\n fig = figure(title=title,\n x_axis_location=\"above\", tools=\"hover, save\")\n fig.plot_width = 400\n fig.plot_height = 500\n fig.circle(\"x\", \"y\", size=9, source=location_source, color='colors', line_color=None)\n fig.xaxis.major_label_orientation = np.pi / 3\n hover = fig.select(dict(type=HoverTool))\n hover.tooltips = {\n \"Distillery\": \"@distilleries\",\n \"Location\": \"(@x, @y)\"\n }\n show(fig)\n\n\nregion_cols = [region_colors[i] for i in list(whisky[\"Region\"])]\nlocation_plot(\"Whisky Locations and Regions\", region_cols)",
"_____no_output_____"
],
[
"region_cols = [region_colors[i] for i in list(whisky.Region)]\nclassification_cols = [cluster_colors[i] for i in list(whisky.Group)]\n\nlocation_plot(\"Whisky Locations and Regions\", region_cols)\nlocation_plot(\"Whisky Locations and Groups\", classification_cols)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
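The Whiskies notebook in the record above clusters a whisky-by-whisky correlation matrix with `SpectralCoclustering` and then re-plots the rearranged matrix. Below is a minimal, self-contained sketch of that co-clustering step; the flavor scores are randomly generated stand-ins (the `whiskies.txt` data is not included here), and it uses the current `from sklearn.cluster import SpectralCoclustering` import path, since the notebook's `sklearn.cluster.bicluster` module was removed in recent scikit-learn releases.

```python
# Sketch of the co-clustering step from the Whiskies notebook above.
# Assumptions: random stand-in flavor scores instead of whiskies.txt, and the
# modern import path (sklearn.cluster.bicluster no longer exists).
import numpy as np
import pandas as pd
from sklearn.cluster import SpectralCoclustering

rng = np.random.default_rng(0)
# Shared flavor profile plus whisky-specific noise, so correlations stay mostly positive,
# roughly mimicking the real 86-whisky x 12-attribute table.
flavors = pd.DataFrame(rng.random(12) + 0.5 * rng.random((86, 12)))
corr_whisky = flavors.T.corr()                    # 86 x 86 whisky-by-whisky correlations

model = SpectralCoclustering(n_clusters=6, random_state=0)
model.fit(corr_whisky)                            # same call as in the notebook

order = np.argsort(model.row_labels_)             # group whiskies by cluster label
rearranged = corr_whisky.values[order][:, order]  # block structure now sits on the diagonal
print(model.row_labels_[:10])
```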
4a230e89d4e06f6b13aaf9f57cd0d73eaf6a5a62
| 2,122 |
ipynb
|
Jupyter Notebook
|
Python Basics/Pyforest.ipynb
|
ShristiJalan/Machine-Learning
|
a9abd89e358facf3067fb9d5b7e6af5e7a6d1faa
|
[
"MIT"
] | null | null | null |
Python Basics/Pyforest.ipynb
|
ShristiJalan/Machine-Learning
|
a9abd89e358facf3067fb9d5b7e6af5e7a6d1faa
|
[
"MIT"
] | null | null | null |
Python Basics/Pyforest.ipynb
|
ShristiJalan/Machine-Learning
|
a9abd89e358facf3067fb9d5b7e6af5e7a6d1faa
|
[
"MIT"
] | null | null | null | 30.314286 | 424 | 0.572573 |
[
[
[
"### Pyforest - lazy-import of all Python Data Science libraries\n\npyforest lazy-imports all popular Python Data Science libraries so that they are always there when you need them. If you don't use a library, it won't be imported. When you are done with your script, you can export the Python code for the import statements.",
"_____no_output_____"
]
],
[
[
"df = pd.read_csv(\"http://winterolympicsmedals.com/medals.csv\")",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
]
] |
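The Pyforest notebook above calls `pd.read_csv` without an explicit `import pandas`, relying on pyforest's lazy imports. The sketch below shows how that works outside an auto-configured notebook; it assumes pyforest is installed (`pip install pyforest`), and `active_imports()` is the helper pyforest's documentation uses to list which imports were actually triggered (worth checking against your installed version). The CSV URL is taken from the notebook cell itself.

```python
# Minimal sketch of the pyforest behaviour described above (assumes pyforest is installed).
# pyforest exposes lazy placeholders such as pd, np, plt, sns; the real import only
# happens the first time a placeholder is used.
from pyforest import *

df = pd.read_csv("http://winterolympicsmedals.com/medals.csv")  # first use of pd triggers "import pandas as pd"
print(df.head())
print(active_imports())  # lists the import statements that actually ran
```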
4a23138f9db57b2b3a89ff91cc1a3eaa326452cf
| 3,874 |
ipynb
|
Jupyter Notebook
|
examples/reference/widgets/DateRangeSlider.ipynb
|
andriyor/panel
|
e1d76f415df02ca90f54d42ebcaf42a1bcc65560
|
[
"BSD-3-Clause"
] | 1,130 |
2019-11-23T09:53:37.000Z
|
2022-03-31T11:30:07.000Z
|
examples/reference/widgets/DateRangeSlider.ipynb
|
andriyor/panel
|
e1d76f415df02ca90f54d42ebcaf42a1bcc65560
|
[
"BSD-3-Clause"
] | 2,265 |
2019-11-20T17:09:09.000Z
|
2022-03-31T22:09:38.000Z
|
examples/reference/widgets/DateRangeSlider.ipynb
|
datalayer-externals/holoviz-panel
|
5e25cb09447d8edf0b316f130ee1318a2aeb880f
|
[
"BSD-3-Clause"
] | 215 |
2019-11-26T11:49:04.000Z
|
2022-03-30T10:23:11.000Z
| 35.218182 | 474 | 0.610222 |
[
[
[
"import datetime as dt\nimport panel as pn\n\npn.extension()",
"_____no_output_____"
]
],
[
[
"The ``DateRangeSlider`` widget allows selecting a date range using a slider with two handles.\n\nFor more information about listening to widget events and laying out widgets refer to the [widgets user guide](../../user_guide/Widgets.ipynb). Alternatively you can learn how to build GUIs by declaring parameters independently of any specific widgets in the [param user guide](../../user_guide/Param.ipynb). To express interactivity entirely using Javascript without the need for a Python server take a look at the [links user guide](../../user_guide/Param.ipynb).\n\n#### Parameters:\n\nFor layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb).\n\n\n##### Core\n\n* **``start``** (datetime): The range's lower bound\n* **``end``** (datetime): The range's upper bound\n* **``value``** (tuple): Tuple of upper and lower bounds of the selected range expressed as datetime types\n* **``value_throttled``** (tuple): Tuple of upper and lower bounds of the selected range expressed as datetime types throttled until mouseup\n\n##### Display\n\n* **``bar_color``** (color): Color of the slider bar as a hexadecimal RGB value\n* **``callback_policy``** (str, **DEPRECATED**): Policy to determine when slider events are triggered (one of 'continuous', 'throttle', 'mouseup')\n* **``callback_throttle``** (int): Number of milliseconds to pause between callback calls as the slider is moved\n* **``direction``** (str): Whether the slider should go from left to right ('ltr') or right to left ('rtl')\n* **``disabled``** (boolean): Whether the widget is editable\n* **``name``** (str): The title of the widget\n* **``orientation``** (str): Whether the slider should be displayed in a 'horizontal' or 'vertical' orientation.\n* **``tooltips``** (boolean): Whether to display tooltips on the slider handle\n\n___\n\nThe slider start and end can be adjusted by dragging the handles and whole range can be shifted by dragging the selected range.",
"_____no_output_____"
]
],
[
[
"date_range_slider = pn.widgets.DateRangeSlider(\n name='Date Range Slider',\n start=dt.datetime(2017, 1, 1), end=dt.datetime(2019, 1, 1),\n value=(dt.datetime(2017, 1, 1), dt.datetime(2018, 1, 10))\n)\n\ndate_range_slider",
"_____no_output_____"
]
],
[
[
"``DateRangeSlider.value`` returns a tuple of datetime values that can be read out and set like other widgets:",
"_____no_output_____"
]
],
[
[
"date_range_slider.value",
"_____no_output_____"
]
],
[
[
"### Controls\n\nThe `DateRangeSlider` widget exposes a number of options which can be changed from both Python and Javascript. Try out the effect of these parameters interactively:",
"_____no_output_____"
]
],
[
[
"pn.Row(date_range_slider.controls(jslink=True), date_range_slider)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
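The DateRangeSlider record above documents the widget's parameters and reads `value` from Python. As a small follow-on sketch, the example below wires a Python callback to value changes using Panel's documented `param.watch` mechanism; the widget construction mirrors the notebook cell, and the callback name `report` is purely illustrative.

```python
# Reacting to DateRangeSlider value changes from Python (a sketch, not part of the notebook).
import datetime as dt
import panel as pn

pn.extension()

date_range_slider = pn.widgets.DateRangeSlider(
    name='Date Range Slider',
    start=dt.datetime(2017, 1, 1), end=dt.datetime(2019, 1, 1),
    value=(dt.datetime(2017, 1, 1), dt.datetime(2018, 1, 10)),
)

def report(event):
    # event.old / event.new hold the (start, end) tuples before and after the change
    print(f"range changed: {event.old} -> {event.new}")

date_range_slider.param.watch(report, 'value')   # standard param watcher
pn.Row(date_range_slider).servable()             # in a notebook, displaying the widget is enough
```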
4a231c4a7cc3adcd99b2105c9c9b441fdc1df23b
| 4,990 |
ipynb
|
Jupyter Notebook
|
notebooks/Theano MNIST.ipynb
|
xkortex/tensorflow-examples
|
b1709cfdd8e1d87309af0a32d91f60448baaa8f3
|
[
"Apache-2.0"
] | 11 |
2017-12-18T19:37:46.000Z
|
2020-10-08T08:14:10.000Z
|
notebooks/Theano MNIST.ipynb
|
IanLewis/tensorflow-examples
|
b1709cfdd8e1d87309af0a32d91f60448baaa8f3
|
[
"Apache-2.0"
] | 1 |
2018-06-26T14:59:24.000Z
|
2018-06-26T14:59:24.000Z
|
notebooks/Theano MNIST.ipynb
|
ianlewis/tensorflow-examples
|
b1709cfdd8e1d87309af0a32d91f60448baaa8f3
|
[
"Apache-2.0"
] | 37 |
2018-03-26T06:08:42.000Z
|
2021-12-08T04:10:48.000Z
| 23.990385 | 237 | 0.536072 |
[
[
[
"# Theano MNIST\nBefore start using this, please select `Cell` - `All Output` - `Clear` to clear the old results. See [Classifying MNIST digits using Logistic Regression](http://deeplearning.net/tutorial/logreg.html) for details of the tutorial.\n\n# Loading MNIST training data",
"_____no_output_____"
]
],
[
[
"import theano\nimport theano.tensor as T\nimport numpy",
"_____no_output_____"
],
[
"# import MNIST data\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)",
"_____no_output_____"
]
],
[
[
"# Defining a Neural Network",
"_____no_output_____"
]
],
[
[
"# initialize with 0 the weights W as a matrix of shape (n_in, n_out)\nW = theano.shared(\n value=numpy.zeros(\n (784, 10),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n)\n\n# initialize the biases b as a vector of n_out 0s\nb = theano.shared(\n value=numpy.zeros(\n (10,),\n dtype=theano.config.floatX\n ),\n name='b',\n borrow=True\n)\n\n# generate symbolic variables for input (x and y represent a\n# minibatch)\nx = T.matrix('x') # data, presented as rasterized images\n\ny = T.nnet.softmax(T.dot(x, W) + b)\ny",
"_____no_output_____"
],
[
"#y_ = T.ivector('y') # labels, presented as 1D vector of [int] labels\ny_ = T.matrix('y')\ncross_entropy = -T.sum(y_*T.log(y))",
"_____no_output_____"
]
],
[
[
"# Defining the Train Step",
"_____no_output_____"
]
],
[
[
"# allocate symbolic variables for the data\nindex = T.lscalar() # index to a [mini]batch\n\nlearning_rate = 0.1\n\n# compute the gradient of cost with respect to theta = (W,b)\ng_W = T.grad(cost=cross_entropy, wrt=W)\ng_b = T.grad(cost=cross_entropy, wrt=b)\n\n# start-snippet-3\n# specify how to update the parameters of the model as a list of\n# (variable, update expression) pairs.\nupdates = [(W, W - learning_rate * g_W),\n (b, b - learning_rate * g_b)]\n\ntrain_model = theano.function(\n inputs=[x, y_],\n outputs=cross_entropy,\n updates=updates\n)\n\ntrain_model",
"_____no_output_____"
]
],
[
[
"# Do 1000 times of mini-batch training",
"_____no_output_____"
]
],
[
[
"for i in range(1000):\n batch_xs, batch_ys = mnist.train.next_batch(100)\n\n batch_xs = numpy.asarray(batch_xs, dtype=theano.config.floatX)\n batch_ys = numpy.asarray(batch_ys, dtype=theano.config.floatX)\n\n train_model(batch_xs, batch_ys)",
"_____no_output_____"
]
],
[
[
"# Test",
"_____no_output_____"
]
],
[
[
"test_set_x = numpy.asarray(mnist.test.images, dtype=theano.config.floatX)\ntest_set_y = numpy.asarray(mnist.test.labels, dtype=theano.config.floatX)\n\ncorrect_prediction = T.eq(T.argmax(y, 1), T.argmax(y_, 1))\naccuracy = T.mean(T.cast(correct_prediction, 'float32'))\naccuracy_f = theano.function(\n inputs=[x, y_],\n outputs=accuracy,\n)\nprint(accuracy_f(test_set_x, test_set_y))",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
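The Theano MNIST record above implements plain softmax regression trained by gradient descent on a cross-entropy cost. Since Theano is no longer actively developed, here is an equivalent NumPy-only sketch of a single training step on synthetic stand-in data (random "images" rather than MNIST), not the notebook's exact pipeline; the variable names follow the notebook where they overlap.

```python
# NumPy sketch of one softmax-regression training step, mirroring the Theano cells above.
# Stand-in mini-batch data is random; the update rule is the same W/b gradient step.
import numpy as np

rng = np.random.default_rng(0)
W = np.zeros((784, 10))          # weights, as in the Theano shared variable
b = np.zeros(10)                 # biases
learning_rate = 0.1

def softmax(z):
    z = z - z.max(axis=1, keepdims=True)   # stabilise the exponential
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)

batch_xs = rng.random((100, 784))                    # stand-in mini-batch of "images"
batch_ys = np.eye(10)[rng.integers(0, 10, 100)]      # one-hot labels

y = softmax(batch_xs @ W + b)
cross_entropy = -np.sum(batch_ys * np.log(y + 1e-12))

# Gradients of the cross-entropy w.r.t. W and b, then the same update as the notebook.
g_W = batch_xs.T @ (y - batch_ys)
g_b = (y - batch_ys).sum(axis=0)
W -= learning_rate * g_W
b -= learning_rate * g_b
print(cross_entropy)
```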
4a231ebb10dfc8686be5e6ed757b6a09f785d3dc
| 17,530 |
ipynb
|
Jupyter Notebook
|
notebooks/Therese/TrimSamBySlidingWindowDepth.ipynb
|
VCMason/PyGenToolbox
|
3367a9b3df3bdb0223dd9671e9d355b81455fe2f
|
[
"MIT"
] | null | null | null |
notebooks/Therese/TrimSamBySlidingWindowDepth.ipynb
|
VCMason/PyGenToolbox
|
3367a9b3df3bdb0223dd9671e9d355b81455fe2f
|
[
"MIT"
] | null | null | null |
notebooks/Therese/TrimSamBySlidingWindowDepth.ipynb
|
VCMason/PyGenToolbox
|
3367a9b3df3bdb0223dd9671e9d355b81455fe2f
|
[
"MIT"
] | null | null | null | 34.575937 | 261 | 0.670222 |
[
[
[
"%load_ext autoreload\n%autoreload 2\nimport datetime\nprint(datetime.datetime.now())\n\nfrom pygentoolbox.TrimSamBySlidingWindowsMeanDepth import main\n#dir(pygentoolbox.Tools)\n%matplotlib inline\nimport matplotlib.pyplot as plt",
"2020-01-24 14:18:17.806906\n"
],
[
"filelist = ['D:\\\\LinuxShare\\\\Projects\\\\Theresa\\\\Hisat2\\\\FlagHA_Pt08\\\\Pt_51_MacAndIES\\\\52_Late.sort.sam']\n#filelist = ['D:\\\\LinuxShare\\\\Projects\\\\Theresa\\\\Hisat2\\\\FlagHA_Pt08\\\\Pt_51_MacAndIES\\\\52_Late.Uniq.23M.sort.sam', 'D:\\\\LinuxShare\\\\Projects\\\\Theresa\\\\Hisat2\\\\FlagHA_Pt08\\\\Pt_51_MacAndIES\\\\53_Later.Uniq.23M.sort.sam']\n# filelist_f = ['D:\\\\LinuxShare\\\\Projects\\\\Theresa\\\\Hisat2\\\\FlagHA_Pt08\\\\Pt_51_MacAndIES\\\\52_Late.Uniq.23M.F.sort.sam', 'D:\\\\LinuxShare\\\\Projects\\\\Theresa\\\\Hisat2\\\\FlagHA_Pt08\\\\Pt_51_MacAndIES\\\\53_Later.Uniq.23M.F.sort.sam']\n# filelist_r = ['D:\\\\LinuxShare\\\\Projects\\\\Theresa\\\\Hisat2\\\\FlagHA_Pt08\\\\Pt_51_MacAndIES\\\\52_Late.Uniq.23M.R.sort.sam', 'D:\\\\LinuxShare\\\\Projects\\\\Theresa\\\\Hisat2\\\\FlagHA_Pt08\\\\Pt_51_MacAndIES\\\\53_Later.Uniq.23M.R.sort.sam']\ninterval = 100\ncutoff = 1000\n\nmain(filelist, interval, cutoff)",
"Calculating depth of coverage for: D:\\LinuxShare\\Projects\\Theresa\\Hisat2\\FlagHA_Pt08\\Pt_51_MacAndIES\\52_Late.sort.sam\nStarting sliding window analysis\nscaffold51_9_with_IES\nscaffold51_221_with_IES\nscaffold51_186_with_IES\nscaffold51_491_with_IES\nscaffold51_207_with_IES\nscaffold51_281_with_IES\nscaffold51_166_with_IES\nscaffold51_89_with_IES\nscaffold51_533_with_IES\nscaffold51_111_with_IES\nscaffold51_246_with_IES\nscaffold51_147_with_IES\nscaffold51_373_with_IES\nscaffold51_569_with_IES\nscaffold51_250_with_IES\nscaffold51_258_with_IES\nscaffold51_1_with_IES\nscaffold51_315_with_IES\nscaffold51_388_with_IES\nscaffold51_279_with_IES\nscaffold51_605_with_IES\nscaffold51_151_with_IES\nscaffold51_185_with_IES\nscaffold51_188_with_IES\nscaffold51_25_with_IES\nscaffold51_332_with_IES\nscaffold51_103_with_IES\nscaffold51_382_with_IES\nscaffold51_177_with_IES\nscaffold51_489_with_IES\nscaffold51_160_with_IES\nscaffold51_432_with_IES\nscaffold51_433_with_IES\nscaffold51_175_with_IES\nscaffold51_176_with_IES\nscaffold51_194_with_IES\nscaffold51_77_with_IES\nscaffold51_353_with_IES\nscaffold51_145_with_IES\nscaffold51_229_with_IES\nscaffold51_74_with_IES\nscaffold51_232_with_IES\nscaffold51_355_with_IES\nscaffold51_504_with_IES\nscaffold51_205_with_IES\nscaffold51_37_with_IES\nscaffold51_131_with_IES\nscaffold51_417_with_IES\nscaffold51_466_with_IES\nscaffold51_625_with_IES\nscaffold51_459_with_IES\nscaffold51_680_with_IES\nscaffold51_15_with_IES\nscaffold51_61_with_IES\nscaffold51_237_with_IES\nscaffold51_167_with_IES\nscaffold51_139_with_IES\nscaffold51_169_with_IES\nscaffold51_17_with_IES\nscaffold51_60_with_IES\nscaffold51_423_with_IES\nscaffold51_174_with_IES\nscaffold51_579_with_IES\nscaffold51_578_with_IES\nscaffold51_529_with_IES\nscaffold51_326_with_IES\nscaffold51_53_with_IES\nscaffold51_140_with_IES\nscaffold51_73_with_IES\nscaffold51_516_with_IES\nscaffold51_128_with_IES\nscaffold51_102_with_IES\nscaffold51_475_with_IES\nscaffold51_350_with_IES\nscaffold51_189_with_IES\nscaffold51_541_with_IES\nscaffold51_42_with_IES\nscaffold51_568_with_IES\nscaffold51_291_with_IES\nscaffold51_621_with_IES\nscaffold51_212_with_IES\nscaffold51_180_with_IES\nscaffold51_252_with_IES\nscaffold51_170_with_IES\nscaffold51_313_with_IES\nscaffold51_234_with_IES\nscaffold51_68_with_IES\nscaffold51_628_with_IES\nscaffold51_159_with_IES\nscaffold51_643_with_IES\nscaffold51_276_with_IES\nscaffold51_502_with_IES\nscaffold51_123_with_IES\nscaffold51_240_with_IES\nscaffold51_154_with_IES\nscaffold51_120_with_IES\nscaffold51_521_with_IES\nscaffold51_287_with_IES\nscaffold51_155_with_IES\nscaffold51_277_with_IES\nscaffold51_47_with_IES\nscaffold51_255_with_IES\nscaffold51_202_with_IES\nscaffold51_640_with_IES\nscaffold51_290_with_IES\nscaffold51_82_with_IES\nscaffold51_171_with_IES\nscaffold51_97_with_IES\nscaffold51_673_with_IES\nscaffold51_664_with_IES\nscaffold51_96_with_IES\nscaffold51_362_with_IES\nscaffold51_65_with_IES\nscaffold51_93_with_IES\nscaffold51_301_with_IES\nscaffold51_392_with_IES\nscaffold51_115_with_IES\nscaffold51_41_with_IES\nscaffold51_63_with_IES\nscaffold51_627_with_IES\nscaffold51_342_with_IES\nscaffold51_2_with_IES\nscaffold51_130_with_IES\nscaffold51_570_with_IES\nscaffold51_298_with_IES\nscaffold51_460_with_IES\nscaffold51_106_with_IES\nscaffold51_165_with_IES\nscaffold51_407_with_IES\nscaffold51_593_with_IES\nscaffold51_34_with_IES\nscaffold51_199_with_IES\nscaffold51_404_with_IES\nscaffold51_239_with_IES\nscaffold51_346_with_IES\nscaffold51_201_with_IES\nscaffold51_519_with_IE
S\nscaffold51_337_with_IES\nscaffold51_599_with_IES\nscaffold51_197_with_IES\nscaffold51_203_with_IES\nscaffold51_49_with_IES\nscaffold51_143_with_IES\nscaffold51_335_with_IES\nscaffold51_299_with_IES\nscaffold51_85_with_IES\nscaffold51_66_with_IES\nscaffold51_45_with_IES\nscaffold51_31_with_IES\nscaffold51_24_with_IES\nscaffold51_456_with_IES\nscaffold51_296_with_IES\nscaffold51_132_with_IES\nscaffold51_162_with_IES\nscaffold51_125_with_IES\nscaffold51_118_with_IES\nscaffold51_32_with_IES\nscaffold51_697_with_IES\nscaffold51_30_with_IES\nscaffold51_153_with_IES\nscaffold51_500_with_IES\nscaffold51_69_with_IES\nscaffold51_284_with_IES\nscaffold51_463_with_IES\nscaffold51_19_with_IES\nscaffold51_44_with_IES\nscaffold51_390_with_IES\nscaffold51_84_with_IES\nscaffold51_78_with_IES\nscaffold51_26_with_IES\nscaffold51_70_with_IES\nscaffold51_28_with_IES\nscaffold51_11_with_IES\nscaffold51_99_with_IES\nscaffold51_134_with_IES\nscaffold51_329_with_IES\nscaffold51_127_with_IES\nscaffold51_105_with_IES\nscaffold51_27_with_IES\nscaffold51_23_with_IES\nscaffold51_14_with_IES\nscaffold51_114_with_IES\nscaffold51_149_with_IES\nscaffold51_230_with_IES\nscaffold51_261_with_IES\nscaffold51_546_with_IES\nscaffold51_438_with_IES\nscaffold51_58_with_IES\nscaffold51_292_with_IES\nscaffold51_267_with_IES\nscaffold51_181_with_IES\nscaffold51_50_with_IES\nscaffold51_374_with_IES\nscaffold51_430_with_IES\nscaffold51_216_with_IES\nscaffold51_582_with_IES\nscaffold51_228_with_IES\nscaffold51_323_with_IES\nscaffold51_161_with_IES\nscaffold51_585_with_IES\nscaffold51_349_with_IES\nscaffold51_211_with_IES\nscaffold51_264_with_IES\nscaffold51_365_with_IES\nscaffold51_129_with_IES\nscaffold51_387_with_IES\nscaffold51_79_with_IES\nscaffold51_135_with_IES\nscaffold51_508_with_IES\nscaffold51_33_with_IES\nscaffold51_393_with_IES\nscaffold51_86_with_IES\nscaffold51_124_with_IES\nscaffold51_94_with_IES\nscaffold51_110_with_IES\nscaffold51_295_with_IES\nscaffold51_136_with_IES\nscaffold51_104_with_IES\nscaffold51_18_with_IES\nscaffold51_259_with_IES\nscaffold51_62_with_IES\nscaffold51_101_with_IES\nscaffold51_54_with_IES\nscaffold51_35_with_IES\nscaffold51_4_with_IES\nscaffold51_48_with_IES\nscaffold51_164_with_IES\nscaffold51_564_with_IES\nscaffold51_303_with_IES\nscaffold51_39_with_IES\nscaffold51_316_with_IES\nscaffold51_386_with_IES\nscaffold51_20_with_IES\nscaffold51_59_with_IES\nscaffold51_126_with_IES\nscaffold51_389_with_IES\nscaffold51_396_with_IES\nscaffold51_333_with_IES\nscaffold51_146_with_IES\nscaffold51_652_with_IES\nscaffold51_588_with_IES\nscaffold51_238_with_IES\nscaffold51_422_with_IES\nscaffold51_157_with_IES\nscaffold51_631_with_IES\nscaffold51_320_with_IES\nscaffold51_83_with_IES\nscaffold51_339_with_IES\nscaffold51_447_with_IES\nscaffold51_198_with_IES\nscaffold51_444_with_IES\nscaffold51_686_with_IES\nscaffold51_410_with_IES\nscaffold51_690_with_IES\nscaffold51_590_with_IES\nscaffold51_306_with_IES\nscaffold51_325_with_IES\nscaffold51_403_with_IES\nscaffold51_274_with_IES\nscaffold51_331_with_IES\nscaffold51_218_with_IES\nscaffold51_178_with_IES\nscaffold51_271_with_IES\nscaffold51_399_with_IES\nscaffold51_117_with_IES\nscaffold51_107_with_IES\nscaffold51_51_with_IES\nscaffold51_288_with_IES\nscaffold51_40_with_IES\nscaffold51_576_with_IES\nscaffold51_90_with_IES\nscaffold51_193_with_IES\nscaffold51_618_with_IES\nscaffold51_183_with_IES\nscaffold51_580_with_IES\nscaffold51_190_with_IES\nscaffold51_691_with_IES\nscaffold51_539_with_IES\nscaffold51_91_with_IES\nscaffold51_262_with_IES\nscaffold51_361_with
_IES\nscaffold51_464_with_IES\nscaffold51_397_with_IES\nscaffold51_655_with_IES\nscaffold51_196_with_IES\nscaffold51_573_with_IES\nscaffold51_150_with_IES\nscaffold51_116_with_IES\nscaffold51_247_with_IES\nscaffold51_451_with_IES\nscaffold51_184_with_IES\nscaffold51_36_with_IES\nscaffold51_450_with_IES\nscaffold51_138_with_IES\nscaffold51_142_with_IES\nscaffold51_52_with_IES\nscaffold51_278_with_IES\nscaffold51_269_with_IES\nscaffold51_406_with_IES\nscaffold51_253_with_IES\nscaffold51_182_with_IES\nscaffold51_527_with_IES\nscaffold51_677_with_IES\nscaffold51_257_with_IES\nscaffold51_437_with_IES\nscaffold51_314_with_IES\nscaffold51_660_with_IES\nscaffold51_227_with_IES\nscaffold51_191_with_IES\nscaffold51_442_with_IES\nscaffold51_225_with_IES\nscaffold51_163_with_IES\nscaffold51_256_with_IES\nscaffold51_208_with_IES\nscaffold51_215_with_IES\nscaffold51_209_with_IES\nscaffold51_16_with_IES\nscaffold51_113_with_IES\nscaffold51_121_with_IES\nscaffold51_64_with_IES\nscaffold51_363_with_IES\nscaffold51_204_with_IES\nscaffold51_55_with_IES\nscaffold51_286_with_IES\nscaffold51_87_with_IES\nscaffold51_12_with_IES\nscaffold51_7_with_IES\nscaffold51_266_with_IES\nscaffold51_92_with_IES\nscaffold51_141_with_IES\nscaffold51_76_with_IES\nscaffold51_108_with_IES\nscaffold51_67_with_IES\nscaffold51_46_with_IES\nscaffold51_137_with_IES\nscaffold51_265_with_IES\nscaffold51_503_with_IES\nscaffold51_249_with_IES\nscaffold51_624_with_IES\nscaffold51_133_with_IES\nscaffold51_609_with_IES\nscaffold51_595_with_IES\nscaffold51_656_with_IES\nscaffold51_72_with_IES\nscaffold51_148_with_IES\nscaffold51_29_with_IES\nscaffold51_411_with_IES\nscaffold51_10_with_IES\nscaffold51_498_with_IES\nscaffold51_248_with_IES\nscaffold51_254_with_IES\nscaffold51_310_with_IES\nscaffold51_336_with_IES\nscaffold51_452_with_IES\nscaffold51_6_with_IES\nscaffold51_112_with_IES\nscaffold51_98_with_IES\nscaffold51_13_with_IES\nscaffold51_88_with_IES\nscaffold51_268_with_IES\nscaffold51_214_with_IES\nscaffold51_226_with_IES\nscaffold51_613_with_IES\nscaffold51_391_with_IES\nscaffold51_435_with_IES\nscaffold51_307_with_IES\nscaffold51_119_with_IES\nscaffold51_22_with_IES\nscaffold51_499_with_IES\nscaffold51_530_with_IES\nscaffold51_187_with_IES\nscaffold51_172_with_IES\nscaffold51_263_with_IES\nscaffold51_122_with_IES\nscaffold51_285_with_IES\nscaffold51_318_with_IES\nscaffold51_344_with_IES\nscaffold51_179_with_IES\nscaffold51_587_with_IES\nscaffold51_244_with_IES\nscaffold51_520_with_IES\nscaffold51_56_with_IES\nscaffold51_21_with_IES\nscaffold51_80_with_IES\nscaffold51_71_with_IES\nscaffold51_152_with_IES\nscaffold51_501_with_IES\nscaffold51_81_with_IES\nscaffold51_511_with_IES\nscaffold51_620_with_IES\nscaffold51_461_with_IES\nscaffold51_535_with_IES\nscaffold51_309_with_IES\nscaffold51_356_with_IES\nscaffold51_494_with_IES\nscaffold51_38_with_IES\nscaffold51_3_with_IES\nscaffold51_158_with_IES\nscaffold51_57_with_IES\nscaffold51_43_with_IES\nscaffold51_436_with_IES\nscaffold51_109_with_IES\nscaffold51_639_with_IES\nscaffold51_679_with_IES\nscaffold51_100_with_IES\nscaffold51_413_with_IES\nscaffold51_622_with_IES\nscaffold51_317_with_IES\nscaffold51_95_with_IES\nscaffold51_75_with_IES\nscaffold51_156_with_IES\nscaffold51_8_with_IES\nscaffold51_144_with_IES\nscaffold51_168_with_IES\nscaffold51_5_with_IES\nscaffold51_173_with_IES\nscaffold51_213_with_IES\nscaffold51_523_with_IES\nscaffold51_556_with_IES\n*\nFinished with sliding windows\nWriting windows to: 
D:\\LinuxShare\\Projects\\Theresa\\Hisat2\\FlagHA_Pt08\\Pt_51_MacAndIES\\52_Late.sort.windows.w100.d1000.bed\nWriting windows to: D:\\LinuxShare\\Projects\\Theresa\\Hisat2\\FlagHA_Pt08\\Pt_51_MacAndIES\\52_Late.sort.collapsedwindows.w100.d1000.bed\nWriting trimmed sam to: D:\\LinuxShare\\Projects\\Theresa\\Hisat2\\FlagHA_Pt08\\Pt_51_MacAndIES\\52_Late.sort.windows.w100.d1000.sam\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code"
]
] |
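The record above runs `pygentoolbox.TrimSamBySlidingWindowsMeanDepth.main` with `interval = 100` and `cutoff = 1000`; the package's internals are not shown in the notebook. Purely to illustrate the idea named in the title (mean read depth over fixed-size windows, keeping windows above a depth cutoff), here is a generic NumPy sketch on made-up per-base coverage. It is not the pygentoolbox implementation, and it uses non-overlapping windows of width `interval` for simplicity.

```python
# Generic windowed mean-depth sketch (illustrative only; not pygentoolbox code).
import numpy as np

rng = np.random.default_rng(0)
depth = rng.poisson(1000, size=5_000)          # made-up per-base coverage along one scaffold
interval, cutoff = 100, 1000                   # same parameter names as the notebook

n_windows = depth.size // interval
window_means = depth[: n_windows * interval].reshape(n_windows, interval).mean(axis=1)

kept = np.flatnonzero(window_means >= cutoff)  # windows whose mean depth passes the cutoff
print(f"{kept.size} of {n_windows} windows have mean depth >= {cutoff}")
```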
4a231f3d67ea211fdc298125c6de1e23df7a7254
| 19,033 |
ipynb
|
Jupyter Notebook
|
scripts/temporal_features.ipynb
|
cmougan/Novartis2020
|
390f34efa6bbc1e168f4e58d2d335c7cfa7d865e
|
[
"MIT"
] | null | null | null |
scripts/temporal_features.ipynb
|
cmougan/Novartis2020
|
390f34efa6bbc1e168f4e58d2d335c7cfa7d865e
|
[
"MIT"
] | null | null | null |
scripts/temporal_features.ipynb
|
cmougan/Novartis2020
|
390f34efa6bbc1e168f4e58d2d335c7cfa7d865e
|
[
"MIT"
] | null | null | null | 28.280832 | 138 | 0.566542 |
[
[
[
"### Imports",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport os\nimport numpy as np\nfrom category_encoders import TargetEncoder\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.linear_model import Lasso\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import StandardScaler",
"_____no_output_____"
]
],
[
[
"### Load Data",
"_____no_output_____"
]
],
[
[
"root_dir = os.path.dirname(os.getcwd())\ndata_dir = os.path.join(root_dir, 'data')\nfull_raw_initial_dataset_path = os.path.join(data_dir, 'gx_merged_lags_months.csv')\nvolume_path = os.path.join(data_dir, 'gx_volume.csv')\ntrain_path = os.path.join(data_dir, 'train_split.csv')\nfeatures_path = os.path.join(data_dir, 'features')",
"_____no_output_____"
],
[
"volume = pd.read_csv(volume_path, index_col=0)\ntrain_ids = pd.read_csv(train_path)\nfull_raw_initial_dataset = pd.read_csv(full_raw_initial_dataset_path)",
"_____no_output_____"
],
[
"full_initial_dataset = full_raw_initial_dataset.loc[\n full_raw_initial_dataset.test == 0,:].drop(columns = 'test').drop_duplicates()",
"_____no_output_____"
],
[
"full_initial_dataset",
"_____no_output_____"
]
],
[
[
"### Functions",
"_____no_output_____"
]
],
[
[
"def find_closest_volume(country, brand, month_num, length_serie, func):\n ind = (volume.country == country) & (volume.brand == brand) & (volume.month_num <month_num)\n volume_filter = volume.loc[ind, :]\n volume_sorted = volume_filter.sort_values(by=['month_num'], ascending=False)\n volume_sorted.reset_index(inplace=True, drop=True)\n total_obs = len(volume_sorted)\n total_to_select = length_serie if length_serie<=total_obs else total_obs \n volumes_selected = volume_sorted.volume[:total_to_select].values\n return func(volumes_selected)",
"_____no_output_____"
]
],
[
[
"### Create initial datasets",
"_____no_output_____"
]
],
[
[
"train = train_ids.merge(\n full_initial_dataset,\n 'inner',\n on=['country', 'brand']\n)",
"_____no_output_____"
],
[
"#sanitiy checks\nassert len(train.loc[:,['country', 'brand', 'month_num']].drop_duplicates()) == \\\nlen(train), 'duplicated'",
"_____no_output_____"
]
],
[
[
"### Features",
"_____no_output_____"
],
[
"#### Add feature\n$$vol_{-1}$$\n* Name: volume_1",
"_____no_output_____"
]
],
[
[
"volume_at_1 = volume.loc[volume.month_num == -1, ['country', 'brand', 'volume']].\\\n drop_duplicates().\\\n rename(columns={'volume':'volume_1'})",
"_____no_output_____"
],
[
"full_with_volume_1 = full_initial_dataset.merge(\n volume_at_1,\n 'left',\n on=['country', 'brand']\n)",
"_____no_output_____"
],
[
"assert len(full_initial_dataset) == len(full_with_volume_1), 'There are duplicated'",
"_____no_output_____"
]
],
[
[
"#### Add feature\n$$log\\Big(\\frac{vol_{t} + 1}{vol_{-1}}\\Big)$$\n* Name: log_relative_volume",
"_____no_output_____"
]
],
[
[
"train_with_relative_volume = train.merge(\n volume_at_1,\n 'left',\n on=['country', 'brand']\n)\ntrain_with_relative_volume['log_relative_volume'] = np.log(\n (train_with_relative_volume.volume+1)/(train_with_relative_volume.volume_1)\n)",
"_____no_output_____"
],
[
"train_with_relative_volume.sort_values(by=['country', 'brand', 'month_num'], inplace=True)",
"_____no_output_____"
],
[
"train_with_relative_volume['lag_log_relative_volume'] = train_with_relative_volume.groupby(\n ['country', 'brand'])['log_relative_volume'].shift(1)",
"_____no_output_____"
],
[
"train_with_relative_volume['lag_log_relative_volume'] = np.where(\n train_with_relative_volume.month_num == 0,\n np.log((1+train_with_relative_volume.volume_1)/train_with_relative_volume.volume_1),\n train_with_relative_volume.lag_log_relative_volume\n)",
"_____no_output_____"
],
[
"features = train_with_relative_volume.drop(columns=['volume', 'log_relative_volume'])\ntarget = train_with_relative_volume['log_relative_volume']",
"_____no_output_____"
],
[
"categorical_cols = ['country', 'brand', 'therapeutic_area', 'presentation', 'month_name']\nte = TargetEncoder(cols=categorical_cols)\npipe = Pipeline([\n (\"te\", te),\n (\"imp\", SimpleImputer(strategy=\"mean\")),\n (\"sc\", StandardScaler()),\n (\"model\", Lasso(alpha=0.001, max_iter=2000))\n])",
"_____no_output_____"
],
[
"pipe.fit(features, target)",
"_____no_output_____"
],
[
"pipe[-1].coef_",
"_____no_output_____"
],
[
"def get_log_relative_volume(model, features):\n features_copy = features.copy()\n features_copy.sort_values(by=['country', 'brand', 'month_num'], inplace=True)\n features_copy['log_relative_volume'] = float('-inf')\n i=0\n for index, row in features_copy.iterrows():\n if(i%5000 == 0):\n print('Iteration:', i)\n \n country = row.country\n brand = row.brand\n month = row.month_num\n \n if month==0:\n row.at['lag_log_relative_volume'] = 0\n else:\n ind = (features_copy.brand == brand) &\\\n (features_copy.country == country) &\\\n (features_copy.month_num == month-1) \n lag_log_relative_volume = features_copy.loc[ind, 'log_relative_volume']\n row.at['lag_log_relative_volume'] = lag_log_relative_volume\n \n df = row.to_frame().T.drop(columns=['log_relative_volume'])\n pred_val = model.predict(df)\n ind = (features_copy.brand == brand) & (features_copy.country == country) & (features_copy.month_num == month) \n features_copy.loc[ind, 'log_relative_volume'] = pred_val[0]\n i+=1\n return features_copy",
"_____no_output_____"
],
[
"preds = get_log_relative_volume(\n pipe, \n full_with_volume_1.loc[:, features.columns[:-1]]\n)",
"_____no_output_____"
],
[
"assert len(preds) == len(full_with_volume_1), 'Duplicated'",
"_____no_output_____"
],
[
"assert sum(preds['log_relative_volume'].isna()) == 0, 'Missing'\nassert sum(preds['log_relative_volume'].isnull()) == 0, 'Missing'",
"_____no_output_____"
],
[
"features_df = preds.loc[\n :, \n ['country', 'brand', 'month_num', 'volume_1', 'log_relative_volume']].drop_duplicates()",
"_____no_output_____"
],
[
"assert len(features_df) == len(features_df.loc[:, ['country', 'brand', 'month_num']]), 'Duplicates'",
"_____no_output_____"
],
[
"features_df.to_csv(os.path.join(features_path, 'feat_01.csv'), index=False)",
"_____no_output_____"
]
],
[
[
"## Add feature\n$$log\\Big(\\frac{vol_{t} + 1}{vol_{t-1}+1}\\Big)$$\n\n* Name: relative_volume_previous",
"_____no_output_____"
]
],
[
[
"train_with_predicted_log_relative_volume = train.merge(\n preds.loc[:, ['country', 'brand', 'month_num', 'volume_1', 'log_relative_volume']],\n 'inner',\n on=['country', 'brand', 'month_num']\n)",
"_____no_output_____"
],
[
"assert len(train_with_predicted_log_relative_volume) == len(train), 'Duplicated values'",
"_____no_output_____"
],
[
"volume_previous_month = train_with_predicted_log_relative_volume.copy()\nvolume_previous_month['previous_month'] = volume_previous_month.month_num - 1\nvolume_previous_month = volume_previous_month.merge(\n volume.loc[: , ['country', 'brand', 'volume', 'month_num']].rename(\n columns={'volume':'volume_lag_1', 'month_num':'previous_month'}\n ),\n 'left',\n on=['country', 'brand', 'previous_month']\n).merge(\n volume.loc[volume.month_num == -2, ['country', 'brand', 'volume']].rename(\n columns={'volume':'volume_2'}\n ),\n 'left',\n on=['country', 'brand']\n)",
"_____no_output_____"
],
[
"assert len(volume_previous_month) == len(train_with_predicted_log_relative_volume), 'Duplicated values'\nassert sum(volume_previous_month.volume_lag_1.isna()) == 0, 'NA values'\nassert sum(volume_previous_month.volume_lag_1.isnull()) == 0, 'NA values'",
"_____no_output_____"
],
[
"assert sum(volume_previous_month.volume_2.isna()) == 0, 'NA values'\nassert sum(volume_previous_month.volume_2.isnull()) == 0, 'NA values'",
"_____no_output_____"
],
[
"volume_previous_month['log_relative_volume_previous'] = np.log(\n (volume_previous_month.volume + 1)/(volume_previous_month.volume_lag_1 + 1)\n)\nvolume_previous_month['log_relative_volume_1'] = np.log(\n (volume_previous_month.volume_1 + 1)/(volume_previous_month.volume_2 + 1)\n)",
"_____no_output_____"
],
[
"assert sum(volume_previous_month.log_relative_volume_previous.isna()) == 0, 'log_relative_volume_previous contains NA values'\nassert sum(volume_previous_month.log_relative_volume_previous.isnull()) == 0, 'log_relative_volume_previous contains null values'\nassert sum(volume_previous_month.log_relative_volume_previous == np.inf) == 0, 'log_relative_volume_previous contains inf values'\nassert sum(volume_previous_month.log_relative_volume_previous == -np.inf) == 0, 'log_relative_volume_previous contains -inf values'",
"_____no_output_____"
],
[
"assert sum(volume_previous_month.log_relative_volume_1.isna()) == 0, 'relative_volume_1 contains NA values'\nassert sum(volume_previous_month.log_relative_volume_1.isnull()) == 0, 'relative_volume_1 contains null values'\nassert sum(volume_previous_month.log_relative_volume_1 == np.inf) == 0, 'relative_volume_1 contains inf values'\nassert sum(volume_previous_month.log_relative_volume_1 == -np.inf) == 0, 'relative_volume_1 contains -inf values'",
"_____no_output_____"
],
[
"volume_previous_month['lag_log_relative_volume_previous'] = volume_previous_month.groupby(\n ['country', 'brand'])['log_relative_volume_previous'].shift(1)",
"_____no_output_____"
],
[
"volume_previous_month['lag_log_relative_volume_previous'] = np.where(\n volume_previous_month.month_num == 0,\n volume_previous_month.log_relative_volume_1,\n volume_previous_month.lag_log_relative_volume_previous\n)",
"_____no_output_____"
],
[
"volume_previous_month",
"_____no_output_____"
],
[
"cols = list(preds.columns) + ['lag_log_relative_volume_previous'] ",
"_____no_output_____"
],
[
"features = volume_previous_month.loc[:, cols]\ntarget = volume_previous_month.log_relative_volume_previous",
"_____no_output_____"
],
[
"categorical_cols = ['country', 'brand', 'therapeutic_area', 'presentation', 'month_name']\nte = TargetEncoder(cols=categorical_cols)\npipe2 = Pipeline([\n (\"te\", te),\n (\"imp\", SimpleImputer(strategy=\"mean\")),\n (\"sc\", StandardScaler()),\n (\"model\", Lasso(alpha=0.001, max_iter=2000))\n])",
"_____no_output_____"
],
[
"pipe2.fit(features, target)",
"_____no_output_____"
],
[
"def get_log_relative_volume_previous(model, features):\n features_copy = features.copy()\n features_copy.sort_values(by=['country', 'brand', 'month_num'], inplace=True)\n features_copy['log_relative_volume_previous'] = float('-inf')\n i=0\n \n for index, row in features_copy.iterrows():\n if(i%5000 == 0):\n print('Iteration:', i)\n country = row.country\n brand = row.brand\n month = row.month_num\n \n if month == 0:\n volume_1 = find_closest_volume(country, brand, 0, 1, np.mean)\n volume_2 = find_closest_volume(country, brand, -1, 1, np.mean)\n lag_log_relative_volume_previous = np.log((volume_1 + 1)/(volume_2+1))\n else:\n ind = (features_copy.country == country) &\\\n (features_copy.brand == brand) &\\\n (features_copy.month_num == month -1)\n lag_log_relative_volume_previous = features_copy.loc[ind, 'log_relative_volume_previous']\n\n row.at['lag_log_relative_volume_previous'] = lag_log_relative_volume_previous\n df = row.to_frame().T.drop(columns=['log_relative_volume_previous'])\n pred_val = model.predict(df)\n ind = (features_copy.brand == brand) & (features_copy.country == country) & (features_copy.month_num == month) \n features_copy.loc[ind, 'log_relative_volume_previous'] = pred_val[0]\n i+=1\n return features_copy",
"_____no_output_____"
],
[
"preds2 = get_log_relative_volume_previous(pipe2, preds)",
"_____no_output_____"
],
[
"preds2",
"_____no_output_____"
],
[
"features_df = preds2.loc[\n :, \n ['country', 'brand', 'month_num', 'volume_1', 'log_relative_volume', 'log_relative_volume_previous']].drop_duplicates()",
"_____no_output_____"
],
[
"features_df.to_csv(os.path.join(features_path, 'feat_02.csv'), index=False)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
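The temporal-features record above defines two log-ratio targets, $\log\big(\frac{vol_t + 1}{vol_{-1}}\big)$ and $\log\big(\frac{vol_t + 1}{vol_{t-1}+1}\big)$. The toy sketch below computes both on a five-row stand-in for `gx_volume.csv` so the transforms are easy to check by hand; it is a simplified illustration of the formulas only, not the notebook's full lag/feature pipeline, and the column names are copied from the notebook.

```python
# Toy illustration of the two transforms defined in the notebook above:
#   log_relative_volume          = log((vol_t + 1) / vol_-1)
#   log_relative_volume_previous = log((vol_t + 1) / (vol_t-1 + 1))
import numpy as np
import pandas as pd

volume = pd.DataFrame({
    "country":   ["A"] * 5,
    "brand":     ["b1"] * 5,
    "month_num": [-2, -1, 0, 1, 2],
    "volume":    [90.0, 100.0, 80.0, 60.0, 50.0],
})

vol_1 = volume.loc[volume.month_num == -1, "volume"].iloc[0]   # volume at month -1
post = volume[volume.month_num >= 0].copy()
post["log_relative_volume"] = np.log((post.volume + 1) / vol_1)
post["log_relative_volume_previous"] = np.log(
    (post.volume + 1) / (post.volume.shift(1).fillna(vol_1) + 1)   # month 0 falls back to vol_-1
)
print(post[["month_num", "log_relative_volume", "log_relative_volume_previous"]])
```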
4a23203a2065cc82e06061224f6b827fa6bb30d9
| 3,262 |
ipynb
|
Jupyter Notebook
|
titer_model/titered_output/model-performance/model-comparison.ipynb
|
blab/dengue
|
5eacc47fbd77c59e7342d5be4aa81f7d3b4ff0bf
|
[
"CC-BY-4.0",
"MIT"
] | 4 |
2019-03-31T22:03:48.000Z
|
2020-06-16T21:04:24.000Z
|
titer_model/titered_output/model-performance/model-comparison.ipynb
|
emmahodcroft/dengue-antigenic-dynamics
|
5eacc47fbd77c59e7342d5be4aa81f7d3b4ff0bf
|
[
"CC-BY-4.0",
"MIT"
] | 4 |
2018-10-12T02:13:10.000Z
|
2019-07-24T02:44:53.000Z
|
titer_model/titered_output/model-performance/model-comparison.ipynb
|
emmahodcroft/dengue-antigenic-dynamics
|
5eacc47fbd77c59e7342d5be4aa81f7d3b4ff0bf
|
[
"CC-BY-4.0",
"MIT"
] | 5 |
2018-09-10T23:14:09.000Z
|
2020-12-27T20:57:34.000Z
| 26.737705 | 111 | 0.494788 |
[
[
[
"import pandas as pd\nfrom glob import glob\nimport seaborn as sns\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import stats\n%matplotlib inline",
"_____no_output_____"
],
[
"files = glob('*.csv')\n\ndef get_ci(a, confidence=0.95):\n n = len(a)\n m, se = np.mean(a), stats.sem(a)\n h = se * stats.t.ppf((1 + confidence) / 2., n-1)\n return m-h, m+h\n \n\nmodel_performance = []\nfor f in files:\n \n if 'sub' in f:\n model = 'Substitution'\n else:\n model = 'Tree'\n if 'fulltree' in f:\n resolution = 'Full tree'\n else:\n resolution = 'Interserotype'\n if 'nonorm' in f:\n normalization = 'No'\n else:\n normalization = 'Yes'\n \n performance = pd.read_csv(f)\n \n rmse_mean = performance['rms_error'].mean()\n rmse_ci = get_ci(performance['rms_error'])\n rmse_summary = '%.2f (%.2f, %.2f)'%(rmse_mean, rmse_ci[0], rmse_ci[1])\n \n r_sq_mean = performance['r_squared'].mean()\n r_sq_ci = get_ci(performance['r_squared'])\n r_sq_summary = '%.2f (%.2f, %.2f)'%(r_sq_mean, r_sq_ci[0], r_sq_ci[1])\n \n model_performance.append({\n 'Model': model,\n 'Resolution': resolution,\n r'$v_a$ and $p_b$': normalization,\n 'RMSE': rmse_summary,\n r'Pearson R^2': r_sq_summary\n })",
"_____no_output_____"
],
[
"model_performance = pd.DataFrame(model_performance)\nmodel_performance = model_performance.reindex(columns = ['Model', 'Resolution', r'$v_a$ and $p_b$', \\\n 'RMSE', r'Pearson R^2'])\nmodel_performance = model_performance.sort_values(['Model', 'Resolution', r'$v_a$ and $p_b$'])\nmodel_performance = model_performance.round(2)\ntable = model_performance.to_latex()\n\n",
"_____no_output_____"
],
[
"open('./model_performance_summary.txt', 'w').write(table)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code"
]
] |
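The model-comparison record above summarises per-file RMSE and R-squared with a Student-t confidence interval via the `get_ci` helper. The stand-alone sketch below reproduces that calculation on made-up per-fold RMSE values (not the notebook's CSV results), so the "mean (low, high)" strings in the summary table are easy to verify.

```python
# The get_ci helper above builds a Student-t confidence interval: mean +/- t * SEM.
# Quick stand-alone check on made-up RMSE values (not the notebook's CSV results).
import numpy as np
from scipy import stats

a = np.array([0.71, 0.68, 0.74, 0.70, 0.69])        # e.g. per-fold RMSE values
confidence = 0.95
n = len(a)
m, se = np.mean(a), stats.sem(a)                     # sample mean and standard error
h = se * stats.t.ppf((1 + confidence) / 2., n - 1)   # half-width from the t quantile
print(f"{m:.3f} ({m - h:.3f}, {m + h:.3f})")         # same "mean (low, high)" format as the table
```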
4a23290aef0ff5324b2cfc7f41382087e4639b14
| 46,956 |
ipynb
|
Jupyter Notebook
|
Class_Contents/Classes/00_intro_00_content.ipynb
|
Ha29/intro-to-python
|
49c8be9689ec7152ffb2121f335fe1cc3299def0
|
[
"MIT"
] | null | null | null |
Class_Contents/Classes/00_intro_00_content.ipynb
|
Ha29/intro-to-python
|
49c8be9689ec7152ffb2121f335fe1cc3299def0
|
[
"MIT"
] | null | null | null |
Class_Contents/Classes/00_intro_00_content.ipynb
|
Ha29/intro-to-python
|
49c8be9689ec7152ffb2121f335fe1cc3299def0
|
[
"MIT"
] | null | null | null | 58.768461 | 1,338 | 0.672438 |
[
[
[
"**Important**: Click on \"*Kernel*\" > \"*Restart Kernel and Clear All Outputs*\" *before* reading this chapter in [JupyterLab <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_jp.png\">](https://jupyterlab.readthedocs.io/en/stable/)",
"_____no_output_____"
],
[
"# An Introduction to Python and Programming\n\nThis course is a *thorough* introduction to programming in [Python <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_py.png\">](https://www.python.org/).\n\nIt teaches the concepts behind and the syntax of the core Python language as defined by the [Python Software Foundation <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_py.png\">](https://www.python.org/psf/) in the official [language reference <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_py.png\">](https://docs.python.org/3/reference/index.html). Furthermore, it introduces commonly used functionalities from the [standard library <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_py.png\">](https://docs.python.org/3/library/index.html) and popular third-party libraries like [numpy <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_py.png\">](https://www.numpy.org/), [pandas <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_py.png\">](https://pandas.pydata.org/), [matplotlib <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_py.png\">](https://matplotlib.org/), and others.\n\n<img src=\"static/logo.png\" width=\"15%\" align=\"left\">\n\n## Prerequisites\n\nThis course is suitable for *total beginners*, and there are *no* formal prerequisites. The student only needs to have:\n\n- a *solid* understanding of the **English language**,\n- knowledge of **basic mathematics** from high school,\n- the ability to **think conceptually** and **reason logically**, and\n- the willingness to **invest around time and effort on this course**.",
"_____no_output_____"
],
[
"## Objective\n\nThe **main goal** of this introduction is to **prepare** the student **for further studies** in the \"field\" of **data science**.\n\nThese include but are not limited to topics such as:\n- linear algebra\n- statistics & econometrics\n- data cleaning & wrangling\n- data visualization\n- data engineering (incl. SQL databases)\n- data mining (incl. web scraping)\n- feature generation, machine learning, & deep learning\n- optimization & (meta-)heuristics\n- algorithms & data structures\n- quantitative finance (e.g., option valuation)\n- quantitative marketing (e.g., customer segmentation)\n- quantitative supply chain management (e.g., forecasting)\n- management science & decision models\n- backend/API/web development (to serve data products to clients)",
"_____no_output_____"
],
[
"### Why data science?\n\nThe term **[data science <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_wiki.png\">](https://en.wikipedia.org/wiki/Data_science)** is rather vague and does *not* refer to an academic discipline. Instead, the term was popularized by the tech industry, who also coined non-meaningful job titles such as \"[rockstar](https://www.quora.com/Why-are-engineers-called-rockstars-and-ninjas)\" or \"[ninja developers](https://www.quora.com/Why-are-engineers-called-rockstars-and-ninjas).\" Most *serious* definitions describe the field as being **multi-disciplinary** *integrating* scientific methods, algorithms, and systems thinking to extract knowledge from structured and unstructured data, *and* also emphasize the importance of **[domain knowledge <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_wiki.png\">](https://en.wikipedia.org/wiki/Domain_knowledge)**.\n\nRecently, this integration aspect feeds back into the academic world. The [MIT](https://www.mit.edu/), for example, created the new [Stephen A. Schwarzman College of Computing](http://computing.mit.edu) for [artificial intelligence <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_wiki.png\">](https://en.wikipedia.org/wiki/Artificial_intelligence) with a 1 billion dollar initial investment where students undergo a \"bilingual\" curriculum with half the classes in quantitative and method-centric fields - like the ones mentioned above - and the other half in domains such as biology, business, chemistry, politics, (art) history, or linguistics (cf., the [official Q&As](http://computing.mit.edu/faq/) or this [NYT article](https://www.nytimes.com/2018/10/15/technology/mit-college-artificial-intelligence.html)). Their strategists see a future where programming skills are just as naturally embedded into students' curricula as are nowadays subjects like calculus, statistics, or academic writing. Then, programming literacy is not just another \"nice to have\" skill but a prerequisite, or an enabler, to understanding more advanced topics in the actual domains studied. Top-notch researchers who use programming in their day-to-day lives could then teach students more efficiently in their \"language.\"",
"_____no_output_____"
],
[
"## Installation\n\nTo \"read\" this book in the most meaningful way, a working installation of **Python 3.7** or higher is expected.\n\nA popular and beginner-friendly way is to install the [Anaconda Distribution](https://www.anaconda.com/distribution/) that not only ships Python and the standard library but comes pre-packaged with a lot of third-party libraries from the so-called \"scientific stack.\" Just go to the [download](https://www.anaconda.com/download/) page and install the latest version (i.e., *2019-10* with Python 3.7 at the time of this writing) for your operating system.\n\nThen, among others, you will find an entry \"Anaconda Navigator\" in your start menu like below. Click on it.\n\n<img src=\"static/anaconda_start_menu.png\" width=\"30%\">\n\nA window opens showing you several applications that come with the Anaconda Distribution. Now, click on \"JupyterLab.\"\n\n<img src=\"static/anaconda_navigator.png\" width=\"50%\">\n\nA new tab in your web browser opens with the website being \"localhost\" and some number (e.g., 8888). This is the [JupyterLab <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_jp.png\">](https://jupyterlab.readthedocs.io/en/stable/) application that is used to display and run [Jupyter notebooks <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_jp.png\">](https://jupyter-notebook.readthedocs.io/en/stable/notebook.html) as described below. On the left, you see the files and folders in your local user folder. This file browser works like any other. In the center, you have several options to launch (i.e., \"create\") new files.\n\n<img src=\"static/jupyter_lab.png\" width=\"50%\">",
"_____no_output_____"
],
[
"## Jupyter Notebooks\n\nThe document you are viewing is a so-called [Jupyter notebook <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_jp.png\">](https://jupyter-notebook.readthedocs.io/en/stable/notebook.html), a file format introduced by the [Jupyter Project <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_jp.png\">](https://jupyter.org/).\n\n\"Jupyter\" is an [acronym <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_wiki.png\">](https://en.wikipedia.org/wiki/Acronym) derived from the names of the three major programming languages **[Julia](https://julialang.org/)**, **[Python <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_py.png\">](https://www.python.org)**, and **[R](https://www.r-project.org/)**, all of which play significant roles in the world of data science. The Jupyter Project's idea is to serve as an integrating platform such that different programming languages and software packages can be used together within the same project easily.\n\nFurthermore, Jupyter notebooks have become a de-facto standard for communicating and exchanging results in the data science community - both in academia and business - and provide an alternative to terminal-based ways of running Python (e.g., the default [Python interpreter <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_py.png\">](https://docs.python.org/3/tutorial/interpreter.html) as shown below or a more advanced interactive version like [IPython <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_py.png\">](https://ipython.org/)) or a full-fledged [Integrated Development Environment <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_wiki.png\">](https://en.wikipedia.org/wiki/Integrated_development_environment) (e.g., the commercial [PyCharm](https://www.jetbrains.com/pycharm/) or the free [Spyder <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_gh.png\">](https://github.com/spyder-ide/spyder) that comes with the Anaconda Distribution).\n\n<img src=\"static/terminal.png\" width=\"50%\">\n\nJupyter notebooks allow mixing formatted English with Python code in the same document. Text is formatted with the [Markdown <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_gh.png\">](https://guides.github.com/features/mastering-markdown/) language and mathematical formulas are typeset with [LaTeX](https://www.overleaf.com/learn/latex/Free_online_introduction_to_LaTeX_%28part_1%29). Moreover, we may include pictures, plots, and even videos. Because of these features, the notebooks developed for this book come in a self-contained \"tutorial\" style enabling students to learn and review the material on their own.",
"_____no_output_____"
],
[
"### Markdown vs. Code Cells\n\nA Jupyter notebook consists of cells that have a type associated with them. So far, only cells of type \"Markdown\" have been used, which is the default way to present formatted text.\n\nThe cell below is an example of a \"Code\" cell containing a line of actual Python code: It merely outputs the text \"Hello world\" when executed. To edit an existing code cell, enter into it with a mouse click. You know that you are \"in\" a code cell when its frame is highlighted in blue.\n\nBesides this **edit mode**, there is also a so-called **command mode** that you reach by hitting the \"Escape\" key *after* entering a code cell, which un-highlights the frame. Using the \"Enter\" and \"Escape\" keys, you can now switch between the two modes.\n\nTo *execute*, or \"*run*,\" a code cell, hold the \"Control\" key and press \"Enter.\" Note that you do *not* go to the subsequent cell. Alternatively, you can hold the \"Shift\" key and press \"Enter,\" which executes the cell *and* places your focus on the subsequent cell.\n\nSimilarly, a Markdown cell is also in either edit or command mode. For example, double-click on the text you are reading: This puts you into edit mode. Now, you could change the formatting (e.g., make a word printed in *italics* or **bold** with single or double asterisks) and \"execute\" the cell to render the text as specified.\n\nTo change a cell's type, choose either \"Code\" or \"Markdown\" in the navigation bar at the top. Alternatively, you can hit either the \"Y\" or \"M\" key on your keyboard when in command mode to make the focused cell a code or markdown cell.",
"_____no_output_____"
]
],
[
[
"print(\"Hello world\")",
"Hello world\n"
]
],
[
[
"Sometimes, a code cell starts with an exclamation mark `!`. Then, the Jupyter notebook behaves as if the following command were typed directly into a terminal. The cell below asks `python` to show its version number and is *not* Python code but a command in the [Shell <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_wiki.png\">](https://en.wikipedia.org/wiki/Shell_%28computing%29) language. The `!` is useful to execute short commands without leaving a Jupyter notebook.",
"_____no_output_____"
]
],
[
[
"!python --version",
"Python 3.7.4\n"
]
],
[
[
"## Why Python?\n\n### What is Python?\n\nHere is a brief history of and some background on Python (cf., also this [TechRepublic article](https://www.techrepublic.com/article/python-is-eating-the-world-how-one-developers-side-project-became-the-hottest-programming-language-on-the-planet/) for a more elaborate story):\n\n- [Guido van Rossum <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_wiki.png\">](https://en.wikipedia.org/wiki/Guido_van_Rossum) (Python’s **[Benevolent Dictator for Life <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_wiki.png\">](https://en.wikipedia.org/wiki/Benevolent_dictator_for_life)**) was bored during a week around Christmas 1989 and started Python as a hobby project \"that would keep \\[him\\] occupied\" for some days\n- the idea was to create a **general-purpose** scripting **language** that would allow fast *prototyping* and would *run on every operating system*\n- Python grew through the 90s as van Rossum promoted it via his \"Computer Programming for Everybody\" initiative that had the *goal to encourage a basic level of coding literacy* as an equal knowledge alongside English literacy and math skills\n- to become more independent from its creator, the next major version **Python 2** - released in 2000 and still in heavy use as of today - was **open-source** from the get-go which attracted a *large and global community of programmers* that *contributed* their expertise and best practices in their free time to make Python even better\n- **Python 3** resulted from a significant overhaul of the language in 2008 taking into account the *learnings from almost two decades*, streamlining the language, and getting ready for the age of **big data**\n- the language is named after the sketch comedy group [Monty Python <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_wiki.png\">](https://en.wikipedia.org/wiki/Monty_Python)\n\n#### Summary\n\nPython is a **general-purpose** programming **language** that allows for *fast development*, is *easy to read*, **open-source**, long-established, unifies the knowledge of *hundreds of thousands of experts* around the world, runs on basically every machine, and can handle the complexities of applications involving **big data**.\n\n### Isn't C a lot faster?\n\nWhile it is true that a language like C is a lot faster than Python when it comes to *pure* **computation time**, this does not matter in many cases as the *significantly shorter* **development cycles** are the more significant cost factor in a rapidly changing world.",
"_____no_output_____"
],
[
"### Who uses it?\n\n<img src=\"static/logos.png\" width=\"70%\">\n\nWhile ad-hominem arguments are usually not the best kind of reasoning, we briefly look at some examples of who uses Python and leave it up to the reader to decide if this is convincing or not:\n\n- **[Massachusetts Institute of Technology](https://www.mit.edu/)**\n - teaches Python in its [introductory course](https://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-0001-introduction-to-computer-science-and-programming-in-python-fall-2016/) to computer science independent of the student's major\n - replaced the infamous course on the [Scheme](https://groups.csail.mit.edu/mac/projects/scheme/) language (cf., [source <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_hn.png\">](https://news.ycombinator.com/item?id=602307))\n- **[Google](https://www.google.com/)**\n - used the strategy \"Python where we can, C++ where we must\" from its early days on to stay flexible in a rapidly changing environment (cf., [source <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_so.png\">](https://stackoverflow.com/questions/2560310/heavy-usage-of-python-at-google))\n - the very first web-crawler was written in Java and so difficult to maintain that it was rewritten in Python right away (cf., [source](https://www.amazon.com/Plex-Google-Thinks-Works-Shapes/dp/1416596585/ref=sr_1_1?ie=UTF8&qid=1539101827&sr=8-1&keywords=in+the+plex))\n - Guido van Rossom was hired by Google from 2005 to 2012 to advance the language there\n- **[NASA](https://www.nasa.gov/)** open-sources many of its projects, often written in Python and regarding analyses with big data (cf., [source](https://code.nasa.gov/language/python/))\n- **[Facebook](https://facebook.com/)** uses Python besides C++ and its legacy PHP (a language for building websites; the \"cool kid\" from the early 2000s)\n- **[Instagram](https://instagram.com/)** operates the largest installation of the popular **web framework [Django](https://www.djangoproject.com/)** (cf., [source](https://instagram-engineering.com/web-service-efficiency-at-instagram-with-python-4976d078e366))\n- **[Spotify](https://spotify.com/)** bases its data science on Python (cf., [source](https://labs.spotify.com/2013/03/20/how-we-use-python-at-spotify/))\n- **[Netflix](https://netflix.com/)** also runs its predictive models on Python (cf., [source](https://medium.com/netflix-techblog/python-at-netflix-86b6028b3b3e))\n- **[Dropbox](https://dropbox.com/)** \"stole\" Guido van Rossom from Google to help scale the platform (cf., [source](https://medium.com/dropbox-makers/guido-van-rossum-on-finding-his-way-e018e8b5f6b1))\n- **[JPMorgan Chase](https://www.jpmorganchase.com/)** requires new employees to learn Python as part of the onboarding process starting with the 2018 intake (cf., [source](https://www.ft.com/content/4c17d6ce-c8b2-11e8-ba8f-ee390057b8c9?segmentId=a7371401-027d-d8bf-8a7f-2a746e767d56))\n\nAs images tell more than words, here are two plots of popular languages' \"market shares\" based on the number of questions asked on [Stack Overflow <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_so.png\">](https://stackoverflow.blog/2017/09/06/incredible-growth-python/), the most relevant platform for answering programming-related questions: As of late 2017, Python surpassed [Java](https://www.java.com/en/), heavily used in big corporates, and [JavaScript](https://developer.mozilla.org/en-US/docs/Web/JavaScript), the \"language of the 
internet\" that does everything in web browsers, in popularity. Two blog posts from \"technical\" people explain this in more depth to the layman: [Stack Overflow <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_so.png\">](https://stackoverflow.blog/2017/09/14/python-growing-quickly/) and [DataCamp](https://www.datacamp.com/community/blog/python-scientific-computing-case).\n\n<img src=\"static/growth_major_languages.png\" width=\"50%\">\n\nAs the graph below shows, neither Google's very own language **[Go](https://golang.org/)** nor **[R](https://www.r-project.org/)**, a domain-specific language in the niche of statistics, can compete with Python's year-to-year growth.\n\n<img src=\"static/growth_smaller_languages.png\" width=\"50%\">\n\n[IEEE Sprectrum](https://spectrum.ieee.org/computing/software/the-top-programming-languages-2019) provides a more recent comparison of programming language's popularity. Even news and media outlets notice the recent popularity of Python: [Economist](https://www.economist.com/graphic-detail/2018/07/26/python-is-becoming-the-worlds-most-popular-coding-language), [Huffington Post](https://www.huffingtonpost.com/entry/why-python-is-the-best-programming-language-with-which_us_59ef8f62e4b04809c05011b9), [TechRepublic](https://www.techrepublic.com/article/why-python-is-so-popular-with-developers-3-reasons-the-language-has-exploded/), and [QZ](https://qz.com/1408660/the-rise-of-python-as-seen-through-a-decade-of-stack-overflow/).",
"_____no_output_____"
],
[
"## Contents\n\n- *Chapter 0*: Introduction\n- **Part A: Expressing Logic**\n - *Chapter 1*: Elements of a Program\n - *Chapter 2*: Functions & Modularization\n - *Chapter 3*: Conditionals & Exceptions\n - *Chapter 4*: Recursion & Looping",
"_____no_output_____"
],
[
"## How to learn Programming\n\nDo you remember how you first learned to speak in your mother tongue? Probably not. \n\nYour earliest memory as a child should probably be around the age of three or four years old when you could already say simple things and interact with your environment. \n\nAlthough you did not know any grammar rules yet, other people just understood what you said. At least most of the time.\n\nIt is intuitively best to take the very mindset of a small child when learning a new language. And a programming language is no different from that. \n\nThis first chapter introduces simplistic examples and we accept them as they are *without* knowing any of the \"grammar\" rules yet. Then, we analyze them in parts and slowly build up our understanding.\n\nConsequently, if parts of this chapter do not make sense right away, let's not worry too much. Besides introducing the basic elements, it also serves as an outlook for what is to come. So, many terms and concepts used here are deconstructed in great detail in the following chapters.",
"_____no_output_____"
],
[
"## Example: Averaging all even Numbers in a List\n\nAs our introductory example, we want to calculate the *average* of all *evens* in a **list** of whole numbers: `[7, 11, 8, 5, 3, 12, 2, 6, 9, 10, 1, 4]`.\n\nWhile we are used to finding an [analytical solution](https://math.stackexchange.com/questions/935405/what-s-the-difference-between-analytical-and-numerical-approaches-to-problems/935446#935446) in math (i.e., derive some equation with \"pen and paper\"), we solve this task *programmatically* instead.\n\nWe start by creating a list called `numbers` that holds all the individual numbers between **brackets** `[` and `]`.",
"_____no_output_____"
]
],
[
[
"numbers = [7, 11, 8, 5, 3, 12, 2, 6, 9, 10, 1, 4]",
"_____no_output_____"
]
],
[
[
"To verify that something happened in our computer's memory, we **reference** `numbers`.",
"_____no_output_____"
]
],
[
[
"numbers",
"_____no_output_____"
]
],
[
[
"So far, so good. Let's see how the desired **computation** could be expressed as a **sequence of instructions** in the next code cell.\n\nIntuitively, the line `for number in numbers` describes a \"loop\" over all the numbers in the `numbers` list, one at a time.\n\nThe `if number % 2 == 0` may look confusing at first sight. Both `%` and `==` must have an unintuitive meaning here. Luckily, the **comment** in the same line after the `#` symbol has the answer: The program does something only for an even `number`.\n\nIn particular, it increases `count` by `1` and adds the current `number` onto the [running <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_wiki.png\">](https://en.wikipedia.org/wiki/Running_total) `total`. Both `count` and `number` are **initialized** to `0` and the single `=` symbol reads as \"... is *set* equal to ...\". It cannot indicate a mathematical equation as, for example, `count` is generally *not* equal to `count + 1`.\n\nLastly, the `average` is calculated as the ratio of the final **values** of `total` and `count`. Overall, we divide the sum of all even numbers by their count: This is nothing but the definition of an average.\n\nThe lines of code \"within\" the `for` and `if` **statements** are **indented** and aligned with multiples of *four spaces*: This shows immediately how the lines relate to each other.",
"_____no_output_____"
]
],
[
[
"count = 0 # initialize variables to keep track of the\ntotal = 0 # running total and the count of even numbers\n\nfor number in numbers:\n if number % 2 == 0: # only work with even numbers\n count = count + 1\n total = total + number\n\naverage = total / count",
"_____no_output_____"
]
],
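As a quick preview of list comprehensions (covered in later chapters), the same average of the even numbers can also be computed more compactly. This is only a minimal sketch, not part of the original walkthrough; like the loop above, it assumes the list contains at least one even number.

```python
numbers = [7, 11, 8, 5, 3, 12, 2, 6, 9, 10, 1, 4]

# Keep only the even numbers, then divide their sum by their count.
evens = [n for n in numbers if n % 2 == 0]
average_of_evens = sum(evens) / len(evens)
print(average_of_evens)  # 7.0, the same result as the loop-based version
```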
[
[
"We do not see any **output** yet but obtain the value of `average` by referencing it again.",
"_____no_output_____"
]
],
[
[
"average",
"_____no_output_____"
]
],
[
[
"## Output in a Jupyter Notebook\n\nOnly two of the previous four code cells generate an **output** while two remained \"silent\" (i.e., nothing appears below the cell after running it).\n\nBy default, Jupyter notebooks only show the value of the **expression** in the last line of a code cell. And, this output may also be suppressed by ending the last line with a semicolon `;`.",
"_____no_output_____"
]
],
[
[
"\"Hello, World!\"\n\"I am feeling great :-)\"",
"_____no_output_____"
],
[
"\"I am invisible!\";",
"_____no_output_____"
]
],
[
[
"To see any output other than that, we use the built-in [print() <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_py.png\">](https://docs.python.org/3/library/functions.html#print) **function**. Here, the parentheses `()` indicate that we **call** (i.e., \"execute\") code written somewhere else.",
"_____no_output_____"
]
],
[
[
"print(\"Hello, World!\")\nprint(\"I am feeling great :-)\")",
"Hello, World!\nI am feeling great :-)\n"
]
],
[
[
"Outside Jupyter notebooks, the semicolon `;` is used as a **separator** between statements that must otherwise be on a line on their own. However, it is *not* considered good practice to use it as it makes code less readable.",
"_____no_output_____"
]
],
[
[
"print(\"Hello, World!\"); print(\"I am feeling great :-)\")",
"Hello, World!\nI am feeling great :-)\n"
]
],
[
[
"### Jupyter Notebook Aspects\n\n#### The Order of Code Cells is arbitrary\n\nWe can run the code cells in a Jupyter notebook in *any* arbitrary order.\n\nThat means, for example, that a variable defined towards the bottom could accidentally be referenced at the top of the notebook. This happens quickly when we iteratively built a program and go back and forth between cells.\n\nAs a good practice, it is recommended to click on \"Kernel\" > \"Restart Kernel and Run All Cells\" in the navigation bar once a notebook is finished. That restarts the Python process forgetting all **state** (i.e., all variables) and ensures that the notebook runs top to bottom without any errors the next time it is opened.\n\n#### Notebooks are linear\n\nWhile this book is built with Jupyter notebooks, it is crucial to understand that \"real\" programs are almost never \"linear\" (i.e., top to bottom) sequences of instructions but instead may take many different **flows of execution**.\n\nAt the same time, for a beginner's course, it is often easier to code linearly.\n\nIn real data science projects, one would probably employ a mixed approach and put reusable code into so-called Python modules (i.e., *.py* files; cf., Chapter 2) and then use Jupyter notebooks to build up a linear report or storyline for an analysis.",
"_____no_output_____"
],
[
"## How to learn Programming\n\n### ABC Rule\n\n**A**lways **b**e **c**oding.\n\nProgramming is more than just writing code into a text file. It means reading through parts of the [documentation <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_py.png\">](https://docs.python.org/), blogs with best practices, and tutorials, or researching problems on [Stack Overflow <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_so.png\">](https://stackoverflow.com/) while trying to implement features in the application at hand. Also, it means using command-line tools to automate some part of the work or manage different versions of a program, for example, with **[git](https://git-scm.com/)**. In short, programming involves a lot of \"muscle memory,\" which can only be built and kept up through near-daily usage.\n\nFurther, many aspects of software architecture and best practices can only be understood after having implemented some requirements for the very first time. Coding also means \"breaking\" things to find out what makes them work in the first place.\n\nTherefore, coding is learned best by just doing it for some time on a daily or at least a regular basis and not right before some task is due, just like learning a \"real\" language.\n\n### The Maker's Schedule\n\n[Y Combinator <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_hn.png\">](https://www.ycombinator.com/) co-founder [Paul Graham <img height=\"12\" style=\"display: inline-block\" src=\"static/link_to_wiki.png\">](https://en.wikipedia.org/wiki/Paul_Graham_%28programmer%29) wrote a very popular and often cited [article](http://www.paulgraham.com/makersschedule.html) where he divides every person into belonging to one of two groups:\n\n- **Managers**: People that need to organize things and command others (e.g., a \"boss\" or manager). Their schedule is usually organized by the hour or even 30-minute intervals.\n- **Makers**: People that create things (e.g., programmers, artists, or writers). Such people think in half days or full days.\n\nHave you ever wondered why so many tech people work during nights and sleep at \"weird\" times? The reason is that many programming-related tasks require a \"flow\" state in one's mind that is hard to achieve when one can get interrupted, even if it is only for one short question. Graham describes that only knowing that one has an appointment in three hours can cause a programmer to not get into a flow state.\n\nAs a result, do not set aside a certain amount of time for learning something but rather plan in an *entire evening* or a *rainy Sunday* where you can work on a problem in an *open end* setting. And do not be surprised anymore to hear \"I looked at it over the weekend\" from a programmer.\n\n### Phase Iteration\n\nWhen being asked the above question, most programmers answer something that can be classified into one of two broader groups.\n\n**1) Toy Problem, Case Study, or Prototype**: Pick some problem, break it down into smaller sub-problems, and solve them with an end in mind.\n\n**2) Books, Video Tutorials, and Courses**: Research the best book, blog, video, or tutorial for something and work it through from start to end.\n\nThe truth is that you need to iterate between these two phases.\n\nBuilding a prototype always reveals issues no book or tutorial can think of before. Data is never as clean as it should be. An algorithm from a textbook must be adapted to a peculiar aspect of a case study. 
It is essential to learn to \"ship a product\" because only then will one have looked at all the aspects.\n\nThe major downside of this approach is that one likely learns bad \"patterns\" overfitted to the case at hand, and one does not get the big picture or mental concepts behind a solution. This gap can be filled in by well-written books: For example, check the Python/programming books offered by [Packt](https://www.packtpub.com/packt/offers/free-learning/) or [O’Reilly](https://www.oreilly.com/).",
"_____no_output_____"
],
[
"## HackerRank\n\nHackerRank is a wonderful online platform which contains numerous online coding tests for student to practice their coding skills. Software companies also use HackerRank technical assessment and remote interview solution for hiring developers. Student will see a coding prolem in HackerRank in a form of problem description, sample input and expected output. \n\n<img src=\"static/HackerRankProblem.png\" width=\"60%\">\n\nThe task is writing the code according to problem description so that the code will take the sample input and print out the expected output. \n\n<img src=\"static/HackerRank_submit.png\" width=\"60%\">\n\nOur course target is completing some (may not all) [HackerRank Python problems](https://www.hackerrank.com/domains/python?filters%5Bsubdomains%5D%5B%5D=py-introduction). In order to do that, please register your account in HackerRank in the link below:\n[HackerRank SignUp](\nhttps://www.hackerrank.com/auth/signup?h_l=body_middle_left_button&h_r=sign_up)",
"_____no_output_____"
],
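The snippet below is only a hypothetical illustration of the HackerRank pattern described above; it is not one of the linked problems. It assumes a made-up task: read two integers from standard input, one per line, and print their sum.

```python
# Hypothetical HackerRank-style solution: read from stdin, print to stdout.
a = int(input())  # first line of the sample input
b = int(input())  # second line of the sample input
print(a + b)      # must match the expected output exactly
```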
[
"## Comments\n\nWe use the `#` symbol to write comments in plain English right into the code.\n\nAs a good practice, comments should *not* describe *what* happens. This should be evident by reading the code. Otherwise, it is most likely badly written code. Rather, comments should describe *why* something happens.\n\nComments may be added either at the end of a line of code, by convention separated with two spaces, or on a line on their own.",
"_____no_output_____"
]
],
[
[
"distance = 891 # in meters\nelapsed_time = 93 # in seconds\n\n# Calculate the speed in km/h.\nspeed = 3.6 * distance / elapsed_time",
"_____no_output_____"
]
],
[
[
"But let's think wisely if we need to use a comment.\nThe second cell is a lot more Pythonic.",
"_____no_output_____"
]
],
[
[
"seconds = 365 * 24 * 60 * 60 # = seconds in the year",
"_____no_output_____"
],
[
"seconds_per_year = 365 * 24 * 60 * 60",
"_____no_output_____"
]
],
[
[
"## TL;DR\n\nWe end each chapter with a summary of the main points (i.e., **TL;DR** = \"too long; didn't read\"). \n- program\n - **sequence** of **instructions** that specify how to perform a computation (= a \"recipe\")\n - a \"black box\" that processes **inputs** and transforms them into meaningful **outputs** in a *deterministic* way\n - conceptually similar to a mathematical function $f$ that maps some input $x$ to an output $y = f(x)$\n\n- comments\n - **prose** supporting a **human's understanding** of the program\n - ignored by Python\n\n",
"_____no_output_____"
],
[
"## Further readings [Miller et al., 2013]: \n\n#### Algorithms\n\nGiven a problem, a computer scientist’s goal is to develop an algorithm, a step-by-step list of instructions for solving any instance of the problem that might arise. Algorithms are finite processes that if followed will solve the problem. Algorithms are solutions.\n\nWe say that a problem is computable if an algorithm exists for solving it. An alternative\ndefinition for computer science, then, is to say that computer science is the study of problems that are and that are not computable, the study of the existence and the nonexistence of algorithms.\n\nIn any case, you will note that the word “computer” did not come up at all. Solutions\nare considered independent from the machine.\n\n#### Abstraction\n\nComputer science, as it pertains to the problem-solving process itself, is also the study of abstraction. Abstraction allows us to view the problem and solution in such a way as to separate the so-called logical and physical perspectives.\n\nConsider the automobile that you may have driven to school or work today. As a driver, a user of the car, you have certain interactions that take place in order to utilize the car for its intended purpose. You get in, insert the key, start the car, shift, brake, accelerate, and steer in order to drive. From an abstraction point of view, we can say that you are seeing the logical perspective of the automobile. You are using the functions provided by the car designers for the purpose of transporting you from one location to another. These functions are sometimes also referred to as the interface.\n\nOn the other hand, the mechanic who must repair your automobile takes a very different point of view. She not only knows how to drive but must know all of the details necessary to carry out all the functions that we take for granted. She needs to understand how the engine works, how the transmission shifts gears, how temperature is controlled, and so on. This is known as the physical perspective, the details that take place “under the hood.”\n\nThe same thing happens when we use computers. Most people use computers to write documents, send and receive email, surf the web, play music, store images, and play games without any knowledge of the details that take place to allow those types of applications to work. They view computers from a logical or user perspective. Computer scientists, programmers, technology support staff, and system administrators take a very different view of the computer. They must know the details of how operating systems work, how network protocols are configured, and how to code various scripts that control function. They must be able to control the low-level details that a user simply assumes.\n\nThe common point for both of these examples is that the user of the abstraction, sometimes also called the client, does not need to know the details as long as the user is aware of the way the interface works. This interface is the way we as users communicate with the underlying complexities of the implementation.\n\n<img src=\"static/abstraction.png\" width=\"60%\">\n\nPython code:",
"_____no_output_____"
]
],
[
[
"import math\nmath.sqrt(16)\n",
"_____no_output_____"
]
],
[
[
"This is an example of procedural abstraction. We do not necessarily know how the square root is being calculated, but we know what the function is called and how to use it. If we perform the import correctly, we can assume that the function will provide us with the correct results.\nWe know that someone implemented a solution to the square root problem but we only\nneed to know how to use it. This is sometimes referred to as a “black box” view of a process. We simply describe the interface: the name of the function, what is needed (the parameters), and what will be returned. The details are hidden inside.\n",
"_____no_output_____"
],
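To make the "black box" idea concrete, here is one possible implementation that could sit behind a square-root interface: Newton's method. This is only an illustrative sketch (CPython's `math.sqrt` is actually implemented in C); the point is that a caller of `sqrt(16)` does not need to know which implementation is hiding inside.

```python
def sqrt(n, tolerance=1e-10):
    """Approximate the square root of a non-negative number n with Newton's method."""
    if n == 0:
        return 0.0
    guess = n / 2 if n > 1 else 1.0  # any positive starting guess works
    while abs(guess * guess - n) > tolerance:
        guess = (guess + n / guess) / 2  # average the guess with n / guess
    return guess

print(sqrt(16))  # approximately 4.0 -- same interface as math.sqrt(16), different internals
```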
[
"#### Why Study Algorithms?\n\nComputer scientists learn by experience. We learn by seeing others solve problems and by solving problems by ourselves. Being exposed to different problem-solving techniques and seeing how different algorithms are designed helps us to take on the next challenging problem that we are given. By considering a number of different algorithms, we can begin to develop pattern recognition so that the next time a similar problem arises, we are better able to solve it.\n\nAlgorithms are often quite different from one another. Consider the example of sqrt seen earlier. It is entirely possible that there are many different ways to implement the details to compute the square root function. One algorithm may use many fewer resources than another. One algorithm might take 10 times as long to return the result as the other. We would like to have some way to compare these two solutions. Even though they both work, one is perhaps “better” than the other. We might suggest that one is more efficient or that one simply works faster or uses less memory. As we study algorithms, we can learn analysis techniques that allow us to compare and contrast solutions based solely on their own characteristics, not the characteristics of the program or computer used to implement them.\n\nIn the worst case scenario, we may have a problem that is intractable, meaning that there is no algorithm that can solve the problem in a realistic amount of time. It is important to be able to distinguish between those problems that have solutions, those that do not, and those where solutions exist but require too much time or other resources to work reasonably.\n\nPython is a modern, easy-to-learn, object-oriented programming language. It has a powerful set of built-in data types and easy-to-use control constructs.\n\n### References: \n1. Miller, Brad, and David Ranum. \"Problem solving with algorithms and data structures.\" (2013).",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a232d3d3ffdc468a22996f475f193c4d67d7809
| 75,316 |
ipynb
|
Jupyter Notebook
|
Scikit-Learn.ipynb
|
kaiicheng/NBA
|
21de2f1f4783465d159b81b44b0ec88c7f757ec2
|
[
"MIT"
] | null | null | null |
Scikit-Learn.ipynb
|
kaiicheng/NBA
|
21de2f1f4783465d159b81b44b0ec88c7f757ec2
|
[
"MIT"
] | null | null | null |
Scikit-Learn.ipynb
|
kaiicheng/NBA
|
21de2f1f4783465d159b81b44b0ec88c7f757ec2
|
[
"MIT"
] | null | null | null | 30.905211 | 114 | 0.330727 |
[
[
[
"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nfrom tensorflow.keras import datasets\nfrom pyvizml import CreateNBAData\nimport requests\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.datasets import load_boston\nfrom sklearn.datasets import fetch_california_housing\nfrom sklearn.datasets import make_classification\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import StandardScaler",
"_____no_output_____"
],
[
"cnb = CreateNBAData(2019)\nplayers = cnb.create_players_df()\nX = players['heightMeters'].values.reshape(-1, 1)\ny = players['weightKilograms'].values",
"Creating players df...\n"
],
[
"cnb",
"_____no_output_____"
],
[
"players",
"_____no_output_____"
],
[
"X",
"_____no_output_____"
],
[
"y",
"_____no_output_____"
],
[
"X.shape\ny.shape\nX_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.33, random_state=42)",
"_____no_output_____"
],
[
"print(X_train.shape)\nprint(X_valid.shape)\nprint(y_train.shape)\nprint(y_valid.shape)",
"(341, 1)\n(169, 1)\n(341,)\n(169,)\n"
],
[
"ss = StandardScaler()\nlr = LinearRegression()",
"_____no_output_____"
],
[
"type(ss) # transformer\ntype(lr) # predictor",
"_____no_output_____"
],
[
"pipeline = Pipeline([('scaler', ss), ('lr', lr)])\ntype(pipeline)",
"_____no_output_____"
],
[
"pipeline.fit(X_train, y_train)",
"_____no_output_____"
],
[
"pipeline.predict(X_valid)",
"_____no_output_____"
],
[
"lr.coef_",
"_____no_output_____"
],
[
"lr.intercept_",
"_____no_output_____"
],
[
"X_valid[0, :]",
"_____no_output_____"
],
[
"ss.transform(np.array([2.03]).reshape(-1, 1)) * 8.53366403 + lr.intercept_",
"_____no_output_____"
],
[
"X = players[['heightFeet', 'heightInches']].values.astype(float)\nX.shape",
"_____no_output_____"
],
[
"X",
"_____no_output_____"
],
[
"print(X.ndim)\nprint(X.shape)",
"2\n(510, 2)\n"
],
[
"y",
"_____no_output_____"
],
[
"print(y.ndim)\nprint(y.shape)",
"1\n(510,)\n"
],
[
"poly = PolynomialFeatures()\ntype(poly)",
"_____no_output_____"
],
[
"X_before_poly = X.copy()\nX_before_poly.shape",
"_____no_output_____"
],
[
"X_after_poly = poly.fit_transform(X)\nX_after_poly.shape",
"_____no_output_____"
],
[
"X_before_poly[:10, :]",
"_____no_output_____"
],
[
"X_after_poly[:10, :]",
"_____no_output_____"
],
[
"poly = PolynomialFeatures(degree=1)\nX_after_poly = poly.fit_transform(X)\nX_after_poly.shape",
"_____no_output_____"
],
[
"X_after_poly",
"_____no_output_____"
],
[
"poly = PolynomialFeatures(degree=3)\nX_after_poly = poly.fit_transform(X)\nX_after_poly.shape",
"_____no_output_____"
],
[
"X_after_poly",
"_____no_output_____"
],
[
"X_before_scaled = X.copy()\nms = MinMaxScaler()\nss = StandardScaler()",
"_____no_output_____"
],
[
"X_before_poly[:10, :]",
"_____no_output_____"
],
[
"max_val = X[:, 0].max() # 7\nmin_val = X[:, 0].min() # 5\n(7-6)/(7-5)",
"_____no_output_____"
],
[
"X_after_ms = ms.fit_transform(X_before_scaled)\nprint(X_after_ms[:10])",
"[[0.5 0. ]\n [0.5 1. ]\n [0.5 0.81818182]\n [0.5 1. ]\n [0.5 0.90909091]\n [0.5 0.45454545]\n [0.5 0.36363636]\n [0.5 1. ]\n [0.5 0.72727273]\n [0.5 0.45454545]]\n"
],
[
"mean_val = X[:, 0].max() # 7\nstd_val = X[:, 0].min() # 5\n(7 - 6) / (7-5)",
"_____no_output_____"
],
[
"X_after_ss = ss.fit_transform(X_before_scaled)\nprint(X_after_ss[:10])",
"[[-0.15058117 -1.89979194]\n [-0.15058117 1.62929176]\n [-0.15058117 0.98764018]\n [-0.15058117 1.62929176]\n [-0.15058117 1.30846597]\n [-0.15058117 -0.29566298]\n [-0.15058117 -0.61648877]\n [-0.15058117 1.62929176]\n [-0.15058117 0.66681439]\n [-0.15058117 -0.29566298]]\n"
],
[
"train = pd.read_csv(\"https://kaggle-getting-started.s3-ap-northeast-1.amazonaws.com/titanic/train.csv\")\ntest = pd.read_csv(\"https://kaggle-getting-started.s3-ap-northeast-1.amazonaws.com/titanic/test.csv\")\nprint(train.shape)\nprint(test.shape)",
"(891, 12)\n(418, 11)\n"
],
[
"train.columns.difference(test.columns)",
"_____no_output_____"
],
[
"players.iloc[:5, :4]",
"_____no_output_____"
],
[
"players_train, players_valid = train_test_split(players, test_size=0.3, random_state=42)",
"_____no_output_____"
],
[
"print(players_train.shape)\nprint(players_valid.shape)\n153 / (153+357)",
"(357, 20)\n(153, 20)\n"
],
[
"players_train.iloc[:5, :4]",
"_____no_output_____"
],
[
"players_valid.iloc[:5, :4]",
"_____no_output_____"
],
[
"def trainTestSplit(df, test_size, random_state):\n df_index = df.index.values.copy()\n m = df_index.size\n np.random.seed(random_state)\n np.random.shuffle(df_index)\n test_index = int(np.ceil(m * test_size))\n test_indices = df_index[:test_index]\n train_indices = df_index[test_index:]\n df_valid = df.loc[test_indices, :]\n df_train = df.loc[train_indices, :]\n return df_train, df_valid",
"_____no_output_____"
],
[
"players_train, players_valid = trainTestSplit(players, test_size=0.3, random_state=42)",
"_____no_output_____"
],
[
"players_train.iloc[:5, :4]",
"_____no_output_____"
],
[
"players_valid.iloc[:5, :4]",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a2332a65dbb248f3150db25e5e7818d92d231e7
| 153,594 |
ipynb
|
Jupyter Notebook
|
matplotlibTUT/plt10_scatter.ipynb
|
ilray88/tutorials
|
0182234002233179ab5c9a0d4ba230e5aa5f170e
|
[
"MIT"
] | null | null | null |
matplotlibTUT/plt10_scatter.ipynb
|
ilray88/tutorials
|
0182234002233179ab5c9a0d4ba230e5aa5f170e
|
[
"MIT"
] | null | null | null |
matplotlibTUT/plt10_scatter.ipynb
|
ilray88/tutorials
|
0182234002233179ab5c9a0d4ba230e5aa5f170e
|
[
"MIT"
] | null | null | null | 1,806.988235 | 151,659 | 0.96173 |
[
[
[
"# View more python tutorials on my Youtube and Youku channel!!!\n\n# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg\n# Youku video tutorial: http://i.youku.com/pythontutorial\n\n# 10 - scatter\n\"\"\"\nPlease note, this script is for python3+.\nIf you are using python2+, please modify it accordingly.\nTutorial reference:\nhttp://www.scipy-lectures.org/intro/matplotlib/matplotlib.html\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nn = 1024 # data size\nX = np.random.normal(0, 1, n)\nY = np.random.normal(0, 1, n)\nT = np.arctan2(Y, X) # for color later on\n\nplt.scatter(X, Y, s=75, c=T, alpha=.5)\n\nplt.xlim(-1.5, 1.5)\nplt.xticks(()) # ignore xticks\nplt.ylim(-1.5, 1.5)\nplt.yticks(()) # ignore yticks\n\nplt.show()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code"
]
] |
4a2335121ec64b15e002e388ba3a0f002a2ad9f8
| 4,491 |
ipynb
|
Jupyter Notebook
|
src/imjoy_viewer.ipynb
|
TheJacksonLaboratory/HCS_visualization_tools
|
7a29aa7d006772acb5fd12d65e5f6a2ebdc95f5d
|
[
"MIT"
] | 1 |
2022-02-23T23:02:37.000Z
|
2022-02-23T23:02:37.000Z
|
src/imjoy_viewer.ipynb
|
TheJacksonLaboratory/HCS_visualization_tools
|
7a29aa7d006772acb5fd12d65e5f6a2ebdc95f5d
|
[
"MIT"
] | null | null | null |
src/imjoy_viewer.ipynb
|
TheJacksonLaboratory/HCS_visualization_tools
|
7a29aa7d006772acb5fd12d65e5f6a2ebdc95f5d
|
[
"MIT"
] | null | null | null | 24.275676 | 96 | 0.490537 |
[
[
[
"import zarr\nimport dask_zarr as dz",
"_____no_output_____"
]
],
[
[
"### Modify the url to point to the correct location of the zarr file",
"_____no_output_____"
]
],
[
[
"z_url = r\"/mnt/KOMP_C8565_1.zarr\"",
"_____no_output_____"
],
[
"z = zarr.open(z_url, mode=\"r\") # open the zarr created above in jupyter kernel",
"_____no_output_____"
]
],
[
[
"### Set up the ImJoy viewer extension",
"_____no_output_____"
]
],
[
[
"from imjoy import api\nimport zarr\n\n\ndef encode_zarr_store(zobj):\n path_prefix = f\"{zobj.path}/\" if zobj.path else \"\"\n\n def getItem(key, options = None):\n return zobj.store[path_prefix + key]\n\n def setItem(key, value):\n zobj.store[path_prefix + key] = value\n\n def containsItem(key, options = None):\n if path_prefix + key in zobj.store:\n return True\n\n return {\n \"_rintf\": True,\n \"_rtype\": \"zarr-array\" if isinstance(zobj, zarr.Array) else \"zarr-group\",\n \"getItem\": getItem,\n \"setItem\": setItem,\n \"containsItem\": containsItem,\n }\n\n\napi.registerCodec(\n {\"name\": \"zarr-array\", \"type\": zarr.Array, \"encoder\": encode_zarr_store}\n)\napi.registerCodec(\n {\"name\": \"zarr-group\", \"type\": zarr.Group, \"encoder\": encode_zarr_store}\n)\n\n\nclass Plugin:\n def __init__(self, images, view_state=None):\n if not isinstance(images, list):\n images = [images]\n self.images = images\n self.view_state = view_state\n\n async def setup(self):\n pass\n\n async def run(self, ctx):\n viewer = await api.createWindow(\n type=\"vizarr\", src=\"https://hms-dbmi.github.io/vizarr\"\n )\n if self.view_state:\n await viewer.set_view_state(self.view_state)\n for img in self.images:\n await viewer.add_image(img)\n\n\ndef run_vizarr(images, view_state=None):\n api.export(Plugin(images, view_state))",
"_____no_output_____"
]
],
[
[
"### Access to the group '0' of the zarr file an visualize it",
"_____no_output_____"
]
],
[
[
"# Create Zarr \nimg = { \"source\": z['0'], \"name\": \"KOMP_test_1\" }\n\n# Run vizarr\nrun_vizarr(img)",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a2338ef45a6ded2f64a6cc1ae0e12244361422c
| 76,780 |
ipynb
|
Jupyter Notebook
|
Analisando_dados_abertos.ipynb
|
jjanuario/datamining
|
bfe39419c65c246a9012afc454c372e3db9cab82
|
[
"BSD-3-Clause"
] | null | null | null |
Analisando_dados_abertos.ipynb
|
jjanuario/datamining
|
bfe39419c65c246a9012afc454c372e3db9cab82
|
[
"BSD-3-Clause"
] | null | null | null |
Analisando_dados_abertos.ipynb
|
jjanuario/datamining
|
bfe39419c65c246a9012afc454c372e3db9cab82
|
[
"BSD-3-Clause"
] | null | null | null | 100.893561 | 33,564 | 0.785686 |
[
[
[
"## São Paulo 04/07/2019 \n### Analisando dados abertos obtidos pelo portal da transparência\n#### [email protected]\n##### Dados: http://dados.gov.br/dataset/mec-pronatec-eptc",
"_____no_output_____"
]
],
[
[
"# Importando as bibliotecas necessarias\nimport pandas as pd",
"_____no_output_____"
],
[
"# Lendo o arquivos CSV com seus campos separados por ';' e encoding cp1252\n# Encoding cp1252 geralmente é resultado de um arquivos Excel salvo como CSV.\ndf = pd.read_csv('PDA_UNIDADES_RF_EPCT_CSV.csv', sep=';', encoding='cp1252')",
"_____no_output_____"
],
[
"#Lendo os dois priumeiros registros do arquivos.\ndf.head(2)",
"_____no_output_____"
],
[
"#Lendo os ultimos 15 registros do arquivo\ndf.tail(15)",
"_____no_output_____"
],
[
"# Cada linha corresponde a uma unidade, assim efetuando a contagem das linhas temos o numero de unidades.\ndf.count()",
"_____no_output_____"
],
[
"# Mostra os tipos de cada coluna do DataFrame \n# PS. object é um tipo generico indica que não foi reconhecido. \ndf.dtypes",
"_____no_output_____"
],
[
"# Quantas escolas existem em cada regiao\ndf['NOME_REGIAO_UNIDADE'].value_counts()",
"_____no_output_____"
],
[
"# Quantas escolas por estado\ndf['SIGLA_UF_UNIDADE'].value_counts()",
"_____no_output_____"
],
[
"# Melhorando a visualização utilizando graficos para mostrar o conteudo.\n#Precisamos informar que visualiaremos os dados no jupyter (inline)\n%matplotlib inline",
"_____no_output_____"
],
[
"# Monstrando grafico de barras de unidade por UF\ndf['SIGLA_UF_UNIDADE'].value_counts().plot.bar()",
"_____no_output_____"
],
[
"# Monstrando grafico de Pizza de unidade por UF\ndf['SIGLA_UF_UNIDADE'].value_counts().plot.pie()",
"_____no_output_____"
],
[
"#Agora um grafico pizza por regiao\n\ndf['NOME_REGIAO_UNIDADE'].value_counts().plot.bar()",
"_____no_output_____"
],
[
"# Unidades por municipio\ndf['NOME_MESORREGIAO_UNIDADE'].value_counts()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a23466bb16fd2947519e64de6d86ead4b75fd6b
| 41,172 |
ipynb
|
Jupyter Notebook
|
archived/MBZ-XML-TO-EXCEL-v0004.ipynb
|
angrave/Moodle-mbz-to-excel-util
|
299ab421076bee8a239780ea70cb3d3cd40674f1
|
[
"NCSA",
"Unlicense"
] | 1 |
2020-03-03T21:36:58.000Z
|
2020-03-03T21:36:58.000Z
|
archived/MBZ-XML-TO-EXCEL-v0004.ipynb
|
angrave/Moodle-mbz-to-excel
|
299ab421076bee8a239780ea70cb3d3cd40674f1
|
[
"NCSA",
"Unlicense"
] | null | null | null |
archived/MBZ-XML-TO-EXCEL-v0004.ipynb
|
angrave/Moodle-mbz-to-excel
|
299ab421076bee8a239780ea70cb3d3cd40674f1
|
[
"NCSA",
"Unlicense"
] | null | null | null | 42.012245 | 479 | 0.570825 |
[
[
[
"# MBZ-XML-TO-EXCEL\n\n\nFirst pubished version May 22, 2019. This is version 0.0004 (revision July 26, 2019)\n\nLicensed under the NCSA Open source license\nCopyright (c) 2019 Lawrence Angrave\nAll rights reserved.\n\nDeveloped by: Lawrence Angrave\n \nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal with the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\n Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimers.\n Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimers in the documentation and/or other materials provided with the distribution.\n Neither the names of Lawrence Angrave, University of Illinois nor the names of its contributors may be used to endorse or promote products derived from this Software without specific prior written permission. \n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. \n\n# Citations and acknowledgements welcomed!\n\nIn a presentation, report or paper please recognise and acknowledge the the use of this software.\nPlease contact [email protected] for a Bibliography citation. For presentations, the following is sufficient\n\nMBZ-XML-TO-EXCEL (https://github.com/angrave/Moodle-mbz-to-excel) by Lawrence Angrave.\nMBZ-XML-TO-EXCEL is an iLearn project, supported by an Institute of Education Sciences Award R305A180211\n\nIf also using Geo-IP data, please cite IP2Location. For example,\n\"This report uses geo-ip location data from IP2Location.com\"\n\n# Known limitations and issues\n\nThe assessment sheet (generated from workshop.xml) may generate URLs that are longer than 255 characters, \nthe largested supported by Excel. These very long URLs will be excluded\n\nNo verification of the data has been performed. \n\nIt is unknown if the inferred timestamps based on the Unix Epoch timestamp require a timezone adjustment.\n\n# Requirements\n\nThis project uses Python3, Jupiter notebooks and Pandas.",
"_____no_output_____"
],
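Regarding the timestamp limitation noted above: the conversion used later in this notebook (`datetime.utcfromtimestamp`) interprets Moodle's epoch seconds as UTC. A minimal sketch of how such values could be shifted to a local time, assuming a hypothetical fixed UTC-5 server offset (the actual Moodle server timezone is unknown):

```python
from datetime import datetime, timezone, timedelta

epoch_seconds = 1558544400  # example Unix timestamp, similar to the values Moodle stores
as_utc = datetime.fromtimestamp(epoch_seconds, tz=timezone.utc)

# Hypothetical adjustment: shift to a fixed UTC-5 offset if the Moodle server used one.
as_local = as_utc.astimezone(timezone(timedelta(hours=-5)))

print(as_utc.strftime('%Y-%m-%d %H:%M:%S'), 'UTC')
print(as_local.strftime('%Y-%m-%d %H:%M:%S'), 'UTC-5')
```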
[
"# Set up",
"_____no_output_____"
]
],
[
[
"#import xml.etree.ElementTree as ET\n#lxml supports line numbers\nimport lxml.etree as ET\n\nfrom collections import OrderedDict\nimport pandas as pd\nimport numpy as np\nimport re\nimport os\nimport urllib\nimport datetime\nimport glob\nimport tarfile\nimport tempfile\nimport base64\n# geoip support\nimport bisect\nimport ipaddress\n# timestamp support\nfrom datetime import datetime\n# Extract text from html messages\nfrom bs4 import BeautifulSoup\nimport uuid\nimport traceback\n\nimport xlsxwriter\nexcelengine = 'xlsxwriter' \n# 'xlsxwriter' is currently recommended though it did not improve the write speed using generic pandas interface)\n# Todo Perhaps using workbook interface directly will be faster? (https://xlsxwriter.readthedocs.io/)\n# io.excel.xlsx.writer' (default, allegedly slow),\n# 'pyexcelerate' (untested)",
"_____no_output_____"
]
],
[
[
"# Load GeoIP data (optional)",
"_____no_output_____"
]
],
[
[
"def load_geoip_data(geoip_datadir):\n global geoip_all_colnames, geoip_geo_columns,geoipv4_df,geoipv4_ipvalues\n geoip_all_colnames = ['geoip_ipfrom'\n ,'geoip_ipto'\n ,'geoip_country_code'\n ,'geoip_country_name'\n ,'geoip_region_name'\n ,'geoip_city_name'\n ,'geoip_latitude'\n ,'geoip_longitude'\n ,'geoip_zip_code'\n ,'geoip_time_zone']\n\n geoip_geo_columns = geoip_all_colnames[2:]\n\n #geoip_datadir = 'geoip' #change to your local directory of where the downloaded zip has been unpacked\n geoipv4_csv = os.path.join(geoip_datadir,'IP2LOCATION-LITE-DB11.CSV')\n\n if os.path.exists(geoipv4_csv):\n print(\"Reading geoip csv\",geoipv4_csv)\n geoipv4_df = pd.read_csv(geoipv4_csv, names= geoip_all_colnames)\n geoipv4_ipvalues = geoipv4_df['geoip_ipfrom'].values\n # bisect searching assumes geoipv4_ipvalues are in increasing order \n else:\n geoipv4_df = None\n geoipv4_ipvalues = None\n print(\"No GeoIP csv data at \",geoipv4_csv)\n print(\"IP addresses will not be converted into geographic locations\")\n print(\"Free Geo-IP data can be downloaded from IP2LOCATION.com\")\n ",
"_____no_output_____"
]
],
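The `decode_geoip` helper defined further below relies on the IP2Location CSV rows being sorted by their `geoip_ipfrom` column, so a binary search (`bisect`) can find the row whose range contains a given address. The toy example below, with made-up ranges and labels, only illustrates that lookup idea; it is not part of the conversion pipeline.

```python
import bisect
import ipaddress

# Hypothetical sorted range starts and matching labels (the real data comes from the CSV).
range_starts = [int(ipaddress.IPv4Address(ip)) for ip in ['1.0.0.0', '8.0.0.0', '192.168.0.0']]
labels = ['region A', 'region B', 'private example']

ip_value = int(ipaddress.IPv4Address('8.8.8.8'))
index = bisect.bisect(range_starts, ip_value) - 1  # last range that starts at or before the address
print(labels[index])  # -> 'region B'
```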
[
[
"# Phase 1 - Extract XMLs from mbz file and create hundreds of Excel files",
"_____no_output_____"
]
],
[
[
"# Each file can generate a list of tables (dataframes)\n# Recursively process each element. \n# For each non-leaf element we build an ordered dictionary of key-value pairs and attach this to an array for the particular element name\n# <foo id='1' j='a'> becomes data['foo'] = [ {'id':'1', j:'a'} ]\n# The exception is for leaf elements (no-child elements) in the form e.g. <blah>123</blah>\n# We treat these equivalently to attributes on the surrounding (parent) xml element\n# <foo id='1'><blah>123</blah></foo> becomes data['foo'] = [ {'id':'1', 'blah':'123'} ]\n# and no data['blah'] is created\n\nAUTOMATIC_IMPLICIT_XML_COLUMNS = 4 #SOURCE_LINE,PARENT_SHEET,PARENT_INDEX\n\ndef process_element(data,dest_basedir, tablename_list, context, e):\n #deprecated has_no_children = len(e.getchildren()) == 0\n has_no_children = len(e) == 0\n has_no_attribs = len(e.attrib.keys()) == 0\n text = e.text\n \n has_text = text is not None\n if has_text:\n text = text.strip()\n has_text = len(text) > 0\n \n # Is this a leaf element e.g. <blah>123</blah>\n # For the datasets we care about, leaves should not be tables; we only want their value \n ignore_attribs_on_leaves = True\n \n # This could be refactored to return a dictionary, so multiple attributes can be attached to the parent\n if has_no_children and (has_no_attribs or ignore_attribs_on_leaves):\n if not has_no_attribs: \n print()\n print(\"Warning: Ignoring attributes on leaf element:\" + e.tag+ \":\"+ str(e.attrib))\n print()\n return [e.tag,e.text] # Early return, attach the value to the parent (using the tag as the attribute name)\n \n table_name = e.tag\n if table_name not in data:\n tablename_list.append(table_name)\n data[table_name] = []\n \n key_value_pairs = OrderedDict()\n \n key_value_pairs['SOURCE_LINE'] = e.sourceline\n key_value_pairs['PARENT_SHEET'] = context[0]\n key_value_pairs['PARENT_ROW_INDEX'] = context[1]\n key_value_pairs['PARENT_ID'] = context[2]\n \n #print(e.sourceline)\n # For correctness child_context needs to be after this line and before recursion\n data[table_name].append(key_value_pairs)\n \n myid = ''\n if 'id' in e.attrib:\n myid = e.attrib['id']\n \n child_context = [table_name, len(data[table_name])-1, myid] # Used above context[0] during recursive call\n \n for key in sorted(e.attrib.keys()):\n key_value_pairs[key] = e.attrib[key]\n \n for child in e.iterchildren():\n # Could refactor here to use dictionary to enable multiple key-values from a discarded leaf\n key,value = process_element(data,dest_basedir, tablename_list, child_context, child)\n if value:\n if key in key_value_pairs:\n key_value_pairs[key] += ',' + str(value)\n else:\n key_value_pairs[key] = str(value)\n\n \n if has_text:\n key_value_pairs['TEXT'] = e.text # If at least some non-whitespace text, then use original text\n \n return [e.tag,None]",
"_____no_output_____"
],
[
"def tablename_to_sheetname(elided_sheetnames, tablename):\n sheetname = tablename\n # Future: There may be characters that are invalid. If so, remove them here..\n\n #Excel sheetnames are limited to 31 characters.\n max_excel_sheetname_length = 31\n if len(sheetname) <= max_excel_sheetname_length:\n return sheetname\n \n sheetname = sheetname[0:5] + '...' + sheetname[-20:]\n elided_sheetnames.append(sheetname)\n if elided_sheetnames.count(sheetname)>1:\n sheetname += str( elided_sheetnames.count(sheetname) + 1)\n \n return sheetname\n\ndef decode_base64_to_latin1(encoded_val):\n try:\n return str(base64.b64decode(encoded_val) , 'latin-1')\n except Exception as e:\n traceback.print_exc()\n print(\"Not base64 latin1?\", e)\n return '??Not-latin1 text'\n\n\ndef decode_geoip(ip):\n try:\n ip = ip.strip()\n if not ip or geoipv4_df is None:\n return pd.Series(None, index=geoip_geo_columns)\n \n ipv4 = int(ipaddress.IPv4Address(ip))\n index = bisect.bisect(geoipv4_ipvalues, ipv4) - 1\n entry = geoipv4_df.iloc[index]\n assert entry.geoip_ipfrom <= ipv4 and entry.geoip_ipto >= ipv4\n return entry[2:] # [geoip_geo_columns] # Drop ip_from and ip_to\n except Exception as e:\n traceback.print_exc()\n print(\"Bad ip?\",ip, e)\n return pd.Series(None, index=geoip_geo_columns)\n\ndef decode_unixtimestamp_to_UTC(seconds):\n if seconds == '':\n return ''\n try:\n return datetime.utcfromtimestamp(int(seconds)).strftime('%Y-%m-%d %H:%M:%S')\n except Exception as e:\n traceback.print_exc()\n print(\"Bad unix timestamp?\", seconds , e)\n return ''\n\ndef decode_html_to_text(html):\n if html is np.nan:\n return ''\n try:\n soup = BeautifulSoup(html,\"lxml\")\n return soup.get_text()\n except Exception as e:\n traceback.print_exc()\n print('Bad html?',html, e)\n return '???'\n\ndef validate_anonid_data(anonid_df):\n #Expected columns\n for c in ['anonid','userid']:\n if c not in anonid_df.columns:\n raise ('anonid_csv_file\\'' + anonid_csv_file + '\\'should have a column named '+c)\n \n # No duplicate userid entries\n check_for_duplicates = anonid_df['userid'].duplicated(keep=False)\n\n if check_for_duplicates.any():\n print(anonid_df[check_for_duplicates])\n raise Exception('See above - fix the duplicates userid entries found in \\'' + anonid_csv_file +'\\'')\n \n anonid_df['userid'] = anonid_df['userid'].astype(str)\n \ndef userid_to_anonid(userid):\n global anonid_df, generate_missing_anonid\n if userid is np.nan or len(userid) == 0:\n return ''\n\n row = anonid_df[ anonid_df['userid'] == userid ]\n if len( row ) == 1:\n return row['anonid'].values[0]\n \n if generate_missing_anonid: \n result = uuid.uuid4().hex\n anonid_df = anonid_df.append({ 'userid':userid, 'anonid':result}, ignore_index=True)\n else:\n result = ''\n \n return result\n\ndef to_dataframe(table_name, table_data):\n df = pd.DataFrame(table_data)\n # Moodle dumps use $@NULL@$ for nulls\n df.replace('$@NULL@$','',inplace = True)\n \n # We found two base64 encoded columns in Moodle data-\n for col in df.columns & ['other','configdata']:\n df[ str(col) + '_base64'] = df[str(col)].map(decode_base64_to_latin1)\n \n for col in df.columns & ['timestart','timefinish','added','backup_date','original_course_startdate','original_course_enddate','timeadded','firstaccess','lastaccess','lastlogin','currentlogin','timecreated','timemodified','created','modified']:\n df[ str(col) + '_utc'] = df[str(col)].map(decode_unixtimestamp_to_UTC)\n \n # Extract text from html content\n for col in df.columns & ['message', 
'description','commenttext','intro','conclusion','summary','feedbacktext','content','feedback','info', 'questiontext' , 'answertext']:\n df[ str(col) + '_text'] = df[str(col)].map(decode_html_to_text)\n \n # Moodle data has 'ip' and 'lastip' that are ipv4 dotted\n # Currently only ipv4 is implemented. geoipv4_df is None if the cvs file was not found\n\n if geoipv4_df is None:\n for col in df.columns & ['ip','lastip']:\n df = df.join( df[str(col)].apply(decode_geoip) )\n\n for col in df.columns & ['userid','relateduserid' , 'realuserid']:\n col=str(col)\n if col == 'userid':\n out = 'anondid'\n else: \n out = col[0:-6] + '_anonid'\n df[ out ] = df[col].map(userid_to_anonid)\n if delete_userids:\n df.drop(columns=[col],inplace=True)\n \n if table_name == 'user':\n df['anonid'] = df['id'].map(userid_to_anonid)\n \n # Can add more MOODLE PROCESSING HERE :-)\n return df\n \n\ndef to_absolute_file_url(filepath):\n return urllib.parse.urljoin( 'file:', urllib.request.pathname2url(os.path.abspath(filepath)))\n\ndef write_excel_sheets(source_file, excelwriter, data, tablename_list): \n elided_sheetnames = []\n table_sheet_mapping = dict()\n table_sheet_mapping[''] = '' # Top level parents have empty PARENT_SHEET\n \n for tablename in tablename_list:\n sheetname = tablename_to_sheetname(elided_sheetnames, tablename)\n table_sheet_mapping[tablename] = sheetname\n \n for tablename in tablename_list:\n df = to_dataframe(tablename, data[tablename])\n #Convert table (=original xml tag) into real sheet name (not tag name)\n if 'PARENT_SHEET' in df.columns:\n df['PARENT_SHEET'] = df['PARENT_SHEET'].apply(lambda x: table_sheet_mapping[x])\n \n df.index.rename(tablename, inplace=True)\n df.insert(0, 'SOURCE_FILE',source_file ,allow_duplicates=True)\n df.insert(1, 'SOURCE_TAG', tablename, allow_duplicates=True)\n sheetname = table_sheet_mapping[tablename]\n \n if sheetname != tablename:\n print(\"Writing \"+ tablename + \" as sheet \"+ sheetname)\n else:\n print(\"Writing sheet \"+ sheetname)\n \n df.to_excel(excelwriter, sheet_name=sheetname, index_label=tablename)\n return table_sheet_mapping",
"_____no_output_____"
],
[
"def re_adopt_child_table(data, parent_tablename, parent_table, child_tablename):\n child_table = data[child_tablename]\n for row in child_table:\n if 'PARENT_SHEET' not in row.keys():\n continue\n if row['PARENT_SHEET'] == parent_tablename:\n idx = row['PARENT_ROW_INDEX']\n # Time to follow the pointer\n parent_row = parent_table[idx]\n #row['PARENT_TAG'] = parent_row['PARENT_TAG']\n row['PARENT_ROW_INDEX'] = parent_row['PARENT_ROW_INDEX']\n row['PARENT_ID'] = parent_row['PARENT_ID']\n row['PARENT_SHEET'] = parent_row['PARENT_SHEET']\n \n \ndef discard_empty_tables(data,tablename_list):\n nonempty_tables = []\n for tablename in tablename_list:\n table = data[tablename]\n # print(tablename, len(table),'rows')\n if len(table) == 0:\n # print(\"Skipping empty table\",tablename)\n continue\n \n include = False\n for row in table:\n if len(row) > AUTOMATIC_IMPLICIT_XML_COLUMNS: # Found more than just PARENT_TAG,... columns\n include = True\n break\n \n if include:\n # print(\"Including\",tablename)\n nonempty_tables.append(tablename)\n else:\n # print(\"Skipping unnecessary table\",tablename)\n # Will need to fixup child items that still think this is their container\n # More efficient if we kept a mapping of child tables, rather than iterate over tables\n for childname in tablename_list:\n re_adopt_child_table(data, tablename, table, childname)\n pass\n\n return nonempty_tables",
"_____no_output_____"
],
[
"def process_one_file(dest_basedir, relative_sub_dir, xml_filename, dry_run):\n print('process_one_file(\\''+dest_basedir+'\\',\\''+relative_sub_dir+'\\',\\''+xml_filename+'\\')')\n #print(\"Reading XML \" + xml_filename)\n #Original parser \n xmlroot = ET.parse(xml_filename).getroot()\n # Use lxml\n #xmlroot = etree.parse(xml_filename)\n \n #print(\"Processing...\")\n data = dict()\n tablename_list = []\n \n initial_context = ['','',''] # Todo : Consider missing integer index e.g. ['',None,'']\n process_element(data, dest_basedir ,tablename_list, initial_context, xmlroot)\n \n nonempty_tables = discard_empty_tables(data,tablename_list)\n \n if len(nonempty_tables) == 0:\n #print(\"no tables left to write\")\n return\n \n # We use underscore to collate source subdirectories\n basename = os.path.basename(xml_filename).replace('.xml','').replace('_','')\n \n use_sub_dirs = False\n if use_sub_dirs:\n output_dir = os.path.join(dest_basedir, relative_sub_dir)\n\n if not os.path.exists(output_dir): \n os.mkdirs(output_dir)\n\n output_filename = os.path.join(output_dir, basename + '.xlsx')\n else:\n sub = relative_sub_dir.replace(os.sep,'_').replace('.','')\n if (len(sub) > 0) and sub[-1] != '_':\n sub = sub + '_'\n output_filename = os.path.join(dest_basedir, sub + basename + '.xlsx')\n \n if dry_run: # For debugging\n return\n \n print(\"** Writing \", output_filename)\n\n if os.path.exists(output_filename):\n os.remove(output_filename)\n \n excelwriter = pd.ExcelWriter(output_filename, engine= excelengine)\n \n # absolute path is useful to open original files on local machine\n if(False):\n source_file = to_absolute_file_url(xml_filename)\n else:\n source_file = os.path.normpath(xml_filename)\n \n try:\n write_excel_sheets(source_file, excelwriter, data,nonempty_tables)\n excelwriter.close()\n except Exception as ex:\n traceback.print_exc()\n print(type(ex))\n print(ex)\n pass\n finally:\n \n excelwriter = None\n print()\n\ndef process_directory(xml_basedir, out_basedir, relative_sub_dir,toplevel_xml_only, dry_run):\n xml_dir = os.path.join(xml_basedir, relative_sub_dir)\n file_list = sorted(os.listdir(xml_dir))\n \n for filename in file_list:\n if filename.endswith('.xml'):\n print(\"Processing\", filename)\n process_one_file(out_basedir, relative_sub_dir, os.path.join(xml_dir,filename), dry_run)\n \n if toplevel_xml_only:\n return # No recursion into subdirs(e.g. for testing)\n \n # Recurse\n for filename in file_list:\n candidate_sub_dir = os.path.join(relative_sub_dir, filename)\n if os.path.isdir( os.path.join(xml_basedir, candidate_sub_dir)) : \n process_directory(xml_basedir, out_basedir, candidate_sub_dir,toplevel_xml_only, dry_run)",
"_____no_output_____"
],
[
"def extract_xml_files_in_tar(tar_file, extract_dir):\n os.makedirs(extract_dir)\n extract_count = 0\n for tarinfo in tar_file:\n if os.path.splitext(tarinfo.name)[1] == \".xml\":\n #print(extract_dir, tarinfo.name)\n tar_file.extract( tarinfo, path = extract_dir)\n extract_count = extract_count + 1\n return extract_count\n \ndef archive_file_to_output_dir(archive_file):\n return os.path.splitext(archive_file)[0] + '-out'\n\ndef archive_file_to_xml_dir(archive_file):\n return os.path.splitext(archive_file)[0] + '-xml'\n \ndef lazy_extract_mbz(archive_source_file,expanded_archive_directory,skip_expanding_if_xml_files_found):\n has_xml_files = len( glob.glob( os.path.join(expanded_archive_directory,'*.xml') ) ) > 0\n \n if has_xml_files and skip_expanding_if_xml_files_found:\n print(\"*** Reusing existing xml files in\", expanded_archive_directory)\n return\n \n if os.path.isdir(expanded_archive_directory):\n print(\"*** Deleting existing files in\", expanded_archive_directory)\n raise \"Comment out this line if it is going to delete the correct directory\"\n shutil.rmtree(expanded_archive_directory)\n \n with tarfile.open(archive_source_file, mode='r|*') as tf:\n print(\"*** Expanding\",archive_source_file, \"to\", expanded_archive_directory)\n extract_count = extract_xml_files_in_tar(tf, expanded_archive_directory)\n print('***',extract_count,' xml files extracted')\n \ndef process_xml_files(expanded_archive_directory,out_basedir,toplevel_xml_only,dry_run, anonid_output_csv):\n global anonid_df\n \n print(\"*** Source xml directory :\", expanded_archive_directory)\n print(\"*** Output directory:\", out_basedir)\n\n if not os.path.isdir(out_basedir): \n os.makedirs(out_basedir)\n\n process_directory(expanded_archive_directory, out_basedir,'.',toplevel_xml_only,dry_run)\n \n if anonid_output_csv:\n filepath = os.path.join(out_basedir,anonid_output_csv)\n print(\"Writing \",filepath,len(anonid_df.index),'rows')\n anonid_df.to_csv( filepath, index = None, header=True)\n \n print(\"*** Finished processing XML\")",
"_____no_output_____"
]
],
[
[
"# Phase 2 - Aggregate Excel documents",
"_____no_output_____"
]
],
[
[
"def list_xlsx_files_in_dir(xlsx_dir):\n xlsx_files = sorted(glob.glob(os.path.join(xlsx_dir,'*.xlsx')))\n xlsx_files = [file for file in xlsx_files if os.path.basename(file)[0] != '~' ]\n return xlsx_files\n\n# Phase 2 - Aggregate multiple xlsx that are split across multiple course sections into a single Excel file\ndef create_aggregate_sections_map(xlsx_dir):\n xlsx_files = list_xlsx_files_in_dir(xlsx_dir)\n \n sections_map = dict()\n\n for source_file in xlsx_files:\n path = source_file.split(os.path.sep) # TODO os.path.sep\n nameparts = path[-1].split('_')\n target = nameparts[:]\n subnumber = None\n if len(nameparts)>3 and nameparts[-3].isdigit(): subnumber = -3 # probably unnecessary as _ are removed from basename\n if len(nameparts)>2 and nameparts[-2].isdigit(): subnumber = -2\n if not subnumber: continue\n\n target[subnumber] = 'ALLSECTIONS'\n\n key = (os.path.sep.join(path[:-1])) + os.path.sep+ ( '_'.join(target))\n if key not in sections_map.keys():\n sections_map[key] = []\n sections_map[key].append(source_file)\n return sections_map\n\n# Phase 3 - Aggregate over common objects\ndef create_aggregate_common_objects_map(xlsx_dir):\n xlsx_files = list_xlsx_files_in_dir(xlsx_dir)\n \n combined_map = dict()\n # path/_activities_workshop_ALLSECTIONS_logstores.xlsx will map to key=logstores.xlsx\n for source_file in xlsx_files:\n path = source_file.split(os.path.sep) # TODO os.path.sep\n nameparts = path[-1].split('_')\n target = nameparts[-1]\n\n if 'ALL_' == path[-1][:4]:\n continue # Guard against restarts\n\n key = (os.path.sep.join(path[:-1])) + os.path.sep+ ('ALL_' + target)\n if key not in combined_map.keys():\n combined_map[key] = []\n combined_map[key].append(source_file)\n\n return combined_map ",
"_____no_output_____"
],
[
"def rebase_row(row,rebase_map):\n if isinstance(row['PARENT_SHEET'] , str): \n return str(int(row['PARENT_ROW_INDEX']) + int(rebase_map[ row['XLSX_SOURCEFILE'] + '#' + row['PARENT_SHEET'] ]))\n else:\n return ''\n\n\ndef check_no_open_Excel_documents_in_Excel(dir):\n # Excel creates temporary backup files that start with tilde when an Excel file is open in Excel\n if not os.path.isdir(dir):\n return\n open_files = glob.glob(os.path.join(dir,'~*.xlsx'))\n if len(open_files):\n print( 'Please close ' + '\\n'.join(open_files) + '\\nin directory\\n'+dir)\n raise IOError('Excel files '+('\\n'.join(open_files))+' are currently open in Excel')\n \ndef aggregate_multiple_excel_files(source_filenames):\n allsheets = OrderedDict()\n rebase_map = {}\n # !! Poor sort - it assumes the integers are the same char length. Todo improve so that filename_5_ < filename_10_ \n for filename in sorted(source_filenames):\n print('Reading and aggregating sheets in' , filename)\n xl = pd.ExcelFile(filename)\n for sheet in xl.sheet_names:\n \n df = xl.parse(sheet)\n df['XLSX_SOURCEFILE'] = filename\n if sheet not in allsheets.keys():\n allsheets[sheet] = df\n rebase_map[filename+'#'+sheet] = 0\n else:\n row_offset = len(allsheets[sheet]) \n rebase_map[filename+'#'+sheet] = row_offset # We will need this to rebase parent values\n df[ df.columns[0] ] += row_offset\n allsheets[sheet] = allsheets[sheet].append(df, ignore_index =True, sort = False)\n xl.close()\n \n # print('rebase_map',rebase_map)\n # The row index of the parent no longer starts at zero\n print('Rebasing parent index entries in all sheets') \n for sheet in xl.sheet_names:\n df = allsheets[sheet] \n df['PARENT_ROW_INDEX'] = df.apply( lambda row: rebase_row( row,rebase_map), axis = 1)\n df.drop('XLSX_SOURCEFILE', axis = 1, inplace = True)\n return allsheets\n\ndef write_aggregated_model(output_filename, allsheets, dry_run):\n print(\"Writing\",output_filename)\n if dry_run:\n print(\"Dry run. Skipping \", allsheets.keys())\n return\n \n excelwriter = pd.ExcelWriter(output_filename, engine = excelengine)\n try:\n print(\"Writing Sheets \", allsheets.keys())\n for sheetname,df in allsheets.items():\n df.to_excel(excelwriter, sheet_name = sheetname, index = 'INDEX')\n excelwriter.close()\n \n except Exception as ex:\n print(type(ex))\n print(ex)\n pass\n \n finally:\n excelwriter.close()\n print('Writing finished\\n')\n\ndef move_old_files(xlsx_dir, filemap, subdirname,dry_run):\n xlsxpartsdir = os.path.join(xlsx_dir,subdirname)\n if dry_run:\n print('Dry run. 
Skipping move_old_files', filemap.items(),' to ', subdirname)\n return\n \n if not os.path.isdir(xlsxpartsdir): \n os.mkdir(xlsxpartsdir)\n\n for targetfile,sources in filemap.items():\n for file in sources:\n\n dest=os.path.join(xlsxpartsdir, os.path.basename(file))\n print(dest)\n os.rename(file, dest)\n\ndef aggreate_over_sections(xlsx_dir,dry_run):\n sections_map= create_aggregate_sections_map(xlsx_dir)\n\n for targetfile,sources in sections_map.items():\n allsheets = aggregate_multiple_excel_files(sources)\n write_aggregated_model(targetfile, allsheets, dry_run)\n\n move_old_files(xlsx_dir, sections_map,'_EACH_SECTION_', dry_run)\n\ndef aggreate_over_common_objects(xlsx_dir,dry_run):\n combined_map = create_aggregate_common_objects_map(xlsx_dir)\n \n for targetfile,sources in combined_map.items():\n allsheets = aggregate_multiple_excel_files(sources)\n write_aggregated_model(targetfile, allsheets, dry_run)\n \n move_old_files(xlsx_dir, combined_map, '_ALL_SECTIONS_', dry_run)\n\ndef create_column_metalist(xlsx_dir,dry_run):\n xlsx_files = list_xlsx_files_in_dir(xlsx_dir)\n \n metalist = []\n\n for filename in xlsx_files:\n print(filename)\n xl = pd.ExcelFile(filename)\n filename_local = os.path.basename(filename)\n\n for sheet in xl.sheet_names:\n\n df = xl.parse(sheet,nrows=1)\n\n for column_name in df.columns:\n metalist.append([filename_local,sheet,column_name])\n xl.close()\n\n meta_df = pd.DataFrame(metalist, columns=['file','sheet','column'])\n\n meta_filename = os.path.join(xlsx_dir,'__All_COLUMNS.csv')\n if dry_run:\n print('Dry run. Skipping',meta_filename)\n else:\n meta_df.to_csv(meta_filename,sep='\\t',index=False)",
"_____no_output_____"
]
],
[
[
"# Run",
"_____no_output_____"
]
],
[
[
"# Configuration / settings here\narchive_source_file = None\n\nexpanded_archive_directory = None \n\nskip_expanding_if_xml_files_found = True\n\noutput_directory = None\n\ngenerate_missing_anonid = True\n\ngeoip_datadir = None\n\nanonid_csv_file = None\n# A simple csv file with header 'userid','anonid'\n\nanonid_output_filename='userids_anonids.csv' # None if mapping should not be written\n\ndelete_userids = False # User table will still have an 'id' column \n#relateduserids,realuserid andu userid columns in other tables are dropped\n\n# Internal testing options\ntoplevel_xml_only = False # Don't process subdirectories. Occasionally useful for internal testing\ndry_run = False # Don't write Excel files. Occasionally useful for internal testing",
"_____no_output_____"
],
[
"# Override the above here with the path to your mbz file (or expanded contents)\narchive_source_file = os.path.join('..','example.mbz')\n# ... or use expanded_archive_directory to point to an mbz file that has already been expanded into XML files\n\nanonid_csv_file = None # os.path.join('..', 'example-userid-to-anonid.csv') \ngenerate_missing_anonid = True\ndelete_userids = True\n\n\ngeoip_datadir= './geoip'\n\n# Some typical numbers:\n# A 400 student 15 week course with 16 sections\n# Created a 4GB mbz which expanded to 367 MB of xml. (the non-xml files were not extracted)\n# 30 total minutes processing time: 15 minutes to process xml, \n# 6 minutes for each aggegration step, 2 minutes for the column summary\n# Final output: 60MB of 'ALL_' Excel 29 files (largest: ALL_quiz.xlsx 35MB, ALL_logstores 10MB, ALL_forum 5MB)\n# The initial section output (moved to _EACH_SECTION_/) has 334 xlsx files, \n# which is futher reduced (see _ALL_SECTIONS_ ) 67 files.\n",
"_____no_output_____"
],
[
"if not archive_source_file and not expanded_archive_directory:\n raise ValueError('Nothing to do: No mbz archive file or archive directory (with .xml files) specified')\n\nif archive_source_file and not os.path.isfile(archive_source_file) :\n raise ValueError('archive_source_file (' + os.path.abspath(archive_source_file) + \") does not refer to an existing archive\")\n\nif not expanded_archive_directory:\n expanded_archive_directory = archive_file_to_xml_dir(archive_source_file)\n\nif not output_directory:\n if archive_source_file:\n output_directory = archive_file_to_output_dir(archive_source_file)\n else:\n raise ValueError('Please specify output_directory')\n \nif anonid_csv_file:\n print ('Using ' + anonid_csv_file + ' mapping')\n anonid_df = pd.read_csv(anonid_csv_file)\n \n validate_anonid_data(anonid_df)\nelse:\n anonid_df = pd.DataFrame([{'userid':'-1','anonid':'example1234'}])\n\n\nstart_time = datetime.now()\nprint(start_time)\n\nif(geoip_datadir and 'geoipv4_df' not in globals()):\n load_geoip_data(geoip_datadir)\n \nif archive_source_file:\n lazy_extract_mbz(archive_source_file,expanded_archive_directory,skip_expanding_if_xml_files_found)\n\ncheck_no_open_Excel_documents_in_Excel(output_directory)\n# Now the actual processing can begin\n\nprocess_xml_files(expanded_archive_directory,output_directory, toplevel_xml_only, dry_run, anonid_output_filename)\n# At this point we have 100s of Excel documents (one per xml file), each with several sheets (~ one per xml tag)!\n# We can aggregate over all of the course sections\naggreate_over_sections(output_directory, dry_run)\n\n# Workshops, assignments etc have a similar structure, so we also aggregate over similar top-level objects\naggreate_over_common_objects(output_directory, dry_run)\ncreate_column_metalist(output_directory, dry_run)\n\nend_time = datetime.now()\nprint(end_time)\nprint(end_time-start_time)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a234d4b579f642ddb63b36de2f2eaa556f042f1
| 44,309 |
ipynb
|
Jupyter Notebook
|
Clustering/E_DBScan.ipynb
|
ArriagaAmin/Maraton-Machine-Learning
|
a5fd791c4e45ee904c8d13bfae9b8f80bf04e65c
|
[
"CNRI-Python"
] | null | null | null |
Clustering/E_DBScan.ipynb
|
ArriagaAmin/Maraton-Machine-Learning
|
a5fd791c4e45ee904c8d13bfae9b8f80bf04e65c
|
[
"CNRI-Python"
] | null | null | null |
Clustering/E_DBScan.ipynb
|
ArriagaAmin/Maraton-Machine-Learning
|
a5fd791c4e45ee904c8d13bfae9b8f80bf04e65c
|
[
"CNRI-Python"
] | null | null | null | 122.400552 | 16,044 | 0.839604 |
[
[
[
"# **DBSCAN**",
"_____no_output_____"
],
[
"## **Implementacion**",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom math import e, inf\nfrom random import randint, uniform\nfrom sklearn.datasets import make_circles",
"_____no_output_____"
]
],
[
[
"### KNN",
"_____no_output_____"
]
],
[
[
"class Node:\n def __init__(self, parent, x, area):\n self.parent = parent\n self.x = x\n self.childs = [None, None] # [left_child, right_child]\n # El area es un vector 2*len(x)-dimensional y representa un hipercubo, donde cada\n # par de elementos representan los valores minimos y maximos de una determinada coordenada.\n # Por ejemplo, si len(x) == 2, entonces area = [a, b, c, d] representa el cuadrado:\n # a <= x[0] <= b; c <= x[1] <= d\n self.area = area\n\nclass KNN:\n def __init__(self, X):\n self.X = X\n \n def d(self, x, y):\n \"\"\" Distancia euclidiana entre dos vectores. \"\"\"\n return np.linalg.norm(x-y)\n \n def build_kd_tree(self, X=None, parent=None, right=True, d=0, root=True, area=None):\n \"\"\" Construimos un KD-Tree.\n INPUT:\n X: Conjunto de datos del nodo actual.\n parent: Nodo padre del nodo actual.\n right: Indica si el nodo actual es el hijo derecho.\n d: Atributo que se usara para realizar la division binaria de los datos.\n root: Indica si el nodo actual es la raiz de todo el arbol.\n area: Area que representa el nodo actual.\n \"\"\"\n # Si el nodo es la raiz, entonces tomamos todos los datos y el area es todo el espacio.\n if root: \n X = self.X\n area = [-inf,inf]*len(X[0])\n \n # Si no hay elementos, no se crea ningun nodo\n if len(X) == 0: return\n # Si solo hay un elemento, creamos un nodo con ese unico elemento.\n elif len(X) == 1:\n node = Node(parent, X[0], area)\n # Verificamos que el nodo no sea la raiz, lo que significaria que solo hay un dato.\n if not root: parent.childs[int(right)] = node\n # Si hay mas de un dato.\n else:\n # Ordenamos los elementos segun el d-esimo atributo.\n X_c = X.copy()\n X_c.sort(key = lambda x: x[d])\n # Obtenemos la mediana.\n m = int(len(X_c)/2)\n x_m = X_c[m]\n # Creamos un nuevo nodo donde se almacenara la mediana.\n node = Node(parent, x_m, area)\n if not root: parent.childs[int(right)] = node\n else: self.kd_tree = node\n # Llamamos recursivamente la funcion para los hijos izquierdo y derecho.\n # Derecho\n X_r = X_c[m+1:].copy()\n area_r = area.copy()\n area_r[2*d] = x_m[d]\n # Izquierdo\n X_l = X_c[:m].copy()\n area_l = area.copy()\n area_l[2*d+1] = x_m[d]\n # Llamada recursiva\n self.build_kd_tree(X_l, node, False, (d+1)%len(x_m), False, area_l)\n self.build_kd_tree(X_r, node, True, (d+1)%len(x_m), False, area_r)\n \n def radius_neighbors(self, x, r):\n # Aqui almacenamos los vecinos\n self.neighbors = []\n\n self.r_neighbors(x, self.kd_tree, 0, r)\n\n neighbors = self.neighbors\n # Nos aseguramos de eliminar estos atributos.\n self.neighbors = None\n return neighbors\n\n def r_neighbors(self, x, node, d, r):\n # Verificamos si el punto se encuentra fuera del hipercubo definido por el nodo actual.\n if not all(node.area[2*i] <= x[i] <= node.area[2*i+1] for i in range(len(x))):\n # Por cada dimension, verificamos si el punto se encuentra dentro de los lados\n # correspondientes al hipercubo\n p = []\n for i in range(len(x)):\n # Si no es asi, almacenamos la coordenada del punto que se encuentra fuera del\n # lado del hipercubo.\n if node.area[2*i] > x[i]: p.append(node.area[2*i])\n elif x[i] > node.area[2*i+1]: p.append(node.area[2*i+1])\n else: p.append(x[i])\n \n \n # Calculamos la distancia entre las coordenadas del punto fuera del hipercubo y\n # la interseccion de los lados correspondientes. Si es mayor al radio, no necesitamos\n # verificar mas esta rama.\n dist = self.d(np.array(p), x)\n if dist > r: return\n \n # Calculamos la distancia entre el punto y la raiz actual. 
Verificamos si es menor\n # que el raio\n dist = self.d(x, node.x)\n if dist < r: self.neighbors.append(node.x)\n \n # Llamamos primero a la subdivision del arbol tal que el punto cumpla la condicion,\n # con la esperanza de que al llamar el segundo hijo, este pueda ser descartado facilmente.\n # Si no cumple ninguna, se recorre primero el hijo izquierdo (si no es nulo) y luego el derecho.\n if x[d] <= node.area[2*d+1] and node.childs[0] != None: \n self.r_neighbors(x, node.childs[0], (d+1)%len(x), r)\n if node.childs[1] != None:\n self.r_neighbors(x, node.childs[1], (d+1)%len(x), r)\n \n elif x[d] >= node.area[2*d] and node.childs[1] != None: \n self.r_neighbors(x, node.childs[1], (d+1)%len(x), r)\n if node.childs[0] != None:\n self.r_neighbors(x, node.childs[0], (d+1)%len(x), r)\n \n elif node.childs[0] != None: \n self.r_neighbors(x, node.childs[0], (d+1)%len(x), r)\n if node.childs[1] != None:\n self.r_neighbors(x, node.childs[1], (d+1)%len(x), r)\n \n elif node.childs[1] != None:\n self.r_neighbors(x, node.childs[1], (d+1)%len(x), r)",
"_____no_output_____"
]
],
[
[
"### **DBScan**",
"_____no_output_____"
]
],
[
[
"class DBS:\n def __init__(self, X):\n self.X = X\n # Usaremos el KNN para obtener los vecinos\n self.knn = KNN(X)\n self.knn.build_kd_tree()\n\n def d(self, x, y):\n \"\"\" Distancia Euclidiana. \"\"\"\n return np.linalg.norm(x-y)\n\n def query(self, x, dist):\n \"\"\" Calculamos los vecinos de un elemento dada una distancia minima. \"\"\"\n return self.knn.radius_neighbors(x, dist)\n\n def clustering(self, dist, min_x):\n \"\"\" Agrupamos los datos usando el metodo de DBScan. \"\"\"\n # Contador de clusters.\n C = -1\n # Diccionario label[x] -> C tal que x in C\n labels = {tuple(x) : None for x in self.X}\n \n for x in self.X:\n # Si el elemento ya fue etiquetado, pasamos al siguiente.\n if labels[tuple(x)] != None: continue\n neighbors = self.query(x, dist)\n \n # Si el elemento no tiene suficientes vecinos, es un dato atipico.\n if len(neighbors) < min_x:\n labels[tuple(x)] = -1\n continue\n \n # Pasamos a un nuevo cluster, etiquetamos el elemento con el cluster actual.\n C += 1\n labels[tuple(x)] = C\n # Sacamos al elemento de sus propios vecinos y creamos el conjunto semilla.\n for i in range(len(neighbors)):\n if np.equal(neighbors[i], x).all(): \n neighbors.pop(i)\n break\n seed_set = neighbors.copy()\n \n for s in seed_set:\n # Si el elemento fue consierado atipico, ahora sera etiquetado con el\n # cluster actual.\n if labels[tuple(s)] == -1: labels[tuple(s)] = C\n # Si ya tiene etiqueta, pasamos al siguiente elemento.\n if labels[tuple(s)] != None: continue\n \n # Etiquetamos al elemento con el cluster actual.\n labels[tuple(s)] = C\n # Calculamos los vecinos del elemento.\n neighbors = self.query(s, dist)\n # Si el elemento tiene suficientes vecinos.\n if len(neighbors) >= min_x:\n # Unimos los conjuntos \"neighbors\" y \"seed_set\"\n for n in neighbors: \n if not any(np.equal(n, ss).all() for ss in seed_set): seed_set.append(n)\n \n return labels\n ",
"_____no_output_____"
]
],
[
[
"## **Lectura de Datos**",
"_____no_output_____"
]
],
[
[
"nb_samples = 150\n\nX0 = np.expand_dims(np.linspace(-2 * np.pi, 2 * np.pi, nb_samples), axis=1)\nY0 = -5 - np.cos(2.0 * X0) + np.random.uniform(0.0, 2.0, size=(nb_samples, 1))\n \nX1 = np.expand_dims(np.linspace(-2 * np.pi, 2 * np.pi, nb_samples), axis=1)\nY1 = 3.5 - np.cos(2.0 * X0) + np.random.uniform(0.0, 2.0, size=(nb_samples, 1))\n \ndata_0 = np.concatenate([X0, Y0], axis=1)\ndata_1 = np.concatenate([X1, Y1], axis=1)\ndata = np.concatenate([data_0, data_1], axis=0)\ndata = [d for d in data]\nfor c in make_circles(30)[0]: data.append(c)\n\nplt.plot([d[0] for d in data], [d[1] for d in data], 'o')\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"## **Resultados**",
"_____no_output_____"
]
],
[
[
"dbs = DBS(data)\nlabels = dbs.clustering(1.5, 5)",
"_____no_output_____"
],
[
"clusters = [[] for _ in range(max(labels.values())+2)]\nfor x in labels:\n clusters[labels[tuple(x)]].append(x)\nfor c in clusters:\n plt.plot([x[0] for x in c], [x[1] for x in c], 'o')\nplt.show()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a23565f83a8ac7ba0ba0b9723010658d752773e
| 10,457 |
ipynb
|
Jupyter Notebook
|
Introduction to Python/Errors & Exception Handling, try except finally.ipynb
|
Xelepam/Python_Scripts
|
72779c5b1fa4188b7aaf8def39d5d1902002010b
|
[
"MIT"
] | null | null | null |
Introduction to Python/Errors & Exception Handling, try except finally.ipynb
|
Xelepam/Python_Scripts
|
72779c5b1fa4188b7aaf8def39d5d1902002010b
|
[
"MIT"
] | null | null | null |
Introduction to Python/Errors & Exception Handling, try except finally.ipynb
|
Xelepam/Python_Scripts
|
72779c5b1fa4188b7aaf8def39d5d1902002010b
|
[
"MIT"
] | null | null | null | 33.841424 | 1,045 | 0.544898 |
[
[
[
"2 + 's'",
"_____no_output_____"
],
[
"try:\n 2 + 's'\nexcept TypeError:\n print('There was a type error!')\nfinally:\n print('Finally this was printed.')",
"There was a type error!\nFinally this was printed.\n"
],
[
"try:\n f = open('testfile343', 'w')\n f.write('Test write this')\nexcept: \n print('Error in writing to file.')\nelse:\n print('File write was a success.')",
"File write was a success.\n"
],
[
"try:\n f = open('testfile343', 'r')\n f.write('Test write this')\nexcept: \n print('Error in writing to file.')\nelse:\n print('File write was a success.')",
"Error in writing to file.\n"
],
[
"try:\n f = open('testfile343', 'r')\n f.write('Test write this')\nfinally:\n print('Always execute finally code block.')",
"Always execute finally code block.\n"
],
[
"try:\n f = open('testfile343', 'r')\n f.write('Test write this')\nexcept:\n print('There was an error.')\nfinally:\n print('finally code block always executed.')",
"There was an error.\nfinally code block always executed.\n"
],
[
"def askint():\n \n try:\n val = int(input('Please enter an interger: '))\n except:\n print('Looks like you did not enter and interger.')\n val = int(input('Please try again. Please enter an interger: '))\n finally:\n print('finally block executed.')\n print(val)",
"_____no_output_____"
],
[
"askint()",
"Please enter an interger: g\nLooks like you did not enter and interger.\nPlease try again. Please enter an interger: f\nfinally block executed.\n"
],
[
"askint()",
"Please enter an interger: 3\nfinally block executed.\n3\n"
],
[
"def askforint():\n \n while True:\n try:\n val = int(input('Please enter an interger: '))\n except:\n print('Looks like you did not enter and interger.')\n continue\n else:\n print('Thank you for entering an interger.')\n break\n finally:\n print('finally block executed.')\n \n print(val)",
"_____no_output_____"
],
[
"askforint()",
"Please enter an interger: r\nLooks like you did not enter and interger.\nfinally block executed.\nPlease enter an interger: r\nLooks like you did not enter and interger.\nfinally block executed.\nPlease enter an interger: r\nLooks like you did not enter and interger.\nfinally block executed.\nPlease enter an interger: 5\nThank you for entering an interger.\nfinally block executed.\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a2356b826da343bb4ba10f40cd356230b7adf8c
| 2,692 |
ipynb
|
Jupyter Notebook
|
get_HFR.ipynb
|
teresaupdyke/ohw21-proj-coastal-radar
|
d5f81f9fa05de7af2050c99b30c149696258ce22
|
[
"MIT"
] | 1 |
2021-08-09T03:14:19.000Z
|
2021-08-09T03:14:19.000Z
|
get_HFR.ipynb
|
teresaupdyke/ohw21-proj-coastal-radar
|
d5f81f9fa05de7af2050c99b30c149696258ce22
|
[
"MIT"
] | 15 |
2021-08-04T13:26:39.000Z
|
2021-08-06T17:55:59.000Z
|
get_HFR.ipynb
|
teresaupdyke/ohw21-proj-coastal-radar
|
d5f81f9fa05de7af2050c99b30c149696258ce22
|
[
"MIT"
] | 3 |
2021-08-04T12:59:56.000Z
|
2021-08-04T15:01:29.000Z
| 24.925926 | 155 | 0.578009 |
[
[
[
"from netCDF4 import Dataset\nimport netCDF4 as netcdf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mticker\nimport matplotlib as mpl\n\n#mapping\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nfrom cartopy.io import shapereader\nfrom cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER\nfrom cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter\n\nimport xarray as xr\nimport xarray.ufuncs as xu\n\n",
"_____no_output_____"
],
[
"hfr_url = \"https://hfrnet-tds.ucsd.edu/thredds/dodsC/HFR/USEGC/6km/hourly/RTV/HFRADAR_US_East_and_Gulf_Coast_6km_Resolution_Hourly_RTV_best.ncd\"\ndata = xr.open_dataset(hfr_url)",
"_____no_output_____"
],
[
"min_lon = -76.\nmin_lat = 36.5\nmax_lon = -73.\nmax_lat = 39.5\n\nmask_lon = (data.lon >= min_lon) & (data.lon <= max_lon)\nmask_lat = (data.lat >= min_lat) & (data.lat <= max_lat)\n",
"_____no_output_____"
],
[
"time subset_ds = data.sel(time=slice(\"2020-05-01T00:00:00\", \"2020-05-31T23:00:00\")).where(mask_lon & mask_lat, drop=True)",
"CPU times: user 464 ms, sys: 149 ms, total: 613 ms\nWall time: 20 s\n"
],
[
"subset_ds.to_netcdf(path='./data/hf_radar_05_2020.nc')",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a23910154933f5f0db63dca0243c51e8b2e862a
| 493,594 |
ipynb
|
Jupyter Notebook
|
ADM-HOMEWORK2.ipynb
|
giuliacasale/ADM-HW2
|
57022bf2c3b0d3ea604e507fd44fca878f462c0a
|
[
"MIT"
] | null | null | null |
ADM-HOMEWORK2.ipynb
|
giuliacasale/ADM-HW2
|
57022bf2c3b0d3ea604e507fd44fca878f462c0a
|
[
"MIT"
] | null | null | null |
ADM-HOMEWORK2.ipynb
|
giuliacasale/ADM-HW2
|
57022bf2c3b0d3ea604e507fd44fca878f462c0a
|
[
"MIT"
] | 1 |
2021-10-30T10:19:00.000Z
|
2021-10-30T10:19:00.000Z
| 192.960907 | 40,580 | 0.898473 |
[
[
[
"# HOMEWORK 2 - ADM",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport methods\nimport datetime",
"_____no_output_____"
]
],
[
[
"## READ THE DATA",
"_____no_output_____"
]
],
[
[
"df_names = [\"./datasets/2019-Nov.csv\", \"./datasets/2019-Oct.csv\"]",
"_____no_output_____"
]
],
[
[
"## UNDERSTAND THE DATA ",
"_____no_output_____"
],
[
"The data that we handle for this homework come from an online store. We are going to analyze two months: October and November. For each month we have different features that we have described below: ",
"_____no_output_____"
]
],
[
[
"methods.describe_df(df_names)",
"Info for October\n<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 42448764 entries, 0 to 42448763\nData columns (total 9 columns):\n # Column Dtype \n--- ------ ----- \n 0 event_time object \n 1 event_type object \n 2 product_id int64 \n 3 category_id int64 \n 4 category_code object \n 5 brand object \n 6 price float64\n 7 user_id int64 \n 8 user_session object \ndtypes: float64(1), int64(3), object(5)\nmemory usage: 2.8+ GB\nNone \n\nThe total number of null values in each column is:\nevent_time 0\nevent_type 0\nproduct_id 0\ncategory_id 0\ncategory_code 13515609\nbrand 6117080\nprice 0\nuser_id 0\nuser_session 2\ndtype: int64 \n\nInfo for November\n<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 67501979 entries, 0 to 67501978\nData columns (total 9 columns):\n # Column Dtype \n--- ------ ----- \n 0 event_time object \n 1 event_type object \n 2 product_id int64 \n 3 category_id int64 \n 4 category_code object \n 5 brand object \n 6 price float64\n 7 user_id int64 \n 8 user_session object \ndtypes: float64(1), int64(3), object(5)\nmemory usage: 4.5+ GB\nNone \n\nThe total number of null values in each column is:\nevent_time 0\nevent_type 0\nproduct_id 0\ncategory_id 0\ncategory_code 21898171\nbrand 9224078\nprice 0\nuser_id 0\nuser_session 10\ndtype: int64 \n\n"
]
],
[
[
"We notice that there are some null values, but these are only inside two specific columns: *category_code* and *brand*. Since, for each question, we decide which columns we needed to read to answer it, we decided to handle null values inside a specific question only in the cases where they were actually relevant. (If we do so it will be specified in the *methods.py* file)",
"_____no_output_____"
],
[
"# QUESTION 1\n## Which is the rate of complete funnels?",
"_____no_output_____"
],
[
"First of all we read the DataFrame, importing only the columns that we need. In this particular case, for the subquestions 1.1-1.2-1.3 we just need *user_session*,*event_type* and *product_id*",
"_____no_output_____"
]
],
[
[
"df1 = methods.loadAllDatasets(df_names, ['user_session','event_type','product_id'])",
"_____no_output_____"
]
],
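[
[
"*Illustrative sketch (not part of the original `methods.py`):* a minimal version of what a column-selective loader such as `loadAllDatasets` could look like, assuming it simply reads the requested columns from each monthly CSV and stacks the rows.",
"_____no_output_____"
]
],
[
[
"# Hypothetical sketch, shown only for clarity; the notebook actually uses methods.loadAllDatasets\nimport pandas as pd\n\ndef load_all_datasets_sketch(file_names, columns):\n    # usecols keeps memory usage low, since the full csv files are large\n    frames = [pd.read_csv(name, usecols=columns) for name in file_names]\n    return pd.concat(frames, ignore_index=True)\n\n# Example usage (mirrors the call above):\n# df1_sketch = load_all_datasets_sketch(df_names, ['user_session', 'event_type', 'product_id'])",
"_____no_output_____"
]
],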
[
[
"While, for the subquestions 1.4 and 1.5 we need *user_session*,*event_type*, *product_id* and *event_time*. We also need to parse the last column so that it will be recognized as a date and not as a string",
"_____no_output_____"
]
],
[
[
"df2 = methods.loadAllDatasetsWithParser(df_names)",
"_____no_output_____"
]
],
[
[
"**RQ1.1** What’s the operation users repeat more on average within a session? Produce a plot that shows the average number of times users perform each operation (view/removefromchart etc etc)",
"_____no_output_____"
]
],
[
[
"methods.avg_operations_performed(df1)",
"_____no_output_____"
]
],
[
[
"As we can notice in the graph above, in average, users tend to view products a lot, but, only in few occasions, the put them in the cart. Also, after one item is inside a cart is not garanteed to be purchased, in fact that operation has a lower average.\n\nThe event type *remove from cart* has an average of 0 beacuse in the months of October and November never happened.",
"_____no_output_____"
],
[
"<p> </p>",
"_____no_output_____"
],
[
"**RQ1.2** How many times, on average, a user views a product before adding it to the cart?",
"_____no_output_____"
]
],
[
[
"avg = methods.avg_views_before_cart(df1)\nprint(f\"In average, a user views a product {avg} times before adding it to the cart.\")",
"In average, a user views a product 1.874 times before adding it to the cart.\n"
]
],
[
[
"<p> </p>",
"_____no_output_____"
],
[
"**RQ1.3** What’s the probability that products added once to the cart are effectively bought?",
"_____no_output_____"
]
],
[
[
"avg = methods.avg_purchase_after_cart(df1)",
"_____no_output_____"
],
[
"print(f\"The probability that products added once to the cart are effectively bought is: {avg}\")",
"The probability that products added once to the cart are effectively bought is: 0.38\n"
]
],
[
[
"<p> </p>",
"_____no_output_____"
],
[
"**RQ1.4** What’s the average time an item stays in the cart before being removed?",
"_____no_output_____"
]
],
[
[
"methods.avg_time_cart_before_removal(df2)",
"_____no_output_____"
]
],
[
[
"**!!** This function can not be runned if we take only into account October and November because, in these months the event remove_from_cart does not exists as we can see below:",
"_____no_output_____"
]
],
[
[
"df2.event_type.unique()",
"_____no_output_____"
]
],
[
[
"If we try and run it we will get an error since we are performing operations (in particular divisions) with an empty series.\n<p> </p>",
"_____no_output_____"
],
[
"**RQ1.5** How much time passes on average between the first view time and a purchase/addition to cart?",
"_____no_output_____"
],
[
"There may be different interpretation for this question: we decided to calculate the time that passes between the first view and the action add to cart / purchase for a same product id **inside** the same user_session",
"_____no_output_____"
],
[
"First we want to know the average time between the events *view* and *cart* for the same product:",
"_____no_output_____"
]
],
[
[
"avg = fn.avg_time_between_view_and_cart(df2)\nprint(f\"The average time that passes between the first time that an item is viewed and the moment in which that item is added to the cart is approximately: {round(avg,2)} mins\")",
"_____no_output_____"
]
],
[
[
"<p> </p>\nThen we want to know the average time between the events *view* and *purchase* for the same product:",
"_____no_output_____"
]
],
[
[
"avg = fn.avg_time_between_view_and_purchase(df2)\nprint(f\"The average time that passes between the first time that an item is viewed and the moment in which that item is purchased is approximately: {round(avg,2)} mins\")",
"_____no_output_____"
]
],
[
[
"<p> </p>",
"_____no_output_____"
],
[
"So, to answer the main question: we noticed that users tend to view a product an average of 1.874 times before adding it to the cart. Once an item is added to the cart, it is not garanteed to be purchased, in fact only with probability 0.38 this happens. The rest of the times the item just stays in the cart and it could expire (removed automatically) or is removed from it manually by the user.\nIn conclusion the rate of complete funnels, which is given by completed operations (from view to purchase) over number of total oprations is low.",
"_____no_output_____"
],
[
"# QUESTION 2",
"_____no_output_____"
],
[
"**RQ2.1** What are the categories of the most trending products overall? For each month visualize this information through a plot showing the number of sold products per category.",
"_____no_output_____"
],
[
"Before executing our functions we import our datasets and select only the useful columns: ***'category_code', 'event_type', 'product_id'***.\n\nThis operation is made because the csv files are heavy to load and use. Also because we will use the same dataset with the <ins>same columns</ins> for all the subquestions",
"_____no_output_____"
]
],
[
[
"#import november dataset\nndt_selection = methods.loadOneDataset(df_names[0], ['category_code', 'event_type', 'product_id'])",
"_____no_output_____"
]
],
[
[
"We imported also the october dataset",
"_____no_output_____"
]
],
[
[
"#import october dataset\nodt_selection = methods.loadOneDataset(df_names[1], ['category_code', 'event_type', 'product_id'])",
"_____no_output_____"
]
],
[
[
"<p> </p>\nTo obtain our answer we restricted the dataset to the \"event_type\" column, in our case equal to \"purchase\".",
"_____no_output_____"
],
[
"# ***NOVEMBER***",
"_____no_output_____"
]
],
[
[
"#plot the categories of the most trending products overall\nndt_select = methods.restrict_dt(ndt_selection, \"purchase\", 10)",
"_____no_output_____"
]
],
[
[
"Ww can now show the plot that we want for this question.",
"_____no_output_____"
]
],
[
[
"methods.plot_n_categories(ndt_select)",
"_____no_output_____"
]
],
[
[
"We see that the category **electronics** is the most sold.",
"_____no_output_____"
],
[
"# ***OCTOBER***",
"_____no_output_____"
]
],
[
[
"#plot the categories of the most trending products overall\nodt_select = methods.restrict_dt(odt_selection, \"purchase\", 10)",
"_____no_output_____"
]
],
[
[
"Show the plot that we want for this question.",
"_____no_output_____"
]
],
[
[
"methods.plot_n_categories(odt_select)",
"_____no_output_____"
]
],
[
[
"We see that the category **electronics** is the most sold.",
"_____no_output_____"
],
[
"<p> </p>",
"_____no_output_____"
],
[
"**RQ2.2** Plot the most visited subcategories.",
"_____no_output_____"
],
[
"In the same way, we display the subcategories and do the same thing that we saw in the last question.",
"_____no_output_____"
],
[
"# ***NOVEMBER***",
"_____no_output_____"
]
],
[
[
"#plot the most visited 30 subcategories\nsub_ndf = methods.dt_subcategories(ndt_selection)",
"_____no_output_____"
]
],
[
[
"Show the plot that we want for this question.",
"_____no_output_____"
]
],
[
[
"methods.plot_n_subcategories(sub_ndf, 30)",
"_____no_output_____"
]
],
[
[
"In this plot we see that the **smartphone** subcategory is the most sold in November.",
"_____no_output_____"
],
[
"<p> </p>",
"_____no_output_____"
],
[
"# ***OCTOBER***",
"_____no_output_____"
]
],
[
[
"#plot the most visited 30 subcategories\nsub_odf = methods.dt_subcategories(odt_selection)",
"_____no_output_____"
]
],
[
[
"Show the plot that we want for this question.",
"_____no_output_____"
]
],
[
[
"methods.plot_n_subcategories(sub_odf, 30)",
"_____no_output_____"
]
],
[
[
"In this plot we see that the **smartphone** subcategory is the most sold in October.",
"_____no_output_____"
],
[
"<p> </p>",
"_____no_output_____"
],
[
"**RQ2.3** What are the 10 most sold products per category?",
"_____no_output_____"
],
[
"# ***NOVEMBER***",
"_____no_output_____"
],
[
"For each category we want to print the 10 most sold products, but in order to have a good visualization, we decided to print only the results for the first five categories.",
"_____no_output_____"
]
],
[
[
"#the 10 most sold products per category\nmethods.most_sold_products_per_category(sub_ndf, 10)",
"The category accessories has:\nproduct_id\n18300021 53\n28401112 48\n18300155 42\n18300460 38\n28400759 36\n18300141 27\n28400912 24\n28401075 23\n18300214 23\n18300076 20\nName: category_code, dtype: int64\nThe category apparel has:\nproduct_id\n28720716 98\n54900013 89\n28716978 74\n54900004 74\n28719635 69\n28716666 65\n28719606 64\n28716519 64\n28719076 59\n28713229 58\nName: category_code, dtype: int64\nThe category appliances has:\nproduct_id\n3700926 1764\n3600661 1742\n3601405 1140\n3600666 1131\n2900958 903\n3700766 895\n3601603 895\n3600163 879\n2702277 675\n3701134 624\nName: category_code, dtype: int64\nThe category auto has:\nproduct_id\n6000094 1100\n6000227 489\n4700478 483\n5701166 274\n6000004 274\n6000229 253\n5701128 246\n5701002 244\n6000157 211\n5701086 182\nName: category_code, dtype: int64\nThe category computers has:\nproduct_id\n1307310 1329\n1307545 879\n1307188 829\n1307073 740\n1307067 538\n1307076 500\n1307589 473\n1307004 390\n1307187 388\n1307135 327\nName: category_code, dtype: int64\n"
]
],
[
[
"# ***OCTOBER***",
"_____no_output_____"
],
[
"For each category we want to print the 10 most sold products, but in order to have a good visualization, we decided to print only the results for the first five categories, as we did for November.",
"_____no_output_____"
]
],
[
[
"#the 10 most sold products per category\nmethods.most_sold_products_per_category(sub_odf, 10)",
"The category accessories has:\nproduct_id\n18300155 63\n18300021 34\n52900016 31\n28300780 24\n49800017 23\n28300432 21\n18300595 17\n28400774 16\n18300496 16\n18300214 16\nName: category_code, dtype: int64\nThe category apparel has:\nproduct_id\n28718083 72\n28715756 46\n28712682 45\n28715827 40\n28717034 39\n28715757 39\n28715829 38\n28716983 38\n28703609 38\n54900011 37\nName: category_code, dtype: int64\nThe category appliances has:\nproduct_id\n3700926 1675\n3600661 1482\n3600163 1017\n3600666 877\n2900536 831\n3601405 768\n3601485 627\n2701657 566\n3601244 559\n3701134 543\nName: category_code, dtype: int64\nThe category auto has:\nproduct_id\n6000094 785\n4700478 411\n5701128 382\n6000227 360\n5701166 304\n4700630 300\n4700589 235\n6000229 214\n6000004 206\n5700518 165\nName: category_code, dtype: int64\nThe category computers has:\nproduct_id\n1307310 1003\n1307073 864\n1307366 722\n1307067 651\n1306650 649\n1307074 416\n1307188 378\n1307187 356\n1306359 350\n1307350 324\nName: category_code, dtype: int64\n"
]
],
[
[
"# QUESTION 3\n## For each category, what’s the brand whose prices are higher on average?",
"_____no_output_____"
],
[
"To answer these questions we only need the columns 'category_code','brand','price'. For this reason we decided to read only these 3 when uploading the dataframes",
"_____no_output_____"
]
],
[
[
"df3 = methods.loadAllDatasets(df_names, ['category_code','price','brand'])",
"_____no_output_____"
]
],
[
[
"<p> </p>",
"_____no_output_____"
],
[
"**RQ3.1** Write a function that asks the user a category in input and returns a plot indicating the average price of the products sold by the brand.",
"_____no_output_____"
],
[
"First thing first, we apply a function that chooses randomly one category_code from all the ones available",
"_____no_output_____"
]
],
[
[
"category = methods.choose_category(df3)\ncategory",
"_____no_output_____"
]
],
[
[
"Now that we have our category, we call a function that shows the average price of the products sold by each brand inside it",
"_____no_output_____"
]
],
[
[
"methods.avg_price(category, df3)",
"_____no_output_____"
]
],
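[
[
"*Illustrative sketch (assumed, not the code in `methods.py`):* the plot above can be reproduced with a groupby on the brand column, restricted to the chosen category.",
"_____no_output_____"
]
],
[
[
"# Hypothetical sketch of the computation behind methods.avg_price\nimport matplotlib.pyplot as plt\n\nsubset = df3[df3.category_code == category]\navg_price_per_brand = subset.groupby('brand')['price'].mean().sort_values()\n\navg_price_per_brand.plot(kind='bar', figsize=(15, 5))\nplt.ylabel('average price')\nplt.title('Average price per brand in ' + category)\nplt.show()",
"_____no_output_____"
]
],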
[
[
"So, for example, if we select the category code **apparel.shirt**, there are several brands that offers it. Among all of those, the one that, in average, offers products with a higher price, is **weekend**\n<p> </p>",
"_____no_output_____"
],
[
"**RQ3.2** Find, for each category, the brand with the highest average price. Return all the results in ascending order by price.",
"_____no_output_____"
]
],
[
[
"methods.highest_avg_price(df3) ",
"_____no_output_____"
]
],
[
[
"The Data Frame in output has 3 different columns:\n- **0**: is representing the **category_code**\n- **1**: is representing the **brand** with the highest average price in that specific category code\n- **2**: is representing the **price** associated with that specific brand\n\nSo, for example, inside the category *accessories.umbrellas*, we can see that the highest price in averege is offered by *hoco* (25,71$), and this category is the fifth cheapest af all.\nThe results are also sorted so that the prices are in order from the smallest to the largest value.\n<p> </p>",
"_____no_output_____"
],
[
"# QUESTION 4",
"_____no_output_____"
],
[
"***How much does each brand earn per month? Write a function that given the name of a brand in input returns, for each month, its profit.***",
"_____no_output_____"
],
[
"Before starting with the homework request, we pick a brand name by input for showing its profit for each month",
"_____no_output_____"
]
],
[
[
"#which brand do you want to search.. randomize the choice!\nbrand_to_search = input(\"Choose a brand to see its profit: \")",
"Choose a brand to see its profit: apple\n"
]
],
[
[
"Before executing our functions we import our datasets and we select only the useful columns: ***'event_type', 'brand', 'price'***.\n\nThis operation is made because the csv files are heavy to load and use. Also because we will use the same dataset with the <ins>same columns</ins> for all the subquestions",
"_____no_output_____"
]
],
[
[
"#import november dataset\nndt_selection = methods.loadOneDataset(df_names[0], ['event_type', 'brand', 'price'])",
"_____no_output_____"
]
],
[
[
"We imported also the october dataset",
"_____no_output_____"
]
],
[
[
"#import october dataset\nodt_selection = methods.loadOneDataset(df_names[1], ['event_type', 'brand', 'price'])",
"_____no_output_____"
]
],
[
[
"To obtain our answer we restricted the dataset to the \"event_type\" column, in our case equal to \"purchase\" compare also with the brand_to_search.",
"_____no_output_____"
],
[
"# ***NOVEMBER***",
"_____no_output_____"
]
],
[
[
"#how much does each brand earn per month?\nnew_ndt_sum = methods.restrict_bypurchase_brand(ndt_selection, brand_to_search)",
"_____no_output_____"
]
],
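[
[
"*Illustrative sketch (assumed):* the profit computation performed by `restrict_bypurchase_brand` can be approximated by keeping only the purchase rows of the requested brand and summing their prices.",
"_____no_output_____"
]
],
[
[
"# Hypothetical sketch of what restrict_bypurchase_brand could compute\ndef brand_profit_sketch(df, brand):\n    # profit = sum of the prices of the purchase events of this brand\n    mask = (df.event_type == 'purchase') & (df.brand == brand)\n    return int(df.loc[mask, 'price'].sum())\n\n# Example usage (same inputs as the cell above):\n# brand_profit_sketch(ndt_selection, brand_to_search)",
"_____no_output_____"
]
],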
[
[
"How much is the profit of your brand?",
"_____no_output_____"
]
],
[
[
"print(f\"The {brand_to_search} has a profit of: {new_ndt_sum}$\")",
"The apple has a profit of: 127512524$\n"
]
],
[
[
"# ***OCTOBER***",
"_____no_output_____"
]
],
[
[
"#how much does each brand earn per month?\nnew_oct_sum = methods.restrict_bypurchase_brand(odt_selection, brand_to_search)",
"_____no_output_____"
]
],
[
[
"How much is the profit of your brand?",
"_____no_output_____"
]
],
[
[
"print(f\"The {brand_to_search} has a profit of: {new_oct_sum}$\")",
"The apple has a profit of: 111209268$\n"
]
],
[
[
"***Is the average price of products of different brands significantly different?*** We will see...",
"_____no_output_____"
],
[
"# ***NOVEMBER***",
"_____no_output_____"
]
],
[
[
"#see the average for each brand, is significant?\nnew_ndt_mean = methods.restrict_bypurchase_brand_avg(ndt_selection)\nnew_ndt_mean",
"_____no_output_____"
]
],
[
[
"# ***OCTOBER***",
"_____no_output_____"
]
],
[
[
"#see the average for each brand, is significant?\nnew_odt_mean = methods.restrict_bypurchase_brand_avg(odt_selection)\nnew_odt_mean",
"_____no_output_____"
]
],
[
[
"<span style=\"color:red\"> ***We can say that the brand name is significant for the product type (it's obvious).*** </span>\n\n<p> </p>\n\n**RQ4.1** Using the function you just created, find the top 3 brands that have suffered the biggest losses in earnings between one month and the next, specifing both the loss percentage and the 2 months (e.g., brand_1 lost 20% between march and april).",
"_____no_output_____"
]
],
[
[
"biggest_lose = methods.big_lose(ndt_selection, odt_selection)",
"_____no_output_____"
]
],
[
[
"Now we plot the top 3 brands that have suffered the biggest losses in earnings between october and november.",
"_____no_output_____"
]
],
[
[
"methods.summarize(biggest_lose)",
"The brand jonnesway has lose the 2862 % between october and november\nThe brand sunfull has lose the 2399 % between october and november\nThe brand evga has lose the 2338 % between october and november\n"
]
],
[
[
"<p> </p>\n\n# QUESTION 5\n\n## In what part of the day is your store most visited?",
"_____no_output_____"
],
[
"To answer the question, we only need to import two columns: *event_type* and *even_time*. We also need to parse the dates",
"_____no_output_____"
]
],
[
[
"df5 = methods.import_dataset5(df_names)",
"_____no_output_____"
]
],
[
[
"Now we can start analyzing our data. We can create different plots that show the most visited time of the day and the most visited day of the week",
"_____no_output_____"
]
],
[
[
"methods.most_visited_time(df5)",
"_____no_output_____"
]
],
[
[
"In this first plot we notice that usually people are more active it the afternoon, in fact the three most visited times of the day are the ones from 3PM to 5PM",
"_____no_output_____"
]
],
[
[
"methods.most_visited_day(df5)",
"_____no_output_____"
]
],
[
[
"In this second analysis we noticed that over all the days of the week **Friday** and **Saturday** are the one with more views \n\n*Notice that on the x-axis the day of the week are indicated by numbers from 0 to 6; where 0 corresponds to Monday and 6 to Sunday*.\n\n<p> </p>\n\nNow we can 'merge' the two results and see, for each day of the week the average views of our online store splitted by hour.",
"_____no_output_____"
],
[
"**RQ5.1** Create a plot that for each day of the week shows the hourly average of visitors your store has.",
"_____no_output_____"
]
],
[
[
"methods.avg_visitors_perday(df5)",
"_____no_output_____"
]
],
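[
[
"*Illustrative sketch (assumed, not the original `methods.avg_visitors_perday`):* the hourly averages per weekday can be derived from the parsed `event_time` column as follows.",
"_____no_output_____"
]
],
[
[
"# Hypothetical sketch: average hourly views per day of the week\nimport matplotlib.pyplot as plt\n\nviews = df5[df5.event_type == 'view'].copy()\nviews['weekday'] = views.event_time.dt.dayofweek  # 0 = Monday, 6 = Sunday\nviews['hour'] = views.event_time.dt.hour\nviews['date'] = views.event_time.dt.date\n\n# count the views per (weekday, date, hour), then average over the dates of each weekday\nper_hour = views.groupby(['weekday', 'date', 'hour']).size().reset_index(name='views')\nhourly_avg = per_hour.groupby(['weekday', 'hour'])['views'].mean().unstack(0)\n\nhourly_avg.plot(figsize=(15, 6), title='Average hourly views per weekday')\nplt.xlabel('hour of the day')\nplt.ylabel('average views')\nplt.show()",
"_____no_output_____"
]
],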
[
[
"**Comment:** we can see how, on average, the hours of the day where the store is most visited are between 3pm and 5pm. This is valid for every day of the week. We can also notice that the graph follows pretty much the same shape during all week: the views start increasing quickly from midnight untill there is a 'plateau' for some hours (5am - 1pm). From 1pm there is a peak in the early afternoon that ends with a drastic drop of view at around 6pm that continues untill 12pm where we have the lowest values.\n\n**To answer the question**, we suggest to invest in ads and othere marketing strategies in the early hours of the afternoon in each day, because, as the graphics suggests, those are the hours with the most affluence.",
"_____no_output_____"
],
[
"# QUESTION 6",
"_____no_output_____"
],
[
"***What's the conversion rate of your online store?***",
"_____no_output_____"
],
[
"First of all... What is the conversion rate? \n\n**Conversion rate** of a product is given by the <ins>purchase</ins> rate over the number of times the product has been visited.",
"_____no_output_____"
],
[
"...and what is the purchase rate? \n\n**Purchase rate** is the proportion of purchases versus a base metric such as users, sessions, email subscribers, etc. with a generic formula being PR = P/N where P is the number of purchases and N is the number of events during which a conversion could have occurred.",
"_____no_output_____"
]
],
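[
[
"*Illustrative sketch (assumed):* under a simplified reading of the definitions above (purchases divided by views), the overall conversion rate can be computed directly from the event counts; the notebook itself uses `methods.purchase_rate` and `methods.conversion_rate` below.",
"_____no_output_____"
]
],
[
[
"# Hypothetical sketch: overall conversion rate as purchases / views\nevents = methods.loadAllDatasets(df_names, ['event_type'])\ncounts = events.event_type.value_counts()\nconversion_rate_sketch = counts.get('purchase', 0) / counts.get('view', 1)\nprint(round(conversion_rate_sketch, 4))",
"_____no_output_____"
]
],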
[
[
"purchase_rate = methods.purchase_rate(df_names)",
"_____no_output_____"
]
],
[
[
"now we calculate the conversion rate",
"_____no_output_____"
],
[
"**RQ6.1** Find the overall conversion rate of your store.",
"_____no_output_____"
]
],
[
[
"conversion_rate = methods.conversion_rate(purchase_rate, df_names)\nconversion_rate",
"_____no_output_____"
]
],
[
[
"The value returned is a good value to understand what is going on in our online store.. this is pretty low..",
"_____no_output_____"
],
[
"**RQ6.2** Plot the number of purchases of each category and show the conversion rate of each category in decreasing order.",
"_____no_output_____"
]
],
[
[
"purchase_dt = methods.percategory_show_purchases(df_names)",
"_____no_output_____"
]
],
[
[
"Now we will prepare our conversion rate per each category. \n\nWe want to have the number of categories, then we will make the conversion rate.",
"_____no_output_____"
]
],
[
[
"eachcategory_num = methods.number_categories(df_names)\neachcategory_num",
"_____no_output_____"
]
],
[
[
"the conversion rate is for each category in **decreasing** oder",
"_____no_output_____"
]
],
[
[
"cvrate = methods.conversion_rate_percategory(df_names, eachcategory_num)\ncvrate",
"_____no_output_____"
]
],
[
[
"we can say that there are lower purchase rates presented in our online store",
"_____no_output_____"
],
[
"# QUESTION 7",
"_____no_output_____"
],
[
"The Pareto principle states that for many outcomes roughly 80% of consequences come from 20% of the causes. Also known as 80/20 rule, in e-commerce simply means that most of your business, around 80%, likely comes from about 20% of your customers.\n**Prove that the pareto principle applies to your store.**",
"_____no_output_____"
],
[
"To answer this question we first have to import only the columns of the concatenated dataset that we need. In this case those are: *event_type*, *user_id*, *price* and *product_id*",
"_____no_output_____"
]
],
[
[
"df7 = methods.loadAllDatasets(df_names,['event_type','price','user_id','product_id'])",
"_____no_output_____"
]
],
[
[
"Now we can calculate the total profits of our store given from all the purchases made by all the users in two months. We also compute the 80% of them",
"_____no_output_____"
]
],
[
[
"purchases = df7[df7.event_type=='purchase'].price.sum()\npur_80 = .8*purchases\nprint(f'80% of the profit corresponds to {int(pur_80)}$')",
"80% of the profit corresponds to 404121914$\n"
]
],
[
[
"We can now calculate how much the top 20% of the users (the ones that spent the most) spent during the whole months of October and November",
"_____no_output_____"
]
],
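[
[
"*Illustrative sketch (assumed, not the original `methods.proof_pareto_principle`):* the spending of the top 20% of users can be obtained by summing each user's purchases, sorting, and keeping the top fifth.",
"_____no_output_____"
]
],
[
[
"# Hypothetical sketch: total spending of the top 20% of purchasing users\nspending = (df7[df7.event_type == 'purchase']\n            .groupby('user_id')['price']\n            .sum()\n            .sort_values(ascending=False))\n\ntop_n = int(len(spending) * 0.2)  # top 20% of the purchasing users\ntop_20_spend_sketch = spending.iloc[:top_n].sum()\nprint('Top 20% of users spent about ' + str(int(top_20_spend_sketch)) + '$')",
"_____no_output_____"
]
],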
[
[
"profit_from_top_users = methods.proof_pareto_principle(df7)",
"_____no_output_____"
],
[
"print(f'20% of the top users spend {int(profit_from_top_users)}$')",
"20% of the top users spend 364222233$\n"
]
],
[
[
"As we can see, the two results are close.\n\nThis proves the Pareto Principle that states, in fact, how usually 80% of the profits for a store, come from 20% of the users.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
4a23978c757581e50a4e13fdb87d0f538103dc90
| 88,068 |
ipynb
|
Jupyter Notebook
|
analysis/histograms.ipynb
|
marcpare/whitenoise-samples
|
b4154cdd38efcfb1ba946cf2443d31a1b0d2e666
|
[
"MIT"
] | null | null | null |
analysis/histograms.ipynb
|
marcpare/whitenoise-samples
|
b4154cdd38efcfb1ba946cf2443d31a1b0d2e666
|
[
"MIT"
] | null | null | null |
analysis/histograms.ipynb
|
marcpare/whitenoise-samples
|
b4154cdd38efcfb1ba946cf2443d31a1b0d2e666
|
[
"MIT"
] | null | null | null | 236.741935 | 15,868 | 0.909763 |
[
[
[
"# Histograms\nThis notebook demonstrates simple use of histograms in wn.",
"_____no_output_____"
],
[
"### Set up libraries and load exemplar dataset",
"_____no_output_____"
]
],
[
[
"# load libraries\nimport os\nimport opendp.whitenoise.core as wn\nimport numpy as np\nimport math\nimport statistics\n\n# establish data information\ndata_path = os.path.join('.', 'data', 'PUMS_california_demographics_1000', 'data.csv')\nvar_names = [\"age\", \"sex\", \"educ\", \"race\", \"income\", \"married\"]\n\ndata = np.genfromtxt(data_path, delimiter=',', names=True)\nage = list(data[:]['age'])\n\nprint(\"Dimension of dataset: \" + str(data.shape))\nprint(\"Names of variables: \" + str(data.dtype.names))",
"Dimension of dataset: (1000,)\nNames of variables: ('age', 'sex', 'educ', 'race', 'income', 'married')\n"
]
],
[
[
"### Creating DP Releases of Histograms\n\nThe default method for generating a histogram in WhiteNoise is by releasing counts of each bin or category using the geometric mechanism. The geometric mechanism only returns integer values for any query, so resists some vulnerabilities of DP releases from floating point approximations (see Mironov 2012). It is also possible, however, to generate histograms from the more typical Laplace mechanism. We show both approaches below.\n\nHere we generate histograms on three types of variables:\n* A continuous variable, here `income`, where the set of numbers have to be divided into bins,\n* A boolean or dichotomous variable, here `sex`, that can only take on two values,\n* A categorical variable, here `education`, where there are distinct categories enumerated as strings.\n\nNote the education variable is coded in the data on a scale from 1 to 16, but we're leaving the coded values as strings throughout this notebook.",
"_____no_output_____"
]
],
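[
[
"As an aside, the integer noise of the geometric mechanism can be illustrated with a tiny sketch: each true count is perturbed with two-sided geometric (discrete Laplace) noise whose width depends on epsilon and on the query sensitivity. This is only an illustration of the idea, not the WhiteNoise implementation.",
"_____no_output_____"
]
],
[
[
"# Illustration only: add two-sided geometric noise to a vector of true counts.\n# For a counting query the sensitivity is 1; the noise gets wider as epsilon shrinks.\nimport numpy as np\n\ndef two_sided_geometric_noise(counts, epsilon, sensitivity=1):\n    p = 1 - np.exp(-epsilon / sensitivity)\n    rng = np.random.default_rng()\n    noise = rng.geometric(p, size=len(counts)) - rng.geometric(p, size=len(counts))\n    return np.asarray(counts) + noise\n\nprint(two_sided_geometric_noise([278, 196, 96], epsilon=0.5))",
"_____no_output_____"
]
],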
[
[
"income_edges = list(range(0, 100000, 10000))\neducation_categories = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"11\", \"12\", \"13\", \"14\", \"15\", \"16\"]\n\n\nwith wn.Analysis() as analysis:\n data = wn.Dataset(path = data_path, column_names = var_names)\n nsize = 1000 \n \n income_histogram = wn.dp_histogram(\n wn.to_int(data['income'], lower=0, upper=100),\n edges = income_edges,\n upper = nsize,\n null_value = 150,\n privacy_usage = {'epsilon': 0.5}\n )\n\n income_prep = wn.histogram(wn.to_int(data['income'], lower=0, upper=100000),\n edges=income_edges, null_value =-1)\n income_histogram2 = wn.laplace_mechanism(income_prep, privacy_usage={\"epsilon\": 0.5, \"delta\": .000001})\n \n sex_histogram = wn.dp_histogram(\n wn.to_bool(data['sex'], true_label=\"0\"),\n upper = nsize,\n privacy_usage = {'epsilon': 0.5}\n )\n \n sex_prep = wn.histogram(wn.to_bool(data['sex'], true_label=\"0\"), null_value = True)\n sex_histogram2 = wn.laplace_mechanism(sex_prep, privacy_usage={\"epsilon\": 0.5, \"delta\": .000001})\n \n education_histogram = wn.dp_histogram(\n data['educ'],\n categories = education_categories,\n null_value = \"-1\",\n privacy_usage = {'epsilon': 0.5}\n )\n\n education_prep = wn.histogram(data['educ'],\n categories = education_categories, null_value = \"-1\")\n education_histogram2 = wn.laplace_mechanism(education_prep, privacy_usage={\"epsilon\": 0.5, \"delta\": .000001})\n \n\nanalysis.release()\n\nprint(\"Income histogram Geometric DP release: \" + str(income_histogram.value))\nprint(\"Income histogram Laplace DP release: \" + str(income_histogram2.value))\n\nprint(\"Sex histogram Geometric DP release: \" + str(sex_histogram.value))\nprint(\"Sex histogram Laplace DP release: \" + str(sex_histogram2.value))\n\nprint(\"Education histogram Geometric DP release:\" + str(education_histogram.value))\nprint(\"Education histogram Laplace DP release: \" + str(education_histogram2.value))",
"Income histogram Geometric DP release: [278 196 96 103 65 62 53 104 39 85]\nIncome histogram Laplace DP release: [295.3052913 186.53526817 123.29567384 100.85650317 60.21639407\n 47.01726179 40.39806265 19.93649819 16.99358144 75.37529966]\nSex histogram Geometric DP release: [485 514]\nSex histogram Laplace DP release: [486.63588064 539.19028398]\nEducation histogram Geometric DP release:[ 24 9 25 5 40 53 53 37 207 19 167 59 178 23 32 18 6]\nEducation histogram Laplace DP release: [ 32.99434939 18.3283286 41.24380174 10.64177579 17.71485788\n 16.44570654 35.4852772 54.55488846 197.43218538 59.72384568\n 169.34338544 75.37139662 179.65393207 57.39920629 19.23223424\n 5.08898451 9.42213613]\n"
]
],
[
[
"We can see most obviously that the releases from the Geometric mechanism are integer counts, while the Laplace releases are floating point numbers.\n\nBelow, we will quickly create histograms of the actual private data, for a point of comparison to our differentially private releases:",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n\ndata = np.genfromtxt(data_path, delimiter=',', names=True)\nincome = list(data[:]['income'])\nsex = list(data[:]['sex'])\neducation = list(data[:]['educ'])\n\n# An \"interface\" to matplotlib.axes.Axes.hist() method\nn_income, bins, patches = plt.hist(income, bins=list(range(0,110000,10000)), color='#0504aa',\n alpha=0.7, rwidth=0.85)\nplt.grid(axis='y', alpha=0.75)\nplt.xlabel('Income')\nplt.ylabel('Frequency')\nplt.title('True Dataset Income Distribution')\nplt.show()\n\nn_sex, bins, patches = plt.hist(sex, bins=[-0.5,0.5,1.5], color='#0504aa',\n alpha=0.7, rwidth=0.85)\nplt.grid(axis='y', alpha=0.75)\nplt.xlabel('Sex')\nplt.ylabel('Frequency')\nplt.title('True Dataset Sex Distribution')\nplt.show()\n\nn_educ, bins, patches = plt.hist(education, bins=list(range(1,19,1)), color='#0504aa',\n alpha=0.7, rwidth=0.85)\nplt.grid(axis='y', alpha=0.75)\nplt.xlabel('Education')\nplt.ylabel('Frequency')\nplt.title('True Dataset Education Distribution')\nplt.show()",
"_____no_output_____"
]
],
[
[
"Below we can see the differentially private releases of these variables in shades of red, against the \"true\" private counts in green.",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n\ncolorseq = [\"forestgreen\", \"indianred\", \"orange\", \"orangered\", \"orchid\"]\n\nfig = plt.figure()\nax = fig.add_axes([0,0,1,1])\nplt.ylim([-100,500])\n#inccat = [\"10k\",\"20k\",\"30k\",\"40k\",\"50k\",\"60k\",\"70k\",\"80k\",\"90k\",\"100k\"]\ninccat = [10,20,30,40,50,60,70,80,90,100]\nwidth=3\ninccat_left = [x + width for x in inccat]\ninccat_right = [x + 2*width for x in inccat]\nax.bar(inccat, n_income, width=width, color=colorseq[0], label='True Value')\nax.bar(inccat_left, income_histogram.value, width=width, color=colorseq[1], label='DP Geometric')\nax.bar(inccat_right, income_histogram2.value, width=width, color=colorseq[2], label='DP Laplace')\nax.legend()\nplt.title('Histogram of Income')\nplt.xlabel('Income, in thousands')\nplt.ylabel('Count')\nplt.show()\n\n\n\nfig = plt.figure()\nax = fig.add_axes([0,0,1,1])\nplt.ylim([0,800])\nsexcat = [0,1]\nwidth = 0.2\nsexcat_left = [x + width for x in sexcat]\nsexcat_right = [x + 2*width for x in sexcat]\nax.bar(sexcat, n_sex, width=width, color=colorseq[0], label='True Value')\nax.bar(sexcat_left, sex_histogram.value, width=width, color=colorseq[1], label='DP Geometric')\nax.bar(sexcat_right, sex_histogram2.value, width=width, color=colorseq[2], label='DP Laplace')\nax.legend()\nplt.title('Histogram of Sex')\nplt.ylabel('Count')\nplt.show()\n\n\nfig = plt.figure()\nax = fig.add_axes([0,0,1,1])\nedcat = list(range(1,18))\nwidth = 0.25\nedcat_left = [x + width for x in edcat]\nedcat_right = [x + 2*width for x in edcat]\nax.bar(edcat, n_educ, width=width, color=colorseq[0], label='True Value')\nax.bar(edcat_left, education_histogram.value, width=width, color=colorseq[1], label='DP Geometric')\nax.bar(edcat_right, education_histogram2.value, width=width, color=colorseq[2], label='DP Laplace')\nax.legend()\nplt.title('Histogram of Education')\nplt.xlabel('Educational Attainment Category')\nplt.ylabel('Count')\nplt.show()\n",
"_____no_output_____"
]
],
[
[
"## References\n\nMironov, Ilya. \"On significance of the least significant bits for differential privacy.\" In Proceedings of the 2012 ACM conference on Computer and communications security, pp. 650-661. 2012.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a239980cce5c24f19c85e01a5322290f2373017
| 177,553 |
ipynb
|
Jupyter Notebook
|
Verifying inputs & outputs.ipynb
|
neuroailab/r3m_brainscore
|
5707f72d4d0dca0e789608342709be4ea54c75d2
|
[
"MIT"
] | null | null | null |
Verifying inputs & outputs.ipynb
|
neuroailab/r3m_brainscore
|
5707f72d4d0dca0e789608342709be4ea54c75d2
|
[
"MIT"
] | null | null | null |
Verifying inputs & outputs.ipynb
|
neuroailab/r3m_brainscore
|
5707f72d4d0dca0e789608342709be4ea54c75d2
|
[
"MIT"
] | null | null | null | 100.19921 | 69,512 | 0.799485 |
[
[
[
"import os",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
],
[
"import torch\nimport torchvision.transforms as T\nimport numpy as np\nfrom PIL import Image",
"_____no_output_____"
]
],
[
[
"## Verifying image loading is the right format",
"_____no_output_____"
]
],
[
[
"path = '/home/yamins/.local/lib/python3.7/site-packages/model_tools/check_submission/images'",
"_____no_output_____"
],
[
"from model_tools.activations.pytorch import load_preprocess_images",
"_____no_output_____"
],
[
"import functools",
"_____no_output_____"
],
[
"preprocessing = functools.partial(load_preprocess_images, image_size=224)",
"_____no_output_____"
],
[
"load_preprocess_images?",
"_____no_output_____"
],
[
"impath = os.path.join(path, '10.png')\nim = load_preprocess_images([impath], image_size=224)",
"_____no_output_____"
],
[
"im.shape",
"_____no_output_____"
],
[
"imval = im[0].swapaxes(0, 1).swapaxes(1, 2)",
"_____no_output_____"
],
[
"plt.imshow(imval)",
"Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).\n"
],
[
"load_preprocess_images",
"_____no_output_____"
],
[
"impath = os.path.join(path, '10.png')\n\nimval = Image.open(impath)",
"_____no_output_____"
],
[
"impaths = [os.path.join(path, '%d.png' % i) for i in range(10, 20)]",
"_____no_output_____"
],
[
"imval",
"_____no_output_____"
],
[
"imval.mode",
"_____no_output_____"
],
[
"def load_preprocess_images(image_filepaths, image_size, **kwargs):\n images = load_images(image_filepaths)\n images = preprocess_images(images, image_size=image_size, **kwargs)\n return images\n\n\ndef load_images(image_filepaths):\n return [load_image(image_filepath) for image_filepath in image_filepaths]\n\n\ndef load_image(image_filepath):\n with Image.open(image_filepath) as pil_image:\n if 'L' not in pil_image.mode.upper() and 'A' not in pil_image.mode.upper() and 'P' not in pil_image.mode.upper():\n return pil_image.copy()\n else: # make sure potential binary images are in RGB\n rgb_image = Image.new(\"RGB\", pil_image.size)\n rgb_image.paste(pil_image)\n return rgb_image\n\n\ndef preprocess_images(images, image_size, **kwargs):\n preprocess = torchvision_preprocess_input(image_size, **kwargs)\n images = [preprocess(image) for image in images]\n images = np.concatenate(images)\n return images\n\n\ndef torchvision_preprocess_input(image_size, **kwargs):\n from torchvision import transforms\n return transforms.Compose([\n transforms.Resize((image_size, image_size)),\n torchvision_preprocess(**kwargs),\n ])\n\ndef torchvision_preprocess(normalize_mean=(0.485, 0.456, 0.406), normalize_std=(0.229, 0.224, 0.225)):\n from torchvision import transforms\n return transforms.Compose([\n transforms.ToTensor(),\n lambda img: 255 * img.unsqueeze(0)\n ])\n",
"_____no_output_____"
],
[
"imval = load_preprocess_images([impath], image_size=224)",
"_____no_output_____"
],
[
"imval.shape",
"_____no_output_____"
],
[
"imval.dtype",
"_____no_output_____"
]
],
[
[
"## looking at model outputs",
"_____no_output_____"
]
],
[
[
"from model_tools.activations.pytorch import PytorchWrapper",
"_____no_output_____"
],
[
"from r3m import load_r3m",
"_____no_output_____"
],
[
"r3m18cpu = load_r3m(\"resnet18\") # resnet18, resnet34\nr3m18cpu.eval();\nr3m18cpu = r3m18cpu.module.to('cpu')",
"_____no_output_____"
],
[
"preprocessing = functools.partial(load_preprocess_images, image_size=224)\nr3m18_wrapper = PytorchWrapper(identifier='r3m18', model=r3m18cpu, preprocessing=preprocessing)\nr3m18_wrapper.image_size = 224\n",
"_____no_output_____"
],
[
"r3m18_wrapper",
"_____no_output_____"
],
[
"outval = r3m18_wrapper(impaths, layers=['convnet.conv1'])",
"activations: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 64/64 [00:00<00:00, 216.77it/s]\nlayer packaging: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:02<00:00, 2.00s/it]\n"
],
[
"outval.shape",
"_____no_output_____"
],
[
"64*112*112",
"_____no_output_____"
],
[
"outval = r3m18_wrapper(impaths, layers=['convnet.layer1.0.relu'])",
"activations: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 64/64 [00:00<00:00, 255.43it/s]\nlayer packaging: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 2.00it/s]\n"
],
[
"outval.shape",
"_____no_output_____"
],
[
"64*56*56",
"_____no_output_____"
],
[
"outval = r3m18_wrapper([impath], layers=['convnet.layer4.1.relu'])",
"activations: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 64/64 [00:00<00:00, 293.46it/s]\nlayer packaging: 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 15.35it/s]\n"
],
[
"512*7*7",
"_____no_output_____"
],
[
"outval.shape",
"_____no_output_____"
],
[
"outval1 = r3m18_wrapper([impath], layers=['convnet.avgpool'])",
"activations: 100%|███████████████████| 64/64 [00:00<00:00, 313.18it/s]\nlayer packaging: 100%|█████████████████| 1/1 [00:00<00:00, 123.63it/s]\n"
],
[
"outval1.shape",
"_____no_output_____"
],
[
"outval2 = r3m18_wrapper([impath], layers=['convnet.fc'])",
"activations: 100%|███████████████████| 64/64 [00:00<00:00, 347.26it/s]\nlayer packaging: 100%|█████████████████| 1/1 [00:00<00:00, 148.32it/s]\n"
],
[
"outval2.shape",
"_____no_output_____"
],
[
"outval2",
"_____no_output_____"
],
[
"outvals2 = r3m18_wrapper(impaths, layers=['convnet.fc'])",
"activations: 100%|███████████████████| 64/64 [00:00<00:00, 267.80it/s]\nlayer packaging: 100%|█████████████████| 1/1 [00:00<00:00, 146.63it/s]\n"
],
[
"outvals2.shape",
"_____no_output_____"
],
[
"layers = ['convnet.conv1',\n 'convnet.maxpool',\n 'convnet.layer1.1',\n 'convnet.layer2.1'\n 'convnet.layer3.1',\n 'convnet.layer4.1',\n 'convnet.fc']",
"_____no_output_____"
],
[
"impaths = [os.path.join(path, '%d.png' % i) for i in range(10, 20)]\noutvals3 = r3m18_wrapper(impaths, layers=['convnet.avgpool'])",
"_____no_output_____"
],
[
"outvals3.shape",
"_____no_output_____"
],
[
"import r3m_pytorch",
"_____no_output_____"
]
],
[
[
"### resnet50",
"_____no_output_____"
]
],
[
[
"r3m34cpu = load_r3m(\"resnet34\") # resnet18, resnet34\nr3m34cpu = r3m34cpu.module.to('cpu')\n\nr3m34_wrapper = PytorchWrapper(identifier='r3m34', model=r3m34cpu, preprocessing=preprocessing)\nr3m34_wrapper.image_size = 224\n",
"_____no_output_____"
],
[
"outval = r3m34_wrapper(impaths[:1], layers=['convnet.layer1.0.relu'])",
"activations: 100%|█████████████████████████████████████████████| 64/64 [00:09<00:00, 6.56it/s]\nlayer packaging: 100%|███████████████████████████████████████████| 1/1 [00:01<00:00, 1.29s/it]\n"
],
[
"outval.shape",
"_____no_output_____"
],
[
"def load_preprocess_images_2(image_filepaths, crop_size):\n \"\"\"\n define custom pre-processing here since R3M does not normalize like other models\n :seealso: r3m/example.py\n \"\"\"\n images = load_images(image_filepaths)\n # preprocessing\n transforms = T.Compose([\n T.Resize(256),\n T.CenterCrop(crop_size),\n T.ToTensor(), # ToTensor() divides by 255\n lambda img: img.unsqueeze(0),\n ])\n images = [transforms(image) * 255.0 for image in images] # R3M expects image input to be [0-255]\n images = np.concatenate(images)\n return images\n\npreprocessing2 = functools.partial(load_preprocess_images_2, crop_size=224)",
"_____no_output_____"
],
[
"r3m34cpu = load_r3m(\"resnet34\") # resnet18, resnet34\nr3m34cpu = r3m34cpu.module.to('cpu')\n\nr3m34_wrapper = PytorchWrapper(identifier='r3m34', model=r3m34cpu, preprocessing=preprocessing2)\nr3m34_wrapper.image_size = 224\n",
"_____no_output_____"
],
[
"outval = r3m34_wrapper(impaths[:1], layers=['convnet.layer1.0.relu'])",
"activations: 100%|█████████████████████████████████████████████| 64/64 [00:02<00:00, 25.89it/s]\nlayer packaging: 100%|███████████████████████████████████████████| 1/1 [00:00<00:00, 1.96it/s]\n"
],
[
"outval.shape",
"_____no_output_____"
],
[
"outval2 = r3m34_wrapper(impaths[:1], layers=['convnet.avgpool'])",
"activations: 100%|█████████████████████████████████████████████| 64/64 [00:03<00:00, 17.78it/s]\nlayer packaging: 100%|██████████████████████████████████████████| 1/1 [00:00<00:00, 105.85it/s]\n"
],
[
"outval2.shape",
"_____no_output_____"
],
[
"r3m50cpu = load_r3m(\"resnet50\") # resnet18, resnet34\nr3m50cpu = r3m50cpu.module.to('cpu')\n\nr3m50_wrapper = PytorchWrapper(identifier='r3m50', model=r3m50cpu, preprocessing=preprocessing2)\nr3m50_wrapper.image_size = 224\n",
"_____no_output_____"
],
[
"outval2 = r3m50_wrapper(impaths[:1], layers=['convnet.avgpool'])",
"activations: 100%|█████████████████████████████████████████████| 64/64 [00:11<00:00, 5.60it/s]\nlayer packaging: 100%|███████████████████████████████████████████| 1/1 [00:00<00:00, 28.41it/s]\n"
],
[
"outval2.shape",
"_____no_output_____"
],
[
"outval = r3m50_wrapper(impaths[:1], layers=['convnet.layer1.0.relu'])\noutval.shape",
"activations: 100%|█████████████████████████████████████████████| 64/64 [00:10<00:00, 6.29it/s]\nlayer packaging: 100%|███████████████████████████████████████████| 1/1 [00:04<00:00, 4.34s/it]\n"
],
[
"256*56*56",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a23b194023e3824d909c7fdc40ce22599b086e2
| 154,481 |
ipynb
|
Jupyter Notebook
|
causal-inference-tutorial.ipynb
|
ssameermah/Causal-Inference-examples
|
c25c39f93b0047bee83b9291efd53843966ae137
|
[
"MIT"
] | null | null | null |
causal-inference-tutorial.ipynb
|
ssameermah/Causal-Inference-examples
|
c25c39f93b0047bee83b9291efd53843966ae137
|
[
"MIT"
] | null | null | null |
causal-inference-tutorial.ipynb
|
ssameermah/Causal-Inference-examples
|
c25c39f93b0047bee83b9291efd53843966ae137
|
[
"MIT"
] | null | null | null | 161.929769 | 123,264 | 0.882685 |
[
[
[
"!pip install econml",
"Collecting econml\r\n Downloading econml-0.12.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl (3.1 MB)\r\n |████████████████████████████████| 3.1 MB 241 kB/s \r\n\u001b[?25hCollecting shap<0.40.0,>=0.38.1\r\n Downloading shap-0.39.0.tar.gz (356 kB)\r\n |████████████████████████████████| 356 kB 49.0 MB/s \r\n\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l-\b \b\\\b \bdone\r\n\u001b[?25hRequirement already satisfied: numpy in /opt/conda/lib/python3.7/site-packages (from econml) (1.19.5)\r\nRequirement already satisfied: statsmodels>=0.10 in /opt/conda/lib/python3.7/site-packages (from econml) (0.12.2)\r\nRequirement already satisfied: scikit-learn>0.22.0 in /opt/conda/lib/python3.7/site-packages (from econml) (0.23.2)\r\nRequirement already satisfied: lightgbm in /opt/conda/lib/python3.7/site-packages (from econml) (3.3.1)\r\nCollecting sparse\r\n Downloading sparse-0.13.0-py2.py3-none-any.whl (77 kB)\r\n |████████████████████████████████| 77 kB 4.5 MB/s \r\n\u001b[?25hRequirement already satisfied: scipy>1.4.0 in /opt/conda/lib/python3.7/site-packages (from econml) (1.7.2)\r\nRequirement already satisfied: pandas in /opt/conda/lib/python3.7/site-packages (from econml) (1.3.4)\r\nCollecting dowhy\r\n Downloading dowhy-0.7-py3-none-any.whl (152 kB)\r\n |████████████████████████████████| 152 kB 47.9 MB/s \r\n\u001b[?25hRequirement already satisfied: numba!=0.42.1 in /opt/conda/lib/python3.7/site-packages (from econml) (0.54.1)\r\nRequirement already satisfied: joblib>=0.13.0 in /opt/conda/lib/python3.7/site-packages (from econml) (1.1.0)\r\nRequirement already satisfied: llvmlite<0.38,>=0.37.0rc1 in /opt/conda/lib/python3.7/site-packages (from numba!=0.42.1->econml) (0.37.0)\r\nRequirement already satisfied: setuptools in /opt/conda/lib/python3.7/site-packages (from numba!=0.42.1->econml) (59.1.1)\r\nRequirement already satisfied: threadpoolctl>=2.0.0 in /opt/conda/lib/python3.7/site-packages (from scikit-learn>0.22.0->econml) (3.0.0)\r\nRequirement already satisfied: tqdm>4.25.0 in /opt/conda/lib/python3.7/site-packages (from shap<0.40.0,>=0.38.1->econml) (4.62.3)\r\nRequirement already satisfied: slicer==0.0.7 in /opt/conda/lib/python3.7/site-packages (from shap<0.40.0,>=0.38.1->econml) (0.0.7)\r\nRequirement already satisfied: cloudpickle in /opt/conda/lib/python3.7/site-packages (from shap<0.40.0,>=0.38.1->econml) (2.0.0)\r\nRequirement already satisfied: patsy>=0.5 in /opt/conda/lib/python3.7/site-packages (from statsmodels>=0.10->econml) (0.5.2)\r\nRequirement already satisfied: python-dateutil>=2.7.3 in /opt/conda/lib/python3.7/site-packages (from pandas->econml) (2.8.0)\r\nRequirement already satisfied: pytz>=2017.3 in /opt/conda/lib/python3.7/site-packages (from pandas->econml) (2021.3)\r\nRequirement already satisfied: pydot>=1.4 in /opt/conda/lib/python3.7/site-packages (from dowhy->econml) (1.4.2)\r\nRequirement already satisfied: networkx>=2.0 in /opt/conda/lib/python3.7/site-packages (from dowhy->econml) (2.6.3)\r\nRequirement already satisfied: sympy>=1.4 in /opt/conda/lib/python3.7/site-packages (from dowhy->econml) (1.9)\r\nRequirement already satisfied: wheel in /opt/conda/lib/python3.7/site-packages (from lightgbm->econml) (0.37.0)\r\nRequirement already satisfied: six in /opt/conda/lib/python3.7/site-packages (from patsy>=0.5->statsmodels>=0.10->econml) (1.16.0)\r\nRequirement already satisfied: pyparsing>=2.1.4 in /opt/conda/lib/python3.7/site-packages (from pydot>=1.4->dowhy->econml) (3.0.6)\r\nRequirement 
already satisfied: mpmath>=0.19 in /opt/conda/lib/python3.7/site-packages (from sympy>=1.4->dowhy->econml) (1.2.1)\r\nBuilding wheels for collected packages: shap\r\n Building wheel for shap (setup.py) ... \u001b[?25l-\b \b\\\b \b|\b \b/\b \b-\b \b\\\b \b|\b \bdone\r\n\u001b[?25h Created wheel for shap: filename=shap-0.39.0-cp37-cp37m-linux_x86_64.whl size=544687 sha256=445ef5154401c3539d7f5e36cd9a367244536c5f3536b77d40b93faca179bc5a\r\n Stored in directory: /root/.cache/pip/wheels/ca/25/8f/6ae5df62c32651cd719e972e738a8aaa4a87414c4d2b14c9c0\r\nSuccessfully built shap\r\nInstalling collected packages: sparse, shap, dowhy, econml\r\n Attempting uninstall: shap\r\n Found existing installation: shap 0.40.0\r\n Uninstalling shap-0.40.0:\r\n Successfully uninstalled shap-0.40.0\r\nSuccessfully installed dowhy-0.7 econml-0.12.0 shap-0.39.0 sparse-0.13.0\r\n\u001b[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001b[0m\r\n"
],
[
"# Some imports to get us started\nimport warnings\nwarnings.simplefilter('ignore')\n\n# Utilities\nimport os\nimport urllib.request\nimport numpy as np\nimport pandas as pd\nfrom networkx.drawing.nx_pydot import to_pydot\nfrom IPython.display import Image, display\n\n# Generic ML imports\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.ensemble import GradientBoostingRegressor\n\n# EconML imports\nfrom econml.dml import LinearDML, CausalForestDML\nfrom econml.cate_interpreter import SingleTreeCateInterpreter, SingleTreePolicyInterpreter\n\nimport matplotlib.pyplot as plt\n\n%matplotlib inline",
"_____no_output_____"
],
[
"# Import the sample pricing data\nfile_url = \"https://msalicedatapublic.blob.core.windows.net/datasets/Pricing/pricing_sample.csv\"\ntrain_data = pd.read_csv(file_url)",
"_____no_output_____"
],
[
"train_data.head()",
"_____no_output_____"
],
[
"#estimator inputs\ntrain_data[\"log_demand\"] = np.log(train_data[\"demand\"])\ntrain_data[\"log_price\"] = np.log(train_data[\"price\"])\n\nY = train_data[\"log_demand\"].values\nT = train_data[\"log_price\"].values\nX = train_data[[\"income\"]].values # features\nconfounder_names = [\"account_age\", \"age\", \"avg_hours\", \"days_visited\", \"friends_count\", \"has_membership\", \"is_US\", \"songs_purchased\"]\nW = train_data[confounder_names].values",
"_____no_output_____"
],
[
"# Get test data\nX_test = np.linspace(0, 5, 100).reshape(-1, 1)\nX_test_data = pd.DataFrame(X_test, columns=[\"income\"])",
"_____no_output_____"
]
],
[
[
"## Create Causal Model ",
"_____no_output_____"
]
],
[
[
"# initiate an EconML cate estimator\nest = LinearDML(model_y=GradientBoostingRegressor(), model_t=GradientBoostingRegressor(),\n featurizer=PolynomialFeatures(degree=2, include_bias=False))",
"_____no_output_____"
],
[
"# fit through dowhy\nest_dw = est.dowhy.fit(Y, T, X=X, W=W, outcome_names=[\"log_demand\"], treatment_names=[\"log_price\"], feature_names=[\"income\"],\n confounder_names=confounder_names, inference=\"statsmodels\")",
"_____no_output_____"
],
[
"# Visualize causal graph\ntry:\n # Try pretty printing the graph. Requires pydot and pygraphviz\n display(\n Image(to_pydot(est_dw._graph._graph).create_png())\n )\nexcept:\n # Fall back on default graph view\n est_dw.view_model() ",
"_____no_output_____"
],
[
"identified_estimand = est_dw.identified_estimand_\nprint(identified_estimand)",
"Estimand type: nonparametric-ate\n\n### Estimand : 1\nEstimand name: backdoor\nEstimand expression:\n d \n────────────(Expectation(log_demand|is_US,avg_hours,age,songs_purchased,income\nd[log_price] \n\n \n,friends_count,has_membership,account_age,days_visited))\n \nEstimand assumption 1, Unconfoundedness: If U→{log_price} and U→log_demand then P(log_demand|log_price,is_US,avg_hours,age,songs_purchased,income,friends_count,has_membership,account_age,days_visited,U) = P(log_demand|log_price,is_US,avg_hours,age,songs_purchased,income,friends_count,has_membership,account_age,days_visited)\n\n### Estimand : 2\nEstimand name: iv\nNo such variable(s) found!\n\n### Estimand : 3\nEstimand name: frontdoor\nNo such variable(s) found!\n\n"
],
[
"# initiate an EconML cate estimator\nest_nonparam = CausalForestDML(model_y=GradientBoostingRegressor(), model_t=GradientBoostingRegressor())\n# fit through dowhy\nest_nonparam_dw = est_nonparam.dowhy.fit(Y, T, X=X, W=W, outcome_names=[\"log_demand\"], treatment_names=[\"log_price\"],\n feature_names=[\"income\"], confounder_names=confounder_names, inference=\"blb\")",
"_____no_output_____"
]
],
[
[
"# Test Estimate Robustness with DoWhy",
"_____no_output_____"
],
[
"## Add Random Common Cause",
"_____no_output_____"
],
[
"How robust are our estimates to adding another confounder?",
"_____no_output_____"
]
],
[
[
"res_random = est_nonparam_dw.refute_estimate(method_name=\"random_common_cause\")\nprint(res_random)",
"Refute: Add a random common cause\nEstimated effect:-0.9573995973779295\nNew effect:-0.95832930881448\np value:0.37\n\n"
]
],
[
[
"How robust are our estimates to unobserved confounders",
"_____no_output_____"
]
],
[
[
"res_unobserved = est_nonparam_dw.refute_estimate(\n method_name=\"add_unobserved_common_cause\",\n confounders_effect_on_treatment=\"linear\",\n confounders_effect_on_outcome=\"linear\",\n effect_strength_on_treatment=0.1,\n effect_strength_on_outcome=0.1,\n)\nprint(res_unobserved)",
"Refute: Add an Unobserved Common Cause\nEstimated effect:-0.9573995973779295\nNew effect:-1.010149368734301\n\n"
]
],
[
[
"## Replace Treatment with a Random (Placebo) Variable\n",
"_____no_output_____"
],
[
"What happens our estimates if we replace the treatment variable with noise?",
"_____no_output_____"
]
],
[
[
"res_placebo = est_nonparam_dw.refute_estimate(\n method_name=\"placebo_treatment_refuter\", placebo_type=\"permute\", \n num_simulations=3\n)\nprint(res_placebo)",
"Refute: Use a Placebo Treatment\nEstimated effect:-0.9573995973779295\nNew effect:-0.009779517229192933\np value:0.15574829607146912\n\n"
]
],
[
[
"## Remove a Random Subset of the Data",
"_____no_output_____"
],
[
"Do we recover similar estimates on subsets of the data?",
"_____no_output_____"
]
],
[
[
"res_subset = est_nonparam_dw.refute_estimate(\n method_name=\"data_subset_refuter\", subset_fraction=0.8, \n num_simulations=3)\nprint(res_subset)",
"Refute: Use a subset of data\nEstimated effect:-0.9573995973779295\nNew effect:-0.9517490711399156\np value:0.31210482684964586\n\n"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
4a23c6efcba5c10395b73eeb45e5b40d33633d19
| 893 |
ipynb
|
Jupyter Notebook
|
v1.52.2/Functions/8. RM-GF correlations.ipynb
|
CyberCRI/herocoli-metrics-redwire
|
abd2e73f3ec6a5088be3302074d5b4b2a1d65362
|
[
"CC0-1.0"
] | 1 |
2017-01-18T13:46:31.000Z
|
2017-01-18T13:46:31.000Z
|
v1.52.2/Functions/8. RM-GF correlations.ipynb
|
CyberCRI/herocoli-metrics-redwire
|
abd2e73f3ec6a5088be3302074d5b4b2a1d65362
|
[
"CC0-1.0"
] | null | null | null |
v1.52.2/Functions/8. RM-GF correlations.ipynb
|
CyberCRI/herocoli-metrics-redwire
|
abd2e73f3ec6a5088be3302074d5b4b2a1d65362
|
[
"CC0-1.0"
] | null | null | null | 17.86 | 74 | 0.520717 |
[
[
[
"# 8. RM-GF correlations",
"_____no_output_____"
]
],
[
[
"%run \"../Functions/7. Question groups variation analysis.ipynb\"\nprint(\"8. RM-GF correlations\")",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
]
] |
4a23f6707cf9ceadc3bd6da3cc6709500304e3d4
| 96,446 |
ipynb
|
Jupyter Notebook
|
qiskit/advanced/aqua/finance/data_providers/time_series.ipynb
|
gvvynplaine/qiskit-iqx-tutorials
|
40af3da7aa86ce190d04f147daf46fbc893a1966
|
[
"Apache-2.0"
] | 13 |
2020-05-19T06:29:20.000Z
|
2021-12-22T16:40:17.000Z
|
qiskit/advanced/aqua/finance/data_providers/time_series.ipynb
|
gvvynplaine/qiskit-iqx-tutorials
|
40af3da7aa86ce190d04f147daf46fbc893a1966
|
[
"Apache-2.0"
] | null | null | null |
qiskit/advanced/aqua/finance/data_providers/time_series.ipynb
|
gvvynplaine/qiskit-iqx-tutorials
|
40af3da7aa86ce190d04f147daf46fbc893a1966
|
[
"Apache-2.0"
] | 9 |
2020-05-19T08:30:56.000Z
|
2021-09-01T11:30:25.000Z
| 189.854331 | 33,180 | 0.900908 |
[
[
[
"",
"_____no_output_____"
],
[
"# _*Qiskit Finance: Loading and Processing Stock-Market Time-Series Data*_\n\nThe latest version of this notebook is available on https://github.com/qiskit/qiskit-tutorial.\n\n***\n### Contributors\nJakub Marecek<sup>[1]</sup>\n\n### Affiliation\n- <sup>[1]</sup>IBMQ",
"_____no_output_____"
],
[
"### Introduction\n\nAcross many problems in finance, one starts with time series. Here, we showcase how to generate pseudo-random time-series, download actual stock-market time series from a number of common providers, and how to compute time-series similarity measures.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nfrom qiskit.finance.data_providers import *\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nimport datetime\nimport matplotlib.pyplot as plt\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()",
"_____no_output_____"
],
[
"data = RandomDataProvider(tickers=[\"TICKER1\", \"TICKER2\"],\n start = datetime.datetime(2016, 1, 1),\n end = datetime.datetime(2016, 1, 30),\n seed = 1)\ndata.run()",
"_____no_output_____"
]
],
[
[
"Once the data are loaded, you can run a variety of algorithms on those to aggregate the data. Notably, you can compute the covariance matrix or a variant, which would consider alternative time-series similarity measures based on <a target=\"_blank\" href=\"https://en.wikipedia.org/wiki/Dynamic_time_warping\">dynamic time warping</a> (DTW). In DTW, changes that vary in speed, e.g., one stock's price following another stock's price with a small delay, can be accommodated.",
"_____no_output_____"
]
],
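[
[
"To make the DTW idea concrete, here is a small self-contained sketch of a classic dynamic-time-warping distance between two short series. It is only an illustration; the provider's `get_similarity_matrix` uses its own implementation.",
"_____no_output_____"
]
],
[
[
"# Illustration only: O(n*m) dynamic time warping distance between two 1-D series.\nimport numpy as np\n\ndef dtw_distance(a, b):\n    n, m = len(a), len(b)\n    cost = np.full((n + 1, m + 1), np.inf)\n    cost[0, 0] = 0.0\n    for i in range(1, n + 1):\n        for j in range(1, m + 1):\n            d = abs(a[i - 1] - b[j - 1])\n            cost[i, j] = d + min(cost[i - 1, j], cost[i, j - 1], cost[i - 1, j - 1])\n    return cost[n, m]\n\nprint(dtw_distance([1, 2, 3, 4], [1, 1, 2, 3, 4]))  # 0.0: the one-step lag is absorbed",
"_____no_output_____"
]
],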
[
[
"means = data.get_mean_vector()\nprint(\"Means:\")\nprint(means)\n\nrho = data.get_similarity_matrix()\nprint(\"A time-series similarity measure:\")\nprint(rho)\nplt.imshow(rho)\nplt.show()\n\ncov = data.get_covariance_matrix()\nprint(\"A covariance matrix:\")\nprint(cov)\nplt.imshow(cov)\nplt.show()",
"Means:\n[16.66722941 72.03026566]\nA time-series similarity measure:\n[[1.0000000e+00 6.2284804e-04]\n [6.2284804e-04 1.0000000e+00]]\n"
]
],
[
[
"If you wish, you can look into the underlying pseudo-random time-series using. Please note that the private class members (starting with underscore) may change in future releases of Qiskit.",
"_____no_output_____"
]
],
[
[
"print(\"The underlying evolution of stock prices:\")\nfor (cnt, s) in enumerate(data._tickers):\n plt.plot(data._data[cnt], label=s)\nplt.legend()\nplt.xticks(rotation=90)\nplt.show()\n\nfor (cnt, s) in enumerate(data._tickers):\n print(s)\n print(data._data[cnt])",
"The underlying evolution of stock prices:\n"
]
],
[
[
"Clearly, you can adapt the number and names of tickers and the range of dates: ",
"_____no_output_____"
]
],
[
[
"data = RandomDataProvider(tickers=[\"CompanyA\", \"CompanyB\", \"CompanyC\"],\n start = datetime.datetime(2015, 1, 1),\n end = datetime.datetime(2016, 1, 30),\n seed = 1)\ndata.run()\nfor (cnt, s) in enumerate(data._tickers):\n plt.plot(data._data[cnt], label=s)\nplt.legend()\nplt.xticks(rotation=90)\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Access to closing-price time-series\n\nWhile the access to real-time data usually requires a payment, it is possible \nto access historical (adjusted) closing prices via Wikipedia and Quandl\nfree of charge, following registration at:\nhttps://www.quandl.com/?modal=register\nIn the code below, one needs to specify actual tickers of actual NASDAQ\nissues and the access token you obtain from Quandl; by running the code below, you agree to the Quandl terms and \nconditions, including a liability waiver.\nNotice that at least two tickers are required for the computation\nof covariance and time-series matrices, but hundreds of tickers may go \nbeyond the fair usage limits of Quandl.",
"_____no_output_____"
]
],
[
[
"stocks = [\"REPLACEME1\", \"REPLACEME2\"]\nwiki = WikipediaDataProvider(\n token = \"REPLACEME\",\n tickers = stocks,\n stockmarket = StockMarket.NASDAQ,\n start = datetime.datetime(2016,1,1),\n end = datetime.datetime(2016,1,30))\nwiki.run()",
"_____no_output_____"
]
],
[
[
"Once the data are loaded, you can again compute the covariance matrix or its DTW variants.",
"_____no_output_____"
]
],
[
[
"if wiki._n <= 1: \n raise Exception(\"Not enough data to plot covariance or time-series similarity. Please use at least two tickers.\")\n\nrho = wiki.get_similarity_matrix()\nprint(\"A time-series similarity measure:\")\nprint(rho)\nplt.imshow(rho)\nplt.show()\n\ncov = wiki.get_covariance_matrix()\nprint(\"A covariance matrix:\")\nprint(cov)\nplt.imshow(cov)\nplt.show()",
"_____no_output_____"
]
],
[
[
"If you wish, you can look into the underlying time-series using:",
"_____no_output_____"
]
],
[
[
"print(\"The underlying evolution of stock prices:\")\nfor (cnt, s) in enumerate(stocks):\n plt.plot(wiki._data[cnt], label=s)\nplt.legend()\nplt.xticks(rotation=90)\nplt.show()\n\nfor (cnt, s) in enumerate(stocks):\n print(s)\n print(wiki._data[cnt])",
"_____no_output_____"
]
],
[
[
"### [Optional] Setup token to access recent, fine-grained time-series\n\nIf you would like to download professional data, you will have to set-up a token with one of the major providers. Let us now illustrate the data with NASDAQ Data on Demand, which can supply bid and ask prices in arbitrary resolution, as well as aggregates such as daily adjusted closing prices, for NASDAQ and NYSE issues.\n",
"_____no_output_____"
],
[
"If you don't have NASDAQ Data on Demand license, you can contact NASDAQ (cf. https://business.nasdaq.com/intel/GIS/Nasdaq-Data-on-Demand.html) to obtain a trial or paid license.\n\nIf and when you have access to NASDAQ Data on Demand using your own token, you should replace REPLACE-ME below with the token. \nTo assure the security of the connection, you should also have your own means of validating NASDAQ's certificates. The DataOnDemandProvider constructor has an optional argument `verify`, which can be `None` or a string or a boolean. If it is `None`, certify certificates will be used (default). If verify is a string, it should be pointing to a certificate for the HTTPS connection to NASDAQ (dataondemand.nasdaq.com), either in the form of a CA_BUNDLE file or a directory wherein to look.\n",
"_____no_output_____"
]
],
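[
[
"For example, if you keep your own CA bundle for dataondemand.nasdaq.com, you could point the `verify` argument at it. The commented sketch below mirrors the call in the next cell; the bundle path is a placeholder.",
"_____no_output_____"
]
],
[
[
"# Hypothetical sketch: pin the HTTPS connection to your own CA bundle via `verify`.\n# nasdaq = DataOnDemandProvider(token = \"REPLACE-ME\",\n#                               tickers = stocks,\n#                               stockmarket = StockMarket.NASDAQ,\n#                               start = datetime.datetime(2016,1,1),\n#                               end = datetime.datetime(2016,1,2),\n#                               verify = \"path/to/ca_bundle.pem\")",
"_____no_output_____"
]
],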
[
[
"from qiskit.finance.data_providers.data_on_demand_provider import StockMarket\ntry:\n nasdaq = DataOnDemandProvider(token = \"REPLACE-ME\",\n tickers = stocks,\n stockmarket = StockMarket.NASDAQ,\n start = datetime.datetime(2016,1,1),\n end = datetime.datetime(2016,1,2))\n nasdaq.run()\n nasdaq.plot()\nexcept QiskitFinanceError as e:\n print(e)\n print(\"You need to replace REPLACE-ME with a valid token.\")",
"_____no_output_____"
]
],
[
[
"Another major vendor of stock market data is Exchange Data International (EDI), whose API can be used to query over 100 emerging and frontier markets that are Africa, Asia, Far East, Latin America and Middle East, as well as the more established ones. See:\nhttps://www.exchange-data.com/pricing-data/adjusted-prices.php#exchange-coverage\nfor an overview of the coverage.\n\nThe access again requires a valid access token to replace REPLACE-ME below. The token can be obtained on a trial or paid-for basis at:\nhttps://www.quandl.com/\n\nIn the following example, you need to replace TICKER1 and TICKER2 with valid tickers at the London Stock Exchange. ",
"_____no_output_____"
]
],
[
[
"from qiskit.finance.data_providers.exchangedataprovider import StockMarket\ntry:\n lse = ExchangeDataProvider(token = \"REPLACE-ME\",\n tickers = [\"TICKER1\", \"TICKER2\"],\n stockmarket = StockMarket.LONDON,\n start = datetime.datetime(2019,1,1),\n end = datetime.datetime(2019,1,30))\n lse.run()\n lse.plot()\nexcept QiskitFinanceError as e: \n print(e)\n print(\"You need to replace REPLACE-ME with a valid token.\")",
"_____no_output_____"
]
],
[
[
"For the actual use of the data, please see the <a href=\"../optimization/portfolio_optimization.ipynb\">portfolio_optimization</a> or <a href=\"../optimization/portfolio_diversification.ipynb\">portfolio_diversification</a> notebooks. ",
"_____no_output_____"
]
],
[
[
"import qiskit.tools.jupyter\n%qiskit_version_table\n%qiskit_copyright",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a24076c9fe44f25fd6fc316e3fd88aa0f3db422
| 376,759 |
ipynb
|
Jupyter Notebook
|
ARIMA_stock_prediciton.ipynb
|
arjunjanamatti/finance_and_trading
|
c4e813f771b093c43ef3734f8d63204461ce0f07
|
[
"MIT"
] | null | null | null |
ARIMA_stock_prediciton.ipynb
|
arjunjanamatti/finance_and_trading
|
c4e813f771b093c43ef3734f8d63204461ce0f07
|
[
"MIT"
] | null | null | null |
ARIMA_stock_prediciton.ipynb
|
arjunjanamatti/finance_and_trading
|
c4e813f771b093c43ef3734f8d63204461ce0f07
|
[
"MIT"
] | null | null | null | 652.961872 | 23,640 | 0.937682 |
[
[
[
"import os\nos.chdir(\"C:/Users/Arjun Janamatti/Downloads/Data sets 01042020/Data sets 01042020/\")\nimport pandas as pd\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nfrom statsmodels.tsa.stattools import acf, pacf\nimport statsmodels.tsa.stattools as ts\nfrom statsmodels.tsa.arima_model import ARIMA\n\nvariables = pd.read_csv(\"HINDUNILVR.NS__Set2.csv\")\nfor i in range(-2,-7,-1):\n print(i)\n column_to_be_checked = variables.columns[i]\n price = variables[column_to_be_checked]\n lnprice = np.log(price)\n plt.plot(lnprice)\n plt.show()\n acf_1 = acf(lnprice)[1:20]\n test_df = pd.DataFrame([acf_1]).T\n test_df.columns = [\"Autocorrelation\"]\n test_df.index = test_df.index + 1\n test_df.plot(kind = \"bar\")\n plt.show()\n pacf_1 = pacf(lnprice)[1:20]\n test_df = pd.DataFrame([pacf_1]).T\n test_df.columns = [\"Partial Autocorrelation\"]\n test_df.index = test_df.index + 1\n test_df.plot(kind = \"bar\")\n plt.show()\n result = ts.adfuller(lnprice,1)\n result\n lnprice_diff = lnprice - lnprice.shift()\n diff = lnprice_diff.dropna()\n acf_1_diff = acf(diff)[1:20]\n test_df = pd.DataFrame([acf_1_diff]).T\n test_df.columns = [\"First Difference Autocorrelation\"]\n test_df.index += 1\n test_df.plot(kind=\"bar\")\n pacf_1_diff = pacf(diff)[1:20]\n plt.plot(pacf_1_diff)\n plt.show()\n test_df = pd.DataFrame([pacf_1_diff]).T\n test_df.columns = [\"First Difference Partial Autocorrelation\"]\n test_df.index += 1\n test_df.plot(kind=\"bar\")\n price_matrix = lnprice.as_matrix()\n model = ARIMA(price_matrix, order = (0,1,0))\n model_fit = model.fit(disp = 0)\n print(model_fit.summary())\n predictions = model_fit.predict(36,41, typ=\"levels\")\n predictions\n predictionsadjusted = np.exp(predictions)\n predictionsadjusted\n plt.plot(predictionsadjusted)\n plt.title(\"forecasted Price\")\n plt.show()\n np.savetxt(\"saved_data_{0}.txt\".format(column_to_be_checked),predictionsadjusted,fmt = \"%6.5d\")",
"-2\n"
]
]
] |
[
"code"
] |
[
[
"code"
]
] |
4a240bb1825bf6c0cb811ba1c84bf9a37bd06c2c
| 6,024 |
ipynb
|
Jupyter Notebook
|
Bloque 3 - Machine Learning/03_Time_Series/ejercicios/02_Time_Series_Forecast.ipynb
|
franciscocanon-thebridge/bootcamp_thebridge_PTSep20
|
e81cbdcb9254fa46a3925f41c583748e25b459c0
|
[
"MIT"
] | null | null | null |
Bloque 3 - Machine Learning/03_Time_Series/ejercicios/02_Time_Series_Forecast.ipynb
|
franciscocanon-thebridge/bootcamp_thebridge_PTSep20
|
e81cbdcb9254fa46a3925f41c583748e25b459c0
|
[
"MIT"
] | null | null | null |
Bloque 3 - Machine Learning/03_Time_Series/ejercicios/02_Time_Series_Forecast.ipynb
|
franciscocanon-thebridge/bootcamp_thebridge_PTSep20
|
e81cbdcb9254fa46a3925f41c583748e25b459c0
|
[
"MIT"
] | 1 |
2021-01-23T10:37:15.000Z
|
2021-01-23T10:37:15.000Z
| 22.818182 | 269 | 0.566567 |
[
[
[
"## Ejercicio Time Series Forecast\nPara este ejercicio vamos a predecir cuál será la demanda de pasajeros de una aerolínea, para poder anticiparse a las contrataciones de personal, mantenimiento de las aeronaves y gestión de inventario y comidas.\n\nPara ello, se pide:\n1. Carga datos (AirPassengers.csv) y representa la serie. ¿Hay seasonality? ¿Cada cuánto?\n2. Crea en una gráfica la variable original + su media obtenida mediante una rolling window con el valor de seasonality obtenido en el apartado anterior. Tienes que usar la función ``rolling()`` del DataFrame.\n3. Comprueba de manera estadística si la serie es o no stationary.\n4. Algunas veces, aplicar transformaciones ayudan a la hora de predecir. Aplica una transformación logarítmica sobre los datos para mejorar el proceso de transformación de tu time series a stationary. Recuerda, después del forecast, invertir la transformación.\n5. Divide en train y test. Guarda 20 muestras para test.\n6. Créate un modelo ARIMA. Habrá varias combinaciones en función de sus hiperparámetros... Mide el MAE y RMSE del modelo en predicción. \n7. Créate un modelo SARIMA. Ten en cuenta el parámetro \"m\" de la función ARIMA, mediante el cual se establece el seasonality.\n7. Representa en una gráfica los datos de test y tus predicciones.\n8. Prueba un decission tree y un random forest, a ver qué performance presentan y compáralos con el anterior",
"_____no_output_____"
],
[
"## 1. Carga datos y representa la serie",
"_____no_output_____"
],
[
"## 2. Crea en una gráfica la variable original vs su media obtenida mediante una rolling window con el valor de seasonality obtenido en el apartado anterior",
"_____no_output_____"
],
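[
"A possible starting point for steps 1 and 2 (only a sketch, assuming the file AirPassengers.csv has the usual `Month` and `#Passengers` columns):",
"_____no_output_____"
],
[
"# Sketch for steps 1-2: load the series and plot it with a 12-month rolling mean.\n# The column names 'Month' and '#Passengers' are assumptions about the CSV layout.\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv('AirPassengers.csv', parse_dates=['Month'], index_col='Month')\nserie = df['#Passengers']\n\nserie.plot(label='original')\nserie.rolling(window=12).mean().plot(label='rolling mean (12)')\nplt.legend()\nplt.show()",
"_____no_output_____"
],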
[
"## 3. Comprueba de manera estadística si la serie es o no stationary.",
"_____no_output_____"
],
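[
"A sketch of the statistical check using the augmented Dickey-Fuller test (it assumes the `serie` variable from the sketch above):",
"_____no_output_____"
],
[
"# Sketch for step 3: ADF test; a p-value above 0.05 suggests the series is not stationary.\nfrom statsmodels.tsa.stattools import adfuller\n\nadf_stat, p_value, *_ = adfuller(serie)\nprint(f'ADF statistic: {adf_stat:.3f}, p-value: {p_value:.3f}')",
"_____no_output_____"
],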
[
"## 4. Aplica una transformación logarítmica",
"_____no_output_____"
],
[
"## 5. Divide en train y test. Guarda 20 muestras para test.",
"_____no_output_____"
],
[
"## 6. Crea tu primer modelo ARIMA",
"_____no_output_____"
],
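[
"One possible sketch for step 6, fitting a simple ARIMA on the log-transformed series and measuring MAE and RMSE on the 20 test samples (the order (1, 1, 1) is only an example, and a recent statsmodels/scikit-learn is assumed):",
"_____no_output_____"
],
[
"# Sketch for step 6: fit ARIMA on train, forecast the test horizon, invert the log transform.\nimport numpy as np\nfrom statsmodels.tsa.arima.model import ARIMA\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\n\nlog_serie = np.log(serie)\ntrain, test = log_serie[:-20], log_serie[-20:]\n\nmodel = ARIMA(train, order=(1, 1, 1)).fit()\npred = np.exp(model.forecast(steps=20))\ntrue = np.exp(test)\n\nprint('MAE :', mean_absolute_error(true, pred))\nprint('RMSE:', np.sqrt(mean_squared_error(true, pred)))",
"_____no_output_____"
],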
[
"## 7. Representa en una gráfica los datos de test y tus predicciones.",
"_____no_output_____"
],
[
"## 8. Prueba otros modelos, a ver qué performance presentan\n\nEn este apartado puedes utilizar cualquiera de los modelos vistos en el apartado supervisado de regresión, creando como nuevas variables los lags de la serie temporal.",
"_____no_output_____"
]
]
] |
[
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a2438a61ebb0eab439852a49bb836571726e38d
| 2,175 |
ipynb
|
Jupyter Notebook
|
00_core.ipynb
|
tcfastai/start
|
20329be0a431de4fbefed58b8a5dc5a7d7b1eef4
|
[
"Apache-2.0"
] | null | null | null |
00_core.ipynb
|
tcfastai/start
|
20329be0a431de4fbefed58b8a5dc5a7d7b1eef4
|
[
"Apache-2.0"
] | null | null | null |
00_core.ipynb
|
tcfastai/start
|
20329be0a431de4fbefed58b8a5dc5a7d7b1eef4
|
[
"Apache-2.0"
] | null | null | null | 17.682927 | 90 | 0.472644 |
[
[
[
"1+1\n",
"_____no_output_____"
],
[
"# default_exp core",
"_____no_output_____"
],
[
"#export\ndef say_hello(to):\n \"Say hello to somebody\"\n return f'Hello {to}!'",
"_____no_output_____"
]
],
[
[
"# module name here\n\n> API details.",
"_____no_output_____"
]
],
[
[
"#hide\nfrom nbdev.showdoc import *",
"_____no_output_____"
]
],
[
[
"testing the mark down ability\n",
"_____no_output_____"
]
],
[
[
"say_hello(\"Sylvain\")\n",
"_____no_output_____"
],
[
"from IPython.display import display,SVG\ndisplay(SVG('<svg height=\"100\"><circle cx=\"50\" cy=\"50\" r=\"40\"/></svg>'))\n",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a2456694e163bd223919e83ca3e873c57e231c6
| 1,742 |
ipynb
|
Jupyter Notebook
|
0_notebooks_training/getBuildingFootPrintTooComplex.ipynb
|
tonybutzer/ajit
|
c7837bd08a73a1d54edc9f3347fe588f196a3b20
|
[
"MIT"
] | null | null | null |
0_notebooks_training/getBuildingFootPrintTooComplex.ipynb
|
tonybutzer/ajit
|
c7837bd08a73a1d54edc9f3347fe588f196a3b20
|
[
"MIT"
] | null | null | null |
0_notebooks_training/getBuildingFootPrintTooComplex.ipynb
|
tonybutzer/ajit
|
c7837bd08a73a1d54edc9f3347fe588f196a3b20
|
[
"MIT"
] | null | null | null | 28.096774 | 157 | 0.567164 |
[
[
[
"! wget https://raw.githubusercontent.com/aws-samples/aws-open-data-satellite-lidar-tutorial/master/Building-Footprint-Lite.ipynb",
"--2022-02-26 00:22:48-- https://raw.githubusercontent.com/aws-samples/aws-open-data-satellite-lidar-tutorial/master/Building-Footprint-Lite.ipynb\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.111.133, 185.199.109.133, 185.199.108.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.111.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 3009945 (2.9M) [text/plain]\nSaving to: ‘Building-Footprint-Lite.ipynb’\n\n100%[======================================>] 3,009,945 --.-K/s in 0.1s \n\n2022-02-26 00:22:49 (22.4 MB/s) - ‘Building-Footprint-Lite.ipynb’ saved [3009945/3009945]\n\n"
]
]
] |
[
"code"
] |
[
[
"code"
]
] |
4a2457265a5285c4b6e24bc54365e9f6143fa71e
| 2,300 |
ipynb
|
Jupyter Notebook
|
python-data-structures/leetocde/tree-level-order.ipynb
|
dimastatz/courses
|
663f19c53427552034e07f27ff0604b2d1d132ec
|
[
"MIT"
] | null | null | null |
python-data-structures/leetocde/tree-level-order.ipynb
|
dimastatz/courses
|
663f19c53427552034e07f27ff0604b2d1d132ec
|
[
"MIT"
] | null | null | null |
python-data-structures/leetocde/tree-level-order.ipynb
|
dimastatz/courses
|
663f19c53427552034e07f27ff0604b2d1d132ec
|
[
"MIT"
] | 1 |
2022-03-24T01:16:12.000Z
|
2022-03-24T01:16:12.000Z
| 23.71134 | 140 | 0.472609 |
[
[
[
"# Binary Tree Level Order Traversal\nGiven the root of a binary tree, return the level order traversal of its nodes' values. (i.e., from left to right, level by level).\n\n\n### Example 1:\n- Input: root = [3,9,20,null,null,15,7]\n- Output: [[3],[9,20],[15,7]]\n\n### Example 2:\n- Input: root = [1]\n- Output: [[1]]\n\n### Example 3:\n- Input: root = []\n- Output: []\n\n\n## Solution\n\n### Intuition\nPerform breadth first traversal. Print all nodes as they appear in tree\n\n### Implementation\n",
"_____no_output_____"
]
],
[
[
"class TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\ndef level_order(root: TreeNode) -> list:\n res, queue = [], [] if not root else [root]\n \n while queue:\n tmp = []\n res.append([n.val for n in queue])\n \n for n in queue:\n if n.left:\n tmp.append(n.left)\n if n.right:\n tmp.append(n.right)\n queue = tmp\n \n return res\n\n",
"_____no_output_____"
]
],
[
[
"Analysis:\n- Time Complexity: O(n)\n- Space Complexity: O(n)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a24598e991e6978b4d33dd925565dde5ee396c2
| 31,031 |
ipynb
|
Jupyter Notebook
|
site/en/tutorials/audio/transfer_learning_audio.ipynb
|
Pandinosaurus/docs-2
|
3550667e06ea24580b6d907aaf09f0c8e0dfca23
|
[
"Apache-2.0"
] | null | null | null |
site/en/tutorials/audio/transfer_learning_audio.ipynb
|
Pandinosaurus/docs-2
|
3550667e06ea24580b6d907aaf09f0c8e0dfca23
|
[
"Apache-2.0"
] | null | null | null |
site/en/tutorials/audio/transfer_learning_audio.ipynb
|
Pandinosaurus/docs-2
|
3550667e06ea24580b6d907aaf09f0c8e0dfca23
|
[
"Apache-2.0"
] | null | null | null | 36.507059 | 599 | 0.552963 |
[
[
[
"##### Copyright 2021 The TensorFlow Authors.",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/tutorials/audio/transfer_learning_audio\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/audio/transfer_learning_audio.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/tutorials/audio/transfer_learning_audio.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/audio/transfer_learning_audio.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n <td>\n <a href=\"https://tfhub.dev/google/yamnet/1\"><img src=\"https://www.tensorflow.org/images/hub_logo_32px.png\" />See TF Hub model</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"# Transfer learning with YAMNet for environmental sound classification\n\n[YAMNet](https://tfhub.dev/google/yamnet/1) is a pre-trained deep neural network that can predict audio events from [521 classes](https://github.com/tensorflow/models/blob/master/research/audioset/yamnet/yamnet_class_map.csv), such as laughter, barking, or a siren. \n\n In this tutorial you will learn how to:\n\n- Load and use the YAMNet model for inference.\n- Build a new model using the YAMNet embeddings to classify cat and dog sounds.\n- Evaluate and export your model.\n",
"_____no_output_____"
],
[
"## Import TensorFlow and other libraries\n",
"_____no_output_____"
],
[
"Start by installing [TensorFlow I/O](https://www.tensorflow.org/io), which will make it easier for you to load audio files off disk.",
"_____no_output_____"
]
],
[
[
"!pip install tensorflow_io",
"_____no_output_____"
],
[
"import os\n\nfrom IPython import display\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nimport tensorflow as tf\nimport tensorflow_hub as hub\nimport tensorflow_io as tfio",
"_____no_output_____"
]
],
[
[
"## About YAMNet\n\n[YAMNet](https://github.com/tensorflow/models/tree/master/research/audioset/yamnet) is a pre-trained neural network that employs the [MobileNetV1](https://arxiv.org/abs/1704.04861) depthwise-separable convolution architecture. It can use an audio waveform as input and make independent predictions for each of the 521 audio events from the [AudioSet](http://g.co/audioset) corpus.\n\nInternally, the model extracts \"frames\" from the audio signal and processes batches of these frames. This version of the model uses frames that are 0.96 second long and extracts one frame every 0.48 seconds .\n\nThe model accepts a 1-D float32 Tensor or NumPy array containing a waveform of arbitrary length, represented as single-channel (mono) 16 kHz samples in the range `[-1.0, +1.0]`. This tutorial contains code to help you convert WAV files into the supported format.\n\nThe model returns 3 outputs, including the class scores, embeddings (which you will use for transfer learning), and the log mel [spectrogram](https://www.tensorflow.org/tutorials/audio/simple_audio#spectrogram). You can find more details [here](https://tfhub.dev/google/yamnet/1).\n\nOne specific use of YAMNet is as a high-level feature extractor - the 1,024-dimensional embedding output. You will use the base (YAMNet) model's input features and feed them into your shallower model consisting of one hidden `tf.keras.layers.Dense` layer. Then, you will train the network on a small amount of data for audio classification _without_ requiring a lot of labeled data and training end-to-end. (This is similar to [transfer learning for image classification with TensorFlow Hub](https://www.tensorflow.org/tutorials/images/transfer_learning_with_hub) for more information.)\n\nFirst, you will test the model and see the results of classifying audio. You will then construct the data pre-processing pipeline.\n\n### Loading YAMNet from TensorFlow Hub\n\nYou are going to use a pre-trained YAMNet from [Tensorflow Hub](https://tfhub.dev/) to extract the embeddings from the sound files.\n\nLoading a model from TensorFlow Hub is straightforward: choose the model, copy its URL, and use the `load` function.\n\nNote: to read the documentation of the model, use the model URL in your browser.",
"_____no_output_____"
]
],
[
[
"yamnet_model_handle = 'https://tfhub.dev/google/yamnet/1'\nyamnet_model = hub.load(yamnet_model_handle)",
"_____no_output_____"
]
],
[
[
"With the model loaded, you can follow the [YAMNet basic usage tutorial](https://www.tensorflow.org/hub/tutorials/yamnet) and download a sample WAV file to run the inference.\n",
"_____no_output_____"
]
],
[
[
"testing_wav_file_name = tf.keras.utils.get_file('miaow_16k.wav',\n 'https://storage.googleapis.com/audioset/miaow_16k.wav',\n cache_dir='./',\n cache_subdir='test_data')\n\nprint(testing_wav_file_name)",
"_____no_output_____"
]
],
[
[
"You will need a function to load audio files, which will also be used later when working with the training data. (Learn more about reading audio files and their labels in [Simple audio recognition](https://www.tensorflow.org/tutorials/audio/simple_audio#reading_audio_files_and_their_labels).\n\nNote: The returned `wav_data` from `load_wav_16k_mono` is already normalized to values in the `[-1.0, 1.0]` range (for more information, go to [YAMNet's documentation on TF Hub](https://tfhub.dev/google/yamnet/1)).",
"_____no_output_____"
]
],
[
[
"# Utility functions for loading audio files and making sure the sample rate is correct.\n\[email protected]\ndef load_wav_16k_mono(filename):\n \"\"\" Load a WAV file, convert it to a float tensor, resample to 16 kHz single-channel audio. \"\"\"\n file_contents = tf.io.read_file(filename)\n wav, sample_rate = tf.audio.decode_wav(\n file_contents,\n desired_channels=1)\n wav = tf.squeeze(wav, axis=-1)\n sample_rate = tf.cast(sample_rate, dtype=tf.int64)\n wav = tfio.audio.resample(wav, rate_in=sample_rate, rate_out=16000)\n return wav",
"_____no_output_____"
],
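[
"# Note: this cell is not part of the original tutorial. It is a rough, hedged sketch of how many\n# YAMNet frames (and therefore embeddings) to expect for a 16 kHz mono clip, assuming the\n# 0.96 s window and 0.48 s hop described above; the exact count may differ slightly due to padding.\ndef expected_num_frames(num_samples, sample_rate=16000, window_s=0.96, hop_s=0.48):\n    duration_s = num_samples / sample_rate\n    return max(1, 1 + int((duration_s - window_s) // hop_s))\n\n# For example, expected_num_frames(len(testing_wav_data)) (available after the next cell runs)\n# should roughly match the number of embeddings reported later (13 for the sample clip).",
"_____no_output_____"
],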
[
"testing_wav_data = load_wav_16k_mono(testing_wav_file_name)\n\n_ = plt.plot(testing_wav_data)\n\n# Play the audio file.\ndisplay.Audio(testing_wav_data,rate=16000)",
"_____no_output_____"
]
],
[
[
"### Load the class mapping\n\nIt's important to load the class names that YAMNet is able to recognize. The mapping file is present at `yamnet_model.class_map_path()` in the CSV format.",
"_____no_output_____"
]
],
[
[
"class_map_path = yamnet_model.class_map_path().numpy().decode('utf-8')\nclass_names =list(pd.read_csv(class_map_path)['display_name'])\n\nfor name in class_names[:20]:\n print(name)\nprint('...')",
"_____no_output_____"
]
],
[
[
"### Run inference\n\nYAMNet provides frame-level class-scores (i.e., 521 scores for every frame). In order to determine clip-level predictions, the scores can be aggregated per-class across frames (e.g., using mean or max aggregation). This is done below by `scores_np.mean(axis=0)`. Finally, to find the top-scored class at the clip-level, you take the maximum of the 521 aggregated scores.\n",
"_____no_output_____"
]
],
[
[
"scores, embeddings, spectrogram = yamnet_model(testing_wav_data)\nclass_scores = tf.reduce_mean(scores, axis=0)\ntop_class = tf.math.argmax(class_scores)\ninferred_class = class_names[top_class]\n\nprint(f'The main sound is: {inferred_class}')\nprint(f'The embeddings shape: {embeddings.shape}')",
"_____no_output_____"
]
],
[
[
"Note: The model correctly inferred an animal sound. Your goal in this tutorial is to increase the model's accuracy for specific classes. Also, notice that the model generated 13 embeddings, 1 per frame.",
"_____no_output_____"
],
[
"## ESC-50 dataset\n\nThe [ESC-50 dataset](https://github.com/karolpiczak/ESC-50#repository-content) ([Piczak, 2015](https://www.karolpiczak.com/papers/Piczak2015-ESC-Dataset.pdf)) is a labeled collection of 2,000 five-second long environmental audio recordings. The dataset consists of 50 classes, with 40 examples per class.\n\nDownload the dataset and extract it. \n",
"_____no_output_____"
]
],
[
[
"_ = tf.keras.utils.get_file('esc-50.zip',\n 'https://github.com/karoldvl/ESC-50/archive/master.zip',\n cache_dir='./',\n cache_subdir='datasets',\n extract=True)",
"_____no_output_____"
]
],
[
[
"### Explore the data\n\nThe metadata for each file is specified in the csv file at `./datasets/ESC-50-master/meta/esc50.csv`\n\nand all the audio files are in `./datasets/ESC-50-master/audio/`\n\nYou will create a pandas `DataFrame` with the mapping and use that to have a clearer view of the data.\n",
"_____no_output_____"
]
],
[
[
"esc50_csv = './datasets/ESC-50-master/meta/esc50.csv'\nbase_data_path = './datasets/ESC-50-master/audio/'\n\npd_data = pd.read_csv(esc50_csv)\npd_data.head()",
"_____no_output_____"
]
],
[
[
"### Filter the data\n\nNow that the data is stored in the `DataFrame`, apply some transformations:\n\n- Filter out rows and use only the selected classes - `dog` and `cat`. If you want to use any other classes, this is where you can choose them.\n- Amend the filename to have the full path. This will make loading easier later.\n- Change targets to be within a specific range. In this example, `dog` will remain at `0`, but `cat` will become `1` instead of its original value of `5`.",
"_____no_output_____"
]
],
[
[
"my_classes = ['dog', 'cat']\nmap_class_to_id = {'dog':0, 'cat':1}\n\nfiltered_pd = pd_data[pd_data.category.isin(my_classes)]\n\nclass_id = filtered_pd['category'].apply(lambda name: map_class_to_id[name])\nfiltered_pd = filtered_pd.assign(target=class_id)\n\nfull_path = filtered_pd['filename'].apply(lambda row: os.path.join(base_data_path, row))\nfiltered_pd = filtered_pd.assign(filename=full_path)\n\nfiltered_pd.head(10)",
"_____no_output_____"
]
],
[
[
"### Load the audio files and retrieve embeddings\n\nHere you'll apply the `load_wav_16k_mono` and prepare the WAV data for the model.\n\nWhen extracting embeddings from the WAV data, you get an array of shape `(N, 1024)` where `N` is the number of frames that YAMNet found (one for every 0.48 seconds of audio).",
"_____no_output_____"
],
[
"Your model will use each frame as one input. Therefore, you need to create a new column that has one frame per row. You also need to expand the labels and the `fold` column to proper reflect these new rows.\n\nThe expanded `fold` column keeps the original values. You cannot mix frames because, when performing the splits, you might end up having parts of the same audio on different splits, which would make your validation and test steps less effective.",
"_____no_output_____"
]
],
[
[
"filenames = filtered_pd['filename']\ntargets = filtered_pd['target']\nfolds = filtered_pd['fold']\n\nmain_ds = tf.data.Dataset.from_tensor_slices((filenames, targets, folds))\nmain_ds.element_spec",
"_____no_output_____"
],
[
"def load_wav_for_map(filename, label, fold):\n return load_wav_16k_mono(filename), label, fold\n\nmain_ds = main_ds.map(load_wav_for_map)\nmain_ds.element_spec",
"_____no_output_____"
],
[
"# applies the embedding extraction model to a wav data\ndef extract_embedding(wav_data, label, fold):\n ''' run YAMNet to extract embedding from the wav data '''\n scores, embeddings, spectrogram = yamnet_model(wav_data)\n num_embeddings = tf.shape(embeddings)[0]\n return (embeddings,\n tf.repeat(label, num_embeddings),\n tf.repeat(fold, num_embeddings))\n\n# extract embedding\nmain_ds = main_ds.map(extract_embedding).unbatch()\nmain_ds.element_spec",
"_____no_output_____"
]
],
[
[
"### Split the data\n\nYou will use the `fold` column to split the dataset into train, validation and test sets.\n\nESC-50 is arranged into five uniformly-sized cross-validation `fold`s, such that clips from the same original source are always in the same `fold` - find out more in the [ESC: Dataset for Environmental Sound Classification](https://www.karolpiczak.com/papers/Piczak2015-ESC-Dataset.pdf) paper.\n\nThe last step is to remove the `fold` column from the dataset since you're not going to use it during training.\n",
"_____no_output_____"
]
],
[
[
"cached_ds = main_ds.cache()\ntrain_ds = cached_ds.filter(lambda embedding, label, fold: fold < 4)\nval_ds = cached_ds.filter(lambda embedding, label, fold: fold == 4)\ntest_ds = cached_ds.filter(lambda embedding, label, fold: fold == 5)\n\n# remove the folds column now that it's not needed anymore\nremove_fold_column = lambda embedding, label, fold: (embedding, label)\n\ntrain_ds = train_ds.map(remove_fold_column)\nval_ds = val_ds.map(remove_fold_column)\ntest_ds = test_ds.map(remove_fold_column)\n\ntrain_ds = train_ds.cache().shuffle(1000).batch(32).prefetch(tf.data.AUTOTUNE)\nval_ds = val_ds.cache().batch(32).prefetch(tf.data.AUTOTUNE)\ntest_ds = test_ds.cache().batch(32).prefetch(tf.data.AUTOTUNE)",
"_____no_output_____"
]
],
[
[
"## Create your model\n\nYou did most of the work!\nNext, define a very simple [Sequential](https://www.tensorflow.org/guide/keras/sequential_model) model with one hidden layer and two outputs to recognize cats and dogs from sounds.\n",
"_____no_output_____"
]
],
[
[
"my_model = tf.keras.Sequential([\n tf.keras.layers.Input(shape=(1024), dtype=tf.float32,\n name='input_embedding'),\n tf.keras.layers.Dense(512, activation='relu'),\n tf.keras.layers.Dense(len(my_classes))\n], name='my_model')\n\nmy_model.summary()",
"_____no_output_____"
],
[
"my_model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n optimizer=\"adam\",\n metrics=['accuracy'])\n\ncallback = tf.keras.callbacks.EarlyStopping(monitor='loss',\n patience=3,\n restore_best_weights=True)",
"_____no_output_____"
],
[
"history = my_model.fit(train_ds,\n epochs=20,\n validation_data=val_ds,\n callbacks=callback)",
"_____no_output_____"
]
],
[
[
"Let's run the `evaluate` method on the test data just to be sure there's no overfitting.",
"_____no_output_____"
]
],
[
[
"loss, accuracy = my_model.evaluate(test_ds)\n\nprint(\"Loss: \", loss)\nprint(\"Accuracy: \", accuracy)",
"_____no_output_____"
]
],
[
[
"You did it!",
"_____no_output_____"
],
[
"## Test your model\n\nNext, try your model on the embedding from the previous test using YAMNet only.\n",
"_____no_output_____"
]
],
[
[
"scores, embeddings, spectrogram = yamnet_model(testing_wav_data)\nresult = my_model(embeddings).numpy()\n\ninferred_class = my_classes[result.mean(axis=0).argmax()]\nprint(f'The main sound is: {inferred_class}')",
"_____no_output_____"
]
],
[
[
"## Save a model that can directly take a WAV file as input\n\nYour model works when you give it the embeddings as input.\n\nIn a real-world scenario, you'll want to use audio data as a direct input.\n\nTo do that, you will combine YAMNet with your model into a single model that you can export for other applications.\n\nTo make it easier to use the model's result, the final layer will be a `reduce_mean` operation. When using this model for serving (which you will learn about later in the tutorial), you will need the name of the final layer. If you don't define one, TensorFlow will auto-define an incremental one that makes it hard to test, as it will keep changing every time you train the model. When using a raw TensorFlow operation, you can't assign a name to it. To address this issue, you'll create a custom layer that applies `reduce_mean` and call it `'classifier'`.\n",
"_____no_output_____"
]
],
[
[
"class ReduceMeanLayer(tf.keras.layers.Layer):\n def __init__(self, axis=0, **kwargs):\n super(ReduceMeanLayer, self).__init__(**kwargs)\n self.axis = axis\n\n def call(self, input):\n return tf.math.reduce_mean(input, axis=self.axis)",
"_____no_output_____"
],
[
"saved_model_path = './dogs_and_cats_yamnet'\n\ninput_segment = tf.keras.layers.Input(shape=(), dtype=tf.float32, name='audio')\nembedding_extraction_layer = hub.KerasLayer(yamnet_model_handle,\n trainable=False, name='yamnet')\n_, embeddings_output, _ = embedding_extraction_layer(input_segment)\nserving_outputs = my_model(embeddings_output)\nserving_outputs = ReduceMeanLayer(axis=0, name='classifier')(serving_outputs)\nserving_model = tf.keras.Model(input_segment, serving_outputs)\nserving_model.save(saved_model_path, include_optimizer=False)",
"_____no_output_____"
],
[
"tf.keras.utils.plot_model(serving_model)",
"_____no_output_____"
]
],
[
[
"Load your saved model to verify that it works as expected.",
"_____no_output_____"
]
],
[
[
"reloaded_model = tf.saved_model.load(saved_model_path)",
"_____no_output_____"
]
],
[
[
"And for the final test: given some sound data, does your model return the correct result?",
"_____no_output_____"
]
],
[
[
"reloaded_results = reloaded_model(testing_wav_data)\ncat_or_dog = my_classes[tf.math.argmax(reloaded_results)]\nprint(f'The main sound is: {cat_or_dog}')",
"_____no_output_____"
]
],
[
[
"If you want to try your new model on a serving setup, you can use the 'serving_default' signature.",
"_____no_output_____"
]
],
[
[
"serving_results = reloaded_model.signatures['serving_default'](testing_wav_data)\ncat_or_dog = my_classes[tf.math.argmax(serving_results['classifier'])]\nprint(f'The main sound is: {cat_or_dog}')\n",
"_____no_output_____"
]
],
[
[
"## (Optional) Some more testing\n\nThe model is ready.\n\nLet's compare it to YAMNet on the test dataset.",
"_____no_output_____"
]
],
[
[
"test_pd = filtered_pd.loc[filtered_pd['fold'] == 5]\nrow = test_pd.sample(1)\nfilename = row['filename'].item()\nprint(filename)\nwaveform = load_wav_16k_mono(filename)\nprint(f'Waveform values: {waveform}')\n_ = plt.plot(waveform)\n\ndisplay.Audio(waveform, rate=16000)",
"_____no_output_____"
],
[
"# Run the model, check the output.\nscores, embeddings, spectrogram = yamnet_model(waveform)\nclass_scores = tf.reduce_mean(scores, axis=0)\ntop_class = tf.math.argmax(class_scores)\ninferred_class = class_names[top_class]\ntop_score = class_scores[top_class]\nprint(f'[YAMNet] The main sound is: {inferred_class} ({top_score})')\n\nreloaded_results = reloaded_model(waveform)\nyour_top_class = tf.math.argmax(reloaded_results)\nyour_inferred_class = my_classes[your_top_class]\nclass_probabilities = tf.nn.softmax(reloaded_results, axis=-1)\nyour_top_score = class_probabilities[your_top_class]\nprint(f'[Your model] The main sound is: {your_inferred_class} ({your_top_score})')",
"_____no_output_____"
]
],
[
[
"## Next steps\n\nYou have created a model that can classify sounds from dogs or cats. With the same idea and a different dataset you can try, for example, building an [acoustic identifier of birds](https://www.kaggle.com/c/birdclef-2021/) based on their singing.\n\nShare your project with the TensorFlow team on social media!\n",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
4a2466e79e814473378b435f94dd0abeb099ccc6
| 11,743 |
ipynb
|
Jupyter Notebook
|
markdownFilesfromJupytor/.ipynb_checkpoints/Computation-on-Numpy-checkpoint.ipynb
|
Pankaj2505/numpy
|
bdfc309a48ad6a2b5a38555107b400da180eb565
|
[
"MIT"
] | null | null | null |
markdownFilesfromJupytor/.ipynb_checkpoints/Computation-on-Numpy-checkpoint.ipynb
|
Pankaj2505/numpy
|
bdfc309a48ad6a2b5a38555107b400da180eb565
|
[
"MIT"
] | null | null | null |
markdownFilesfromJupytor/.ipynb_checkpoints/Computation-on-Numpy-checkpoint.ipynb
|
Pankaj2505/numpy
|
bdfc309a48ad6a2b5a38555107b400da180eb565
|
[
"MIT"
] | null | null | null | 28.502427 | 398 | 0.514604 |
[
[
[
"# Here we will learn topics like \n1. Universal function\n2. aggreagate function\n3. Broadcasting\n\n",
"_____no_output_____"
],
[
"## Universal function\n\n1. A big difference between python array and numpy array is execution speed.\n2. python array iterate through each element and then process it.\n3. numpy array use the concept of vectorized operation, which mean computing all elements of an array parallely\n\n> we implement computaion in numpy array using universal function,\"ufunc\"\n\n#### Why numpy function are fast\n> it is beacuse here we use somthing called broadcasting, in broadcasting , first the code is compiled and then executed.This is the main difference between python and numpy , in python the code is compiled at run time, so this will take some time when we are processing a huge data, while in numpy , the huge data is already compiled during creation , so this will save time during execution",
"_____no_output_____"
],
[
"##### compare effectiveness between python array and numpy array",
"_____no_output_____"
]
],
[
[
"# we want to find reciprocal of array element\nimport numpy as np\nnp.random.seed(0)\n\n\n\ndef reciprocal (arr):\n output = np.empty(len(arr))\n for x in range(len(arr)):\n output[x] = 1.0/arr[x]\n return output\n\n\narray = np.random.randint(5,20, size=500) \n%timeit reciprocal(array)\n\n\n# calculating same operation using numpy universal function\n%timeit 1.0/array",
"3.59 ms ± 121 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n7.41 µs ± 398 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)\n"
]
],
[
[
"#### Exploring more Ufunc\n1. arithmatic opeartion are of two type unary and binary\n2. unary opeartion like (-), ** exponent, % modulas\n",
"_____no_output_____"
]
],
[
[
"x = np.arange(4)\nprint(\"x =\", x)\nprint(\"x + 5 =\", x + 5)\nprint(\"x - 5 =\", x - 5)\nprint(\"x * 2 =\", x * 2)\nprint(\"x / 2 =\", x / 2)\nprint(\"x // 2 =\", x // 2) # floor division\n\nprint(\"-x = \", -x)\nprint(\"x ** 2 = \", x ** 2) #power\nprint(\"x % 2 = \", x % 2) # reminder",
"x = [0 1 2 3]\nx + 5 = [5 6 7 8]\nx - 5 = [-5 -4 -3 -2]\nx * 2 = [0 2 4 6]\nx / 2 = [0. 0.5 1. 1.5]\nx // 2 = [0 0 1 1]\n-x = [ 0 -1 -2 -3]\nx ** 2 = [0 1 4 9]\nx % 2 = [0 1 0 1]\n"
],
[
"y = np.arange(-10,-5)\nprint('y',y)\nprint(\"python absolute function abs\",np.abs(y) )\nprint('numpy absolute function absolute', np.absolute(y))",
"y [-10 -9 -8 -7 -6]\npython absolute function abs [10 9 8 7 6]\nnumpy absolute function absolute [10 9 8 7 6]\n"
],
[
"x = [1, 2, 3]\nprint(\"x =\", x)\nprint(\"e^x =\", np.exp(x))\nprint(\"2^x =\", np.exp2(x))\nprint(\"3^x =\", np.power(3, x))\n\n\nx = [1, 2, 4, 10]\nprint(\"x =\", x)\nprint(\"ln(x) =\", np.log(x))\nprint(\"log2(x) =\", np.log2(x))\nprint(\"log10(x) =\", np.log10(x))",
"x = [1, 2, 3]\ne^x = [ 2.71828183 7.3890561 20.08553692]\n2^x = [2. 4. 8.]\n3^x = [ 3 9 27]\nx = [1, 2, 4, 10]\nln(x) = [0. 0.69314718 1.38629436 2.30258509]\nlog2(x) = [0. 1. 2. 3.32192809]\nlog10(x) = [0. 0.30103 0.60205999 1. ]\n"
]
],
[
[
"#### some more operation\n> calling reduce on the add ufunc returns the sum of all elements in the array: \nSpecifying output",
"_____no_output_____"
]
],
[
[
"##### Specifying output\nx = np.arange(5)\ny = np.empty(5)\nnp.multiply(x, 10, out=y)\nprint(y)\n\n\nx = [1,2,3,4,5]\nprint(np.multiply.reduce(x))\nprint(np.multiply.accumulate(x))",
"[ 0. 10. 20. 30. 40.]\n120\n[ 1 2 6 24 120]\n"
]
],
[
[
"## Aggregation in Numpy\n1. sum() and np.sum() and np.nansum()\n2. remember np.sum() take cares of multidimensionality also . so never use python sum() with multidimensional array \n",
"_____no_output_____"
]
],
[
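[
"# Note: this cell is not from the original notebook. It is a small, hedged sketch of the NaN-safe\n# aggregates (np.nansum / np.nanmean) mentioned above, assuming only numpy imported as np.\narr_with_nan = np.array([1.0, 2.0, np.nan, 4.0])\nprint(np.sum(arr_with_nan))     # nan: a single NaN poisons the plain aggregate\nprint(np.nansum(arr_with_nan))  # 7.0: NaN entries are ignored\nprint(np.nanmean(arr_with_nan)) # 2.333...: mean over the non-NaN entries only",
"_____no_output_____"
],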
[
"big_array = np.random.rand(1000000)\n%timeit sum(big_array)\n%timeit np.sum(big_array)",
"264 ms ± 15 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n2.52 ms ± 198 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n"
]
],
[
[
"\n| Function Name | \tNaN-safe Version | Description |\n-----------------------|------------------------------|--------------------|\n|np.sum |\tnp.nansum |\tCompute sum of elements|\n|np.prod\t| np.nanprod\t| Compute product of elements| \n|np.mean\t|np.nanmean|\tCompute mean of elements|\n|p.std|\tnp.nanstd|\tCompute standard deviation|\n|np.var|\tnp.nanvar|\tCompute variance|\n|np.min|\tnp.nanmin|\tFind minimum value|\n|np.max|\tnp.nanmax|\tFind maximum value|\n|np.argmin|\tnp.nanargmin|\tFind index of minimum value|\n|np.argmax|\tnp.nanargmax|\tFind index of maximum value|\n|np.median|\tnp.nanmedian|\tCompute median of elements|\n|np.percentile|\tnp.nanpercentile|\tCompute rank-based statistics of elements|\n|np.any|\tN/A|\tEvaluate whether any elements are true|\n|np.all|\tN/A|\tEvaluate whether all elements are true|\n",
"_____no_output_____"
],
[
"# BroadCasting\n",
"_____no_output_____"
],
[
"#### Rules of Broadcasting\nBroadcasting in NumPy follows a strict set of rules to determine the interaction between the two arrays:\n\n- Rule 1: If the two arrays differ in their number of dimensions, the shape of the one with fewer dimensions is padded with ones on its leading (left) side.\n- Rule 2: If the shape of the two arrays does not match in any dimension, the array with shape equal to 1 in that dimension is stretched to match the other shape.\n- Rule 3: If in any dimension the sizes disagree and neither is equal to 1, an error is raised.",
"_____no_output_____"
],
[
"M = np.ones((2, 3)) \na = np.arange(3) \n- Let's consider an operation on these two arrays. The shape of the arrays are\n\nM.shape = (2, 3) \na.shape = (3,) \n- We see by rule 1 that the array a has fewer dimensions, so we pad it on the left with ones:\n\nM.shape -> (2, 3) \na.shape -> (1, 3) \n- By rule 2, we now see that the first dimension disagrees, so we stretch this dimension to match:\n\nM.shape -> (2, 3) \na.shape -> (2, 3) \n- The shapes match, and we see that the final shape will be (2, 3):",
"_____no_output_____"
],
[
"M = np.ones((3, 2)) \na = np.arange(3) \n- This is just a slightly different situation than in the first example: the matrix M is transposed. How does this affect the calculation? The shape of the arrays are\n\nM.shape = (3, 2) \na.shape = (3,) \n- Again, rule 1 tells us that we must pad the shape of a with ones:\n\nM.shape -> (3, 2) \na.shape -> (1, 3) \n- By rule 2, the first dimension of a is stretched to match that of M:\n\nM.shape -> (3, 2) \na.shape -> (3, 3) \n- Now we hit rule 3–the final shapes do not match, so these two arrays are incompatible, as we can observe by attempting this operation:",
"_____no_output_____"
],
[
"#### Practice of broadcasting\n",
"_____no_output_____"
]
],
[
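[
"# Note: this cell is not from the original notebook. It is a quick, hedged check of the two\n# broadcasting examples discussed above, before the practice example below.\nM = np.ones((2, 3))\na = np.arange(3)\nprint((M + a).shape)  # (2, 3): a is padded to (1, 3) and stretched along the first axis\n\nM = np.ones((3, 2))\ntry:\n    M + a  # shapes (3, 2) and (3,) are incompatible under rule 3\nexcept ValueError as err:\n    print('broadcasting error:', err)",
"_____no_output_____"
],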
[
"#Lets say we have 10 sample which measure three hight of three flower, so we need an array of 10 rows and 3 column\n\n#x = np.random.randint(1,50,size=30).reshape(10,3)\nx= np.random.random((10,3))\n#print(x)\n\n# to know the mean hight of each flower\ny= np.empty(3)\nnp.mean(x,axis =0,out=y) # axis zero mean row wise sum\nprint(y)\n\n# to verify if mean is correct, we all know sum of( mean-point ) is always 0 when all point are between 0,1\n\nxcentered= x-y\nprint(xcentered.mean(0))",
"[0.4368809 0.58307355 0.53017212]\n[ 0.00000000e+00 -4.44089210e-17 2.22044605e-17]\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
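"code",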
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
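"code",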
"code"
]
] |
4a24775f8db2e9c1bf2912634f5a7fbedd4dab28
| 15,274 |
ipynb
|
Jupyter Notebook
|
Comparacion_CPV_MG.ipynb
|
abxda/Mexico-Population-Census-2020
|
2973f2fe609fd0f8f073fb71996e3ddb8ef9251d
|
[
"Apache-2.0"
] | 3 |
2021-02-12T20:37:11.000Z
|
2022-02-20T18:53:04.000Z
|
Comparacion_CPV_MG.ipynb
|
abxda/Mexico-Population-Census-2020
|
2973f2fe609fd0f8f073fb71996e3ddb8ef9251d
|
[
"Apache-2.0"
] | null | null | null |
Comparacion_CPV_MG.ipynb
|
abxda/Mexico-Population-Census-2020
|
2973f2fe609fd0f8f073fb71996e3ddb8ef9251d
|
[
"Apache-2.0"
] | 2 |
2021-02-23T00:56:26.000Z
|
2021-02-27T02:50:08.000Z
| 33.942222 | 509 | 0.307516 |
[
[
[
"import pandas as pd\nimport geopandas as gpd",
"_____no_output_____"
],
[
"estados = [\"Aguascalientes\",\"Baja California\",\"Baja California Sur\",\"Campeche\",\"Coahuila de Zaragoza\",\"Colima\",\"Chiapas\",\"Chihuahua\",\"Ciudad de México\",\"Durango\",\"Guanajuato\",\"Guerrero\",\"Hidalgo\",\"Jalisco\",\"México\",\"Michoacan de Ocampo\",\"Morelos\",\"Nayarit\",\"Nuevo Leon\",\"Oaxaca\",\"Puebla\",\"Queretaro\",\"Quintana Roo\",\"San Luis Potosi\",\"Sinaloa\",\"Sonora\",\"Tabasco\",\"Tamaulipas\",\"tlaxcala\",\"Veracruz Ignacio de la Llave\",\"Yucatan\",\"Zacatecas\"]",
"_____no_output_____"
],
[
"rows = []\nfor i in range(32):\n estado = str(i+1).zfill(2)\n print(\"Procesando: \" + estados[i])\n df = pd.read_csv(f\"./inegi/ccpvagebmza/csv/conjunto_de_datos/conjunto_de_datos_ageb_urbana_{estado}_cpv2020.csv\",na_values=['N/A','N/D','*'])\n dfmza = df.drop(df[df.MZA == 0].index)\n manzanas_ccpv = dfmza.shape[0]\n mgmza = gpd.read_file(f\"./inegi/mgccpv/gpkg/cpv2020_{estado}.gpkg\")\n manzanas_mg = mgmza.shape[0] \n rows.append([estados[i], estado,manzanas_ccpv,manzanas_mg,manzanas_ccpv - manzanas_mg ])\n\ncomparacion = pd.DataFrame(rows, columns=[\"Estado\", \"CVE_EDO\", \"Total_Manzanas_CCPV\", \"Total_Manzanas_MG\", \"Diferencia\"])",
"_____no_output_____"
],
[
"comparacion",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code"
]
] |
4a247a578dad3f130084e754c3927e66c9e5d1c6
| 9,115 |
ipynb
|
Jupyter Notebook
|
notebooks/online_retail_database_setup.ipynb
|
markstur/db2-event-store-akka-streams
|
ecc8ab9327210d1ed7eab403d13de102d2235e4b
|
[
"Apache-2.0"
] | 10 |
2018-12-11T16:39:04.000Z
|
2021-09-12T09:38:23.000Z
|
notebooks/online_retail_database_setup.ipynb
|
markstur/db2-event-store-akka-streams
|
ecc8ab9327210d1ed7eab403d13de102d2235e4b
|
[
"Apache-2.0"
] | 2 |
2019-04-08T11:57:40.000Z
|
2019-04-14T13:21:43.000Z
|
notebooks/online_retail_database_setup.ipynb
|
markstur/db2-event-store-akka-streams
|
ecc8ab9327210d1ed7eab403d13de102d2235e4b
|
[
"Apache-2.0"
] | 10 |
2018-11-23T09:31:40.000Z
|
2020-10-20T16:27:54.000Z
| 29.787582 | 132 | 0.599122 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a247b67525b13cfbb0043c0bde2b6d53f1d5580
| 5,936 |
ipynb
|
Jupyter Notebook
|
how-to-use-azureml/azure-databricks/amlsdk/installation-and-configuration-01.ipynb
|
savitamittal1/MachineLearningNotebooks-1
|
759ec3934cc826d73676c48c3a858b707820e56d
|
[
"MIT"
] | 1 |
2020-01-25T11:03:32.000Z
|
2020-01-25T11:03:32.000Z
|
how-to-use-azureml/azure-databricks/amlsdk/installation-and-configuration-01.ipynb
|
MattyMc/MachineLearningNotebooks
|
6462e20c20eef629502485b8ba879c53df576243
|
[
"MIT"
] | 1 |
2019-03-18T04:33:24.000Z
|
2019-03-18T04:33:24.000Z
|
how-to-use-azureml/azure-databricks/amlsdk/installation-and-configuration-01.ipynb
|
MattyMc/MachineLearningNotebooks
|
6462e20c20eef629502485b8ba879c53df576243
|
[
"MIT"
] | 2 |
2020-09-07T01:41:49.000Z
|
2020-10-01T18:16:28.000Z
| 33.727273 | 286 | 0.538578 |
[
[
[
"Azure ML & Azure Databricks notebooks by Parashar Shah.\n\nCopyright (c) Microsoft Corporation. All rights reserved.\n\nLicensed under the MIT License.",
"_____no_output_____"
],
[
"We support installing AML SDK as library from GUI. When attaching a library follow this https://docs.databricks.com/user-guide/libraries.html and add the below string as your PyPi package. You can select the option to attach the library to all clusters or just one cluster.\n\n**install azureml-sdk**\n* Source: Upload Python Egg or PyPi\n* PyPi Name: `azureml-sdk[databricks]`\n* Select Install Library",
"_____no_output_____"
]
],
[
[
"import azureml.core\n\n# Check core SDK version number - based on build number of preview/master.\nprint(\"SDK version:\", azureml.core.VERSION)",
"_____no_output_____"
]
],
[
[
"Please specify the Azure subscription Id, resource group name, workspace name, and the region in which you want to create the Azure Machine Learning Workspace.\n\nYou can get the value of your Azure subscription ID from the Azure Portal, and then selecting Subscriptions from the menu on the left.\n\nFor the resource_group, use the name of the resource group that contains your Azure Databricks Workspace.\n\nNOTE: If you provide a resource group name that does not exist, the resource group will be automatically created. This may or may not succeed in your environment, depending on the permissions you have on your Azure Subscription.",
"_____no_output_____"
]
],
[
[
"# subscription_id = \"<your-subscription-id>\"\n# resource_group = \"<your-existing-resource-group>\"\n# workspace_name = \"<a-new-or-existing-workspace; it is unrelated to Databricks workspace>\"\n# workspace_region = \"<your-resource group-region>\"",
"_____no_output_____"
],
[
"# Set auth to be used by workspace related APIs.\n# For automation or CI/CD ServicePrincipalAuthentication can be used.\n# https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.authentication.serviceprincipalauthentication?view=azure-ml-py\nauth = None",
"_____no_output_____"
],
[
"# import the Workspace class and check the azureml SDK version\n# exist_ok checks if workspace exists or not.\n\nfrom azureml.core import Workspace\n\nws = Workspace.create(name = workspace_name,\n subscription_id = subscription_id,\n resource_group = resource_group, \n location = workspace_region,\n auth = auth,\n exist_ok=True)",
"_____no_output_____"
],
[
"#get workspace details\nws.get_details()",
"_____no_output_____"
],
[
"ws = Workspace(workspace_name = workspace_name,\n subscription_id = subscription_id,\n resource_group = resource_group,\n auth = auth)\n\n# persist the subscription id, resource group name, and workspace name in aml_config/config.json.\nws.write_config()\n#if you need to give a different path/filename please use this\n#write_config(path=\"/databricks/driver/aml_config/\",file_name=<alias_conf.cfg>)",
"_____no_output_____"
],
[
"help(Workspace)",
"_____no_output_____"
],
[
"# import the Workspace class and check the azureml SDK version\nfrom azureml.core import Workspace\n\nws = Workspace.from_config(auth = auth)\n#ws = Workspace.from_config(<full path>)\nprint('Workspace name: ' + ws.name, \n 'Azure region: ' + ws.location, \n 'Subscription id: ' + ws.subscription_id, \n 'Resource group: ' + ws.resource_group, sep = '\\n')",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a24887b1b64408f054561902045ef5263c8db28
| 101,159 |
ipynb
|
Jupyter Notebook
|
guides/vectorizing_guide.ipynb
|
RelevanceAI/RelevanceAI
|
a0542f35153d9c842f3d2cd0955d6b07f6dfc07b
|
[
"Apache-2.0"
] | 21 |
2021-11-23T13:01:36.000Z
|
2022-03-23T03:45:30.000Z
|
guides/vectorizing_guide.ipynb
|
RelevanceAI/RelevanceAI
|
a0542f35153d9c842f3d2cd0955d6b07f6dfc07b
|
[
"Apache-2.0"
] | 217 |
2021-11-23T00:11:01.000Z
|
2022-03-30T08:11:49.000Z
|
guides/vectorizing_guide.ipynb
|
RelevanceAI/RelevanceAI
|
a0542f35153d9c842f3d2cd0955d6b07f6dfc07b
|
[
"Apache-2.0"
] | 4 |
2022-01-04T01:48:30.000Z
|
2022-02-11T03:19:32.000Z
| 29.561368 | 300 | 0.584545 |
[
[
[
"# 🔢 Vectorizing Guide",
"_____no_output_____"
],
[
"Firstly, we must import what we need from Relevance AI",
"_____no_output_____"
]
],
[
[
"from relevanceai import Client\nfrom relevanceai.utils.datasets import (\n get_iris_dataset,\n get_palmer_penguins_dataset,\n get_online_ecommerce_dataset,\n)",
"_____no_output_____"
],
[
"client = Client()",
"_____no_output_____"
]
],
[
[
"## Example 1\n\nFor this first example we going to work with a purely numeric dataset. The Iris dataset contains 4 numeric features and another text column with the label",
"_____no_output_____"
]
],
[
[
"iris_documents = get_iris_dataset()",
"_____no_output_____"
],
[
"dataset = client.Dataset(\"iris\")",
"⚠️ Your dataset has no documents. Make sure to insert some!\n"
],
[
"dataset.insert_documents(iris_documents, create_id=True)",
"while inserting, you can visit your dashboard at https://cloud.relevance.ai/dataset/iris/dashboard/monitor/\n✅ All documents inserted/edited successfully.\n"
]
],
[
[
"Here we can see the dataset schema, pre-vectorization",
"_____no_output_____"
]
],
[
[
"dataset.schema",
"_____no_output_____"
]
],
[
[
"Vectorizing is as simple specifying `create_feature_vector=True`\n\nWhile species is a text feature, we do not need to vectorize this. Besides, smart typechecking recognises this field as a text field we would not usually vectorize.\n\n`create_feature_vector=True` is what creates our \"document\" vectors. This concatenates all numeric/vector fields in a single \"document\" vector. This new vector_field is always called `f\"_dim{n_dims}_feature_vector_\"`, with n_dims being the size of the concatenated vector. \n\nFurthermore, for nuermic stability accross algorithms, sklearn's StandardScaler is applied to the concatenated vector field. If the concatenated size of a vector field is >512 dims, PCA is automatically applied.",
"_____no_output_____"
]
],
[
[
"dataset.vectorize(create_feature_vector=True)",
"No fields were given, vectorizing the following field(s): \nConcatenating the following fields to form a feature vector: PetalLengthCm, PetalWidthCm, SepalLengthCm, SepalWidthCm\n"
]
],
[
[
"### or",
"_____no_output_____"
]
],
[
[
"dataset.vectorize(fields=[\"numeric\"], create_feature_vector=True)",
"_____no_output_____"
]
],
[
[
"You can see below that the dataset schema has been altered accordingly",
"_____no_output_____"
]
],
[
[
"dataset.schema",
"_____no_output_____"
]
],
[
[
"## Example 2\n\nFor this second example we going to work with a mixed numeric and text dataset. The Palmer Penguins dataset contains several numeric features and another text column called \"Comments\"",
"_____no_output_____"
]
],
[
[
"penguins_documents = get_palmer_penguins_dataset()",
"_____no_output_____"
],
[
"dataset.insert_documents(penguins_documents, create_id=True)",
"while inserting, you can visit your dashboard at https://cloud.relevance.ai/dataset/iris/dashboard/monitor/\n✅ All documents inserted/edited successfully.\n"
]
],
[
[
"We must install the default Encoders for text vectorizing from vectorhub",
"_____no_output_____"
]
],
[
[
"!pip install vectorhub[encoders-text-tfhub-windows] # If you are on windows",
"_____no_output_____"
],
[
"!pip install vectorhub[encoders-text-tfhub] # other",
"_____no_output_____"
]
],
[
[
"No arguments automatically detects what text and image fieds are presetn in your dataset. Since this is a new function, its typechecking could be faulty. If need be, specifiy the data types in the same format as the schema with `_text_` denoting text_fields and `_image_` denoting image fields.",
"_____no_output_____"
]
],
[
[
"dataset.vectorize()",
"No fields were given, vectorizing the following field(s): Comments, Species, Stage\nThis operation will create the following vector_fields: ['Comments_use_vector_', 'Species_use_vector_', 'Stage_use_vector_']\n"
]
],
[
[
"### or",
"_____no_output_____"
]
],
[
[
"dataset.vectorize(fields=[\"Comments\"], create_feature_vector=True)",
"This operation will create the following vector_fields: ['Comments_use_vector_']\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a24b655129fa11732fb09bc319fc901545c34b0
| 8,852 |
ipynb
|
Jupyter Notebook
|
getting_started/6_cleanup.ipynb
|
seeq12/amazon-lookout-for-equipment
|
cb760aa0f9e2dad8fce13ed7c50282a10e320b40
|
[
"MIT-0"
] | null | null | null |
getting_started/6_cleanup.ipynb
|
seeq12/amazon-lookout-for-equipment
|
cb760aa0f9e2dad8fce13ed7c50282a10e320b40
|
[
"MIT-0"
] | null | null | null |
getting_started/6_cleanup.ipynb
|
seeq12/amazon-lookout-for-equipment
|
cb760aa0f9e2dad8fce13ed7c50282a10e320b40
|
[
"MIT-0"
] | null | null | null | 31.390071 | 384 | 0.565974 |
[
[
[
"# **Amazon Lookout for Equipment** - Getting started\n*Part 6 - Cleanup*",
"_____no_output_____"
],
[
"## Initialization\n---\nThis repository is structured as follow:\n\n```sh\n. lookout-equipment-demo\n|\n├── data/\n| ├── interim # Temporary intermediate data are stored here\n| ├── processed # Finalized datasets are usually stored here\n| | # before they are sent to S3 to allow the\n| | # service to reach them\n| └── raw # Immutable original data are stored here\n|\n├── getting_started/\n| ├── 1_data_preparation.ipynb\n| ├── 2_dataset_creation.ipynb\n| ├── 3_model_training.ipynb\n| ├── 4_model_evaluation.ipynb\n| ├── 5_inference_scheduling.ipynb\n| └── 6_cleanup.ipynb <<< THIS NOTEBOOK <<<\n|\n└── utils/\n └── lookout_equipment_utils.py\n```",
"_____no_output_____"
],
[
"### Notebook configuration update\nAmazon Lookout for Equipment being a very recent service, we need to make sure that we have access to the latest version of the AWS Python packages. If you see a `pip` dependency error, check that the `boto3` version is ok: if it's greater than 1.17.48 (the first version that includes the `lookoutequipment` API), you can discard this error and move forward with the next cell:",
"_____no_output_____"
]
],
[
[
"!pip install --quiet --upgrade boto3 awscli aiobotocore botocore sagemaker tqdm\n\nimport boto3\nprint(f'boto3 version: {boto3.__version__} (should be >= 1.17.48 to include Lookout for Equipment API)')\n\n# Restart the current notebook to ensure we take into account the previous updates:\nfrom IPython.core.display import HTML\nHTML(\"<script>Jupyter.notebook.kernel.restart()</script>\")",
"_____no_output_____"
]
],
[
[
"### Imports",
"_____no_output_____"
]
],
[
[
"import config\nimport datetime\nimport sagemaker\nimport sys\nimport time\n\n# Helper functions for managing Lookout for Equipment API calls:\nsys.path.append('../utils')\nimport lookout_equipment_utils as lookout",
"_____no_output_____"
],
[
"ROLE_ARN = sagemaker.get_execution_role()\nREGION_NAME = boto3.session.Session().region_name\nBUCKET = config.BUCKET\nPREFIX_TRAINING = config.PREFIX_TRAINING\nPREFIX_LABEL = config.PREFIX_LABEL\nPREFIX_INFERENCE = config.PREFIX_INFERENCE\nDATASET_NAME = config.DATASET_NAME\nMODEL_NAME = config.MODEL_NAME\nINFERENCE_SCHEDULER_NAME = config.INFERENCE_SCHEDULER_NAME",
"_____no_output_____"
],
[
"lookout_client = lookout.get_client(region_name=REGION_NAME)",
"_____no_output_____"
]
],
[
[
"## Deleting resources\n---\n### Deleting inference scheduler\nUsing the [**DeleteInferenceScheduler**](https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/API_DeleteInferenceScheduler.html) API to delete existing scheduler:",
"_____no_output_____"
]
],
[
[
"# Stopping the scheduler in case it's running:\ntry:\n print('Stopping the scheduler...')\n scheduler = lookout.LookoutEquipmentScheduler(\n scheduler_name=INFERENCE_SCHEDULER_NAME,\n model_name=MODEL_NAME,\n region_name=REGION_NAME\n )\n scheduler.stop()\n scheduler.delete()\n \nexcept Exception as e:\n error_code = e.response['Error']['Code']\n if (error_code == 'ResourceNotFoundException'):\n print(' > Scheduler not found, nothing to do')",
"_____no_output_____"
]
],
[
[
"### Deleting the trained models\nUsing the [**DeleteModel**](https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/API_DeleteModel.html) API to remove the model trained in this tutorial:",
"_____no_output_____"
]
],
[
[
"for model in lookout.list_models_for_datasets(model_name_prefix=MODEL_NAME):\n print(f'Deleting model {model}...')\n \n try:\n lookout_client.delete_model(ModelName=MODEL_NAME)\n print(f'Model \"{MODEL_NAME}\" is deleted successfully.')\n\n except Exception as e:\n error_code = e.response['Error']['Code']\n # If the dataset is used by existing models and we asked a\n # forced delete, we also delete the associated models before\n # trying again the dataset deletion:\n if (error_code == 'ConflictException'):\n print(('Model is currently being used (a training might be in '\n 'progress. Wait for the process to be completed and '\n 'retry.'))",
"_____no_output_____"
]
],
[
[
"### Deleting the dataset\nUsing the [**DeleteDataset**](https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/API_DeleteDataset.html) API to remove the dataset:",
"_____no_output_____"
]
],
[
[
"# Let's try to delete this dataset:\ntry:\n lookout_client.delete_dataset(DatasetName=DATASET_NAME)\n print(f'Dataset \"{DATASET_NAME}\" is deleted successfully.')\n\nexcept Exception as e:\n error_code = e.response['Error']['Code']\n if (error_code == 'ConflictException'):\n print(('Dataset is used by at least a model, delete the '\n 'associated model(s) before deleting this dataset.'))",
"_____no_output_____"
]
],
[
[
"### Cleaning the S3 bucket\nUncomment and run the following cell to clean the S3 bucket from the prefixes used throughout this tutorial for training data, label data and inference data. You can stop here if you would like to keep the data generated for further experimentation:",
"_____no_output_____"
]
],
[
[
"# !aws s3 rm s3://$BUCKET/$PREFIX_INFERENCE --recursive\n# !aws s3 rm s3://$BUCKET/$PREFIX_TRAINING --recursive\n# !aws s3 rm s3://$BUCKET/$PREFIX_LABEL --recursive",
"_____no_output_____"
]
],
[
[
"## Conclusion\n---",
"_____no_output_____"
],
[
"Use this notebook to cleanup all the ressources created while running this series of tutorials.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a24bc3dd3d118a30d87ac1dce256383686af01b
| 4,499 |
ipynb
|
Jupyter Notebook
|
convert_solid_and_brep.ipynb
|
Geode-solutions/Geode-Conversion
|
c005b22fca6c93a087e59b277dd167b068dad4dc
|
[
"CNRI-Python"
] | null | null | null |
convert_solid_and_brep.ipynb
|
Geode-solutions/Geode-Conversion
|
c005b22fca6c93a087e59b277dd167b068dad4dc
|
[
"CNRI-Python"
] | null | null | null |
convert_solid_and_brep.ipynb
|
Geode-solutions/Geode-Conversion
|
c005b22fca6c93a087e59b277dd167b068dad4dc
|
[
"CNRI-Python"
] | null | null | null | 29.598684 | 231 | 0.628362 |
[
[
[
"# Convert a SolidMesh into its BoundaryRepresentation\n\nThe goal is to transform a volumetric mesh into a model as defined here: https://docs.geode-solutions.com/datamodel\nThe core of the problem is to identify and to extract the topological information from the mesh.\nThere are two ways to realize this identification:\n- from the polyhedra adjacencies;\n- from Attribute values on the polyhedra.",
"_____no_output_____"
],
[
"## Import modules\n\nYou need to import OpenGeode and Geode-Conversion modules.",
"_____no_output_____"
]
],
[
[
"# Fix to better handle import since Python 3.8 on Windows\nimport os, sys, platform\nif sys.version_info >= (3,8,0) and platform.system() == \"Windows\":\n for path in [x.strip() for x in os.environ['PATH'].split(';') if x]:\n os.add_dll_directory(path)\n\nimport opengeode\nimport geode_conversion",
"_____no_output_____"
]
],
[
[
"## Conversion from polyhedra adjacencies\n\nIn this case, we want to convert the micro-topology, meaning the adjacency relationships at polyhedron level into the model topology, meaning a set of components (volumic, surfacic...) with their connectivity relationships.\n",
"_____no_output_____"
]
],
[
[
"# Load solid and convert it\nsolid = opengeode.load_tetrahedral_solid3D(\"model_as_solid.og_tso3d\")\nbrep_from_solid = geode_conversion.convert_solid_into_brep_from_adjacencies(solid)\nopengeode.save_brep(brep_from_solid, \"brep_from_solid.og_brep\")",
"_____no_output_____"
],
[
"# Display information on the model\nprint(brep_from_solid.nb_corners())\nprint(brep_from_solid.nb_lines())\nprint(brep_from_solid.nb_surfaces())\nprint(brep_from_solid.nb_blocks())",
"_____no_output_____"
]
],
[
[
"## Conversion from Attribute values\n\nIn this case, an Attribute is attached to the solid where each attribute value is stored in a polyhedron.\nPolyhedra with the same values will end up in the same Block in the boundary representation.\nFrom these Blocks, the corresponding boundary surfaces will be generated.",
"_____no_output_____"
]
],
[
[
"# Load solid and convert it\nsolid = opengeode.load_tetrahedral_solid3D(\"model_as_solid.og_tso3d\")\nbrep_from_solid = geode_conversion.convert_solid_into_brep_from_attribute(solid, \"attribute_name\")\nopengeode.save_brep(brep_from_solid, \"brep_from_solid.og_brep\")",
"_____no_output_____"
],
[
"# Display information on the model\nprint(brep_from_solid.nb_corners())\nprint(brep_from_solid.nb_lines())\nprint(brep_from_solid.nb_surfaces())\nprint(brep_from_solid.nb_blocks())",
"_____no_output_____"
]
],
[
[
"## Conversion from model to solid\n\nInversely, if you have a model with a volumetric mesh, you can also convert it into a solid.",
"_____no_output_____"
]
],
[
[
"converted_solid = geode_conversion.convert_brep_into_solid( brep_from_solid )\nopengeode.save_polyhedral_solid3D(converted_solid, \"solid_from_brep.og_pso3d\")",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a24bfddd978ed2493a53e11dc9cb0b55d54cc5e
| 727,901 |
ipynb
|
Jupyter Notebook
|
notebooks/Zomata EDA.ipynb
|
IamVicky90/Zomato
|
0f3945b9a41e5591712c6d49b0982a6bb151b141
|
[
"MIT"
] | null | null | null |
notebooks/Zomata EDA.ipynb
|
IamVicky90/Zomato
|
0f3945b9a41e5591712c6d49b0982a6bb151b141
|
[
"MIT"
] | null | null | null |
notebooks/Zomata EDA.ipynb
|
IamVicky90/Zomato
|
0f3945b9a41e5591712c6d49b0982a6bb151b141
|
[
"MIT"
] | null | null | null | 145.318626 | 178,786 | 0.799577 |
[
[
[
"import pandas as pd # To convert data into pandas dataframe\nimport numpy as np # For data and large type of arrays manipulation\nimport matplotlib.pyplot as plt #For data visualisation\nimport seaborn as sns # For data visualisation\nimport plotly.express as px #For data visualisation",
"_____no_output_____"
]
],
[
[
"# Data Preprocessing ",
"_____no_output_____"
]
],
[
[
"# df=pd.read_csv('../Master_Training_File/Zomato.csv') #Convert csv file to Pandas DataFrame\ndf=pd.read_csv(\"sample_data/Zomato.csv\") #Convert csv file to Pandas DataFrame",
"_____no_output_____"
],
[
"df.head(7) #Looking at the head",
"_____no_output_____"
],
[
"for col in df.columns:\n print(f'Column: {col} has',len(df[col].unique()),'unique Values.') #Seeing the unique values of every column",
"Column: serial has 1000 unique Values.\nColumn: url has 1000 unique Values.\nColumn: address has 713 unique Values.\nColumn: name has 684 unique Values.\nColumn: online_order has 2 unique Values.\nColumn: book_table has 2 unique Values.\nColumn: rate has 26 unique Values.\nColumn: votes has 306 unique Values.\nColumn: phone has 781 unique Values.\nColumn: location has 15 unique Values.\nColumn: rest_type has 40 unique Values.\nColumn: dish_liked has 320 unique Values.\nColumn: cuisines has 345 unique Values.\nColumn: approx_cost_for_two_people has 27 unique Values.\nColumn: reviews_list has 708 unique Values.\nColumn: menu_item has 256 unique Values.\nColumn: listed_in_type has 6 unique Values.\nColumn: listed_in_city has 2 unique Values.\n"
],
[
"df.isnull().sum() #Seeing the null values",
"_____no_output_____"
],
[
"df.duplicated().sum() # Sum the duplicated row",
"_____no_output_____"
],
[
"#By seeing at unique and null values I decide to drop serial,url,address, name phone, reviews_list,dish_liked\ndf.drop(['serial','url','address' ,'phone', 'reviews_list','dish_liked'],axis=1,inplace=True)",
"_____no_output_____"
],
[
"df.head() #Seeing the head to confirm drop columns",
"_____no_output_____"
],
[
"df.info() #Seeing the info to see data types",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1000 entries, 0 to 999\nData columns (total 12 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 name 1000 non-null object \n 1 online_order 1000 non-null object \n 2 book_table 1000 non-null object \n 3 rate 900 non-null object \n 4 votes 1000 non-null float64\n 5 location 1000 non-null object \n 6 rest_type 999 non-null object \n 7 cuisines 997 non-null object \n 8 approx_cost_for_two_people 1000 non-null object \n 9 menu_item 1000 non-null object \n 10 listed_in_type 1000 non-null object \n 11 listed_in_city 1000 non-null object \ndtypes: float64(1), object(11)\nmemory usage: 93.9+ KB\n"
],
[
"df.dropna(inplace=True) #Drop all Nan values",
"_____no_output_____"
],
[
"df.rate.unique() #Seeing the unique values",
"_____no_output_____"
],
[
"df['rate']=df['rate'].str.replace('/5','')#Rplace '/5' with '' to convert it into number(float in this case)\ndf=df.loc[df.rate!='NEW'] #DataFrame without NEW values in rate colum\ndf['rate']=df['rate'].astype('float64')",
"_____no_output_____"
],
[
"df.rate.unique() #Seeing the unique values to confirm",
"_____no_output_____"
],
[
"df.rename(columns={'listed_in_type':'type','listed_in_city':'city','approx_cost_for_two_people':'cost'},inplace=True) #Rename the column for the simplicity",
"_____no_output_____"
],
[
"df.head() #Seeing the head to confirm the column name changes",
"_____no_output_____"
],
[
"df['cost'].unique() #Seeing the unique values for cost",
"_____no_output_____"
],
[
"remove_comma= lambda x:x.replace(',','') if type(x)==np.str else x #create a lambda function to remove ',' with empty string ''\ndf.cost=df.cost.apply(remove_comma) #apply the funtion remove_comma to extract comma out of it.\ndf.cost=df.cost.astype('int64')",
"_____no_output_____"
],
[
"df.cost.unique() #Seeing the unique values to confirm the above operation",
"_____no_output_____"
],
[
"df2=pd.get_dummies(df,columns=['online_order','book_table','type','city'],drop_first=True) #Convert 'online_order','book_table','type','city' columns to 0 and 1 by using get_dumies",
"_____no_output_____"
],
[
"df2.head() # To confirm get_dummies function",
"_____no_output_____"
],
[
"def ordinal_encoding(df,columns): # function to convert given columns to ordinal encoding\n for col in columns:\n df[col]=df[col].factorize()[0]\n return df",
"_____no_output_____"
],
[
"df3=ordinal_encoding(df2,columns=['location','name','rest_type','cuisines','menu_item']) #ordinal encoding the columns",
"_____no_output_____"
],
[
"df3.head() #Seeing the head to confirm ordinal_encoding done or not",
"_____no_output_____"
],
[
"df3.dtypes #Confirming the datatypes of each column",
"_____no_output_____"
]
],
[
[
"# Regression Analysis and Splitting the Data set",
"_____no_output_____"
]
],
[
[
"df3.describe().T #Seeing the data Statistically to extract some insights from the data",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split # Import library To split the whole data into train and test ",
"_____no_output_____"
],
[
"X=df3.drop(['cost'],axis=1) # Seperate the independent features\nY=df3['cost'] # Seperate the dependent ('cost') from independent feature\nx_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=42) #split into x_train,x_test,y_train,y_test with 20% of test size",
"_____no_output_____"
]
],
[
[
"# EDA (Exploratory Data Analysis)",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(16,9)) # To see the image somewhat big\nsns.heatmap(df3.corr(),annot=True) # plotting heatmap with the features correlation; in short seeing the features how much they correlate with each other",
"_____no_output_____"
],
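[
"# Note: this cell is not from the original notebook. It prints the exact shares behind the next\n# count plot, to back up the comment that follows it.\nprint(df['online_order'].value_counts(normalize=True))",
"_____no_output_____"
],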
[
"sns.countplot(df['online_order']) # Draw countplot to see online_order feature counts\nfig=plt.gcf() # to adjust the figure\nfig.set_size_inches(10,10) # to make the figure big",
"/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning:\n\nPass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n\n"
]
],
[
[
"Here we can see that more than 60% people prefer online_order",
"_____no_output_____"
]
],
[
[
"sns.countplot(df['book_table']) # Draw countplot to see book_table feature counts\nfig=plt.gcf() # to adjust the figure\nfig.set_size_inches(10,10) # to make the figure big",
"/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning:\n\nPass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n\n"
]
],
[
[
"Here we can see that more than 75% people prefer to not book the table",
"_____no_output_____"
]
],
[
[
"sns.countplot(df['city']) # Draw countplot to see city feature counts\nfig=plt.gcf() # to adjust the figure\nfig.set_size_inches(10,10) # to make the figure big",
"/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning:\n\nPass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n\n"
]
],
[
[
"The data that we have recieved is biased more than towards Banashankari city",
"_____no_output_____"
]
],
[
[
"sns.countplot(df['book_table'],hue=df3['rate']) # Draw countplot to see book_table feature with rate\nfig=plt.gcf() # to adjust the figure\nfig.set_size_inches(10,10) # to make the figure big",
"/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning:\n\nPass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n\n"
]
],
[
[
"Here the data that we have seen is very messy so we have better approach than this",
"_____no_output_____"
]
],
[
[
"y_=pd.crosstab(df3['rate'],df['book_table']) # creating a dataframe of crosstab between rate and book_table\ny_.plot(kind='bar',stacked=True,color=['yellow','red']) # creating a bar plot with stacked equal to True\nfig=plt.gcf() # to adjust the figure\nfig.set_size_inches(10,10) # to make the figure big",
"_____no_output_____"
]
],
[
[
"The people that book table is usually giving the high rate but the people that does not book the table are not giving much rate, this shows that the book_table customers are more satishfied than the customers who does not book the table. ",
"_____no_output_____"
]
],
[
[
"y_=pd.crosstab(df3['rate'],df['online_order']) # creating a dataframe of crosstab between rate and online_order\ny_.plot(kind='bar',stacked=True,color=['yellow','red'],figsize =(15,6)) # creating a bar plot with stacked equal to True",
"_____no_output_____"
]
],
[
[
"Here we can see no such relation, means online_order with yes and no are somewhat equally rated",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(16,9)) #Set the figure size\nchart=sns.countplot(x=df['location']) # Countplot with location feature \nchart.set_xticklabels(labels=df['location'].unique(),rotation=90) # Setting the rotation of labels\nNone #To not return xticklabels",
"_____no_output_____"
],
[
"y_=pd.crosstab(df3['rate'],df['location']) # creating a dataframe of crosstab between rate and location\ny_.plot(kind='bar',stacked=True,figsize =(15,9)) # creating a bar plot with stacked equal to True",
"_____no_output_____"
],
[
"px.bar(pd.crosstab(df3['rate'],df['location'])) # Creating this same thing with plotly for better understanding",
"_____no_output_____"
],
[
"y_=pd.crosstab(df3['rate'],df['rest_type']) # creating a dataframe of crosstab between rate and rest_type\ny_.plot(kind='bar',stacked=True,figsize =(15,9),) # creating a bar plot with stacked equal to True",
"_____no_output_____"
],
[
"plt.figure(figsize=(16,9)) #To increase the size of image\nsns.countplot(df['type']) # draw countplot ",
"/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning:\n\nPass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n\n"
],
[
"y_=pd.crosstab(df3['rate'],df['type']) # creating a dataframe of crosstab between rate and type\ny_.plot(kind='bar',stacked=True,figsize =(15,9),) # creating a bar plot with stacked equal to True\nplt.title('Type of services') # Create the title of the figure",
"_____no_output_____"
],
[
"px.scatter(data_frame=df3,x=df3['rate'],y=df3['cost']) # Plot scatter plot with plotly for the analysis between rate and cost",
"_____no_output_____"
],
[
"plt.figure(figsize=(16,9)) # To increase the size of the image\nsns.barplot(df['name'].value_counts()[:20],df['name'].value_counts()[:20].index) # Plotting barplot of 20 famous hotels\nplt.title('Top 20 hotels in Banglure') # Create the title of the figure",
"/usr/local/lib/python3.7/dist-packages/seaborn/_decorators.py:43: FutureWarning:\n\nPass the following variables as keyword args: x, y. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n\n"
]
],
[
[
"## Seeing the accuracy without applying hyperparameter and much feature engineering and data Preprocessing",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import r2_score,mean_absolute_error,mean_squared_error #importing different metrics for analysis",
"_____no_output_____"
],
[
"def compute_score(y_true,y_pred): # create the function that will compute the r2_score,mean_absolute_error and mean_squared_error\n '''This will compute the r2_score,mean_absolute_error and mean_squared_error\n args:\n y_true: provide the true y label means y_test\n y_pred: provide the pred y label'''\n r2=r2_score(y_true,y_pred)\n mae=mean_absolute_error(y_true,y_pred)\n mse=mean_squared_error(y_true,y_pred)\n print(f'r2_score is: {r2}\\n mean_absolute_error is: {mae}\\n mean_squared_error is {mse}')",
"_____no_output_____"
]
],
[
[
"### Seeing the prediction by Linear Models",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import LinearRegression # import LinearRegression for training\nlr=LinearRegression() # LinearRegression model initialize\nlr.fit(x_train,y_train) # fitting the x_train,y_train\ny_pred=lr.predict(x_test) # predict the x_test\ncompute_score(y_test,y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.24446052520232098\n mean_absolute_error is: 158.22038130650247\n mean_squared_error is 41794.19270791407\n"
],
[
"from sklearn.linear_model import Ridge # import RidgeRegression for training\nr=Ridge() # Ridge model initialize\nr.fit(x_train,y_train) # fitting the x_train,y_train\ny_pred=r.predict(x_test) # predict the x_test\ncompute_score(y_test,y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.24555495828055118\n mean_absolute_error is: 158.1106325915856\n mean_squared_error is 41733.65193075651\n"
],
[
"from sklearn.linear_model import Lasso # import LassoRegression for training\nl=Lasso() # Lasso model initialize\nl.fit(x_train,y_train) # fitting the x_train,y_train\ny_pred=l.predict(x_test) # predict the x_test\ncompute_score(y_test,y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.2430877658250984\n mean_absolute_error is: 158.5646514873468\n mean_squared_error is 41870.12966669257\n"
]
],
[
[
"### Seeing the predictions by Trees",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import RandomForestRegressor # import RandomForestRegressor for training\nrf=RandomForestRegressor() # RandomForestRegressor model initialize\nrf.fit(x_train,y_train) # fitting the x_train,y_train\ny_pred=rf.predict(x_test) # predict the x_test\ncompute_score(y_test,y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.7157150802576158\n mean_absolute_error is: 89.87771929824561\n mean_squared_error is 15725.794767836258\n"
],
[
"from sklearn.tree import DecisionTreeRegressor # DecisionTreeRegressor model initialize\ndtr=DecisionTreeRegressor() # DecisionTreeRegressor model initialize\ndtr.fit(x_train,y_train) # fitting the x_train,y_train\ny_pred=dtr.predict(x_test) # predict the x_test\ncompute_score(y_test,y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.3035888162470439\n mean_absolute_error is: 110.23391812865498\n mean_squared_error is 38523.3918128655\n"
],
[
"from sklearn.tree import ExtraTreeRegressor # ExtraTreeRegressor model initialize\netr=ExtraTreeRegressor() # ExtraTreeRegressor model initialize\netr.fit(x_train,y_train) # fitting the x_train,y_train\ny_pred=etr.predict(x_test) # predict the x_test\ncompute_score(y_test,y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.4198775907636666\n mean_absolute_error is: 114.32748538011695\n mean_squared_error is 32090.643274853803\n"
],
[
"from sklearn.ensemble import AdaBoostRegressor # AdaBoostRegressor model initialize\nab=AdaBoostRegressor() # AdaBoostRegressor model initialize\nab.fit(x_train,y_train) # fitting the x_train,y_train\ny_pred=ab.predict(x_test) # predict the x_test\ncompute_score(y_test,y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.543988677832925\n mean_absolute_error is: 133.52641594480775\n mean_squared_error is 25225.187712058338\n"
]
],
[
[
"### Seeing the prediction by Support Vector Machines",
"_____no_output_____"
]
],
[
[
"from sklearn.svm import SVR # Support Vector Regressor model initialize\nsv=SVR() # SupportVectorRegressor model initialize\nsv.fit(x_train,y_train) # fitting the x_train,y_train\ny_pred=sv.predict(x_test) # predict the x_test\ncompute_score(y_test,y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.06488943133202618\n mean_absolute_error is: 174.00745222420642\n mean_squared_error is 51727.530610603746\n"
]
],
[
[
"# Result: As we have seen form above RandomForestRegressor doing the best prediction",
"_____no_output_____"
],
[
"## Analysis of Models by doing some feature engineering on this existing data",
"_____no_output_____"
]
],
[
[
"def correlation(dataset, threshold): # Seeing the most correlated features\n '''This will take the dataset and the thereshold value for the correlation\n and return the column that have the correlation greater than the threshold'''\n col_corr = set() # Set of all the names of correlated columns\n corr_matrix = dataset.corr() \n for i in range(len(corr_matrix.columns)):\n for j in range(i):\n if abs(corr_matrix.iloc[i, j]) > threshold: # we are interested in absolute coeff value\n colname = corr_matrix.columns[i] # getting the name of column\n col_corr.add(colname)\n return col_corr",
"_____no_output_____"
],
[
"correlation(dataset=df3,threshold=0.7) # calling the correlation funtion to get high correlated columns",
"_____no_output_____"
],
[
"rf.fit(x_train.drop(['city_Bannerghatta Road', 'type_Dine-out'],axis=1),y_train) # Apply Random forest after droping high correlated features\ny_pred=rf.predict(x_test.drop(['city_Bannerghatta Road', 'type_Dine-out'],axis=1)) # predict the x_test\ncompute_score(y_test,y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.715921077057727\n mean_absolute_error is: 90.02701754385964\n mean_squared_error is 15714.399638596491\n"
],
[
"# Extracting the information regarding importance of each feature in the prediction\nfrom sklearn.feature_selection import mutual_info_regression\n# determine the mutual information\nmutual_info = mutual_info_regression(x_train, y_train)\nmutual_info",
"_____no_output_____"
],
[
"mutual_info_df=pd.Series(mutual_info) # convert mutual_infoto pandas series\nmutual_info_df.index=x_train.columns # initialize the columns\nmutual_info_df.sort_values(ascending=True).plot(kind='barh',figsize=(16,9)) # Draw the figure",
"_____no_output_____"
]
],
[
[
"### Note: \n* we have convert 'location','name','rest_type','cuisines','menu_item' features into ordinal encoding but in reality they don't have any kind of relation with them.\n* So, this may be a mistake beacause as we know when catagorical features doesn't have any kind of realtion then we use something called as one hot encoding",
"_____no_output_____"
]
],
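[
[
"A minimal illustration of the difference, using a hypothetical toy column (not taken from this dataset): ordinal encoding assigns arbitrary integers and therefore implies an order between the categories, while one-hot encoding creates one independent 0/1 column per category.\n\n```python\nimport pandas as pd\n\ntoy = pd.DataFrame({'area': ['BTM', 'HSR', 'BTM', 'Indiranagar']})\n\n# Ordinal encoding: codes 0, 1, 0, 2 -- implies BTM < HSR < Indiranagar, which is meaningless here\ntoy['area_ordinal'] = toy['area'].factorize()[0]\n\n# One-hot encoding: one binary column per category, no implied order\ntoy_onehot = pd.get_dummies(toy['area'], prefix='area')\n\nprint(toy[['area', 'area_ordinal']])\nprint(toy_onehot)\n```",
"_____no_output_____"
]
],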
[
[
"dummy=pd.get_dummies(df,columns=['location','name','rest_type','cuisines','menu_item','online_order','book_table','type','city'],drop_first=True)",
"_____no_output_____"
],
[
"X=dummy.drop(['cost'],axis=1) # Seperate the independent features\nY=dummy['cost'] # Seperate the dependent ('cost') from independent feature\nx_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=42) #split into x_train,x_test,y_train,y_test with 20% of test size",
"_____no_output_____"
],
[
"rf=RandomForestRegressor() # RandomForestRegressor model initialize\nrf.fit(x_train,y_train) # fitting the x_train,y_train\ny_pred=rf.predict(x_test) # predict the x_test\ncompute_score(y_test,y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.7605936438987342\n mean_absolute_error is: 82.78964912280702\n mean_squared_error is 13243.2463374269\n"
],
[
"from sklearn.ensemble import ExtraTreesRegressor# ExtraTreesRegressor importing the ExtraTreesRegressor\net=ExtraTreesRegressor() # ExtraTreesRegressor model initialize\net.fit(x_train,y_train) # fitting the x_train,y_train\ny_pred=et.predict(x_test) # predict the x_test\ncompute_score(y_test,y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.7267286752516344\n mean_absolute_error is: 72.0111111111111\n mean_squared_error is 15116.555506432751\n"
],
[
"!pip install category_encoders",
"Collecting category_encoders\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/44/57/fcef41c248701ee62e8325026b90c432adea35555cbc870aff9cfba23727/category_encoders-2.2.2-py2.py3-none-any.whl (80kB)\n\r\u001b[K |████ | 10kB 12.4MB/s eta 0:00:01\r\u001b[K |████████▏ | 20kB 10.0MB/s eta 0:00:01\r\u001b[K |████████████▏ | 30kB 8.3MB/s eta 0:00:01\r\u001b[K |████████████████▎ | 40kB 7.8MB/s eta 0:00:01\r\u001b[K |████████████████████▎ | 51kB 4.2MB/s eta 0:00:01\r\u001b[K |████████████████████████▍ | 61kB 4.4MB/s eta 0:00:01\r\u001b[K |████████████████████████████▍ | 71kB 4.8MB/s eta 0:00:01\r\u001b[K |████████████████████████████████| 81kB 3.7MB/s \n\u001b[?25hRequirement already satisfied: numpy>=1.14.0 in /usr/local/lib/python3.7/dist-packages (from category_encoders) (1.19.5)\nRequirement already satisfied: patsy>=0.5.1 in /usr/local/lib/python3.7/dist-packages (from category_encoders) (0.5.1)\nRequirement already satisfied: scikit-learn>=0.20.0 in /usr/local/lib/python3.7/dist-packages (from category_encoders) (0.22.2.post1)\nRequirement already satisfied: statsmodels>=0.9.0 in /usr/local/lib/python3.7/dist-packages (from category_encoders) (0.10.2)\nRequirement already satisfied: pandas>=0.21.1 in /usr/local/lib/python3.7/dist-packages (from category_encoders) (1.1.5)\nRequirement already satisfied: scipy>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from category_encoders) (1.4.1)\nRequirement already satisfied: six in /usr/local/lib/python3.7/dist-packages (from patsy>=0.5.1->category_encoders) (1.15.0)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=0.20.0->category_encoders) (1.0.1)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.21.1->category_encoders) (2018.9)\nRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.21.1->category_encoders) (2.8.1)\nInstalling collected packages: category-encoders\nSuccessfully installed category-encoders-2.2.2\n"
],
[
"'''\nSo we convert these features into category_encoders with some base because there is so much catagories and our model may get \nthe problem of Curse of Dimensionality.\n''' \nimport category_encoders as ce\nencoder= ce.BaseNEncoder(cols=['location','name','rest_type','cuisines','menu_item','online_order','book_table','type','city'],return_df=True,base=5)\ndata_encoded=encoder.fit_transform(df)\ndata_encoded",
"/usr/local/lib/python3.7/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning:\n\npandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n\n/usr/local/lib/python3.7/dist-packages/category_encoders/utils.py:21: FutureWarning:\n\nis_categorical is deprecated and will be removed in a future version. Use is_categorical_dtype instead\n\n"
],
[
"X=data_encoded.drop(['cost'],axis=1) # Seperate the independent features\nY=data_encoded['cost'] # Seperate the dependent ('cost') from independent feature\nx_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=42) #split into x_train,x_test,y_train,y_test with 20% of test size",
"_____no_output_____"
],
[
"rf=RandomForestRegressor() # RandomForestRegressor model initialize\nrf.fit(x_train,y_train) # fitting the x_train,y_train\ny_pred=rf.predict(x_test) # predict the x_test\ncompute_score(y_test,y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.7277395975662817\n mean_absolute_error is: 88.19941520467836\n mean_squared_error is 15060.634295906433\n"
]
],
[
[
"### Note: Dummy catagorical encoding is doing better so we apply this in whole dataset",
"_____no_output_____"
],
[
"## Now applying the Standralization techniques",
"_____no_output_____"
]
],
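[
[
"For reference (a summary added here, not a claim from the original analysis), the three scalers applied below rescale each feature column x as:\n* StandardScaler (standardization): z = (x - mean(x)) / std(x)\n* MinMaxScaler (normalization): z = (x - min(x)) / (max(x) - min(x))\n* RobustScaler: z = (x - median(x)) / IQR(x), where IQR is the interquartile range\n\nA minimal sketch checking that StandardScaler matches the manual formula on a toy column:\n\n```python\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\n\nx = np.array([[100.0], [200.0], [400.0], [800.0]])\nmanual = (x - x.mean()) / x.std()           # np.std uses ddof=0, the same convention as StandardScaler\nscaled = StandardScaler().fit_transform(x)\nprint(np.allclose(manual, scaled))          # True\n```",
"_____no_output_____"
]
],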
[
[
"from sklearn.preprocessing import StandardScaler # Import The StandardScaler",
"_____no_output_____"
],
[
"ss=StandardScaler() # Initialize The StandardScaler\nx_ss=ss.fit_transform(dummy.drop(['cost'],axis=1)) # fit_transform the StandardScaler with dummy encoded data\nss_df=pd.DataFrame(data=x_ss,columns=dummy.drop(['cost'],axis=1).columns) # Convert the numpy array into dataframe",
"_____no_output_____"
],
[
"x_train,x_test,y_train,y_test=train_test_split(ss_df,Y,test_size=0.2,random_state=42) #split into x_train,x_test,y_train,y_test with 20% of test size",
"_____no_output_____"
],
[
"from sklearn.ensemble import RandomForestRegressor # import RandomForestRegressor for training\nrf=RandomForestRegressor() # RandomForestRegressor model initialize\nrf.fit(x_train,y_train) # fitting the x_train,y_train\ny_pred=rf.predict(x_test) # predict the x_test\ncompute_score(y_test,y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.746684379776325\n mean_absolute_error is: 84.56122807017545\n mean_squared_error is 14012.665387719298\n"
],
[
"from sklearn.preprocessing import MinMaxScaler # Import The MinMaxScaler",
"_____no_output_____"
],
[
"mms=MinMaxScaler() # Initialize The MinMaxScaler\nx_mms=mms.fit_transform(dummy.drop(['cost'],axis=1)) # fit_transform the MinMaxScaler with dummy encoded data\nmms_df=pd.DataFrame(data=x_mms,columns=dummy.drop(['cost'],axis=1).columns) # Convert the numpy array into dataframe",
"_____no_output_____"
],
[
"x_train,x_test,y_train,y_test=train_test_split(mms_df,Y,test_size=0.2,random_state=42) #split into x_train,x_test,y_train,y_test with 20% of test size\nfrom sklearn.ensemble import RandomForestRegressor # import RandomForestRegressor for training\nrf=RandomForestRegressor() # RandomForestRegressor model initialize\nrf.fit(x_train,y_train) # fitting the x_train,y_train\ny_pred=rf.predict(x_test) # predict the x_test\ncompute_score(y_test,y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.7493042437206843\n mean_absolute_error is: 83.42578947368422\n mean_squared_error is 13867.74231988304\n"
],
[
"from sklearn.preprocessing import RobustScaler # Import The RobustScaler",
"_____no_output_____"
],
[
"rs=RobustScaler() # Initialize The RobustScaler\nx_rs=rs.fit_transform(dummy.drop(['cost'],axis=1)) # fit_transform the RobustScaler with dummy encoded data\nrs_df=pd.DataFrame(data=x_rs,columns=dummy.drop(['cost'],axis=1).columns) # Convert the numpy array into dataframe",
"_____no_output_____"
],
[
"x_train,x_test,y_train,y_test=train_test_split(rs_df,Y,test_size=0.2,random_state=42) #split into x_train,x_test,y_train,y_test with 20% of test size\nfrom sklearn.ensemble import RandomForestRegressor # import RandomForestRegressor for training\nrf=RandomForestRegressor() # RandomForestRegressor model initialize\nrf.fit(x_train,y_train) # fitting the x_train,y_train\ny_pred=rf.predict(x_test) # predict the x_test\ncompute_score(y_test,y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.7504365516834052\n mean_absolute_error is: 83.66625730994153\n mean_squared_error is 13805.106416959065\n"
]
],
[
[
"### Result:\n* So here Standralization, Min_Max_Scalar(Normalization) and RobustScaler don't do any good or bad so we leave it and go with the original dataset(dummy)",
"_____no_output_____"
],
[
"# Feature Selection",
"_____no_output_____"
]
],
[
[
"dummy_highly_correlated_lst=list(correlation(dataset=dummy,threshold=0.7)) # calling the correlation funtion (it is definde in 'Analysis of Models by doing some feature engineering on this existing data' section) to get high correlated columns ",
"_____no_output_____"
],
[
"len(dummy_highly_correlated_lst)",
"_____no_output_____"
],
[
"X_dummy=dummy.drop(['cost'],axis=1) # Seperate the independent features\nY_dummy=dummy['cost'] # Seperate the dependent ('cost') from independent feature\nx_train_dummy,x_test_dummy,y_train_dummy,y_test_dummy=train_test_split(X_dummy,Y_dummy,test_size=0.2,random_state=42) #split into x_train,x_test,y_train,y_test with 20% of test size",
"_____no_output_____"
],
[
"rf.fit(x_train_dummy.drop(dummy_highly_correlated_lst,axis=1),y_train_dummy) # Apply Random forest after droping high correlated features\ny_pred_dummy=rf.predict(x_test_dummy.drop(dummy_highly_correlated_lst,axis=1)) # predict the x_test\ncompute_score(y_test_dummy,y_pred_dummy) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.7550381428181945\n mean_absolute_error is: 83.97356725146199\n mean_squared_error is 13550.560105263157\n"
],
[
"from sklearn.feature_selection import SelectFromModel # For feature Selection\nfrom sklearn.linear_model import Lasso # Import Lasso as feature Selection model",
"_____no_output_____"
],
[
"sel_features=SelectFromModel(Lasso(alpha=0.005)) # Initialize SelectFromModel along Lasso with 0.005 alpha values\nsel_features.fit(X_dummy,Y_dummy) # Fiting the model\nX_dummy_lasso_list=list(X_dummy.columns[sel_features.get_support()]) # returns the colums choose by Lasso",
"/usr/local/lib/python3.7/dist-packages/sklearn/linear_model/_coordinate_descent.py:476: ConvergenceWarning:\n\nObjective did not converge. You might want to increase the number of iterations. Duality gap: 248518.0697185291, tolerance: 5337.122497062281\n\n"
],
[
"rf.fit(x_train_dummy[X_dummy_lasso_list],y_train_dummy) # Apply Random forest after droping high correlated features\ny_pred_dummy=rf.predict(x_test_dummy[X_dummy_lasso_list]) # predict the x_test\ncompute_score(y_test_dummy,y_pred_dummy) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.7577169228526915\n mean_absolute_error is: 83.77959064327486\n mean_squared_error is 13402.377974853802\n"
]
],
[
[
"### Here the Accuracy metrics are increase little bit and it's also reduce the dimension/Columns that are a good sign for us so we keep this change.",
"_____no_output_____"
]
],
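[
[
"A brief sketch (on synthetic data, purely illustrative) of why SelectFromModel with Lasso can drop columns: the L1 penalty shrinks the coefficients of weakly informative features all the way to zero, and SelectFromModel keeps only the features whose coefficients stay non-zero.\n\n```python\nimport numpy as np\nfrom sklearn.linear_model import Lasso\n\nrng = np.random.RandomState(0)\nX = rng.normal(size=(200, 5))                                      # 5 candidate features\ny = 3 * X[:, 0] + 0.5 * X[:, 1] + rng.normal(scale=0.1, size=200)  # only the first two matter\n\ncoefs = Lasso(alpha=0.1).fit(X, y).coef_\nprint(coefs)  # the coefficients of the three uninformative columns are driven to (near) zero\n```",
"_____no_output_____"
]
],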
[
[
"dummy_1=dummy[X_dummy_lasso_list].copy() # Make a copy of this dummy with lasso suggested features.\nx_train_dummy_1=x_train_dummy[X_dummy_lasso_list].copy() # Make a copy of this train with lasso suggested features.\nx_test_dummy_1=x_test_dummy[X_dummy_lasso_list].copy() # Make a copy of this test with lasso suggested features.",
"_____no_output_____"
]
],
[
[
"### Now see the Accuracy with boosting techniques",
"_____no_output_____"
]
],
[
[
"from sklearn.ensemble import AdaBoostRegressor # AdaBoostRegressor model initialize\nab=AdaBoostRegressor() # AdaBoostRegressor model initialize\nab.fit(x_train_dummy_1,y_train) # fitting the x_train,y_train\ny_pred=ab.predict(x_test_dummy_1) # predict the x_test\ncompute_score(y_test,y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.47670071269417835\n mean_absolute_error is: 143.598458500599\n mean_squared_error is 28947.35746723262\n"
],
[
"from sklearn.ensemble import GradientBoostingRegressor\ngb=GradientBoostingRegressor() # GradientBoostingRegressor model initialize\ngb.fit(x_train_dummy_1,y_train) # fitting the x_train,y_train\ny_pred=ab.predict(x_test_dummy_1) # predict the x_test\ncompute_score(y_test,y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.47670071269417835\n mean_absolute_error is: 143.598458500599\n mean_squared_error is 28947.35746723262\n"
],
[
"from xgboost import XGBRegressor # importing the XGBRegressor for training\nxg=XGBRegressor() # XGBRegressor model initialize\nxg.fit(x_train_dummy_1.to_numpy(),y_train.to_numpy()) # fitting the x_train,y_train\ny_pred=xg.predict(x_test_dummy_1.to_numpy()) # predict the x_test\ncompute_score(y_test.to_numpy(),y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"[12:03:17] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\nr2_score is: 0.6757457844630705\n mean_absolute_error is: 107.25756443313688\n mean_squared_error is 17936.777127539135\n"
],
[
"from sklearn.ensemble import RandomForestRegressor # RandomForestRegressor importing the ExtraTreesRegressor\nrf=RandomForestRegressor() # RandomForestRegressor model initialize\nrf.fit(x_train_dummy_1,y_train) # fitting the x_train,y_train\ny_pred=rf.predict(x_test_dummy_1) # predict the x_test\ncompute_score(y_test,y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.7554795437457226\n mean_absolute_error is: 81.41760233918129\n mean_squared_error is 13526.143121052632\n"
]
],
[
[
"## So, RandomForestRegressor and XGBRegressor gives us the best accuracy so we will keep these models for Hyperparameter Tunning.",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import GridSearchCV # importing GridSearchCVfor hyperparameter\n# when use hyperthread, xgboost may become slower\nparameters = {'nthread':[4], # Parameters that we want to pass\n 'objective':['reg:linear'],\n 'learning_rate': [.03, 0.05, .07], #so called `eta` value\n 'max_depth': [5, 6, 7],\n 'min_child_weight': [4],\n 'silent': [1],\n 'subsample': [0.7],\n 'colsample_bytree': [0.7],\n 'n_estimators': [500]}\ngrid=GridSearchCV(estimator=xg,param_grid=parameters,cv=5,verbose=3) # initialize the GridSearchCV",
"_____no_output_____"
],
[
"grid.fit(x_train_dummy_1.to_numpy(),y_train.to_numpy()) # fitting the GridSearchCV with x_train_dummy_1 and y_train",
"Fitting 5 folds for each of 9 candidates, totalling 45 fits\n[CV] colsample_bytree=0.7, learning_rate=0.03, max_depth=5, min_child_weight=4, n_estimators=500, nthread=4, objective=reg:linear, silent=1, subsample=0.7 \n"
],
[
"y_pred=grid.predict(x_test_dummy_1.to_numpy()) # predict the x_test\ncompute_score(y_test.to_numpy(),y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.7261140486319398\n mean_absolute_error is: 91.78145095200567\n mean_squared_error is 15150.554819829056\n"
],
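[
"# A possible follow-up (not run in the original notebook): after fitting, GridSearchCV (and RandomizedSearchCV below)\n# exposes the winning configuration and its cross-validated score through its standard attributes:\nprint(grid.best_params_)  # best parameter combination found by the search\nprint(grid.best_score_)   # mean cross-validated score of that combination",
"_____no_output_____"
],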
[
"from sklearn.model_selection import RandomizedSearchCV # importing RandomizedSearchCV hyperparameter\nparameters = { # Parameters that we want to pass\n 'num_boost_round': [10, 25, 50],\n 'eta': [0.05, 0.1, 0.3],\n 'max_depth': [3, 4, 5],\n 'subsample': [0.9, 1.0],\n 'colsample_bytree': [0.9, 1.0],\n }\nrandom=RandomizedSearchCV(estimator=xg,param_distributions=parameters,cv=5,verbose=3) # initialize the RandomizedSearchCV",
"_____no_output_____"
],
[
"random.fit(x_train_dummy_1.to_numpy(),y_train.to_numpy()) # fitting the RandomizedSearchCV with x_train_dummy_1 and y_train",
"Fitting 5 folds for each of 10 candidates, totalling 50 fits\n[CV] subsample=0.9, num_boost_round=10, max_depth=3, eta=0.1, colsample_bytree=0.9 \n[12:16:39] WARNING: /workspace/src/objective/regression_obj.cu:152: reg:linear is now deprecated in favor of reg:squarederror.\n"
],
[
"y_pred=random.predict(x_test_dummy_1.to_numpy()) # predict the x_test\ncompute_score(y_test.to_numpy(),y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.7340901444358803\n mean_absolute_error is: 95.0827220002113\n mean_squared_error is 14709.340963761595\n"
],
[
"from sklearn.model_selection import RandomizedSearchCV\n# Number of trees in random forest\nn_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]\n# Number of features to consider at every split\nmax_features = ['auto', 'sqrt']\n# Maximum number of levels in tree\nmax_depth = [int(x) for x in np.linspace(10, 110, num = 11)]\nmax_depth.append(None)\n# Minimum number of samples required to split a node\nmin_samples_split = [2, 5, 10]\n# Minimum number of samples required at each leaf node\nmin_samples_leaf = [1, 2, 4]\n# Method of selecting samples for training each tree\nbootstrap = [True, False]\n# Create the random grid\nrandom_grid = {'n_estimators': n_estimators,\n 'max_features': max_features,\n 'max_depth': max_depth,\n 'min_samples_split': min_samples_split,\n 'min_samples_leaf': min_samples_leaf,\n 'bootstrap': bootstrap}\nrs=RandomizedSearchCV(estimator=rf,param_distributions=random_grid,cv=5,verbose=3) # initialize the GridSearchCV\nrs.fit(x_train_dummy_1.to_numpy(),y_train.to_numpy()) # fitting the GridSearchCV with x_train_dummy_1 and y_train",
"Fitting 5 folds for each of 10 candidates, totalling 50 fits\n[CV] n_estimators=1600, min_samples_split=2, min_samples_leaf=1, max_features=auto, max_depth=20, bootstrap=False \n"
],
[
"y_pred=rs.predict(x_test_dummy_1.to_numpy()) # predict the x_test\ncompute_score(y_test.to_numpy(),y_pred) # computing the r2_score,mean_absolute_error and mean_squared_error",
"r2_score is: 0.7824926792226987\n mean_absolute_error is: 82.07351771897615\n mean_squared_error is 12031.856948815166\n"
],
[
"",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a24cb3112d9558956b1f60054508df8ae5c73f0
| 56,825 |
ipynb
|
Jupyter Notebook
|
Machine_Learning_Fundamentals/Part III - Multivariate K-Nearest Neighbor.ipynb
|
LuisOrtizF/DataScience
|
a2456da3fd796ef2969e2988153d660b41382e67
|
[
"MIT"
] | 2 |
2017-11-20T02:58:00.000Z
|
2018-02-18T15:28:53.000Z
|
Machine_Learning_Fundamentals/Part III - Multivariate K-Nearest Neighbor.ipynb
|
LuisOrtizF/DataScience
|
a2456da3fd796ef2969e2988153d660b41382e67
|
[
"MIT"
] | null | null | null |
Machine_Learning_Fundamentals/Part III - Multivariate K-Nearest Neighbor.ipynb
|
LuisOrtizF/DataScience
|
a2456da3fd796ef2969e2988153d660b41382e67
|
[
"MIT"
] | 3 |
2019-06-13T00:55:46.000Z
|
2021-02-11T21:02:27.000Z
| 60.97103 | 1,078 | 0.59634 |
[
[
[
"1. Recap\n==\n\nIn the last mission, we explored how to use a simple k-nearest neighbors machine learning model that used just one feature, or attribute, of the listing to predict the rent price. We first relied on the <span style=\"background-color: #F9EBEA; color:##C0392B\">accommodates</span> column, which describes the number of people a living space can comfortably accommodate. Then, we switched to the <span style=\"background-color: #F9EBEA; color:##C0392B\">bathrooms</span> column and observed an improvement in accuracy. While these were good features to become familiar with the basics of machine learning, it's clear that using just a single feature to compare listings doesn't reflect the reality of the market. An apartment that can accommodate 4 guests in a popular part of Washington D.C. will rent for much higher than one that can accommodate 4 guests in a crime ridden area.\n\nThere are 2 ways we can tweak the model to try to improve the accuracy (decrease the RMSE during validation):\n\n- increase the number of attributes the model uses to calculate similarity when ranking the closest neighbors\n- increase <span style=\"background-color: #F9EBEA; color:##C0392B\">k</span>, the number of nearby neighbors the model uses when computing the prediction\n\n\nIn this mission, we'll focus on increasing the number of attributes the model uses. When selecting more attributes to use in the model, we need to watch out for columns that don't work well with the distance equation. This includes columns containing:\n\n- non-numerical values (e.g. city or state)\n - Euclidean distance equation expects numerical values\n- missing values\n - distance equation expects a value for each observation and attribute\n- non-ordinal values (e.g. latitude or longitude)\n - ranking by Euclidean distance doesn't make sense if all attributes aren't ordinal\n \nIn the following code screen, we've read the <span style=\"background-color: #F9EBEA; color:##C0392B\">dc_airbnb.csv</span> dataset from the last mission into pandas and brought over the data cleaning changes we made. Let's first look at the first row's values to identify any columns containing non-numerical or non-ordinal values. In the next screen, we'll drop those columns and then look for missing values in each of the remaining columns.\n\n<br>\n<div class=\"alert alert-info\">\n<b>Exercise Start.</b>\n</div>\n\n**Description**: \n\n1. Use the <span style=\"background-color: #F9EBEA; color:##C0392B\">DataFrame.info()</span> method to return the number of non-null values in each column.\n\n",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nnp.random.seed(1)\n\ndc_listings = pd.read_csv('dc_airbnb.csv')\ndc_listings = dc_listings.loc[np.random.permutation(len(dc_listings))]\nstripped_commas = dc_listings['price'].str.replace(',', '')\nstripped_dollars = stripped_commas.str.replace('$', '')\ndc_listings['price'] = stripped_dollars.astype('float')\ndc_listings.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 3723 entries, 574 to 1061\nData columns (total 19 columns):\nhost_response_rate 3289 non-null object\nhost_acceptance_rate 3109 non-null object\nhost_listings_count 3723 non-null int64\naccommodates 3723 non-null int64\nroom_type 3723 non-null object\nbedrooms 3702 non-null float64\nbathrooms 3696 non-null float64\nbeds 3712 non-null float64\nprice 3723 non-null float64\ncleaning_fee 2335 non-null object\nsecurity_deposit 1426 non-null object\nminimum_nights 3723 non-null int64\nmaximum_nights 3723 non-null int64\nnumber_of_reviews 3723 non-null int64\nlatitude 3723 non-null float64\nlongitude 3723 non-null float64\ncity 3723 non-null object\nzipcode 3714 non-null object\nstate 3723 non-null object\ndtypes: float64(6), int64(5), object(8)\nmemory usage: 581.7+ KB\n"
]
],
[
[
"2. Removing features\n==\n\nThe following columns contain non-numerical values:\n\n- <span style=\"background-color: #F9EBEA; color:##C0392B\">room_type</span>: e.g. **Private room**\n- <span style=\"background-color: #F9EBEA; color:##C0392B\">city</span>: e.g. **Washington**\n- <span style=\"background-color: #F9EBEA; color:##C0392B\">state</span>: e.g. **DC**\n\nwhile these columns contain numerical but non-ordinal values:\n\n- <span style=\"background-color: #F9EBEA; color:##C0392B\">latitude</span>: e.g. **38.913458**\n- <span style=\"background-color: #F9EBEA; color:##C0392B\">longitude</span>: e.g. **-77.031**\n- <span style=\"background-color: #F9EBEA; color:##C0392B\">zipcode</span>: e.g. **20009**\n\n\nGeographic values like these aren't ordinal, because a smaller numerical value doesn't directly correspond to a smaller value in a meaningful way. For example, the zip code 20009 isn't smaller or larger than the zip code 75023 and instead both are unique, identifier values. Latitude and longitude value pairs describe a point on a geographic coordinate system and different equations are used in those cases (e.g. [haversine](https://en.wikipedia.org/wiki/Haversine_formula)).\n\nWhile we could convert the <span style=\"background-color: #F9EBEA; color:##C0392B\">host_response_rate</span> and <span style=\"background-color: #F9EBEA; color:##C0392B\">host_acceptance_rate</span> columns to be numerical (right now they're object data types and contain the <span style=\"background-color: #F9EBEA; color:##C0392B\">%</span> sign), these columns describe the host and not the living space itself. Since a host could have many living spaces and we don't have enough information to uniquely group living spaces to the hosts themselves, let's avoid using any columns that don't directly describe the living space or the listing itself:\n\n- <span style=\"background-color: #F9EBEA; color:##C0392B\">host_response_rate</span>\n- <span style=\"background-color: #F9EBEA; color:##C0392B\">host_acceptance_rate</span>\n- <span style=\"background-color: #F9EBEA; color:##C0392B\">host_listings_count</span>\n\nLet's remove these 9 columns from the Dataframe\n\n<br>\n<div class=\"alert alert-info\">\n<b>Exercise Start.</b>\n</div>\n\n**Description**: \n\n1. Remove the 9 columns we discussed above from <span style=\"background-color: #F9EBEA; color:##C0392B\">dc_listings</span>:\n - 3 containing non-numerical values\n - 3 containing numerical but non-ordinal values\n - 3 describing the host instead of the living space itself\n2. Verify the number of null values of each remain columns\n\n",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nnp.random.seed(1)\n\ndc_listings = pd.read_csv('dc_airbnb.csv')\ndc_listings = dc_listings.loc[np.random.permutation(len(dc_listings))]\nstripped_commas = dc_listings['price'].str.replace(',', '')\nstripped_dollars = stripped_commas.str.replace('$', '')\ndc_listings['price'] = stripped_dollars.astype('float')\ncolumns = ['room_type', 'city', 'state', 'latitude', 'longitude', 'zipcode', 'host_response_rate','host_acceptance_rate','host_listings_count']\ndc_listings.drop(columns, inplace=True, axis=1)\ndc_listings.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 3723 entries, 574 to 1061\nData columns (total 10 columns):\naccommodates 3723 non-null int64\nbedrooms 3702 non-null float64\nbathrooms 3696 non-null float64\nbeds 3712 non-null float64\nprice 3723 non-null float64\ncleaning_fee 2335 non-null object\nsecurity_deposit 1426 non-null object\nminimum_nights 3723 non-null int64\nmaximum_nights 3723 non-null int64\nnumber_of_reviews 3723 non-null int64\ndtypes: float64(4), int64(4), object(2)\nmemory usage: 319.9+ KB\n"
]
],
[
[
"3. Handling missing values\n==\n\nOf the remaining columns, 3 columns have a few missing values (less than 1% of the total number of rows):\n\n- <span style=\"background-color: #F9EBEA; color:##C0392B\">bedrooms</span>\n- <span style=\"background-color: #F9EBEA; color:##C0392B\">bathrooms</span>\n- <span style=\"background-color: #F9EBEA; color:##C0392B\">beds</span>\n\nSince the number of rows containing missing values for one of these 3 columns is low, we can select and remove those rows without losing much information. There are also 2 columns have a large number of missing values:\n\n- <span style=\"background-color: #F9EBEA; color:##C0392B\">cleaning_fee</span> - 37.3% of the rows\n- <span style=\"background-color: #F9EBEA; color:##C0392B\">security_deposit</span> - 61.7% of the rows\n\nand we can't handle these easily. We can't just remove the rows containing missing values for these 2 columns because we'd miss out on the majority of the observations in the dataset. Instead, let's remove these 2 columns entirely from consideration.\n\n<br>\n<div class=\"alert alert-info\">\n<b>Exercise Start.</b>\n</div>\n\n**Description**: \n\n1. Drop the <span style=\"background-color: #F9EBEA; color:##C0392B\">cleaning_fee</span> and <span style=\"background-color: #F9EBEA; color:##C0392B\">security_deposit</span> columns from <span style=\"background-color: #F9EBEA; color:##C0392B\">dc_listings</span>.\n2. Then, remove all rows that contain a missing value for the <span style=\"background-color: #F9EBEA; color:##C0392B\">bedrooms</span>, <span style=\"background-color: #F9EBEA; color:##C0392B\">bathrooms</span>, or <span style=\"background-color: #F9EBEA; color:##C0392B\">beds</span> column from <span style=\"background-color: #F9EBEA; color:##C0392B\">dc_listings</span>.\n - You can accomplish this by using the [Dataframe method dropna()](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.dropna.html) and setting the <span style=\"background-color: #F9EBEA; color:##C0392B\">axis</span> parameter to **0**.\n - Since only the <span style=\"background-color: #F9EBEA; color:##C0392B\">bedrooms</span>, <span style=\"background-color: #F9EBEA; color:##C0392B\">bathrooms</span> and <span style=\"background-color: #F9EBEA; color:##C0392B\">beds</span> columns contain any missing values, rows containing missing values in these columns will be removed.\n3. Display the null value counts for the updated <span style=\"background-color: #F9EBEA; color:##C0392B\">dc_listings</span> Dataframe to confirm that there are no missing values left.",
"_____no_output_____"
]
],
[
[
"dc_listings.drop(['cleaning_fee','security_deposit'], inplace=True, axis=1)\ndc_listings = dc_listings.replace('', np.nan)\ndc_listings.dropna(how='any', inplace=True)\ndc_listings.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 3671 entries, 574 to 1061\nData columns (total 8 columns):\naccommodates 3671 non-null int64\nbedrooms 3671 non-null float64\nbathrooms 3671 non-null float64\nbeds 3671 non-null float64\nprice 3671 non-null float64\nminimum_nights 3671 non-null int64\nmaximum_nights 3671 non-null int64\nnumber_of_reviews 3671 non-null int64\ndtypes: float64(4), int64(4)\nmemory usage: 258.1 KB\n"
]
],
[
[
"4. Normalize columns\n==\n\nHere's how the <span style=\"background-color: #F9EBEA; color:##C0392B\">dc_listings</span> Dataframe looks like after all the changes we made:\n\n| accommodates | bedrooms | bathrooms | beds | price | minimum_nights | maximum_nights | number_of_reviews |\n|--------------|----------|-----------|------|-------|----------------|----------------|-------------------|\n| 2 | 1.0 | 1.0 | 1.0 | 125.0 | 1 | 4 | 149 |\n| 2 | 1.0 | 1.5 | 1.0 | 85.0 | 1 | 30 | 49 |\n| 1 | 1.0 | 0.5 | 1.0 | 50.0 | 1 | 1125 | 1 |\n| 2 | 1.0 | 1.0 | 1.0 | 209.0 | 4 | 730 | 2 |\n| 12 | 5.0 | 2.0 | 5.0 | 215.0 | 2 | 1825 | 34 |\n\nYou may have noticed that while the <span style=\"background-color: #F9EBEA; color:##C0392B\">accommodates</span>, <span style=\"background-color: #F9EBEA; color:##C0392B\">bedrooms</span>, <span style=\"background-color: #F9EBEA; color:##C0392B\">bathrooms</span>, <span style=\"background-color: #F9EBEA; color:##C0392B\">beds</span>, and <span style=\"background-color: #F9EBEA; color:##C0392B\">minimum_nights</span> columns hover between 0 and 12 (at least in the first few rows), the values in the <span style=\"background-color: #F9EBEA; color:##C0392B\">maximum_nights</span> and <span style=\"background-color: #F9EBEA; color:##C0392B\">number_of_reviews</span> columns span much larger ranges. For example, the <span style=\"background-color: #F9EBEA; color:##C0392B\">maximum_nights</span> column has values as low as 4 and high as 1825, in the first few rows itself. If we use these 2 columns as part of a k-nearest neighbors model, these attributes could end up having an outsized effect on the distance calculations because of the largeness of the values.\n\nFor example, 2 living spaces could be identical across every attribute but be vastly different just on the <span style=\"background-color: #F9EBEA; color:##C0392B\">maximum_nights</span> column. If one listing had a <span style=\"background-color: #F9EBEA; color:##C0392B\">maximum_nights</span> value of 1825 and the other a <span style=\"background-color: #F9EBEA; color:##C0392B\">maximum_nights</span> value of 4, because of the way Euclidean distance is calculated, these listings would be considered very far apart because of the outsized effect the largeness of the values had on the overall Euclidean distance. To prevent any single column from having too much of an impact on the distance, we can **normalize** all of the columns to have a mean of 0 and a standard deviation of 1.\n\nNormalizing the values in each columns to the [standard normal distribution](https://en.wikipedia.org/wiki/Normal_distribution#Standard_normal_distribution) (mean of 0, standard deviation of 1) preserves the distribution of the values in each column while aligning the scales. To normalize the values in a column to the standard normal distribution, you need to:\n\n- from each value, subtract the mean of the column\n- divide each value by the standard deviation of the column\n\nHere's the mathematical formula describing the transformation that needs to be applied for all values in a column:\n\n$\\displaystyle z= \\frac{x − \\mu}{\\sigma}$\n\nwhere x is a value in a specific column, $\\mu$ is the mean of all the values in the column, and $\\sigma$ is the standard deviation of all the values in the column. 
Here's what the corresponding code, using pandas, looks like:\n\n>```python\n# Subtract each value in the column by the mean.\nfirst_transform = dc_listings['maximum_nights'] - dc_listings['maximum_nights'].mean()\n# Divide each value in the column by the standard deviation.\nnormalized_col = first_transform / dc_listings['maximum_nights'].std()\n```\n\nTo apply this transformation across all of the columns in a Dataframe, you can use the corresponding Dataframe methods mean() and std():\n\n>```python\nnormalized_listings = (dc_listings - dc_listings.mean()) / (dc_listings.std())\n```\n\nThese methods were written with mass column transformation in mind and when you call <span style=\"background-color: #F9EBEA; color:##C0392B\">mean()</span> or <span style=\"background-color: #F9EBEA; color:##C0392B\">std()</span>, the appropriate column means and column standard deviations are used for each value in the Dataframe. Let's now normalize all of the feature columns in <span style=\"background-color: #F9EBEA; color:##C0392B\">dc_listings</span>.\n\n<br>\n<div class=\"alert alert-info\">\n<b>Exercise Start.</b>\n</div>\n\n**Description**: \n\n1. Normalize all of the feature columns in <span style=\"background-color: #F9EBEA; color:##C0392B\">dc_listings</span> and assign the new Dataframe containing just the normalized feature columns to <span style=\"background-color: #F9EBEA; color:##C0392B\">normalized_listings</span>.\n2. Add the price column from <span style=\"background-color: #F9EBEA; color:##C0392B\">dc_listings</span> to <span style=\"background-color: #F9EBEA; color:##C0392B\">normalized_listings</span>.\n3. Display the first 3 rows in <span style=\"background-color: #F9EBEA; color:##C0392B\">normalized_listings</span>.",
"_____no_output_____"
]
],
[
[
"normalized_listings = (dc_listings - dc_listings.mean()) / (dc_listings.std())\nnormalized_listings['price'] = dc_listings['price']\nnormalized_listings.head(3)",
"_____no_output_____"
]
],
[
[
"5. Euclidean distance for multivariate case\n==\n\nIn the last mission, we trained 2 univariate k-nearest neighbors models. The first one used the <span style=\"background-color: #F9EBEA; color:##C0392B\">accommodates</span> attribute while the second one used the <span style=\"background-color: #F9EBEA; color:##C0392B\">bathrooms</span> attribute. Let's now train a model that uses **both** attributes when determining how similar 2 living spaces are. Let's refer to the Euclidean distance equation again to see what the distance calculation using 2 attributes would look like:\n\n$\\displaystyle d = \\sqrt{(q_1 - p_1)^2 + (q_2 - p_2)^2 + \\ldots + (q_n - p_n)^2}$\n\nSince we're using 2 attributes, the distance calculation would look like:\n\n$\\displaystyle d = \\sqrt{(accommodates_1 - accomodates_2)^2 + (bathrooms_1 - bathrooms_2)^2}$\n\n\nTo find the distance between 2 living spaces, we need to calculate the squared difference between both <span style=\"background-color: #F9EBEA; color:##C0392B\">accommodates</span> values, the squared difference between both <span style=\"background-color: #F9EBEA; color:##C0392B\">bathrooms</span> values, add them together, and then take the square root of the resulting sum. Here's what the Euclidean distance between the first 2 rows in <span style=\"background-color: #F9EBEA; color:##C0392B\">normalized_listings</span> looks like:\n\n<img width=\"600\" alt=\"creating a repo\" src=\"https://drive.google.com/uc?export=view&id=15uoTMT1rzRLx9T8kIbsOWw7HaTmdBP0o\">\n\n\nSo far, we've been calculating Euclidean distance ourselves by writing the logic for the equation ourselves. We can instead use the [distance.euclidean()](http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.distance.euclidean.html) function from <span style=\"background-color: #F9EBEA; color:##C0392B\">scipy.spatial</span>, which takes in 2 vectors as the parameters and calculates the Euclidean distance between them. The <span style=\"background-color: #F9EBEA; color:##C0392B\">euclidean()</span> function expects:\n\n- both of the vectors to be represented using a **list-like** object (Python list, NumPy array, or pandas Series)\n- both of the vectors must be 1-dimensional and have the same number of elements\n\nHere's a simple example:\n\n>```python\nfrom scipy.spatial import distance\nfirst_listing = [-0.596544, -0.439151]\nsecond_listing = [-0.596544, 0.412923]\ndist = distance.euclidean(first_listing, second_listing)\n```\n\nLet's use the <span style=\"background-color: #F9EBEA; color:##C0392B\">euclidean()</span> function to calculate the Euclidean distance between 2 rows in our dataset to practice.\n\n<br>\n<div class=\"alert alert-info\">\n<b>Exercise Start.</b>\n</div>\n\n**Description**: \n\n1. Calculate the Euclidean distance using only the <span style=\"background-color: #F9EBEA; color:##C0392B\">accommodates</span> and <span style=\"background-color: #F9EBEA; color:##C0392B\">bathrooms</span> features between the first row and fifth row in <span style=\"background-color: #F9EBEA; color:##C0392B\">normalized_listings</span> using the <span style=\"background-color: #F9EBEA; color:##C0392B\">distance.euclidean()</span> function.\n2. Assign the distance value to <span style=\"background-color: #F9EBEA; color:##C0392B\">first_fifth_distance</span> and display using the <span style=\"background-color: #F9EBEA; color:##C0392B\">print</span> function.\n",
"_____no_output_____"
]
],
[
[
"from scipy.spatial import distance\nvector1 = normalized_listings[['accommodates','bathrooms']].iloc[0]\nvector2 = normalized_listings[['accommodates', 'bathrooms']].iloc[14] \nfirst_fifth_distance = distance.euclidean(vector1, vector2)\nprint(first_fifth_distance)",
"0.49895477658834053\n"
]
],
[
[
"6. Introduction to scikit-learn\n==\n\nSo far, we've been writing functions from scratch to train the k-nearest neighbor models. While this is helpful deliberate practice to understand how the mechanics work, you can be more productive and iterate quicker by using a library that handles most of the implementation. In this screen, we'll learn about the [scikit-learn library](http://scikit-learn.org/), which is the most popular machine learning in Python. Scikit-learn contains functions for all of the major machine learning algorithms and a simple, unified workflow. Both of these properties allow data scientists to be incredibly productive when training and testing different models on a new dataset.\n\nThe scikit-learn workflow consists of 4 main steps:\n\n- instantiate the specific machine learning model you want to use\n- fit the model to the training data\n- use the model to make predictions\n- evaluate the accuracy of the predictions\n\n\nWe'll focus on the first 3 steps in this screen and the next screen. Each model in scikit-learn is implemented as a [separate class](http://scikit-learn.org/dev/modules/classes.html) and the first step is to identify the class we want to create an instance of. In our case, we want to use the [KNeighborsRegressor class](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html#sklearn.neighbors.KNeighborsRegressor).\nAny model that helps us predict numerical values, like listing price in our case, is known as a **regression** model. The other main class of machine learning models is called classification, where we're trying to predict a label from a fixed set of labels (e.g. blood type or gender). The word **regressor** from the class name <span style=\"background-color: #F9EBEA; color:##C0392B\">KNeighborsRegressor</span> refers to the regression model class that we just discussed.\n\nScikit-learn uses a similar object-oriented style to Matplotlib and you need to instantiate an empty model first by calling the constructor:\n\n>```python\nfrom sklearn.neighbors import KNeighborsRegressor\nknn = KNeighborsRegressor()\n```\n\nIf you refer to the [documentation](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html#sklearn.neighbors.KNeighborsRegressor), you'll notice that by default:\n\n- <span style=\"background-color: #F9EBEA; color:##C0392B\">n_neighbors:</span> the number of neighbors, is set to **5**\n- <span style=\"background-color: #F9EBEA; color:##C0392B\">algorithm:</span> for computing nearest neighbors, is set to **auto**\n- <span style=\"background-color: #F9EBEA; color:##C0392B\">p:</span> set to **2**, corresponding to Euclidean distance\n\nLet's set the <span style=\"background-color: #F9EBEA; color:##C0392B\">algorithm</span> parameter to <span style=\"background-color: #F9EBEA; color:##C0392B\">brute</span> and leave the <span style=\"background-color: #F9EBEA; color:##C0392B\">n_neighbors</span> value as **5**, which matches the implementation we wrote in the last mission. If we leave the <span style=\"background-color: #F9EBEA; color:##C0392B\">algorithm</span> parameter set to the default value of <span style=\"background-color: #F9EBEA; color:##C0392B\">auto</span>, scikit-learn will try to use tree-based optimizations to improve performance (which are outside of the scope of this mission):\n\n>```python\nknn = KNeighborsRegressor(algorithm='brute')\n```\n",
"_____no_output_____"
],
[
"7. Fitting a model and making predictions\n==\n\nNow, we can fit the model to the data using the [fit method](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html#sklearn.neighbors.KNeighborsRegressor.fit). For all models, the <span style=\"background-color: #F9EBEA; color:##C0392B\">fit</span> method takes in 2 required parameters:\n\n- matrix-like object, containing the feature columns we want to use from the training set.\n- list-like object, containing correct target values.\n\nMatrix-like object means that the method is flexible in the input and either a Dataframe or a NumPy 2D array of values is accepted. This means you can select the columns you want to use from the Dataframe and use that as the first parameter to the <span style=\"background-color: #F9EBEA; color:##C0392B\">fit</span> method.\n\nIf you recall from earlier in the mission, all of the following are acceptable list-like objects:\n\n- NumPy array\n- Python list\n- pandas Series object (e.g. when selecting a column)\n\nYou can select the target column from the Dataframe and use that as the second parameter to the <span style=\"background-color: #F9EBEA; color:##C0392B\">fit</span> method:\n\n>```python\n# Split full dataset into train and test sets.\ntrain_df = normalized_listings.iloc[0:2792]\ntest_df = normalized_listings.iloc[2792:]\n# Matrix-like object, containing just the 2 columns of interest from training set.\ntrain_features = train_df[['accommodates', 'bathrooms']]\n# List-like object, containing just the target column, `price`.\ntrain_target = normalized_listings['price']\n# Pass everything into the fit method.\nknn.fit(train_features, train_target)\n```\n\n\nWhen the <span style=\"background-color: #F9EBEA; color:##C0392B\">fit</span> method is called, scikit-learn stores the training data we specified within the KNearestNeighbors instance (<span style=\"background-color: #F9EBEA; color:##C0392B\">knn</span>). If you try passing in data containing missing values or non-numerical values into the <span style=\"background-color: #F9EBEA; color:##C0392B\">fit</span> method, scikit-learn will return an error. Scikit-learn contains many such features that help prevent us from making common mistakes.\n\nNow that we specified the training data we want used to make predictions, we can use the [predict method](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html#sklearn.neighbors.KNeighborsRegressor.predict) to make predictions on the test set. The <span style=\"background-color: #F9EBEA; color:##C0392B\">predict</span> method has only one required parameter:\n\n- matrix-like object, containing the feature columns from the dataset we want to make predictions on\n\nThe number of feature columns you use during both training and testing need to match or scikit-learn will return an error:\n\n>```python\npredictions = knn.predict(test_df[['accommodates', 'bathrooms']])\n```\n\nThe <span style=\"background-color: #F9EBEA; color:##C0392B\">predict()</span> method returns a NumPy array containing the predicted <span style=\"background-color: #F9EBEA; color:##C0392B\">price</span> values for the test set. You now have everything you need to practice the entire scikit-learn workflow.\n\n<br>\n<div class=\"alert alert-info\">\n<b>Exercise Start.</b>\n</div>\n\n**Description**: \n\n1. 
Create an instance of the [KNeighborsRegressor](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html#sklearn.neighbors.KNeighborsRegressor) class with the following parameters:\n - <span style=\"background-color: #F9EBEA; color:##C0392B\">n_neighbors</span>: 5\n - <span style=\"background-color: #F9EBEA; color:##C0392B\">algorithm</span>: brute\n2. Use the <span style=\"background-color: #F9EBEA; color:##C0392B\">fit</span> method to specify the data we want the k-nearest neighbor model to use. Use the following parameters:\n - training data, feature columns: just the <span style=\"background-color: #F9EBEA; color:##C0392B\">accommodates</span> and <span style=\"background-color: #F9EBEA; color:##C0392B\">bathrooms</span> columns, in that order, from <span style=\"background-color: #F9EBEA; color:##C0392B\">train_df</span>.\n - training data, target column: the <span style=\"background-color: #F9EBEA; color:##C0392B\">price</span> column from <span style=\"background-color: #F9EBEA; color:##C0392B\">train_df</span>.\n3. Call the <span style=\"background-color: #F9EBEA; color:##C0392B\">predict</span> method to make predictions on:\n - the <span style=\"background-color: #F9EBEA; color:##C0392B\">accommodates</span> and <span style=\"background-color: #F9EBEA; color:##C0392B\">bathrooms</span> columns from <span style=\"background-color: #F9EBEA; color:##C0392B\">test_df</span>\n - assign the resulting NumPy array of predicted price values to <span style=\"background-color: #F9EBEA; color:##C0392B\">predictions</span>.",
"_____no_output_____"
]
],
[
[
"from sklearn.neighbors import KNeighborsRegressor\n\ntrain_df = normalized_listings.iloc[0:2792]\ntest_df = normalized_listings.iloc[2792:]\n\nknn = KNeighborsRegressor(n_neighbors=5, algorithm='brute')\ntrain_features = train_df[['accommodates', 'bathrooms']]\ntrain_target = train_df['price']\nknn.fit(train_features, train_target)\n\npredictions = knn.predict(test_df[['accommodates', 'bathrooms']])\nprint(predictions)",
"[ 80.8 251.2 89.4 80.8 80.8 80.8 189.8 167.8 167.8\n 199. 251.2 166.6 81. 276.8 80.8 80.8 80.8 166.6\n 76.2 982.2 80.8 245.8 167.8 216.2 80.8 80.8 167.8\n 189.8 225.8 81. 81. 80.8 80.8 80.8 80.8 80.8\n 80.8 166.6 225.8 245.4 225.8 80.8 81. 167.8 135.2\n 167.8 167.8 80.8 80.8 80.8 81. 80.8 80.8 80.8\n 188. 135.2 92.4 145.8 80.8 251.2 80.8 135.2 167.8\n 90.4 80.8 135.2 80.8 80.8 80.8 135.2 166.6 223.6\n 80.8 135.2 80.8 135.2 80.8 106.8 80.8 80.8 80.8\n 135.2 251.2 189.8 80.8 80.8 80.8 135.2 89.4 276.8\n 199. 81. 81. 80.8 80.8 304.6 135.2 135.2 135.2\n 167.8 80.8 135.2 80.8 216.2 167.8 80.8 81. 80.8\n 80.8 89.4 225.8 80.8 189.8 238. 106.8 81. 167.8\n 188. 277.6 135.2 80.8 167.8 80.8 167.8 80.8 534.\n 135.2 167.8 167.8 62.8 167.8 80.8 135.2 80.8 80.8\n 80.8 166.6 80.8 90.4 251.2 167.8 135.2 80.8 276.8\n 167.8 80.8 81. 81. 90.4 135.2 167.8 80.8 225.8\n 216.2 81. 167.8 276.8 135.2 80.8 80.8 167.8 80.8\n 276.8 135.2 167.8 166.6 80.8 251.2 80.8 80.8 189.8\n 80.8 216.2 188. 80.8 80.8 216.2 80.8 80.8 135.2\n 80.8 89.4 80.8 240.6 80.8 81. 80.8 167.8 189.8\n 167.8 80.8 90.4 80.8 276.8 166.6 80.8 81. 90.4\n 167.8 92.4 80.8 135.2 135.2 80.8 167.8 80.8 80.8\n 167.8 216.2 240.6 80.8 80.8 166.6 167.8 187.8 80.8\n 80.8 80.8 80.8 80.8 80.8 135.2 80.8 167.8 80.8\n 304.6 81. 238. 238. 167.8 135.2 81. 80.8 80.8\n 216.2 81. 135.2 80.8 240.6 251.2 81. 238. 238.\n 167.8 80.8 80.8 380. 167.8 135.2 167.8 80.8 80.8\n 81. 80.8 80.8 80.8 188. 80.8 80.8 167.8 80.8\n 135.2 199. 135.2 80.8 80.8 276.8 167.8 80.8 80.8\n 80.8 80.8 167.8 80.8 189.8 216.2 276.8 81. 225.8\n 85.8 135.2 167.8 80.8 80.8 80.8 216.2 304.6 81.\n 135.2 240.6 80.8 225.8 135.2 276.8 135.2 80.8 80.8\n 80.8 80.8 81. 80.8 80.8 80.8 80.8 81. 135.2\n 80.8 80.8 251.2 167.8 80.8 225.8 80.8 1002.2 80.8\n 199. 167.8 80.8 166.6 135.2 198.8 80.8 80.8 62.8\n 81. 188. 167.8 80.8 81. 80.8 80.8 619. 80.8\n 81. 80.8 135.2 80.8 81. 135.2 80.8 80.8 135.2\n 135.2 81. 81. 81. 167.8 135.2 81. 80.8 81.\n 80.8 188. 240.6 81. 167.8 135.2 80.8 80.8 245.8\n 490. 80.8 81. 504.8 80.8 188. 80.8 276.8 80.8\n 145.8 490. 612.2 92.4 80.8 166.6 80.8 240.6 80.8\n 310.8 80.8 251.2 828.2 135.2 80.8 167.8 80.8 199.\n 225.8 80.8 80.8 167.8 81. 167.8 90.4 216.2 80.8\n 81. 135.2 216.2 135.2 199. 80.8 199. 80.8 80.8\n 245.8 80.8 81. 90.4 216.2 1002.2 225.8 80.8 167.8\n 108.8 80.8 80.8 80.8 80.8 276.8 135.2 167.8 80.8\n 167.8 90.4 80.8 188. 80.8 80.8 135.2 80.8 277.6\n 251.2 80.8 80.8 167.8 279.8 276.8 135.2 80.8 92.4\n 199. 92.4 238. 240.6 81. 216.2 80.8 199. 80.8\n 80.8 80.8 188. 279.8 135.2 80.8 166.6 167.8 80.8\n 80.8 81. 135.2 189.8 167.8 80.8 81. 106.8 167.8\n 81. 62.8 80.8 277.6 167.8 80.8 80.8 80.8 80.8\n 166.6 167.8 92.4 80.8 167.8 81. 135.2 167.8 81.\n 80.8 216.2 81. 80.8 380. 81. 80.8 80.8 80.8\n 167.8 619. 135.2 216.2 80.8 80.8 80.8 135.2 135.2\n 80.8 167.8 135.2 245.8 167.8 135.2 80.8 166.6 199.\n 135.2 80.8 80.8 81. 135.2 166.6 81. 188. 81.\n 135.2 80.8 80.8 106.8 80.8 80.8 167.8 80.8 167.8\n 80.8 80.8 189.8 166.6 199. 80.8 80.8 92.4 60.8\n 80.8 166.6 80.8 80.8 135.2 135.2 135.2 135.2 80.8\n 216.2 167.8 80.8 135.2 80.8 80.8 80.8 80.8 135.2\n 80.8 80.8 80.8 80.8 80.8 276.8 90.4 135.2 80.8\n 80.8 251.2 80.8 81. 135.2 80.8 199. 189.8 80.8\n 80.8 253. 81. 80.8 80.8 106.8 80.8 80.8 80.8\n 80.8 167.8 81. 167.8 135.2 167.8 167.8 90.4 80.8\n 80.8 167.8 199. 240.6 135.2 80.8 81. 80.8 225.8\n 135.2 167.8 167.8 135.2 166.6 167.8 80.8 225.8 80.8\n 80.8 207.8 81. 80.8 80.8 80.8 189.8 199. 
135.2\n 80.8 80.8 80.8 207.8 80.8 80.8 80.8 135.2 80.8\n 189.8 135.2 135.2 106.8 80.8 167.8 80.8 80.8 276.8\n 167.8 90.4 80.8 81. 80.8 80.8 135.2 80.8 135.2\n 135.2 189.8 80.8 167.8 240.6 199. 166.6 251.2 167.8\n 135.2 199. 80.8 238. 251.2 135.2 279.8 167.8 80.8\n 167.8 80.8 80.8 80.8 81. 167.8 80.8 135.2 135.2\n 80.8 279.8 167.8 216.2 80.8 304.6 135.2 167.8 251.2\n 81. 62.8 90.4 81. 216.2 251.2 167.8 188. 80.8\n 135.2 167.8 80.8 166.6 145.8 80.8 166.6 81. 167.8\n 198.8 80.8 189.8 80.8 80.8 80.8 688.8 80.8 251.2\n 167.8 80.8 80.8 80.8 167.8 80.8 80.8 80.8 80.8\n 167.8 80.8 166.6 80.8 207.8 80.8 225.8 80.8 167.8\n 80.8 167.8 80.8 167.8 216.2 167.8 166.6 80.8 189.8\n 135.2 80.8 199. 251.2 238. 80.8 277.6 167.8 80.8\n 81. 167.8 135.2 92.4 80.8 299.8 80.8 135.2 135.2\n 80.8 135.2 80.8 167.8 240.6 166.6 80.8 296.6 90.4\n 383.8 167.8 80.8 80.8 80.8 80.8 80.8 276.8 80.8\n 380. 80.8 216.2 167.8 80.8 81. 80.8 80.8 245.4\n 167.8 80.8 216.2 80.8 90.4 80.8 80.8 80.8 167.8\n 81. 80.8 135.2 225.8 135.2 135.2 80.8 80.8 166.6\n 80.8 167.8 240.6 62.8 167.8 80.8 167.8 166.6 80.8\n 90.4 135.2 80.8 135.2 135.2 167.8 80.8 80.8 199.\n 80.8 167.8 167.8 80.8 80.8 80.8 445.4 167.8 167.8\n 167.8 225.8 80.8 135.2 135.2 135.2 80.8 534. 166.6\n 80.8 80.8 167.8 80.8 80.8 189.8 166.6 80.8 166.6\n 80.8 81. 80.8 80.8 167.8 80.8 550.2 216.2 189.8\n 216.2 81. 80.8 199. 80.8 135.2 85.8 80.8 80.8\n 80.8 167.8 240.6 199. 80.8 135.2 80.8 80.8 199.\n 304.6 81. 81. 85.8 198.8 368.6]\n"
]
],
[
[
"8. Calculating MSE using Scikit-Learn\n==\n\nEarlier in this mission, we calculated the MSE and RMSE values using the pandas arithmetic operators to compare each predicted value with the actual value from the <span style=\"background-color: #F9EBEA; color:##C0392B\">price</span> column of our test set. Alternatively, we can instead use the [sklearn.metrics.mean_squared_error function()](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html#sklearn.metrics.mean_squared_error). Once you become familiar with the different machine learning concepts, unifying your workflow using scikit-learn helps save you a lot of time and avoid mistakes.\n\nThe <span style=\"background-color: #F9EBEA; color:##C0392B\">mean_squared_error()</span> function takes in 2 inputs:\n\n- list-like object, representing the true values\n- list-like object, representing the predicted values using the model\n\nFor this function, we won't show any sample code and will leave it to you to understand the function [from the documentation](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html#sklearn.metrics.mean_squared_error) itself to calculate the MSE and RMSE values for the predictions we just made.\n\n\n<br>\n<div class=\"alert alert-info\">\n<b>Exercise Start.</b>\n</div>\n\n**Description**: \n\n1. Use the <span style=\"background-color: #F9EBEA; color:##C0392B\">mean_squared_error</span> function to calculate the MSE value for the predictions we made in the previous screen.\n2. Assign the MSE value to <span style=\"background-color: #F9EBEA; color:##C0392B\">two_features_mse</span>.\n3. Calculate the RMSE value by taking the square root of the MSE value and assign to <span style=\"background-color: #F9EBEA; color:##C0392B\">two_features_rmse</span>.\n4. Display both of these error scores using the <span style=\"background-color: #F9EBEA; color:##C0392B\">print</span> function.",
"_____no_output_____"
]
],
[
[
"from sklearn.metrics import mean_squared_error\ntwo_features_mse = mean_squared_error(test_df['price'], predictions)\ntwo_features_rmse = np.sqrt(two_features_mse)\nprint('MSE two features:',two_features_mse, '\\nRMSE two features:',two_features_rmse)",
"MSE two features: 15660.3979522 \nRMSE two features: 125.141511707\n"
]
],
[
[
"9. Using more features\n==\n\nHere's a table comparing the MSE and RMSE values for the 2 univariate models from the last mission and the multivariate model we just trained:\n\n| feature(s) | MSE | RMSE |\n|-------------------------|---------|-------|\n| accommodates | 18646.5 | 136.6 |\n| bathrooms | 17333.4 | 131.7 |\n| accommodates, bathrooms | 15660.4 | 125.1 |\n\nAs you can tell, the model we trained using both features ended up performing better (lower error score) than either of the univariate models from the last mission. Let's now train a model using the following 4 features:\n\n- <span style=\"background-color: #F9EBEA; color:##C0392B\">accommodates</span>\n- <span style=\"background-color: #F9EBEA; color:##C0392B\">bedrooms</span>\n- <span style=\"background-color: #F9EBEA; color:##C0392B\">bathrooms</span>\n- <span style=\"background-color: #F9EBEA; color:##C0392B\">number_of_reviews</span>\n\nScikit-learn makes it incredibly easy to swap the columns used during training and testing. We're going to leave this for you as a challenge to train and test a k-nearest neighbors model using these columns instead. Use the code you wrote in the last screen as a guide.\n\n<br>\n<div class=\"alert alert-info\">\n<b>Exercise Start.</b>\n</div>\n\n**Description**: \n\n\n1. Create a new instance of the [KNeighborsRegressor class](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html#sklearn.neighbors.KNeighborsRegressor) with the following parameters:\n - <span style=\"background-color: #F9EBEA; color:##C0392B\">n_neighbors</span>: 5\n - <span style=\"background-color: #F9EBEA; color:##C0392B\">algorithm</span>: brute\n2. Fit a model that uses the following columns from our training set (**train_df**):\n - <span style=\"background-color: #F9EBEA; color:##C0392B\">accommodates</span>\n - <span style=\"background-color: #F9EBEA; color:##C0392B\">bedrooms</span>\n - <span style=\"background-color: #F9EBEA; color:##C0392B\">bathrooms</span>\n - <span style=\"background-color: #F9EBEA; color:##C0392B\">number_of_reviews</span>\n3. Use the model to make predictions on the test set (**test_df**) using the same columns. Assign the NumPy array of predictions to <span style=\"background-color: #F9EBEA; color:##C0392B\">four_predictions</span>.\n4. Use the <span style=\"background-color: #F9EBEA; color:##C0392B\">mean_squared_error()</span> function to calculate the MSE value for these predictions by comparing <span style=\"background-color: #F9EBEA; color:##C0392B\">four_predictions</span> with the price column from **test_df**. Assign the computed MSE value to <span style=\"background-color: #F9EBEA; color:##C0392B\">four_mse</span>.\n5. Calculate the RMSE value and assign to <span style=\"background-color: #F9EBEA; color:##C0392B\">four_rmse</span>.\n6. Display <span style=\"background-color: #F9EBEA; color:##C0392B\">four_mse</span> and <span style=\"background-color: #F9EBEA; color:##C0392B\">four_rmse</span> using the print function.\n",
"_____no_output_____"
]
],
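[
[
"Since only the feature list changes between these experiments, one convenient pattern is to wrap the workflow in a small helper function. This is just a sketch for this mission's data; the helper name train_and_evaluate is ours, not part of scikit-learn:\n\n>```python\nimport numpy as np\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.metrics import mean_squared_error\n\ndef train_and_evaluate(cols):\n    knn = KNeighborsRegressor(n_neighbors=5, algorithm='brute')\n    knn.fit(train_df[cols], train_df['price'])\n    predictions = knn.predict(test_df[cols])\n    mse = mean_squared_error(test_df['price'], predictions)\n    return mse, np.sqrt(mse)\n```\n",
"_____no_output_____"
]
],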
[
[
"from sklearn.neighbors import KNeighborsRegressor\n\nfeatures = ['accommodates', 'bedrooms', 'bathrooms', 'number_of_reviews']\nknn = KNeighborsRegressor(n_neighbors=5, algorithm='brute')\n\nknn.fit(train_df[features], train_df['price'])\nfour_predictions = knn.predict(test_df[features])\nfour_mse = mean_squared_error(test_df['price'], four_predictions)\nfour_rmse = four_mse** (1/2)\nprint('MSE four features:', four_mse,'\\nRMSE four features:', four_rmse)",
"MSE four features: 13320.2306257 \nRMSE four features: 115.413303504\n"
]
],
[
[
"10. Using all features\n==\n\nSo far so good! As we increased the features the model used, we observed lower MSE and RMSE values:\n\n| feature(s) | MSE | RMSE |\n|------------------------------------------------------|---------|-------|\n| accommodates | 18646.5 | 136.6 |\n| bathrooms | 17333.4 | 131.7 |\n| accommodates, bathrooms | 15660.4 | 125.1 |\n| accommodates, bathrooms, bedrooms, number_of_reviews | 13320.2 | 115.4 |\n\nLet's take this to the extreme and use all of the potential features. We should expect the error scores to decrease since so far adding more features has helped do so.\n\n<br>\n<div class=\"alert alert-info\">\n<b>Exercise Start.</b>\n</div>\n\n**Description**: \n\n1. Use all of the columns, except for the <span style=\"background-color: #F9EBEA; color:##C0392B\">price</span> column, to train a k-nearest neighbors model using the same parameters for the <span style=\"background-color: #F9EBEA; color:##C0392B\">KNeighborsRegressor</span> class as the ones from the last few screens.\n2. Use the model to make predictions on the test set and assign the resulting NumPy array of predictions to <span style=\"background-color: #F9EBEA; color:##C0392B\">all_features_predictions</span>.\n3. Calculate the MSE and RMSE values and assign to <span style=\"background-color: #F9EBEA; color:##C0392B\">all_features_mse</span> and <span style=\"background-color: #F9EBEA; color:##C0392B\">all_features_rmse</span> accordingly.\n4. Use the **print** function to display both error scores.",
"_____no_output_____"
]
],
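[
[
"One way to avoid typing every column name by hand is to derive the feature list from the Dataframe itself. This sketch assumes normalized_listings (and therefore train_df) only contains the numeric columns prepared earlier plus the price column:\n\n>```python\nfeatures = train_df.columns.tolist()\nfeatures.remove('price')\n\nknn = KNeighborsRegressor(n_neighbors=5, algorithm='brute')\nknn.fit(train_df[features], train_df['price'])\nall_features_predictions = knn.predict(test_df[features])\n```\n",
"_____no_output_____"
]
],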
[
[
"features = ['accommodates', 'bedrooms', 'bathrooms', 'number_of_reviews', 'minimum_nights','maximum_nights','beds']\nknn = KNeighborsRegressor(n_neighbors=5, algorithm='brute')\nknn.fit(train_df[features], train_df['price'])\nall_features_predictions = knn.predict(test_df[features])\nall_features_mse = mean_squared_error(test_df['price'], all_features_predictions)\nall_features_rmse = all_features_mse ** (1/2)\nprint('MSE four features:',all_features_mse, '\\nRMSE four features:', all_features_rmse)",
"MSE four features: 15393.3686917 \nRMSE four features: 124.07001528\n"
]
],
[
[
"11. Next steps\n==\n\nInterestingly enough, the RMSE value actually increased to **125.1** when we used all of the features available to us. This means that selecting the right features is important and that using more features doesn't automatically improve prediction accuracy. We should re-phrase the lever we mentioned earlier from:\n\n- increase the number of attributes the model uses to calculate similarity when ranking the closest neighbors\n\nto:\n\n- select the relevant attributes the model uses to calculate similarity when ranking the closest neighbors\n\nThe process of selecting features to use in a model is known as **feature selection**.\n\nIn this mission, we prepared the data to be able to use more features, trained a few models using multiple features, and evaluated the different performance tradeoffs. We explored how using more features doesn't always improve the accuracy of a k-nearest neighbors model. In the next mission, we'll explore another knob for tuning k-nearest neighbor models - the k value.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a24d61cb7e73899e5146c0ee0c65076bc91b8e6
| 10,470 |
ipynb
|
Jupyter Notebook
|
context/acl2-notebooks/programming-tutorial/01 - Expressions in ACL2.ipynb
|
rubengamboa/acl2-docker-images
|
dde63c7291e6ca1f0587b151e12d596bb45e4c1d
|
[
"BSD-3-Clause"
] | null | null | null |
context/acl2-notebooks/programming-tutorial/01 - Expressions in ACL2.ipynb
|
rubengamboa/acl2-docker-images
|
dde63c7291e6ca1f0587b151e12d596bb45e4c1d
|
[
"BSD-3-Clause"
] | null | null | null |
context/acl2-notebooks/programming-tutorial/01 - Expressions in ACL2.ipynb
|
rubengamboa/acl2-docker-images
|
dde63c7291e6ca1f0587b151e12d596bb45e4c1d
|
[
"BSD-3-Clause"
] | null | null | null | 42.909836 | 485 | 0.586724 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a24dbe79fe916f91852e68e177ffe010d9c801b
| 14,508 |
ipynb
|
Jupyter Notebook
|
JupyterNotebookCode/tf_gpu_3-virtual_device.ipynb
|
DragonYong/Tensorflow
|
1f2b9fd81916515eb27f76827d31a61c31e03edb
|
[
"MIT"
] | null | null | null |
JupyterNotebookCode/tf_gpu_3-virtual_device.ipynb
|
DragonYong/Tensorflow
|
1f2b9fd81916515eb27f76827d31a61c31e03edb
|
[
"MIT"
] | 2 |
2020-06-01T04:32:46.000Z
|
2020-06-02T01:10:32.000Z
|
JupyterNotebookCode/tf_gpu_3-virtual_device.ipynb
|
DragonYong/Tensorflow
|
1f2b9fd81916515eb27f76827d31a61c31e03edb
|
[
"MIT"
] | null | null | null | 41.09915 | 298 | 0.574442 |
[
[
[
"import matplotlib as mpl\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport numpy as np\nimport sklearn\nimport pandas as pd\nimport os\nimport sys\nimport time\nimport tensorflow as tf\n\nfrom tensorflow import keras\n\nprint(tf.__version__)\nprint(sys.version_info)\nfor module in mpl, np, pd, sklearn, tf, keras:\n print(module.__name__, module.__version__)\n",
"2.0.0-beta1\nsys.version_info(major=3, minor=5, micro=3, releaselevel='final', serial=0)\nmatplotlib 3.0.3\nnumpy 1.16.4\npandas 0.24.2\nsklearn 0.21.2\ntensorflow 2.0.0-beta1\ntensorflow.python.keras.api._v2.keras 2.2.4-tf\n"
],
[
"tf.debugging.set_log_device_placement(True)\ngpus = tf.config.experimental.list_physical_devices('GPU')\ntf.config.experimental.set_visible_devices(gpus[1], 'GPU')\ntf.config.experimental.set_virtual_device_configuration(\n gpus[1],\n [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=3072),\n tf.config.experimental.VirtualDeviceConfiguration(memory_limit=3072)])\nprint(len(gpus))\nlogical_gpus = tf.config.experimental.list_logical_devices('GPU')\nprint(len(logical_gpus))\n",
"4\n2\n"
],
[
"fashion_mnist = keras.datasets.fashion_mnist\n(x_train_all, y_train_all), (x_test, y_test) = fashion_mnist.load_data()\nx_valid, x_train = x_train_all[:5000], x_train_all[5000:]\ny_valid, y_train = y_train_all[:5000], y_train_all[5000:]\n\nprint(x_valid.shape, y_valid.shape)\nprint(x_train.shape, y_train.shape)\nprint(x_test.shape, y_test.shape)",
"(5000, 28, 28) (5000,)\n(55000, 28, 28) (55000,)\n(10000, 28, 28) (10000,)\n"
],
[
"from sklearn.preprocessing import StandardScaler\n\nscaler = StandardScaler()\nx_train_scaled = scaler.fit_transform(\n x_train.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28, 1)\nx_valid_scaled = scaler.transform(\n x_valid.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28, 1)\nx_test_scaled = scaler.transform(\n x_test.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28, 1)\n",
"_____no_output_____"
],
[
"def make_dataset(images, labels, epochs, batch_size, shuffle=True):\n dataset = tf.data.Dataset.from_tensor_slices((images, labels))\n if shuffle:\n dataset = dataset.shuffle(10000)\n dataset = dataset.repeat(epochs).batch(batch_size).prefetch(50)\n return dataset\n\nbatch_size = 128\nepochs = 100\ntrain_dataset = make_dataset(x_train_scaled, y_train, epochs, batch_size)",
"Executing op TensorSliceDataset in device /job:localhost/replica:0/task:0/device:CPU:0\nExecuting op ShuffleDataset in device /job:localhost/replica:0/task:0/device:CPU:0\nExecuting op RepeatDataset in device /job:localhost/replica:0/task:0/device:CPU:0\nExecuting op BatchDatasetV2 in device /job:localhost/replica:0/task:0/device:CPU:0\nExecuting op PrefetchDataset in device /job:localhost/replica:0/task:0/device:CPU:0\n"
],
[
"model = keras.models.Sequential()\nmodel.add(keras.layers.Conv2D(filters=32, kernel_size=3,\n padding='same',\n activation='relu',\n input_shape=(28, 28, 1)))\nmodel.add(keras.layers.Conv2D(filters=32, kernel_size=3,\n padding='same',\n activation='relu'))\nmodel.add(keras.layers.MaxPool2D(pool_size=2))\nmodel.add(keras.layers.Conv2D(filters=64, kernel_size=3,\n padding='same',\n activation='relu'))\nmodel.add(keras.layers.Conv2D(filters=64, kernel_size=3,\n padding='same',\n activation='relu'))\nmodel.add(keras.layers.MaxPool2D(pool_size=2))\nmodel.add(keras.layers.Conv2D(filters=128, kernel_size=3,\n padding='same',\n activation='relu'))\nmodel.add(keras.layers.Conv2D(filters=128, kernel_size=3,\n padding='same',\n activation='relu'))\nmodel.add(keras.layers.MaxPool2D(pool_size=2))\nmodel.add(keras.layers.Flatten())\nmodel.add(keras.layers.Dense(128, activation='relu'))\nmodel.add(keras.layers.Dense(10, activation=\"softmax\"))\n\nmodel.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer = \"sgd\",\n metrics = [\"accuracy\"])",
"Executing op RandomUniform in device /job:localhost/replica:0/task:0/device:GPU:0\nExecuting op Sub in device /job:localhost/replica:0/task:0/device:GPU:0\nExecuting op Mul in device /job:localhost/replica:0/task:0/device:GPU:0\nExecuting op Add in device /job:localhost/replica:0/task:0/device:GPU:0\nExecuting op VarHandleOp in device /job:localhost/replica:0/task:0/device:GPU:0\nExecuting op VarIsInitializedOp in device /job:localhost/replica:0/task:0/device:GPU:0\nExecuting op LogicalNot in device /job:localhost/replica:0/task:0/device:GPU:0\nExecuting op Assert in device /job:localhost/replica:0/task:0/device:GPU:0\nExecuting op AssignVariableOp in device /job:localhost/replica:0/task:0/device:GPU:0\nExecuting op Fill in device /job:localhost/replica:0/task:0/device:GPU:0\nExecuting op VarHandleOp in device /job:localhost/replica:0/task:0/device:GPU:0\nExecuting op VarHandleOp in device /job:localhost/replica:0/task:0/device:GPU:0\nExecuting op VarHandleOp in device /job:localhost/replica:0/task:0/device:GPU:0\nExecuting op VarHandleOp in device /job:localhost/replica:0/task:0/device:GPU:0\nExecuting op VarHandleOp in device /job:localhost/replica:0/task:0/device:GPU:0\nExecuting op VarHandleOp in device /job:localhost/replica:0/task:0/device:GPU:0\nExecuting op VarHandleOp in device /job:localhost/replica:0/task:0/device:GPU:0\nExecuting op VarHandleOp in device /job:localhost/replica:0/task:0/device:GPU:0\nExecuting op VarHandleOp in device /job:localhost/replica:0/task:0/device:GPU:0\nExecuting op VarHandleOp in device /job:localhost/replica:0/task:0/device:GPU:0\nExecuting op VarHandleOp in device /job:localhost/replica:0/task:0/device:GPU:0\nExecuting op VarHandleOp in device /job:localhost/replica:0/task:0/device:GPU:0\n"
],
[
"model.summary()",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv2d (Conv2D) (None, 28, 28, 32) 320 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, 28, 28, 32) 9248 \n_________________________________________________________________\nmax_pooling2d (MaxPooling2D) (None, 14, 14, 32) 0 \n_________________________________________________________________\nconv2d_2 (Conv2D) (None, 14, 14, 64) 18496 \n_________________________________________________________________\nconv2d_3 (Conv2D) (None, 14, 14, 64) 36928 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, 7, 7, 64) 0 \n_________________________________________________________________\nconv2d_4 (Conv2D) (None, 7, 7, 128) 73856 \n_________________________________________________________________\nconv2d_5 (Conv2D) (None, 7, 7, 128) 147584 \n_________________________________________________________________\nmax_pooling2d_2 (MaxPooling2 (None, 3, 3, 128) 0 \n_________________________________________________________________\nflatten (Flatten) (None, 1152) 0 \n_________________________________________________________________\ndense (Dense) (None, 128) 147584 \n_________________________________________________________________\ndense_1 (Dense) (None, 10) 1290 \n=================================================================\nTotal params: 435,306\nTrainable params: 435,306\nNon-trainable params: 0\n_________________________________________________________________\n"
],
[
"history = model.fit(train_dataset,\n steps_per_epoch = x_train_scaled.shape[0] // batch_size,\n epochs=10)",
"Executing op OptimizeDataset in device /job:localhost/replica:0/task:0/device:CPU:0\nExecuting op ModelDataset in device /job:localhost/replica:0/task:0/device:CPU:0\nExecuting op AnonymousIteratorV2 in device /job:localhost/replica:0/task:0/device:CPU:0\nExecuting op MakeIterator in device /job:localhost/replica:0/task:0/device:CPU:0\nEpoch 1/10\nExecuting op IteratorGetNextSync in device /job:localhost/replica:0/task:0/device:CPU:0\nExecuting op ExpandDims in device /job:localhost/replica:0/task:0/device:GPU:0\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a24dca256c569a2d7657606e3c95de64ade34dc
| 49,234 |
ipynb
|
Jupyter Notebook
|
Mabry_ProblemSet5.ipynb
|
mmabry/CompTools_BSC4452
|
36f31e85c956587d755d96bb004ebc1e27739260
|
[
"MIT"
] | null | null | null |
Mabry_ProblemSet5.ipynb
|
mmabry/CompTools_BSC4452
|
36f31e85c956587d755d96bb004ebc1e27739260
|
[
"MIT"
] | null | null | null |
Mabry_ProblemSet5.ipynb
|
mmabry/CompTools_BSC4452
|
36f31e85c956587d755d96bb004ebc1e27739260
|
[
"MIT"
] | null | null | null | 133.788043 | 22,968 | 0.857091 |
[
[
[
"# Problem set 5\n### Copied database from /blue/bsc4452/share/Class_Files",
"_____no_output_____"
]
],
[
[
"# Import only the modules needed from sqlalchemy\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import MetaData\nfrom sqlalchemy import Table, Column\nfrom sqlalchemy import Integer, String\nfrom sqlalchemy import sql, select, join, desc\nfrom sqlalchemy import func\n\n# Create a Engine object which is our handle into the database.\nengine = create_engine('sqlite:///world.sqlite')\n\n# Connect to the database\nconn = engine.connect()\n\n# Read the metadata from the existing database.\n# Since the database already exists and has tables defined, we can create Python objects based on these automatically.\nDBInfo=MetaData(engine)",
"_____no_output_____"
]
],
[
[
"## Question 1 (5 points):\n### What is the country with the latest year (most recent) of independence?",
"_____no_output_____"
]
],
[
[
"# Auto-create the country object basedon the metadata read into the DBInfo.\ncountry=Table('country', DBInfo, autoload=True)\n\n# SELECT Name, IndepYear FROM country ORDER BY IndepYear DESC LIMIT 1;\nquery=select([country.c.Name, country.c.IndepYear])\\\n .order_by(desc(country.c.IndepYear))\\\n .limit(1)\n\nresult = conn.execute(query)\n\n# Print results\nfor row in result:\n print(row)",
"('Palau', 1994)\n"
]
],
[
[
"## Question 2 (5 points):\n### There are several countries that have become independent since the country in your answer to question 1, add one to the database.",
"_____no_output_____"
]
],
[
[
"### Question 2 (5 points):\n\n## Make sure country code is available\n# query=select([country.c.Code, country.c.Name]).where(country.c.Code.like('SE%'))\n# result = conn.execute(query)\n# for row in result:\n# print(row)\n\n## Check to see what options there are for filling in data\n# print(country.insert())\n\n## Add insert for Serbia\nmy_insert_serbia=country.insert().values(Code='SER', Name='Serbia', Continent='Europe', Region='Eastern Europe', IndepYear='2006', Population='6963764')\n#print(my_insert_serbia)\n\nresult = conn.execute(my_insert_serbia)\n\n## Check to make sure it was added\nquery=select([country.c.Code, country.c.Name, country.c.Continent, country.c.Region, country.c.IndepYear, country.c.Population]).where(country.c.Name.like('Se%'))\n\nresult = conn.execute(query)\n\nfor row in result:\n print(row)\n",
"('SEN', 'Senegal', 'Africa', 'Western Africa', 1960, 9481000)\n('SYC', 'Seychelles', 'Africa', 'Eastern Africa', 1976, 77000)\n('SER', 'Serbia', 'Europe', 'Eastern Europe', 2006, 6963764)\n"
]
],
[
[
"## Question 3 (5 points):\n#### For the country added in question 2, find 2 cities to add to the cities table of the database.",
"_____no_output_____"
]
],
[
[
"# Auto-create the cities object basedon the metadata read into the DBInfo.\ncity=Table('city', DBInfo, autoload=True)\n\n## Check to see what options there are for filling in data\n#print(city.insert())\n\n## Add insert for Belgrade, Serbia (largest population)\nmy_insert_belgrade=city.insert().values(Name='Belgrade', CountryCode='SER', Population='1659440')\n#print(my_insert_belgrade)\nresult = conn.execute(my_insert_belgrade)\n\n## Add insert for Crna Trava, Serbia (smallest population)\nmy_insert_crnatrva=city.insert().values(Name='Crna Trava', CountryCode='SER', Population='1663')\n#print(my_insert_crnatrva)\nresult = conn.execute(my_insert_crnatrva)\n\n\n## Check to make sure the inserts were added\nquery=select([city.c.Name, city.c.CountryCode, city.c.Population]).where(city.c.CountryCode.like('SER'))\n\nresult = conn.execute(query)\n\nfor row in result:\n print(row)\n",
"('Belgrade', 'SER', 1659440)\n('Crna Trava', 'SER', 1663)\n"
]
],
[
[
"## Question 4 (5 points):\n### Using the LifeExpectancy data in the country table on the y-axis, plot this data against some other value.",
"_____no_output_____"
]
],
[
[
"# Import what is needed \nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n## Get data for dataframe\nquery=select([country.c.Name, country.c.Continent, country.c.IndepYear, country.c.LifeExpectancy, country.c.Population, country.c.GNP])\n\nresult = conn.execute(query)\n\n## Make pandas dataframe\ndf=pd.read_sql(query, conn, index_col='Name')\n\nprint(df)\n\n## Plot \nx = df['Continent']\ny = df['LifeExpectancy']\n\n\nplt.bar(x, y)\nplt.xlabel(\"Continent\", labelpad=20)\nplt.ylabel(\"LifeExpectancy\", labelpad=20)\nplt.title(\"LifeExpectancy vs Continent\", y=1.015, fontsize=22);\n\nplt.show()\n\n",
" Continent IndepYear LifeExpectancy Population GNP\nName \nAruba North America NaN 78.4 103000 828.0\nAfghanistan Asia 1919.0 45.9 22720000 5976.0\nAngola Africa 1975.0 38.3 12878000 6648.0\nAnguilla North America NaN 76.1 8000 63.2\nAlbania Europe 1912.0 71.6 3401200 3205.0\n... ... ... ... ... ...\nYugoslavia Europe 1918.0 72.4 10640000 17000.0\nSouth Africa Africa 1910.0 51.1 40377000 116729.0\nZambia Africa 1964.0 37.2 9169000 3377.0\nZimbabwe Africa 1980.0 37.8 11669000 5951.0\nSerbia Europe 2006.0 NaN 6963764 NaN\n\n[240 rows x 5 columns]\n"
]
],
[
[
"## Grad student extra credit (5 points, sorry undergrads, only question 4 counts as extra credit):\n### Plot LifeExpectancy vs the ratio of the total population of all the cities in the country divided by the total population of the country. This is an approximation of the % urban population in the country.",
"_____no_output_____"
]
],
[
[
"## New query for all the cities, country code, and pop\nquery=select([country.c.Code, country.c.Name, country.c.Population,country.c.LifeExpectancy, city.c.CountryCode, func.sum(city.c.Population)]).group_by(city.c.CountryCode).select_from(country.join(city, city.c.CountryCode == country.c.Code))\n\nresult = conn.execute(query)\n\n\n## Make pandas dataframe\ndf=pd.read_sql(query, conn, index_col='Name')\n\n## Make new column to get % urban population\nUP = df[\"sum_1\"] / df[\"Population\"]\ndf[\"UrbanPopulation\"] = UP\nprint(df)\n\n\n## Plot \nx = df['UrbanPopulation']\ny = df['LifeExpectancy']\n\nplt.scatter(x, y)\nplt.xlabel(\"%UrbanPopulation\", labelpad=20)\nplt.ylabel(\"LifeExpectancy\", labelpad=20)\nplt.title(\"LifeExpectancy vs %UrbanPopulation\", y=1.015, fontsize=22);\n\nplt.show()\n",
" Code Population LifeExpectancy CountryCode sum_1 \\\nName \nAruba ABW 103000 78.4 ABW 29034 \nAfghanistan AFG 22720000 45.9 AFG 2332100 \nAngola AGO 12878000 38.3 AGO 2561600 \nAnguilla AIA 8000 76.1 AIA 1556 \nAlbania ALB 3401200 71.6 ALB 270000 \n... ... ... ... ... ... \nYemen YEM 18112000 59.8 YEM 1743700 \nYugoslavia YUG 10640000 72.4 YUG 2189507 \nSouth Africa ZAF 40377000 51.1 ZAF 15196370 \nZambia ZMB 9169000 37.2 ZMB 2473500 \nZimbabwe ZWE 11669000 37.8 ZWE 2730420 \n\n UrbanPopulation \nName \nAruba 0.281883 \nAfghanistan 0.102645 \nAngola 0.198913 \nAnguilla 0.194500 \nAlbania 0.079384 \n... ... \nYemen 0.096273 \nYugoslavia 0.205781 \nSouth Africa 0.376362 \nZambia 0.269768 \nZimbabwe 0.233989 \n\n[233 rows x 6 columns]\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a24ec482d94876c49e449979848c8c2f8e59d7f
| 2,366 |
ipynb
|
Jupyter Notebook
|
NoteBooks/Curso de Algebra lineal con Python/Apuntes/Untitled.ipynb
|
Alejandro-sin/Learning_Notebooks
|
161d6bed4c7b1d171b45f61c0cc6fa91e9894aad
|
[
"MIT"
] | 1 |
2021-02-26T13:12:22.000Z
|
2021-02-26T13:12:22.000Z
|
NoteBooks/Curso de Algebra lineal con Python/Apuntes/Untitled.ipynb
|
Alejandro-sin/Learning_Notebooks
|
161d6bed4c7b1d171b45f61c0cc6fa91e9894aad
|
[
"MIT"
] | null | null | null |
NoteBooks/Curso de Algebra lineal con Python/Apuntes/Untitled.ipynb
|
Alejandro-sin/Learning_Notebooks
|
161d6bed4c7b1d171b45f61c0cc6fa91e9894aad
|
[
"MIT"
] | null | null | null | 24.142857 | 129 | 0.475909 |
[
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\n#Tipos especiales de matrices: Identidad, Inversa, Singulares\n# Identidad\n\nidentidad = np.eye(4)\n#Me devuelve TODO flotante, los 1 y 0. ¿Porqué? Es el leemento neutro dentro de las matrices, es el 1 para los números.\n#La identidad no transforma el espacio.\n\n\nprint(identidad)\n\n#Transformación lienal, es tener un vector que transfomro cuando aplico o multiplico una matriz.\n\n# Inversa\n# A* X = AI\nA = np.array([[-3, 1], [-2, 1]])\ninversaA = np.linalg.inv(A)\nprint(f'Soy una matriz inversa \\n {inversaA}')\n\n#Identidad\n\nI = np.array([[1,0,1],[0,1,1],[-1,1,1]])\n\n\n#Matriz singular cuanod no existe la identidad.\nprint(f'Soy una matriz singular \\n {A.dot(inversaA)}')\n\n\n# Singular\n",
"[[1. 0. 0. 0.]\n [0. 1. 0. 0.]\n [0. 0. 1. 0.]\n [0. 0. 0. 1.]]\nSoy una matriz inversa \n [[-1. 1.]\n [-2. 3.]]\nSoy una matriz identidad \n [[1.00000000e+00 0.00000000e+00]\n [2.22044605e-16 1.00000000e+00]]\n"
],
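[
"# A quick sketch of why the inverse matters: it lets us solve linear systems A x = b.\n# The vector b below is just an arbitrary example value used for illustration.\nimport numpy as np\n\nA = np.array([[-3, 1], [-2, 1]])\nb = np.array([1, 2])\n\n# x = A^-1 b, computed in two equivalent ways\nx_via_inversa = np.linalg.inv(A).dot(b)\nx_via_solve = np.linalg.solve(A, b)  # preferred in practice: avoids building the inverse explicitly\n\nprint(x_via_inversa)\nprint(x_via_solve)\nprint(np.allclose(A.dot(x_via_solve), b))  # True: the solution satisfies A x = b",
"_____no_output_____"
],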
[
"# Lo importante es entender que:\n# Puedo calcular mis incognitas con la propiedad de \n# Ax= b\n# x = b* A^-1\n\n\n\n#",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code"
]
] |
4a24f7e91b49f2ffad3dc175b7e50fc872c7be9a
| 8,109 |
ipynb
|
Jupyter Notebook
|
babilim/model/layers/roi_ops.ipynb
|
penguinmenac3/babilim
|
d3b1dd7c38a9de8f1e553cc5c0b2dfa62fe25c27
|
[
"MIT"
] | 1 |
2020-05-04T15:20:55.000Z
|
2020-05-04T15:20:55.000Z
|
babilim/model/layers/roi_ops.ipynb
|
penguinmenac3/babilim
|
d3b1dd7c38a9de8f1e553cc5c0b2dfa62fe25c27
|
[
"MIT"
] | 1 |
2019-11-28T09:03:20.000Z
|
2019-11-28T09:03:20.000Z
|
babilim/model/layers/roi_ops.ipynb
|
penguinmenac3/babilim
|
d3b1dd7c38a9de8f1e553cc5c0b2dfa62fe25c27
|
[
"MIT"
] | 1 |
2019-11-28T08:30:13.000Z
|
2019-11-28T08:30:13.000Z
| 34.360169 | 136 | 0.539031 |
[
[
[
"#convert",
"_____no_output_____"
]
],
[
[
"# babilim.model.layers.roi_ops\n\n> Operations for region of interest extraction.",
"_____no_output_____"
]
],
[
[
"#export\nfrom babilim.core.annotations import RunOnlyOnce\nfrom babilim.core.module_native import ModuleNative",
"_____no_output_____"
],
[
"#export\ndef _convert_boxes_to_roi_format(boxes):\n \"\"\"\n Convert rois into the torchvision format.\n\n :param boxes: The roi boxes as a native tensor[B, K, 4].\n :return: The roi boxes in the format that roi pooling and roi align in torchvision require. Native tensor[B*K, 5].\n \"\"\"\n import torch\n concat_boxes = boxes.view((-1, 4))\n ids = torch.full_like(boxes[:, :, :1], 0)\n for i in range(boxes.shape[0]):\n ids[i, :, :] = i\n ids = ids.view((-1, 1))\n rois = torch.cat([ids, concat_boxes], dim=1)\n return rois",
"_____no_output_____"
],
[
"#export\nclass RoiPool(ModuleNative):\n def __init__(self, output_size, spatial_scale=1.0):\n \"\"\"\n Performs Region of Interest (RoI) Pool operator described in Fast R-CNN.\n\n Creates a callable object, when calling you can use these Arguments:\n * **features**: (Tensor[N, C, H, W]) input tensor\n * **rois**: (Tensor[N, K, 4]) the box coordinates in (x1, y1, x2, y2) format where the regions will be taken from.\n * **return**: (Tensor[N, K, C, output_size[0], output_size[1]]) The feature maps crops corresponding to the input rois.\n \n Parameters to RoiPool constructor:\n :param output_size: (Tuple[int, int]) the size of the output after the cropping is performed, as (height, width)\n :param spatial_scale: (float) a scaling factor that maps the input coordinates to the box coordinates. Default: 1.0\n \"\"\"\n super().__init__()\n self.output_size = output_size\n self.spatial_scale = spatial_scale\n \n @RunOnlyOnce\n def _build_pytorch(self, features, rois):\n pass\n \n def _call_pytorch(self, features, rois):\n from torchvision.ops import roi_pool as _roi_pool\n torchvision_rois = _convert_boxes_to_roi_format(rois)\n\n result = _roi_pool(features, torchvision_rois, self.output_size, self.spatial_scale)\n\n # Fix output shape\n N, C, _, _ = features.shape\n result = result.view((N, -1, C, self.output_size[0], self.output_size[1]))\n return result\n\n @RunOnlyOnce\n def _build_tf(self, features, rois):\n # TODO implement\n raise NotImplementedError()\n \n def _call_tf(self, features, rois):\n # TODO implement\n raise NotImplementedError()",
"_____no_output_____"
],
[
"from babilim.core.tensor import Tensor\nimport numpy as np\n\nroi = RoiPool(output_size=(7, 4))\ntensor = Tensor(data=np.zeros((2,3,24,24), dtype=np.float32), trainable=False)\nrois = Tensor(data=np.array([[[0,0,12,12],[4,7,6,23]], [[0,0,12,12], [4,7,6,23]]], dtype=np.float32), trainable=False)\n\nprint(rois.shape)\nprint(tensor.shape)\nresult = roi(tensor, rois)\nprint(result.shape)",
"(2, 2, 4)\n(2, 3, 24, 24)\n(2, 2, 3, 7, 4)\n"
],
[
"#export\nclass RoiAlign(ModuleNative):\n def __init__(self, output_size, spatial_scale=1.0):\n \"\"\"\n Performs Region of Interest (RoI) Align operator described in Mask R-CNN.\n\n Creates a callable object, when calling you can use these Arguments:\n * **features**: (Tensor[N, C, H, W]) input tensor\n * **rois**: (Tensor[N, K, 4]) the box coordinates in (x1, y1, x2, y2) format where the regions will be taken from.\n * **return**: (Tensor[N, K, C, output_size[0], output_size[1]]) The feature maps crops corresponding to the input rois.\n \n Parameters to RoiAlign constructor:\n :param output_size: (Tuple[int, int]) the size of the output after the cropping is performed, as (height, width)\n :param spatial_scale: (float) a scaling factor that maps the input coordinates to the box coordinates. Default: 1.0\n \"\"\"\n super().__init__()\n self.output_size = output_size\n self.spatial_scale = spatial_scale\n \n @RunOnlyOnce\n def _build_pytorch(self, features, rois):\n pass\n \n def _call_pytorch(self, features, rois):\n from torchvision.ops import roi_align as _roi_align\n torchvision_rois = _convert_boxes_to_roi_format(rois)\n\n # :param aligned: (bool) If False, use the legacy implementation.\n # If True, pixel shift it by -0.5 for align more perfectly about two neighboring pixel indices.\n # This version in Detectron2\n result = _roi_align(features, torchvision_rois, self.output_size, self.spatial_scale, aligned=True)\n\n # Fix output shape\n N, C, _, _ = features.shape\n result = result.view((N, -1, C, self.output_size[0], self.output_size[1]))\n return result\n\n @RunOnlyOnce\n def _build_tf(self, features, rois):\n # TODO implement\n raise NotImplementedError()\n \n def _call_tf(self, features, rois):\n # TODO implement\n raise NotImplementedError()",
"_____no_output_____"
],
[
"from babilim.core.tensor import Tensor\nimport numpy as np\n\nroi = RoiAlign(output_size=(7, 4))\ntensor = Tensor(data=np.zeros((2,3,24,24), dtype=np.float32), trainable=False)\nrois = Tensor(data=np.array([[[0,0,12,12],[4,7,6,23]], [[0,0,12,12], [4,7,6,23]]], dtype=np.float32), trainable=False)\n\nprint(rois.shape)\nprint(tensor.shape)\nresult = roi(tensor, rois)\nprint(result.shape)",
"(2, 2, 4)\n(2, 3, 24, 24)\n(2, 2, 3, 7, 4)\n"
]
]
] |
[
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a250d8e0a2e0aa562b79055d6ae773456cc469e
| 164,649 |
ipynb
|
Jupyter Notebook
|
week_5/LSTM.ipynb
|
Zork777/ts_summer
|
f29e4a24a4df14d9a8af6d12eef400e8ddcb8cd2
|
[
"MIT"
] | 2 |
2021-05-17T09:45:16.000Z
|
2021-08-11T11:58:09.000Z
|
week_5/LSTM.ipynb
|
Zork777/ts_summer
|
f29e4a24a4df14d9a8af6d12eef400e8ddcb8cd2
|
[
"MIT"
] | null | null | null |
week_5/LSTM.ipynb
|
Zork777/ts_summer
|
f29e4a24a4df14d9a8af6d12eef400e8ddcb8cd2
|
[
"MIT"
] | 2 |
2021-05-24T18:50:59.000Z
|
2021-05-30T19:30:56.000Z
| 115.868403 | 64,248 | 0.837806 |
[
[
[
"%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"import sys\nimport pathlib\n\nsys.path.append(str(pathlib.Path().cwd().parent))",
"_____no_output_____"
],
[
"from typing import Tuple\n\nfrom load_dataset import Dataset\nfrom plotting import plot_ts\n\ndataset = Dataset('../data/dataset/')",
"_____no_output_____"
]
],
[
[
"### В чем заключаются недостатки полносвязных сетей?\n* невозможность улавливать временные закономерности в контексте предыдущих точек (архитектурное ограничение)\n* фиксированный размер входных данных\n* фиксированный размер выходных данных",
"_____no_output_____"
],
[
"### Область применимости рекуретных сетей для задачи анализа временных рядов\n* большое количество экзогенных признаков, имеющих сложную нелинейную зависимость с целевым рядом\n* очень сложная временная структура имеющая наложение разных сезонных и цикличных паттернов\n* ряды с часто меняющимся паттерном, или большим количеством аномалий\n* когда есть необходимость в нефиксированной длине входных и выходных данных (например многомерные ряды, где для разных компонент хочется предоставить разное количество лагов) ",
"_____no_output_____"
],
[
"### Особенности подготовки данных - необходима нормализация данных, иначе сеть будет плохо сходиться и медленно обучаться.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nfrom sklearn.preprocessing import MinMaxScaler",
"_____no_output_____"
],
[
"data = np.array(range(0, 100, 10)).reshape(-1, 1)",
"_____no_output_____"
],
[
"scaler = MinMaxScaler((0, 1))",
"_____no_output_____"
],
[
"scaler.fit(data)",
"_____no_output_____"
],
[
"transformed = scaler.transform(data)",
"_____no_output_____"
],
[
"transformed",
"_____no_output_____"
],
[
"inverse = scaler.inverse_transform(transformed)",
"_____no_output_____"
],
[
"inverse",
"_____no_output_____"
]
],
[
[
"### Особенность подготвки данных - обработка последовательностей разной длины.",
"_____no_output_____"
]
],
[
[
"from keras.preprocessing.sequence import pad_sequences",
"_____no_output_____"
],
[
"sequences = [\n [1, 2, 3, 4],\n [3, 4, 5],\n [5, 6],\n [3]\n]",
"_____no_output_____"
],
[
"pad_sequences(sequences, padding='pre')",
"_____no_output_____"
],
[
"pad_sequences(sequences, padding='post')",
"_____no_output_____"
],
[
"pad_sequences(sequences, maxlen=2)",
"_____no_output_____"
],
[
"pad_sequences(sequences, maxlen=2, truncating='post')",
"_____no_output_____"
]
],
[
[
"### Какие архитектуры lstm нас интересуют в контексте временных рядов?\n* one-to-one - предсказание следующей точки по предыдущей - нет\n* one-to-many - предсказание следующих N точeк про предыдущей - нет\n* many-to-one - one-step-ahead предсказание - в некоторой степени\n* many-to-many - предсказание вектора из следующих m точек по предыдущим n точкам - наибольший интерес",
"_____no_output_____"
],
[
"### Простая LSTM сеть",
"_____no_output_____"
]
],
[
[
"from keras.models import Sequential\nfrom keras.layers import LSTM, Dense",
"_____no_output_____"
],
[
"ts = dataset['daily-min-temperatures.csv']",
"_____no_output_____"
],
[
"ts.plot(figsize=(15, 5))",
"_____no_output_____"
],
[
"def transform_into_matrix(ts: pd.Series, num_lags: int) -> Tuple[np.array]:\n \"\"\"\n Transforms time series into lags matrix to allow\n applying supervised learning algorithms\n\n Parameters\n ------------\n ts\n Time series to transform\n num_lags\n Number of lags to use\n\n Returns\n --------\n train, test: np.arrays of shapes (ts-num_lags, num_lags), (num_lags,)\n \"\"\"\n\n ts_values = ts.values\n data = {}\n for i in range(num_lags + 1):\n data[f'lag_{num_lags - i}'] = np.roll(ts_values, -i)\n\n lags_matrix = pd.DataFrame(data)[:-num_lags]\n lags_matrix.index = ts.index[num_lags:]\n\n return lags_matrix.drop('lag_0', axis=1).values, lags_matrix['lag_0'].values",
"_____no_output_____"
],
[
"NUM_LAGS = 14\nX, y = transform_into_matrix(ts, NUM_LAGS)",
"_____no_output_____"
],
[
"X[0]",
"_____no_output_____"
],
[
"X = X.reshape((X.shape[0], X.shape[1], 1))",
"_____no_output_____"
],
[
"X[0]",
"_____no_output_____"
],
[
"split_idx = int(len(X)*0.8)\nX_train, X_test = X[:split_idx], X[split_idx:]\ny_train, y_test = y[:split_idx], y[split_idx:]",
"_____no_output_____"
],
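[
"# Earlier in this notebook it was noted that the data should be normalized before training an LSTM.\n# This cell is only a sketch of how that could look with MinMaxScaler; the *_scaled variables\n# are not used by the following cells, which keep the original (unscaled) values.\nfrom sklearn.preprocessing import MinMaxScaler\n\nscaler = MinMaxScaler((0, 1))\n# fit on the training part only, so no information leaks in from the test period\nX_train_scaled = scaler.fit_transform(X_train.reshape(-1, 1)).reshape(X_train.shape)\nX_test_scaled = scaler.transform(X_test.reshape(-1, 1)).reshape(X_test.shape)\ny_train_scaled = scaler.transform(y_train.reshape(-1, 1)).flatten()\ny_test_scaled = scaler.transform(y_test.reshape(-1, 1)).flatten()\n\nprint(X_train_scaled.min(), X_train_scaled.max())",
"_____no_output_____"
],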
[
"model = Sequential()\nmodel.add(LSTM(50, activation='relu', input_shape=(NUM_LAGS, 1)))\nmodel.add(Dense(1))\nmodel.compile(optimizer='adam', loss='mse')",
"_____no_output_____"
],
[
"model.fit(X, y, epochs=100)",
"Epoch 1/100\n114/114 [==============================] - 1s 4ms/step - loss: 24.1341\nEpoch 2/100\n114/114 [==============================] - 0s 3ms/step - loss: 6.5200\nEpoch 3/100\n114/114 [==============================] - 0s 4ms/step - loss: 6.1608\nEpoch 4/100\n114/114 [==============================] - 0s 3ms/step - loss: 6.1327\nEpoch 5/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.9045\nEpoch 6/100\n114/114 [==============================] - 0s 4ms/step - loss: 6.1083\nEpoch 7/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.8903\nEpoch 8/100\n114/114 [==============================] - 0s 4ms/step - loss: 6.3242\nEpoch 9/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.8450\nEpoch 10/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.6624\nEpoch 11/100\n114/114 [==============================] - 0s 4ms/step - loss: 6.0762\nEpoch 12/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.9472\nEpoch 13/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.7521\nEpoch 14/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.6699\nEpoch 15/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.7272\nEpoch 16/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.6247\nEpoch 17/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.6212\nEpoch 18/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.7601\nEpoch 19/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.8613\nEpoch 20/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.7870\nEpoch 21/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.7498\nEpoch 22/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.8784\nEpoch 23/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.6167\nEpoch 24/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.9270\nEpoch 25/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.8153\nEpoch 26/100\n114/114 [==============================] - 0s 4ms/step - loss: 6.0287\nEpoch 27/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.7828\nEpoch 28/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.8370\nEpoch 29/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.7299\nEpoch 30/100\n114/114 [==============================] - 0s 4ms/step - loss: 6.0772\nEpoch 31/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.7117\nEpoch 32/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.7788\nEpoch 33/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.7784\nEpoch 34/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.7097\nEpoch 35/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.8638\nEpoch 36/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.4091\nEpoch 37/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.5479\nEpoch 38/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.6330\nEpoch 39/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.4289\nEpoch 40/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.9727\nEpoch 41/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.8328\nEpoch 42/100\n114/114 [==============================] - 0s 3ms/step - loss: 
5.7891\nEpoch 43/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.7956\nEpoch 44/100\n114/114 [==============================] - 0s 3ms/step - loss: 6.0376\nEpoch 45/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.7052\nEpoch 46/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.5995\nEpoch 47/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.5645\nEpoch 48/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.6895\nEpoch 49/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.5933\nEpoch 50/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.5712\nEpoch 51/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.5652\nEpoch 52/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.7904\nEpoch 53/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.5501\nEpoch 54/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.7540\nEpoch 55/100\n114/114 [==============================] - 1s 5ms/step - loss: 5.4531\nEpoch 56/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.5386\nEpoch 57/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.5832\nEpoch 58/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.6248\nEpoch 59/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.6139\nEpoch 60/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.4946\nEpoch 61/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.5391\nEpoch 62/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.5484\nEpoch 63/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.5955\nEpoch 64/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.6783\nEpoch 65/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.5206\nEpoch 66/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.8166\nEpoch 67/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.5564\nEpoch 68/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.4683\nEpoch 69/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.4989\nEpoch 70/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.5978\nEpoch 71/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.4930\nEpoch 72/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.2280\nEpoch 73/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.5236\nEpoch 74/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.6388\nEpoch 75/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.5944\nEpoch 76/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.6239\nEpoch 77/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.5954\nEpoch 78/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.5532\nEpoch 79/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.5001\nEpoch 80/100\n114/114 [==============================] - 1s 6ms/step - loss: 5.1741\nEpoch 81/100\n114/114 [==============================] - 1s 8ms/step - loss: 5.4916\nEpoch 82/100\n114/114 [==============================] - 1s 6ms/step - loss: 5.4639\nEpoch 83/100\n114/114 [==============================] - 1s 5ms/step - loss: 5.3415\nEpoch 84/100\n114/114 [==============================] - 1s 
5ms/step - loss: 5.3598\nEpoch 85/100\n114/114 [==============================] - 1s 5ms/step - loss: 5.3223\nEpoch 86/100\n114/114 [==============================] - 1s 5ms/step - loss: 5.2536\nEpoch 87/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.3643\nEpoch 88/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.3779\nEpoch 89/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.5052\nEpoch 90/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.4546\nEpoch 91/100\n114/114 [==============================] - 0s 4ms/step - loss: 5.3349\nEpoch 92/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.3708\nEpoch 93/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.3885\nEpoch 94/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.2938\nEpoch 95/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.3233\nEpoch 96/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.3413\nEpoch 97/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.2886\nEpoch 98/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.3147\nEpoch 99/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.2812\nEpoch 100/100\n114/114 [==============================] - 0s 3ms/step - loss: 5.2517\n"
],
[
"y_pred = model.predict(X_test)",
"_____no_output_____"
],
[
"pd.Series(y_test.flatten())[-50:].plot()\npd.Series(y_pred.flatten())[-50:].plot()",
"_____no_output_____"
],
[
"### данный результат на самом деле не сильно лучше наивного предсказания",
"_____no_output_____"
],
[
"from sklearn.metrics import mean_squared_error as mse",
"_____no_output_____"
],
[
"mse(y_test.flatten(), y_pred.flatten())",
"_____no_output_____"
]
],
[
[
"### Stacked LSTM",
"_____no_output_____"
],
[
"#### Добавьте дополнительные скрытые слои в сеть (используйте return_sequences=True) и сравните качество",
"_____no_output_____"
]
],
[
[
"model = Sequential()\n# your code here\nmodel.compile(optimizer='adam', loss='mse')",
"_____no_output_____"
],
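[
"# One possible solution sketch for the stacked LSTM exercise above, kept in a separate\n# variable so the exercise cell stays untouched. Every LSTM layer except the last one\n# needs return_sequences=True so that the next LSTM layer receives a full sequence.\nfrom keras.models import Sequential\nfrom keras.layers import LSTM, Dense\n\nstacked_model = Sequential()\nstacked_model.add(LSTM(50, activation='relu', return_sequences=True, input_shape=(NUM_LAGS, 1)))\nstacked_model.add(LSTM(25, activation='relu'))\nstacked_model.add(Dense(1))\nstacked_model.compile(optimizer='adam', loss='mse')",
"_____no_output_____"
],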
[
"model.fit(X_train, y_train, epochs=100, verbose=0)",
"_____no_output_____"
],
[
"y_pred = model.predict(X_test)",
"_____no_output_____"
],
[
"pd.Series(y_test.flatten())[-50:].plot()\npd.Series(y_pred.flatten())[-50:].plot()",
"_____no_output_____"
]
],
[
[
"### Bidirectional LSTM",
"_____no_output_____"
],
[
"#### Сделаем LSTM слой сети Bidirectional при помощи доп слоя Biderectional и сравним качество",
"_____no_output_____"
]
],
[
[
"from keras.layers import Bidirectional\n\nmodel = Sequential()\n# your code here\nmodel.compile(optimizer='adam', loss='mse')",
"_____no_output_____"
],
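[
"# One possible solution sketch for the Bidirectional exercise, again in a separate variable.\n# Bidirectional wraps the LSTM so that the input window is also processed in reverse order;\n# whether this helps for forecasting depends on the series, so treat it as an illustration.\nfrom keras.models import Sequential\nfrom keras.layers import LSTM, Dense, Bidirectional\n\nbi_model = Sequential()\nbi_model.add(Bidirectional(LSTM(50, activation='relu'), input_shape=(NUM_LAGS, 1)))\nbi_model.add(Dense(1))\nbi_model.compile(optimizer='adam', loss='mse')",
"_____no_output_____"
],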
[
"model.fit(X_train, y_train, epochs=10, verbose=0)",
"_____no_output_____"
],
[
"y_pred = model.predict(X_test)",
"_____no_output_____"
]
],
[
[
"### Seq2Seq LSTM - когда нужно сделать предсказание на несколько точек вперед",
"_____no_output_____"
],
[
"#### Подготовим данные",
"_____no_output_____"
]
],
[
[
"from typing import Tuple\n\ndef transform_ts_into_matrix(ts: pd.Series, num_lags_in: int, num_lags_out: int) -> Tuple[np.array, np.array]:\n \"\"\"\n Данная функция должна пройтись скользящим окном по временному ряду и для каждых\n num_lags_in точек в качестве признаков собрать num_lags_out следующих точек в качестве таргета.\n \n Вернуть два np.array массива из X_train и y_train соответственно\n \"\"\"\n sequence = ts.values\n X, y = list(), list()\n i = 0\n outer_idx = num_lags_out\n while outer_idx < len(sequence):\n inner_idx = i + num_lags_in\n outer_idx = inner_idx + num_lags_out\n X_, y_ = sequence[i:inner_idx], sequence[inner_idx:outer_idx]\n X.append(X_)\n y.append(y_)\n i += 1\n return np.array(X), np.array(y)",
"_____no_output_____"
],
[
"# получим X и y при помощи предыдущей функции и разбейте на трейн и тест \nNUM_LAGS_IN = 28\nNUM_LAGS_OUT = 7\nX, y = transform_ts_into_matrix(ts, NUM_LAGS_IN, NUM_LAGS_OUT)\nX = X.reshape((X.shape[0], X.shape[1], 1))\n\nsplit_idx = int(len(X)*0.8)\nX_train, X_test = X[:split_idx], X[split_idx:]\ny_train, y_test = y[:split_idx], y[split_idx:]",
"_____no_output_____"
],
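[
"# Quick sanity check of the shapes produced by transform_ts_into_matrix:\n# X_train should be (n_samples, NUM_LAGS_IN, 1) and y_train (n_samples, NUM_LAGS_OUT)\nprint(X_train.shape, y_train.shape)\nprint(X_test.shape, y_test.shape)",
"_____no_output_____"
],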
[
"# объявим енкодер\nmodel = Sequential()\nmodel.add(LSTM(100, activation='relu', input_shape=(NUM_LAGS_IN, 1)))",
"_____no_output_____"
],
[
"# добавим промежуточный слой, преобразующий выход с енкодера для входного слоя в декодер\nfrom keras.layers import RepeatVector\nmodel.add(RepeatVector(NUM_LAGS_OUT))",
"_____no_output_____"
],
[
"# обьявим декодер\nmodel.add(LSTM(50, activation='relu', return_sequences=True))",
"_____no_output_____"
],
[
"# обьявим выходной слой - размерность на выходе получается при помощи дополнительного слоя TimeDistributed\nfrom keras.layers import TimeDistributed\nmodel.add(TimeDistributed(Dense(1)))",
"_____no_output_____"
]
],
[
[
"#### Обучим модель и получим предсказание на тесте",
"_____no_output_____"
]
],
[
[
"model.compile(optimizer='adam', loss='mse')",
"_____no_output_____"
],
[
"model.fit(X_train, y_train, epochs=10, verbose=0)",
"_____no_output_____"
],
[
"y_pred = model.predict(X_test)",
"_____no_output_____"
]
],
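[
[
"# Sketch: inspect one multi-step forecast against the actual values and compute the overall test MSE\n# (y_test has shape (n_samples, NUM_LAGS_OUT), y_pred has shape (n_samples, NUM_LAGS_OUT, 1))\ni = 0\npd.Series(y_test[i].flatten()).plot()\npd.Series(y_pred[i].flatten()).plot()\nprint('seq2seq test MSE:', mse(y_test.flatten(), y_pred.flatten()))",
"_____no_output_____"
]
],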
[
[
"### Пример с многомерным рядом.",
"_____no_output_____"
]
],
[
[
"ts_multi = pd.read_csv('../data/stability_index.csv', index_col='timestamp', parse_dates=True)",
"_____no_output_____"
],
[
"ts_multi.fillna(ts_multi.mean(), axis=0, inplace=True)",
"_____no_output_____"
],
[
"def transform_multi_ts_into_matrix(ts: pd.DataFrame, num_lags: int):\n \"\"\"\n Данная функция должна пройтись скользящим окном по временному ряду\n и собрать в качестве признаков X np.array размерности (len(ts)-num_lags, n_dims, num_lags),\n а в качестве y np.array размерности (len(ts)-num_lags, n_dims),\n где n_dims - размерность многомерного ряда.\n \n То есть для всех компонент временного ряда мы должны взять num_lags предыдущих точек каждой компонент\n в качестве признаков и все компоненты текущей точки в качестве target\n \"\"\"\n sequence = ts.values\n X, y = list(), list()\n i = 0\n end_i = num_lags\n while end_i < len(sequence): \n seq_x, seq_y = sequence[i:end_i], sequence[end_i]\n X.append(seq_x)\n y.append(seq_y)\n i += 1\n end_i = i + num_lags\n return np.array(X), np.array(y)",
"_____no_output_____"
],
[
"NUM_LAGS = 14\nN_DIMS = ts_multi.shape[1]\nX, y = transform_multi_ts_into_matrix(ts_multi, NUM_LAGS)",
"_____no_output_____"
],
[
"X[0].shape",
"_____no_output_____"
],
[
"# объявим енкодер\nmodel = Sequential()\nmodel.add(LSTM(100, activation='relu', input_shape=(NUM_LAGS, N_DIMS)))",
"_____no_output_____"
],
[
"# добавим промежуточный слой, преобразующий выход с енкодера для входного слоя в декодер\nfrom keras.layers import RepeatVector\nmodel.add(RepeatVector(N_DIMS))",
"_____no_output_____"
],
[
"# обьявим декодер\nmodel.add(LSTM(50, activation='relu', return_sequences=True))",
"_____no_output_____"
],
[
"# обьявим выходной слой - размерность на выходе получается при помощи дополнительного слоя TimeDistributed\nfrom keras.layers import TimeDistributed\nmodel.add(TimeDistributed(Dense(1)))",
"_____no_output_____"
],
[
"model.compile(optimizer='adam', loss='mse')",
"_____no_output_____"
],
[
"model.fit(X, y, epochs=50)",
"Epoch 1/50\n130/130 [==============================] - 1s 9ms/step - loss: 4337.5293\nEpoch 2/50\n130/130 [==============================] - 1s 10ms/step - loss: 3359.5718\nEpoch 3/50\n130/130 [==============================] - 1s 11ms/step - loss: 2346.5215\nEpoch 4/50\n130/130 [==============================] - 1s 11ms/step - loss: 1834.7770\nEpoch 5/50\n130/130 [==============================] - 1s 9ms/step - loss: 1693.2955\nEpoch 6/50\n130/130 [==============================] - 1s 10ms/step - loss: 1496.0602\nEpoch 7/50\n130/130 [==============================] - 1s 10ms/step - loss: 1387.5758\nEpoch 8/50\n130/130 [==============================] - 1s 11ms/step - loss: 1265.3087\nEpoch 9/50\n130/130 [==============================] - 1s 9ms/step - loss: 1505.0157\nEpoch 10/50\n130/130 [==============================] - 1s 9ms/step - loss: 1514.8870\nEpoch 11/50\n130/130 [==============================] - 1s 8ms/step - loss: 1301.9706\nEpoch 12/50\n130/130 [==============================] - 1s 9ms/step - loss: 1278.9486\nEpoch 13/50\n130/130 [==============================] - 1s 9ms/step - loss: 1309.2554\nEpoch 14/50\n130/130 [==============================] - 1s 9ms/step - loss: 1628.4979\nEpoch 15/50\n130/130 [==============================] - 1s 9ms/step - loss: 1819.9342\nEpoch 16/50\n130/130 [==============================] - 1s 9ms/step - loss: 1520.2660\nEpoch 17/50\n130/130 [==============================] - 1s 9ms/step - loss: 1324.4885\nEpoch 18/50\n130/130 [==============================] - 1s 9ms/step - loss: 1299.3295\nEpoch 19/50\n130/130 [==============================] - 1s 9ms/step - loss: 1186.3156\nEpoch 20/50\n130/130 [==============================] - 1s 10ms/step - loss: 1122.8571\nEpoch 21/50\n130/130 [==============================] - 1s 9ms/step - loss: 1125.4316\nEpoch 22/50\n130/130 [==============================] - 1s 10ms/step - loss: 1119.9897\nEpoch 23/50\n130/130 [==============================] - 1s 9ms/step - loss: 1101.6624\nEpoch 24/50\n130/130 [==============================] - 1s 9ms/step - loss: 1097.9153\nEpoch 25/50\n130/130 [==============================] - 1s 9ms/step - loss: 1144.5050\nEpoch 26/50\n130/130 [==============================] - 1s 9ms/step - loss: 1181.8234\nEpoch 27/50\n130/130 [==============================] - 1s 10ms/step - loss: 1165.9486\nEpoch 28/50\n130/130 [==============================] - 1s 10ms/step - loss: 1132.3014\nEpoch 29/50\n130/130 [==============================] - 1s 10ms/step - loss: 1069.6210\nEpoch 30/50\n130/130 [==============================] - 1s 9ms/step - loss: 1028.5364\nEpoch 31/50\n130/130 [==============================] - 1s 9ms/step - loss: 1086.3086\nEpoch 32/50\n130/130 [==============================] - 1s 9ms/step - loss: 1303.5736\nEpoch 33/50\n130/130 [==============================] - 1s 9ms/step - loss: 1373.5681\nEpoch 34/50\n130/130 [==============================] - 1s 9ms/step - loss: 1222.9882\nEpoch 35/50\n130/130 [==============================] - 1s 9ms/step - loss: 1151.4961\nEpoch 36/50\n130/130 [==============================] - 1s 9ms/step - loss: 1116.9482\nEpoch 37/50\n130/130 [==============================] - 1s 11ms/step - loss: 1094.3457\nEpoch 38/50\n130/130 [==============================] - 1s 10ms/step - loss: 1046.0753\nEpoch 39/50\n130/130 [==============================] - 1s 9ms/step - loss: 1030.7870\nEpoch 40/50\n130/130 [==============================] - 1s 9ms/step - loss: 1446.4260\nEpoch 41/50\n130/130 [==============================] - 1s 9ms/step - 
loss: 1158.3619\nEpoch 42/50\n130/130 [==============================] - 1s 10ms/step - loss: 1058.0692\nEpoch 43/50\n130/130 [==============================] - 1s 10ms/step - loss: 1028.4990\nEpoch 44/50\n130/130 [==============================] - 1s 10ms/step - loss: 1020.4298\nEpoch 45/50\n130/130 [==============================] - 1s 10ms/step - loss: 1017.2426\nEpoch 46/50\n130/130 [==============================] - 1s 10ms/step - loss: 995.0058\nEpoch 47/50\n130/130 [==============================] - 1s 10ms/step - loss: 979.2719\nEpoch 48/50\n130/130 [==============================] - 1s 10ms/step - loss: 965.5411\nEpoch 49/50\n130/130 [==============================] - 1s 10ms/step - loss: 982.8457\nEpoch 50/50\n130/130 [==============================] - 1s 10ms/step - loss: 954.3374\n"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a2518a14404b680e9fea822e7b9d2f600514289
| 21,369 |
ipynb
|
Jupyter Notebook
|
docs/milestone2.ipynb
|
auto-differentiaters-in-CST/cs107-FinalProject
|
9d121176f94ba699b5a1db16c9af2fb5aa1637d2
|
[
"MIT"
] | 1 |
2021-01-17T12:38:21.000Z
|
2021-01-17T12:38:21.000Z
|
docs/milestone2.ipynb
|
auto-differentiaters-in-CST/cs107-FinalProject
|
9d121176f94ba699b5a1db16c9af2fb5aa1637d2
|
[
"MIT"
] | null | null | null |
docs/milestone2.ipynb
|
auto-differentiaters-in-CST/cs107-FinalProject
|
9d121176f94ba699b5a1db16c9af2fb5aa1637d2
|
[
"MIT"
] | null | null | null | 51.74092 | 974 | 0.643689 |
[
[
[
"# Milestone2 Document",
"_____no_output_____"
],
[
"## Feedback\n\n- Introduction: A nice introduction! \n\n- Background -0.5: It would be hard for users to understand automatic differentiation, computational graph, and evaluation trace if you don't give the corresponding illustrations in the Background section \n \n **Revision: provided a concrete example of evaluation trace and computational graph**\n\n\n- How to use -0.5: didn't show how the users can get the package from online. Is AutodiffCST the name of a python file or the package? Please give different names to avoid confusion. \n\n **Revision: added instructions for installation, and change the python file name to AD.py**\n\n\n- Implementation: Using a tree as the core data structure sounds new. It would be better if you could explain it with more details.\n\n **Revision: Changed core data structure to AD object, and updated the implementation part accordingly.**",
"_____no_output_____"
],
[
"## Section 1: Introduction\nThis package autodiffCST implements automatic differentiation. It can be used to automatically differentiate functions via forward mode and reverse mode, depending on the user's choice. It also provides an option of performing second order differentiation.\n\nDifferentiation, namely, the process of finding the derivatives of functions, is very prevalent in various areas of science and engineering. It can often be used to find the extrema of functions with single or multiple variables. With the advance of technology, more complicated functions and larger dataset are developed. The difficulty of performing differentiation has greatly increased and we are more dependent on computers to take derivates. Nowadays, we have three major ways of performing differentiation: symbolic, numerical and automatic (algorithmic) differentiation. We will focus on automatic differentiation for the rest of this document.\n\n",
"_____no_output_____"
],
[
"## Section 2: Background\n### 2.1 An Overview of Auto Differentiation\nAutomatic differentiation (AD) uses algorithms to efficiently and accurately evaluating derivatives of numeric functions. It has the advantage of avoiding symbolic manipulation of functions while reaching an accuracy close to machine precision. Application of automatic differentiation includes but is not limited to astronomy, dynamic systems, numerical analysis research, optimization in finance and engineering.\n\nThe idea behind AD is to break down a function into a sequence of elementary operations and functions that have easily attained derivatives, and then sequencially apply the chain rule to evaluate the derivatives of these operations to compute the derivative of the whole function.\n\nThe two main methods of performing automatic differentiation are forward mode and reverse mode. Some other AD algorithms implement a combination of forward mode and reverse mode, but this package will implement them seperately. \n\nTo better understand automatic differentiation, it is uncessary to get familar with some key concepts that are used in the algorithms of AD. We will use the rest of this section to briefly introduce them.\n\n### 2.2 Elementary operations and functions\nThe algorithm of automatic differentiation breaks down functions into elementary arithmetic operations and elementary functions. Elementary arithmetic operations include addition, subtraction, multiplication, division and raising power (we can also consider taking roots of a number as raising it to powers less than $1$). Elementary functions include exponential, logrithmatic, and trigonometry. All of these operations and functions mentioned here have derivates that are easy to compute, so we use them as elementary steps in the evaluation trace of AD.\n\n### 2.3 The Chain Rule\nThe chain rule can be used to calculate the derivate of nested functions, such in the form of $u(v(t))$. For this function, the derivative of $u$ with respect to $t$ is $$\\dfrac{\\partial u}{\\partial t} = \\dfrac{\\partial u}{\\partial v}\\dfrac{\\partial v}{\\partial t}.$$\n\nA more general form of chain rule applies when a function $h$ has several arguments, or when its argument is a vector. Suppose we have $h = h(y(t))$ where $y \\in R^n$ and $t \\in R^m $. Here, $h$ is the combination of $n$ functions, each of which has $m$ variables. Using the chain rule, the derivative of $h$ with respect to $t$, now called the gradient of $h$, is\n\n $$ \\nabla_{t}h = \\sum_{i=1}^{n}{\\frac{\\partial h}{\\partial y_{i}}\\nabla y_{i}\\left(t\\right)}.$$\n\nThe chain rule enables us to break down complicated and nested functions into layers and operations. Our automatic differentiation algrithm sequencially sues chain rule to compute the derivative of funtions. \n\n### 2.4 Evaluation Trace and Computational Graph\n\nThese two concepts are the core of our automatic differentiation algorithm. Since they are so important and can be created at the same time, creating them would be the first thing to do when a function is inputted into the algorithm.\n\nThe evaluation trace tracks each layer of operations while evaluate the input function and its derivative. At each step the evaluation trace holds the traces, elementary operations, numerical values, elementary derivatives and partial derivatives. \n\nThe computational graph is a graphical visualization of the evaluation trace. 
It holds the traces and elementary operations of the steps, connecting them via arrows pointing from input to output for each step. The computational graph helps us to better understand the structure of the function and its evaluation trace. Forward mode performs the operations from the start to the end of the graph or evaluation trace. Reverse mode performs the operations backwards, while applying the chain rule at each time determining the derivate of the trace.\n\nHere, we provide an example of a evaluation trace and a computational graph of the function $f(x,y)=exp(−(sin(x)−cos(y))^2)$, with derivatives evaluated at $f(π/2,π/3)$.\n",
"_____no_output_____"
],
[
"Evaluation trace:\n\n|Trace|Elementary Function| Current Value |Elementary Function Derivative| $\\nabla_x$ | $\\nabla_y$ |\n| :---: | :-----------: | :-------: | :-------------: | :----------: | :-----------: |\n| $x_{1}$ | $x_{1}$ | $\\frac{\\pi}{2}$ | $\\dot{x}_{1}$ | $1$ | $0$ |\n| $y_{1}$ | $y_{1}$ | $\\frac{\\pi}{3}$ | $\\dot{y}_{1}$ | $0$ | $1$ |\n| $v_{1}$ | $sin(x_{1})$ | $1$ | $cos(x_{1})\\dot{x}_{1}$ | $0$ | $0$ |\n| $v_{2}$ | $cos(y_{1})$ | $0.5$ | $-sin(y_{1})\\dot{y}_{1}$| $0$ | $-0.866$ |\n| $v_{3}$ | $v_{1}-v_{2}$ | $0.5$ | $\\dot{v}_{1}-\\dot{v}_{2}$| $0$ | $0.866$ |\n| $v_{4}$ | $v_{3}^2$ | $0.25$ | $2v_{3}\\dot{v}_{3}$ | $0$ | $0.866$ |\n| $v_{5}$ | $-v_{4}$ | $-0.25$| $-\\dot{v}_{4}$ | $0$ | $-0.866$ |\n| $v_{6}$ | $exp(v_{5})$ | $0.779$| $exp(v_{5})\\dot{v}_{5}$ | $0$ | $-0.6746$ |\n| $f$ | $v_{6}$ | $0.779$| $\\dot{v}_{6}$ | $0$ | $-0.6746$ |\n\nComputational graph:",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
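[
"As a quick sanity check (not part of the package itself), the partial derivatives in the table above can be verified numerically with central finite differences:\n\n``` python\nimport numpy as np\n\ndef f(x, y):\n    return np.exp(-(np.sin(x) - np.cos(y))**2)\n\nx0, y0, h = np.pi/2, np.pi/3, 1e-6\ndf_dx = (f(x0 + h, y0) - f(x0 - h, y0)) / (2*h)  # approximately 0\ndf_dy = (f(x0, y0 + h) - f(x0, y0 - h)) / (2*h)  # approximately -0.6746\n```",
"_____no_output_____"
],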
[
"## Section 3: How to Use AutodiffCST\n",
"_____no_output_____"
],
[
"**Installation**\n\nOur package is for Python 3 only. To install AutodiffCST, you need to have pip3 installed first. If you don't, please install pip3 following these instructions https://pip.pypa.io/en/stable/installing/.\n\nThen, you could install this package by running \n```pip3 install AutodiffCST``` from the command line. \n\nAn alternative is to clone our repository by running ```git clone https://github.com/auto-differentiaters-in-CST/cs107-FinalProject.git``` from the command line and then ```cd <AD directory>```(directory name will be determined later), ```pip install -r requirements.txt```.",
"_____no_output_____"
],
[
"**User Guide**\n\nAfter installation, users could import this package by ```from AutodiffCST import AD``` and ```from autodiffcst import admath```. These two packages would allow the users to do differentiation on functions with most mathematic operations.\n\nThen, they could simply initiate the AD object by giving the point where they wish to differentiate. Moreover, they could also try other supplementary features as in the code demo provided below.\n",
"_____no_output_____"
],
[
"``` python\n# import modules\nimport numpy as np\nfrom AutodiffCST import AD as ad\nfrom autodiffcst import admath as admath\n\n# base case: initialize AD object with scalar values\n\nx = ad(5, tag = \"x\") # initialize AD object called \"x\" with the value 5\ny = ad(3, tag = \"y\") # initialize AD object called \"y\" with the value 3\n\nf = x*y + 1 # build a function with AD objects, the function will also be an AD object\nprint(f) # print 9.0\n\ndfdx = f1.diff(direction = \"x\") # returns the derivative with respect to x\nprint(dfdx) # print 3\n \njacobian = ad.jacobian(f1) # returns a gradient vector of f\nprint(jacobian) # print [5,3]\n\nf2 = x + admath.sin(y) # build a function with AD objects\nprint(f2) # print AD(value: 5.141120008059867, derivatives: {'x': 1, 'y': -0.9899924966004454})\n\ndfdy = f2.diff(direction= = \"y\") # returns the derivative with respect to x\nprint(dfdy) # print -0.9899924966004454\n \njacobian2 = ad.jacobian(f2) # returns a gradient vector of f\nprint(jacobian2) # print [1, -0.9899924966004454]\n\n\n# These are the most important features for our forward AD. Would add more later ...\n```",
"_____no_output_____"
],
[
"## Section 4: Software Organization\nThe home directory of our software package would be structured as follows.\n\n- LICENSE\n- README.md\n- requirements.txt\n- docs/\n * quickstart_tutotial.md\n * model_documentation.md\n * testing_guidelines.md\n * concepts_explanation.md\n * references.md\n- setup.py\n- autodiffcst/\n * \\_\\_init\\_\\_.py\n * AD.py\n * admath.py\n\n- tests/\n * test_core.py\n * test_extension.py\n\n- TravisCI.yml\n- CodeCov.yml\n\n\nSpecificly speaking, the README file would contain a general package description and the necessary information for users to navigate in the subdirectories. Besides, we would place our documentation, testing guidelines, a simple tutorial and relative references in the doc directory. Moreover, to package our model with PyPI, we need to include setup.py and a src directory, where stores the source code about our model. Furthermore, we would put a collection of test cases in tests directory. Last but not least, we would include TravisCI.yml and CodeCov.yml in our home directory for integrated test.\n\nIn this package, we plan to use the following public modules. \n\n- Modules for mathmatical calculation:\n * Numpy: we would use it for matrix operations, and basic math functions and values, such as sin, cos, \\pi, e, etc. \n\n- Modules for testing:\n * pydoc\n * doctest \n * Pytest\n\n- Other modules:\n * sys\n * setuptools: we would use is for publishing our model with PyPI. \n \nTo distribute our package, we would use PyPI so that users could easily install the package with *pip install*.\n\nAfter installing the package, users can use ```from AutodiffCST import AD``` and ```from autodiffcst import admath``` to import the package. These two modules are where the core of this package resides:\n\n * AD: defines the AD object class that we use to perform automatic differentiation and overwrites basic math operation dunder methods for AD. Also provides two core functions to perform on AD: diff() and jacobian().\n \n * admath: defines functions that perform elementary math operations on AD, which include those that cannot be performed by overwriting dunder methods, such as logarithm and trigonometry.\n\nTo better organize our software, we plan to use PyScaffold and Sphinx. The former could help us setting up the project while the latter would polish our documentation. \n ",
"_____no_output_____"
],
[
"## Section 5: Implementation",
"_____no_output_____"
],
[
"Our main data structure is the AD object, which has the attributes of a value, a derivative and a tag. In terms of the classes, our main class is the AD object, and we would probably have several heritaged class for our extensions.\n\nIn the AD class, we would have the following methods:\n\n- a constructor\n\n``` python\ndef __init__(self, val, tags, der=1, mode = \"forward\"):\n self.val = val\n if (isinstance(tags, list)) and (isinstance(ders,dict)):\n self.tags = tags\n self.ders = ders\n else:\n self.tags = [tags]\n self.ders = {tags: ders}\n self.mode = mode\n``` \n- overloaded dunder methods as follows:\n\n``` python\n__add__\n__sub__\n__pow__\n__mul__\n__mod__\n__div__\n__iadd__\n``` \n  and more basic operations according to https://www.python-course.eu/python3_magic_methods.php\n\n- a diff method, which takes in a direction, and returns the derivative of the function.\n\n``` python\ndef diff(self, dir = x):\n if isinstance(dir, AD):\n return self.der[dir]\n else:\n return 0\n``` \n\n- a gradient method, which takes in a vector of directions, and returns a vector of the partial derivatives at each direction.\n\n- a jacobian method, which takes in a vector of AD functions and a vector of directions, and returns the jacobian matrix.\n\nIn our implementation, we would use some external dependencies such as Numpy and Math. To deal with elementary functions, we would allow users to enter functions that can be recognized by Python, factor a input function to a series of basic operations/functions (such as sin, sqrt, log, and exp) and use if-statements to check functions and return their symbolic derivatives. These operations are handled in admath.py. The functions in admath takes an AD object as input and performs the corresponding operations on the AD objects by updating their values and derivatives.",
"_____no_output_____"
],
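[
"To illustrate the mechanism (one possible sketch, not the final implementation), an overloaded operator such as `__add__` could combine the derivative dictionaries of two AD objects tag by tag, consistent with the constructor above:\n\n``` python\ndef __add__(self, other):\n    if isinstance(other, AD):\n        new_tags = list(set(self.tags) | set(other.tags))\n        new_ders = {t: self.ders.get(t, 0) + other.ders.get(t, 0) for t in new_tags}\n        return AD(self.val + other.val, new_tags, new_ders)\n    else:  # other is assumed to be a plain number\n        return AD(self.val + other, self.tags, self.ders)\n```",
"_____no_output_____"
],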
[
"# Future Features",
"_____no_output_____"
],
[
"1. Differentiate a list of functions. Our package now can deal with one function with multiple varaibles. In the future we plan to take a list of functions as input and output its Jacobian accordingly. Using Numpy array as the data structure to keep the Jacobian would be ideal, so we will need to change the implementation of our current jacobian method. \n\n\n2. Higher order derivatives. A starting point would be allowing second order derivatives taken on our AD objects and returning the correct Jacobian matrix accordingly. Note that this cannot be achieved by simply applying diff() to an AD object twices, since the Jacobian matrix would be different and the datatype would be different. We would need to store the values of the second derivatives of our AD object at each elementary steps in the evaluation trace. Then we would need another function to return the second derivatives (possibly named second_diff()), which functions similarly to diff(), but returns the second derivatives of the AD object. The jacobian() function will also be modified accordingly. It will include an optional input (possibly initialized as second_order = False for defult and second_order = True for second derivatives), which signals that the function will return the Jacobian containing the second order derivatives of the AD object.\n\nBackup extensions:\n\n3. Backward Mode. Right now our mode for doing automatic differetiation is defaulted to forward mode, because we have not implemented backward mode yet. We would need new functions that use the AD object class to implement backward mode. To keep track of the traces, we need to create a trace table, possibly using Numpy array, in the function that runs backward mode.\n\n\n4. Newton's method. We would like to use our AD package to solve meaningful problems. One way to achieve this is to use it in an implementation of Newton's method. This will be a script that imports our AD package to calculate the derivatives in Newton's method.",
"_____no_output_____"
],
[
"# Building Timeline",
"_____no_output_____"
],
[
"- Nov.4: Finish M2A and M2B\n- Nov.7: Finish basics dunder methods for one variable\n- Nov.14: Finish Test Suite\n- Nov.19: Submit M2",
"_____no_output_____"
]
]
] |
[
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a251903e4e323c6fe7cc3f844cca458fc2eb780
| 353,839 |
ipynb
|
Jupyter Notebook
|
Notebooks/Gene_Drugs_reference_dicts.ipynb
|
mese433/Capstone_JM_MesMiner
|
02310d1192f63c59c6d5580e38a0d15463f0e436
|
[
"MIT"
] | 1 |
2021-04-20T16:27:10.000Z
|
2021-04-20T16:27:10.000Z
|
Notebooks/Gene_Drugs_reference_dicts.ipynb
|
mese433/Capstone_JM_MesMiner
|
02310d1192f63c59c6d5580e38a0d15463f0e436
|
[
"MIT"
] | null | null | null |
Notebooks/Gene_Drugs_reference_dicts.ipynb
|
mese433/Capstone_JM_MesMiner
|
02310d1192f63c59c6d5580e38a0d15463f0e436
|
[
"MIT"
] | null | null | null | 43.884286 | 278 | 0.525767 |
[
[
[
"import pandas as pd\nfrom joblib import dump, load\nimport os\n#set up directory\n#os.chdir()\n",
"_____no_output_____"
],
[
"#Drug dic\n\n#open file\ndf_drugs=pd.read_csv(r\"C:\\Users\\mese4\\Documents\\The Data incubator\\project\\Drugmap\\drugbank vocabulary.csv\", encoding='ISO-8859-1')\n\nsynonyms = []\ndrug_names = df_drugs['Common_name'].tolist()\n\ndrug_names = [item.lower() for item in drug_names]\n\n#get synonims into a list\nfor row in df_drugs['Synonyms']:\n row=str(row).lower()\n words = row.split(' | ')\n synonyms.append(words)\n\n#add names to synonims\nfor x, y in zip(synonyms, drug_names):\n x.append(y)\n \n#make tuple list \ndrug_lists= list(zip(drug_names, synonyms))\n\n#make dict\ndrug_dic = dict(drug_lists)\n\n#remove 'nan'\ndrug_dic = {k:[elem for elem in v if elem != 'nan' ] for k,v in drug_dic.items()}\n\n\n#search engine\nkeys = [key for key, value in drug_dic.items() if 'Cetuximab' in value]\ndrug_dic",
"_____no_output_____"
],
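[
"# Sketch: build a reverse index (synonym -> canonical drug name) so lookups are O(1)\n# instead of scanning every value list as in the search above\ndrug_index = {syn: name for name, syns in drug_dic.items() for syn in syns}\ndrug_index.get('cetuximab')",
"_____no_output_____"
],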
[
"#Save/open\n\ndump(drug_dic, 'drug_dic.joblib') \ndrug_dic = load('drug_dic.joblib') ",
"_____no_output_____"
],
[
"#Gene dic\ndf_genes=pd.read_csv(r\"C:\\Users\\mese4\\Documents\\The Data incubator\\project\\genes_dataset\\G-SynMiner_miner-geneHUGO.tsv\",sep='\\t')\n\ngene_tag = df_genes['symbol'].tolist()\ngene_tag = [item.lower() for item in gene_tag]\n\ngene_name = df_genes['name'].tolist()\ngene_name = [item.lower() for item in gene_name]\n\n#split synonims into a list\nsynonyms_gene = []\nfor row in df_genes['alias_symbol']:\n row=str(row).lower()\n words = row.split('|')\n synonyms_gene.append(words)\n\n#split alias_name into a list\nsynonyms_alias_name = []\nfor row in df_genes['alias_name']:\n row=str(row).lower()\n words = row.split('|')\n synonyms_alias_name.append(words)\n \n#split prev_symbol into a list\nsynonyms_prev_symbol = []\nfor row in df_genes['prev_symbol']:\n row=str(row).lower()\n words = row.split('|')\n synonyms_prev_symbol.append(words)\n\n#all_combined = list(zip(gene_tag, gene_name, synonyms_gene,synonyms_alias_name,synonyms_prev_symbol ))\n \n#add tags \nfor x, y in zip(synonyms_gene, gene_tag):\n x.append(y)\n \n#add name \nfor x, y in zip(synonyms_gene, gene_name):\n x.append(y)\n \n#add alias_name \nfor x, y in zip(synonyms_gene, synonyms_alias_name):\n x.append(y[0])\n \n#add synonyms_prev_symbol \nfor x, y in zip(synonyms_gene, synonyms_prev_symbol):\n x.append(y[0])\n\n#make tuple list \ngene_lists= list(zip(gene_tag, synonyms_gene))\n\n#make dict\ngene_dic = dict(gene_lists)\n\n#remove 'nan'\ngene_dic = {k:[elem for elem in v if elem != 'nan' ] for k,v in gene_dic.items()}\n\n\n#search engine\nkeys = [key for key, value in gene_dic.items() if 'LORSDH' in value]\n\n\n",
"_____no_output_____"
],
[
"#save open\n\ndump(gene_dic, 'gene_dic.joblib') \ngene_dic = load('gene_dic.joblib') ",
"_____no_output_____"
],
[
"[key for key, value in gene_dic.items() if 'nrf2' in value] ",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a252ebeb5a54c583cd4ce2fbea1435da32d75c3
| 262,855 |
ipynb
|
Jupyter Notebook
|
notebooks/Power-basics.ipynb
|
jbpoline/module-stats
|
539b2972c3e19646cce335a488bd631bbd266435
|
[
"CC-BY-4.0"
] | null | null | null |
notebooks/Power-basics.ipynb
|
jbpoline/module-stats
|
539b2972c3e19646cce335a488bd631bbd266435
|
[
"CC-BY-4.0"
] | null | null | null |
notebooks/Power-basics.ipynb
|
jbpoline/module-stats
|
539b2972c3e19646cce335a488bd631bbd266435
|
[
"CC-BY-4.0"
] | null | null | null | 370.74048 | 55,128 | 0.922912 |
[
[
[
"# Introduction to reproducibility and power issues",
"_____no_output_____"
],
[
"## Some Definitions ",
"_____no_output_____"
],
[
"* $H_0$ : null hypothesis: The hypotheis that the effect we are testing for is null\n\n* $H_A$ : alternative hypothesis : Not $H_0$, so there is some signal\n\n* $T$ : The random variable that takes value \"significant\" or \"not significant\"\n\n* $T_S$ : Value of T when test is significant (eg $T = T_S$)\n\n* $T_N$ : Value of T when test is not significant (eg $T = T_N$)\n\n* $\\alpha$ : false positive rate - probability to reject $H_0$ when $H_0$ is true (therefore $H_A$ is false)\n\n* $\\beta$ : false negative rate - probability to accept $H_0$ when $H_A$ is true (i.e. $H_0$ is false)\n",
"_____no_output_____"
],
[
"power = $1-\\beta$ \n\nwhere $\\beta$ is the risk of *false negative*\n\nSo, to compute power, *we need to know what is the risk of false negative*, ie, the risk to not show a significant effect while we have some signal (null is false).",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n%matplotlib inline\nimport numpy as np\nimport scipy.stats as sst",
"_____no_output_____"
],
[
"from sympy import symbols, Eq, solve, simplify, lambdify, init_printing, latex\ninit_printing(use_latex=True, order='old')",
"_____no_output_____"
],
[
"from IPython.display import HTML\n# Code to make HTML for a probability table\ndef association_table(assocs, title):\n latexed = {'title': title}\n for key, value in assocs.items():\n latexed[key] = latex(value)\n latexed['s_total'] = latex(assocs['t_s'] + assocs['f_s'])\n latexed['ns_total'] = latex(assocs['t_ns'] + assocs['f_ns'])\n return \"\"\"<h3>{title}</h3>\n <TABLE><TR><TH>$H/T$<TH>$T_S$<TH>$T_N$\n <TR><TH>$H_A$<TD>${t_s}$<TD>${t_ns}$\n <TR><TH>$H_0$<TD>${f_s}$<TD>${f_ns}$\n <TR><TH>Total<TD>${s_total}$<TD>${ns_total}$\n </TABLE>\"\"\".format(**latexed)",
"_____no_output_____"
],
[
"from sympy.abc import alpha, beta # get alpha, beta symbolic variables\nassoc = dict(t_s = 1 - beta, # H_A true, test significant = true positives\n t_ns = beta, # true, not significant = false negatives\n f_s = alpha, # false, significant = false positives\n f_ns = 1 - alpha) # false, not sigificant = true negatives\nHTML(association_table(assoc, 'Not considering prior'))",
"_____no_output_____"
]
],
[
[
"## How do we compute power ? ",
"_____no_output_____"
],
[
"### What is the effect ?\n\n\n$$\\hspace{3cm}\\mu = \\mu_1 - \\mu_2$$",
"_____no_output_____"
],
[
"### What is the standardized effect ? (eg Cohen's d)\n\n$$\\hspace{3cm}d = \\frac{\\mu_1 - \\mu_2}{\\sigma} = \\frac{\\mu}{\\sigma}$$",
"_____no_output_____"
],
[
"### \"Z\" : Effect accounting for the sample size \n\n$$\\hspace{3cm}Z = \\frac{\\mu}{\\sigma / \\sqrt{n}}$$",
"_____no_output_____"
],
[
"### Cohen's d value:",
"_____no_output_____"
]
],
[
[
"# print some cohen values\n# %pylab inline\nmuse = (.05, .1,.2,.3,.4,.5);\nsigmas = np.linspace(1.,.5,len(muse))\ncohenstr = [\"For sigma = %3.2f and m = %3.2f Cohen d = %3.2f\" %(sig,mu,coh) \n for (sig,mu,coh) in zip(sigmas,muse, np.asarray(muse)/sigmas)]\nfor s in cohenstr:\n print(s)",
"For sigma = 1.00 and m = 0.05 Cohen d = 0.05\nFor sigma = 0.90 and m = 0.10 Cohen d = 0.11\nFor sigma = 0.80 and m = 0.20 Cohen d = 0.25\nFor sigma = 0.70 and m = 0.30 Cohen d = 0.43\nFor sigma = 0.60 and m = 0.40 Cohen d = 0.67\nFor sigma = 0.50 and m = 0.50 Cohen d = 1.00\n"
]
],
[
[
"We have to estimate the effect $\\mu$, say under some normal noise. Our statistic will be:\n\n$$\nt = \\frac{\\hat{\\mu}}{\\hat{\\sigma_{\\mu}}} = \\frac{\\hat{\\mu}}{\\hat{{SE}_{\\mu}}}\n$$",
"_____no_output_____"
],
[
"Power is the probability that the observed t is greater than $t_{.05}$, computing $t_{.05}$ by assuming that we are under the null. \n\nSo, we compute $t_{.05}$, and want to compute $P(t > t_{.05})$. \n\nTo compute this, __we need the distribution of our measured t - therefore we need to know the signal / effect size !__ \n\nLet's assume we know this and call it $t_{nc}$, and $F_{nc}$ for the cumulative distribution (more on this in the appendix).\n\n$\\mbox{Power} = 1 - \\beta = P(t > t_{.05}) = 1 - F_{nc}(t_{.05})$\n",
"_____no_output_____"
],
[
"__This power will depend on 4 parameters :__\n\n$$ \\mbox{The non standardized effect : } \\mu$$\n\n$$\\mbox{The standard deviation of the data : } \\sigma$$\n\n$$\\mbox{The number of subjects : } n$$\n\n$$\\mbox{The type I risk of error : } \\alpha$$\n\nAnd on the distribution of the statistic under the alternative hypothesis. Here, we assume our original data are normals, and the $t = \\frac{\\hat{\\mu}}{\\hat{{SE}_{\\mu}}}$ statistics follows a non central t distribution with non centrality parameter \n\n$$\\theta = \\mu \\sqrt{n}/\\sigma$$\n\nand $n-1$ degrees of freedom. ",
"_____no_output_____"
]
],
[
[
"import scipy.stats as sst\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom __future__ import division",
"_____no_output_____"
],
[
"# plot power as a function of n : define a little function that \n# takes n, mu, sigma, alpha, and report n. \n# Optionally plot power as a function of nfrom matplotlib.patches import Polygon\n\nfrom matplotlib.patches import Polygon\n\ndef stat_power(n=16, mu=1., sigma=1., alpha=0.05, plot=False, xlen=500):\n \"\"\"\n This function computes the statistical power of an analysis assuming a normal\n distribution of the data with a one sample t-test\n \n Parameters:\n -----------\n n: int,\n The number of sample in the experiment\n mu: float\n The mean of the alternative\n sigma: float\n The standard deviation of the alternative\n plot: bool\n Plot something\n alpha: float\n The risk of error (type I)\n xlen: int\n Number of points for the display\n \n Returns:\n --------\n float\n The statistical power for this number of sample, mu, sigma, alpha\n \"\"\"\n df = n-1\n theta = np.sqrt(n)*mu/sigma\n t_alph_null = sst.t.isf(alpha, df)\n ncrv = sst.nct(df, theta)\n spow = 1 - ncrv.cdf(t_alph_null)\n \n if plot:\n # define the domain of the plot\n norv = sst.norm(0, 1.)\n bornesnc = ncrv.isf([0.001, .999])\n bornesn = norv.isf([0.001, .999])\n # because the nc t will have higher max borne, and the H0 normal will be on the left\n x = np.linspace(np.min(bornesn), np.max(bornesnc), xlen)\n t_line = np.zeros_like(x)\n # define the line \n x_t_line = np.argmin((x-t_alph_null)**2)\n y_t_line = np.max(np.hstack((ncrv.pdf(x), norv.pdf(x))))\n t_line[x_t_line] = y_t_line\n\n fig, ax = plt.subplots()\n plt.plot(x, ncrv.pdf(x), 'g', x, norv.pdf(x), 'b', x, t_line, 'r')\n \n # Make the shaded region\n # http://matplotlib.org/xkcd/examples/showcase/integral_demo.html\n a = x[x_t_line]; b = np.max(bornesnc);\n ix = np.linspace(a,b)\n iy = ncrv.pdf(ix)\n verts = [(a, 0)] + list(zip(ix, iy)) + [(b, 0)]\n poly = Polygon(verts, facecolor='0.9', edgecolor='0.5')\n ax.add_patch(poly)\n \n ax.set_xlabel(\"t-value - H1 centred on \" + r\"$\\theta $\" + \" = %4.2f; \" %theta \n + r\"$\\mu$\" + \" = %4.2f\" %mu); \n ax.set_ylabel(\"Probability(t)\");\n ax.set_title('H0 and H1 sampling densities ' \n + r'$\\beta$' + '= %3.2f' %spow + ' n = %d' %n)\n plt.show()\n\n return spow\n",
"_____no_output_____"
],
[
"n = 30\nmu = .5\nsigma = 1.\npwr = stat_power(n, mu, sigma, plot=True, alpha=0.05, xlen=500)\nprint (\"Power = \", pwr, \" Z effect (Non centrality parameter) = \", mu*np.sqrt(n)/sigma)",
"_____no_output_____"
],
[
"n = 12\nmu = .5\nsigma = 1.\npwr = stat_power(n, mu, sigma, plot=True, alpha=0.05, xlen=500)\nprint(\"Power = \", pwr, \" Z effect (Non centrality parameter): \", mu*np.sqrt(n)/sigma)",
"_____no_output_____"
]
],
[
[
"### Plot power as a function of the number of subject in the study",
"_____no_output_____"
]
],
[
[
"def pwr_funcofsubj(muse, nses, alpha=.05, sigma=1):\n \"\"\"\n muse: array of mu\n nses: array of number of subjects\n alpha: float, type I risk\n sigma: float, data sigma\n \"\"\"\n mstr = [ 'd='+str(m) for m in np.asarray(muse)/sigma]\n lines=[]\n for mu in (muse):\n pw = [stat_power(n, mu, sigma, alpha=alpha, plot=False) for n in nses] \n (pl,) = plt.plot(nses, pw)\n lines.append(pl)\n plt.legend( lines, mstr, loc='upper right', shadow=True)\n plt.xlabel(\" Number of subjects \")\n plt.ylabel(\" Power \");\n \n return None\n\nmus = (.05, .1,.2,.3,.4,.5, .6);\n#nse = range(70, 770, 20)\nnse = range(7, 77, 2)\nalph = 1.e-3\n \npwr_funcofsubj(mus, nse, alph)",
"_____no_output_____"
]
],
[
[
"### **** Here - play with n ****",
"_____no_output_____"
]
],
[
[
"mus = (.05,.1,.2,.3,.4,.5,.6);\nnse = range(10, 330, 20)\n#nse = range(7, 77, 2)\nalph = 0.001\n \npwr_funcofsubj(mus, nse, alph)",
"_____no_output_____"
]
],
[
[
"### Here - play with $\\alpha$",
"_____no_output_____"
]
],
[
[
"mus = (.05, .1,.2,.3,.4,.5, .6);\nnse = range(10, 770, 20)\n#nse = range(7, 77, 2)\nalph = 0.05/30000\n \npwr_funcofsubj(mus, nse, alph)",
"_____no_output_____"
]
],
[
[
"### What is the effect size of APOE on the hippocampal volume ?",
"_____no_output_____"
],
[
"Authors find p value of 6.63e-10\n\nThey had 733 subjects\n\n",
"_____no_output_____"
]
],
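[
[
"# Sketch: plug the reported APOE result into the stat_power helper defined above\n# (the next cell derives the same standardized effect size d step by step)\nd_apoe = sst.norm.isf(6.6311e-10) / np.sqrt(733)\nprint('post-hoc power with n = 733:', stat_power(n=733, mu=d_apoe, sigma=1.))\nprint('power of a smaller replication with n = 100:', stat_power(n=100, mu=d_apoe, sigma=1.))",
"_____no_output_____"
]
],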
[
[
"n01 = sst.norm(0,1.)\nz = n01.isf(6.6311e-10)\nd = n01.isf(6.6311e-10)/np.sqrt(733)\nprint(\"z = %4.3f d = %4.3f \" %(z,d))",
"z = 6.064 d = 0.224 \n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
4a253dda5fa9710dc397e673701be76dd683d58b
| 29,711 |
ipynb
|
Jupyter Notebook
|
LAD_CLIP.ipynb
|
xiaodanhu/AttDiscovery
|
3bb2323bf33ca2128cbcd52e10878f1c773e109d
|
[
"MIT"
] | null | null | null |
LAD_CLIP.ipynb
|
xiaodanhu/AttDiscovery
|
3bb2323bf33ca2128cbcd52e10878f1c773e109d
|
[
"MIT"
] | null | null | null |
LAD_CLIP.ipynb
|
xiaodanhu/AttDiscovery
|
3bb2323bf33ca2128cbcd52e10878f1c773e109d
|
[
"MIT"
] | null | null | null | 36.95398 | 273 | 0.555485 |
[
[
[
"import os\nimport torch\nimport numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\nfrom torch.optim.lr_scheduler import LambdaLR, StepLR",
"_____no_output_____"
],
[
"#@title\n\nimport gzip\nimport html\nimport os\nfrom functools import lru_cache\n\nimport ftfy\nimport regex as re\n\n\n@lru_cache()\ndef bytes_to_unicode():\n \"\"\"\n Returns list of utf-8 byte and a corresponding list of unicode strings.\n The reversible bpe codes work on unicode strings.\n This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.\n When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.\n This is a signficant percentage of your normal, say, 32K bpe vocab.\n To avoid that, we want lookup tables between utf-8 bytes and unicode strings.\n And avoids mapping to whitespace/control characters the bpe code barfs on.\n \"\"\"\n bs = list(range(ord(\"!\"), ord(\"~\")+1))+list(range(ord(\"¡\"), ord(\"¬\")+1))+list(range(ord(\"®\"), ord(\"ÿ\")+1))\n cs = bs[:]\n n = 0\n for b in range(2**8):\n if b not in bs:\n bs.append(b)\n cs.append(2**8+n)\n n += 1\n cs = [chr(n) for n in cs]\n return dict(zip(bs, cs))\n\n\ndef get_pairs(word):\n \"\"\"Return set of symbol pairs in a word.\n Word is represented as tuple of symbols (symbols being variable-length strings).\n \"\"\"\n pairs = set()\n prev_char = word[0]\n for char in word[1:]:\n pairs.add((prev_char, char))\n prev_char = char\n return pairs\n\n\ndef basic_clean(text):\n text = ftfy.fix_text(text)\n text = html.unescape(html.unescape(text))\n return text.strip()\n\n\ndef whitespace_clean(text):\n text = re.sub(r'\\s+', ' ', text)\n text = text.strip()\n return text\n\n\nclass SimpleTokenizer(object):\n def __init__(self, bpe_path: str = \"clip/bpe_simple_vocab_16e6.txt.gz\"):\n self.byte_encoder = bytes_to_unicode()\n self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}\n merges = gzip.open(bpe_path).read().decode(\"utf-8\").split('\\n')\n merges = merges[1:49152-256-2+1]\n merges = [tuple(merge.split()) for merge in merges]\n vocab = list(bytes_to_unicode().values())\n vocab = vocab + [v+'</w>' for v in vocab]\n for merge in merges:\n vocab.append(''.join(merge))\n vocab.extend(['<|startoftext|>', '<|endoftext|>'])\n self.encoder = dict(zip(vocab, range(len(vocab))))\n self.decoder = {v: k for k, v in self.encoder.items()}\n self.bpe_ranks = dict(zip(merges, range(len(merges))))\n self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}\n self.pat = re.compile(r\"\"\"<\\|startoftext\\|>|<\\|endoftext\\|>|'s|'t|'re|'ve|'m|'ll|'d|[\\p{L}]+|[\\p{N}]|[^\\s\\p{L}\\p{N}]+\"\"\", re.IGNORECASE)\n\n def bpe(self, token):\n if token in self.cache:\n return self.cache[token]\n word = tuple(token[:-1]) + ( token[-1] + '</w>',)\n pairs = get_pairs(word)\n\n if not pairs:\n return token+'</w>'\n\n while True:\n bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))\n if bigram not in self.bpe_ranks:\n break\n first, second = bigram\n new_word = []\n i = 0\n while i < len(word):\n try:\n j = word.index(first, i)\n new_word.extend(word[i:j])\n i = j\n except:\n new_word.extend(word[i:])\n break\n\n if word[i] == first and i < len(word)-1 and word[i+1] == second:\n new_word.append(first+second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n new_word = tuple(new_word)\n word = new_word\n if len(word) == 1:\n break\n else:\n pairs = get_pairs(word)\n word = ' '.join(word)\n self.cache[token] = word\n return word\n\n def encode(self, text):\n bpe_tokens = []\n text = whitespace_clean(basic_clean(text)).lower()\n for token in re.findall(self.pat, text):\n token = ''.join(self.byte_encoder[b] for b 
in token.encode('utf-8'))\n bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))\n return bpe_tokens\n\n def decode(self, tokens):\n text = ''.join([self.decoder[token] for token in tokens])\n text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=\"replace\").replace('</w>', ' ')\n return text\n",
"_____no_output_____"
],
[
"# from clip import clip\n# from clip import model\n\n# model, preprocess = clip.load(\"ViT-B/32\", device='cuda', jit=False)\n\nmodel = torch.jit.load(\"../checkpoints/model.pt\").cuda().eval()\ninput_resolution = model.input_resolution.item()\ncontext_length = model.context_length.item()\nvocab_size = model.vocab_size.item()\n\nprint(\"Model parameters:\", f\"{np.sum([int(np.prod(p.shape)) for p in model.parameters()]):,}\")\nprint(\"Input resolution:\", input_resolution)\nprint(\"Context length:\", context_length)\nprint(\"Vocab size:\", vocab_size)",
"Model parameters: 151,277,313\nInput resolution: 224\nContext length: 77\nVocab size: 49408\n"
]
],
[
[
"# Step 1: Load LAD Dataset\n## Option 1 Load Dataset from Scratch",
"_____no_output_____"
]
],
[
[
"file_root = '/media/hxd/82231ee6-d2b3-4b78-b3b4-69033720d8a8/MyDatasets/LAD'\ndata_root = file_root + '/LAD_annotations/'\nimg_root = file_root + '/LAD_images/'",
"_____no_output_____"
],
[
"# load attributes list\nattributes_list_path = data_root + 'attribute_list.txt'\nfsplit = open(attributes_list_path, 'r', encoding='UTF-8')\nlines_attribute = fsplit.readlines()\nfsplit.close()\nlist_attribute = list()\nlist_attribute_value = list()\nfor each in lines_attribute:\n tokens = each.split(', ')\n list_attribute.append(tokens[0])\n list_attribute_value.append(tokens[1])",
"_____no_output_____"
],
[
"# load label list\nlabel_list_path = data_root + 'label_list.txt'\nfsplit = open(label_list_path, 'r', encoding='UTF-8')\nlines_label = fsplit.readlines()\nfsplit.close()\nlist_label = dict()\nlist_label_value = list()\nfor each in lines_label:\n tokens = each.split(', ')\n list_label[tokens[0]]=tokens[1]\n list_label_value.append(tokens[1])",
"_____no_output_____"
],
[
"from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize\nfrom PIL import Image\n\npreprocess = Compose([\n Resize((224, 224), interpolation=Image.BICUBIC),\n CenterCrop((224, 224)),\n ToTensor()\n])\n\nimage_mean = torch.tensor([0.48145466, 0.4578275, 0.40821073]).cuda()\nimage_std = torch.tensor([0.26862954, 0.26130258, 0.27577711]).cuda()",
"/home/hxd/anaconda3/envs/py38/lib/python3.8/site-packages/torchvision/transforms/transforms.py:280: UserWarning: Argument interpolation should be of type InterpolationMode instead of int. Please, use InterpolationMode enum.\n warnings.warn(\n"
],
[
"# load all the labels, attributes, images data from the LAD dataset\nattributes_per_class_path = data_root + 'attributes.txt'\nfattr = open(attributes_per_class_path, 'r', encoding='UTF-8')\nlines_attr = fattr.readlines()\nfattr.close()\nimages = list()\nattr = list()\nlabels = list()\nfor each in lines_attr:\n tokens = each.split(', ')\n labels.append(list_label[tokens[0]])\n img_path = tokens[1]\n image = preprocess(Image.open(os.path.join(img_root, img_path)).convert(\"RGB\"))\n images.append(image)\n attr_r = list(map(int, tokens[2].split()[1:-1]))\n attr.append([val for i,val in enumerate(list_attribute_value) if attr_r[i] == 1])",
"_____no_output_____"
],
[
"# Dump processed image and text to local\nwith open('../checkpoints/data_img_raw.pkl', 'wb') as file:\n pickle.dump(images, file)\nwith open('../checkpoints/data_txt_raw.pkl', 'wb') as file:\n pickle.dump({'label': labels, 'att': attr}, file)",
"_____no_output_____"
]
],
[
[
"## Option 2 Load LAD Dataset from Saved Files",
"_____no_output_____"
]
],
[
[
"with open('../checkpoints/data_img_raw.pkl', 'rb') as file:\n images = pickle.load(file)\nwith open('../checkpoints/data_txt_raw.pkl', 'rb') as file:\n b = pickle.load(file)\n \nlabels = b['label']\nattr = b['att']",
"_____no_output_____"
]
],
[
[
"# Step 2: Obtain the Image and Text Features\n## Option 1 Load CLIP to obtain features",
"_____no_output_____"
]
],
[
[
"# normalize images\nimage_input = torch.tensor(np.stack(images)).cuda()\nimage_input -= image_mean[:, None, None]\nimage_input /= image_std[:, None, None]",
"_____no_output_____"
],
[
"# Convert labels to tokens\ntokenizer_label = SimpleTokenizer()\ntext_tokens = [tokenizer_label.encode(desc) for desc in labels]\n\nsot_token = tokenizer_label.encoder['<|startoftext|>']\neot_token = tokenizer_label.encoder['<|endoftext|>']\n\ntext_inputs_label = torch.zeros(len(text_tokens), model.context_length, dtype=torch.long)\nfor i, tokens in enumerate(text_tokens):\n tokens = [sot_token] + tokens + [eot_token]\n text_inputs_label[i, :len(tokens)] = torch.tensor(tokens)\ntext_inputs_label = text_inputs_label.cuda()",
"_____no_output_____"
],
[
"# Convert attributes to tokens\ntokenizer_att = SimpleTokenizer()\ntext_tokens = [[tokenizer_att.encode(desc) for desc in att] for att in attr]\n\nsot_token = tokenizer_att.encoder['<|startoftext|>']\neot_token = tokenizer_att.encoder['<|endoftext|>']\ntext_inputs_att = list()\n\nfor j, tokens_img in enumerate(text_tokens):\n text_input = torch.zeros(len(tokens_img), model.context_length, dtype=torch.long)\n for i, tokens in enumerate(tokens_img):\n tokens = [sot_token] + tokens + [eot_token]\n text_input[i, :len(tokens)] = torch.tensor(tokens)\n text_inputs_att.append(text_input.cuda())\n",
"_____no_output_____"
],
[
"# Load CLIP model",
"_____no_output_____"
],
[
"with torch.no_grad():\n image_features = model.encode_image(image_input).float()",
"/tmp/ipykernel_1849104/3267874097.py:2: UserWarning: floor_divide is deprecated, and will be removed in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values.\nTo keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor'). (Triggered internally at /opt/conda/conda-bld/pytorch_1623448278899/work/aten/src/ATen/native/BinaryOps.cpp:467.)\n image_features = model.encode_image(image_input).float()\n"
],
[
"with torch.no_grad():\n label_fea = model.encode_text(text_inputs_label.cuda()).float()",
"_____no_output_____"
],
[
"with torch.no_grad():\n text_feature = list()\n for txt in text_inputs_att:\n if len(txt) == 0:\n text_feature.append(torch.empty(0, 512).cuda())\n else:\n text_feature.append(model.encode_text(txt).float())",
"_____no_output_____"
],
[
"image_features /= image_features.norm(dim=-1, keepdim=True)\n\nlabel_fea /= label_fea.norm(dim=-1, keepdim=True)\n\ntext_feature = torch.stack([torch.mean(item,0) for item in text_feature])\ntext_feature /= text_feature.norm(dim=-1, keepdim=True)",
"_____no_output_____"
],
[
"# Save image and text features\nwith open('../checkpoints/data_txt_feature.pkl', 'wb') as file:\n pickle.dump({'label': label_fea, 'att': text_feature}, file)\nwith open('../checkpoints/data_img_feature.pkl', 'wb') as file:\n pickle.dump(image_features, file)",
"_____no_output_____"
]
],
[
[
"# Option 2 Load saved image and text features",
"_____no_output_____"
]
],
[
[
"with open('../checkpoints/data_txt_feature.pkl', 'rb') as file:\n b = pickle.load(file)\n\nlabel_fea = b['label']\ntext_feature = b['att']",
"_____no_output_____"
],
[
"with open('../checkpoints/data_img_feature.pkl', 'rb') as file:\n image_features = pickle.load(file)",
"_____no_output_____"
]
],
[
[
"# Construct the dataloader for classification",
"_____no_output_____"
]
],
[
[
"from torch.utils.data import Dataset\nfrom sklearn import preprocessing\n\nclass Dataset(Dataset):\n\n def __init__(self, image_features, text_feature, labels, data_indx):\n self.image_features = image_features\n self.text_feature = text_feature\n self.labels = labels\n self.data_indx = data_indx\n# self.imgs = image_input\n# self.attr = attr\n\n def __len__(self):\n return len(self.image_features)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n sample = {'image': self.image_features[idx], \n 'attribute': self.text_feature[idx], \n 'label': self.labels[idx],\n 'data_indx': self.data_indx[idx]\n# 'imgs': self.imgs[idx],\n# 'attr': self.attr[idx]\n }\n\n return sample\n \n\nle = preprocessing.LabelEncoder()\nle.fit(labels)\nclass_list = list(le.classes_)\nlabels_list = torch.tensor(le.transform(labels)).cuda()\n\nattr_ = [';'.join(attr[0]) for item in attr]\ndata_indx = list(range(4600))\n# dataset = Dataset(image_features, text_feature, labels_list, torch.tensor(np.stack(images)).cuda(), attr_)\ndataset = Dataset(image_features, text_feature, labels_list, data_indx)\ntrain_set, test_set = torch.utils.data.random_split(dataset,[4600-500,500])\ntrainloader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True)\ntestloader = torch.utils.data.DataLoader(test_set, batch_size=64, shuffle=True)",
"_____no_output_____"
],
[
"import torch.nn as nn\nfrom torch.utils.data import DataLoader\n# defining the model architecture\nclass Net(nn.Module): \n def __init__(self):\n super(Net, self).__init__()\n\n self.linear_layers = nn.Sequential(\n nn.Linear(1024, 512),\n nn.Linear(512, 230)\n )\n\n # Defining the forward pass \n def forward(self, x, t):\n con = torch.cat((x, t), 1)\n out = self.linear_layers(con)\n return out",
"_____no_output_____"
],
[
"model = Net().cuda()\nerror = nn.CrossEntropyLoss().cuda()\nlearning_rate = 0.01\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\nscheduler = StepLR(optimizer, step_size=10, gamma=0.1)",
"_____no_output_____"
],
[
"num_epochs = 30\n# Lists for visualization of loss and accuracy \nepoch_list = []\ntrain_accuracy_list = []\ntrain_loss_list = []\nvalid_accuracy_list = []\nvalid_loss_list = []\nPATH = \"../checkpoints/cnn.pth\"\n\nfor epoch in range(num_epochs):\n correct = 0\n running_loss = 0\n model.train()\n for data in trainloader:\n # Transfering images and labels to GPU if available\n# image_batch, text_batch, label_batch, im_batch, att_batch = data['image'], data['attribute'], data['label'], data['imgs'], data['attr']\n image_batch, text_batch, label_batch, idx_batch = data['image'], data['attribute'], data['label'], data['data_indx']\n # Forward pass \n outputs = model(image_batch, text_batch)\n #CrossEntropyLoss expects floating point inputs and long labels.\n loss = error(outputs, label_batch)\n # Initializing a gradient as 0 so there is no mixing of gradient among the batches\n optimizer.zero_grad()\n #Propagating the error backward\n loss.backward()\n # Optimizing the parameters\n optimizer.step()\n \n predictions = torch.max(outputs, 1)[1].cuda()\n correct += (predictions == label_batch).sum()\n running_loss += loss.item()\n\n train_loss_list.append(float(running_loss) / float(len(trainloader.dataset)))\n train_accuracy_list.append(float(correct) / float(len(trainloader.dataset)))\n \n # test on validation set\n correct = 0\n running_loss = 0\n with torch.no_grad():\n for data in testloader:\n image_batch, text_batch, label_batch, idx_batch = data['image'], data['attribute'], data['label'], data['data_indx']\n\n outputs = model(image_batch, text_batch)\n\n predictions = torch.max(outputs, 1)[1].cuda()\n correct += (predictions == label_batch).sum()\n running_loss += loss.item()\n\n\n \n valid_loss_list.append(float(running_loss) / float(len(testloader.dataset)))\n valid_accuracy_list.append(float(correct) / float(len(testloader.dataset)))\n \n print(\"Epoch: {}, train_loss: {}, train_accuracy: {}%, test_loss: {}, test_accuracy: {}%\".format(epoch, \n train_loss_list[-1], \n train_accuracy_list[-1], \n valid_loss_list[-1], \n valid_accuracy_list[-1]))\n \n \n \n epoch_list.append(epoch) \n scheduler.step()\n \n if (epoch % 10) == 0:\n torch.save({\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict()\n }, PATH)",
"Epoch: 0, train_loss: 0.047759719650919845, train_accuracy: 0.2975609756097561%, test_loss: 0.0199383544921875, test_accuracy: 0.626%\nEpoch: 1, train_loss: 0.010961451014367546, train_accuracy: 0.7914634146341464%, test_loss: 0.011954001426696777, test_accuracy: 0.792%\nEpoch: 2, train_loss: 0.005725246373473145, train_accuracy: 0.8939024390243903%, test_loss: 0.0018081858158111573, test_accuracy: 0.792%\nEpoch: 3, train_loss: 0.002696266144616302, train_accuracy: 0.9490243902439024%, test_loss: 6.169255450367928e-05, test_accuracy: 0.858%\nEpoch: 4, train_loss: 0.0013520190019796535, train_accuracy: 0.9782926829268292%, test_loss: 0.0012208878993988036, test_accuracy: 0.864%\nEpoch: 5, train_loss: 0.0012840903841140794, train_accuracy: 0.9819512195121951%, test_loss: 0.0001842034310102463, test_accuracy: 0.864%\nEpoch: 6, train_loss: 0.0005731607787311077, train_accuracy: 0.9917073170731707%, test_loss: 0.0013498475551605225, test_accuracy: 0.884%\nEpoch: 7, train_loss: 0.0008861863283758483, train_accuracy: 0.9826829268292683%, test_loss: 0.00024289563298225404, test_accuracy: 0.886%\nEpoch: 8, train_loss: 0.0005998159925703232, train_accuracy: 0.9902439024390244%, test_loss: 0.00013991950452327728, test_accuracy: 0.89%\nEpoch: 9, train_loss: 0.00035051648118873923, train_accuracy: 0.9946341463414634%, test_loss: 0.00011046461015939712, test_accuracy: 0.89%\nEpoch: 10, train_loss: 0.00011639534245903899, train_accuracy: 0.9985365853658537%, test_loss: 1.2775518000125885e-05, test_accuracy: 0.896%\nEpoch: 11, train_loss: 6.499487747688119e-05, train_accuracy: 1.0%, test_loss: 5.4937940090894696e-05, test_accuracy: 0.898%\nEpoch: 12, train_loss: 6.362593673146898e-05, train_accuracy: 1.0%, test_loss: 0.00034964525699615476, test_accuracy: 0.898%\nEpoch: 13, train_loss: 5.620986578145587e-05, train_accuracy: 1.0%, test_loss: 5.933334678411484e-05, test_accuracy: 0.898%\nEpoch: 14, train_loss: 5.2860194033511526e-05, train_accuracy: 1.0%, test_loss: 5.308554321527481e-05, test_accuracy: 0.896%\nEpoch: 15, train_loss: 5.039879795243373e-05, train_accuracy: 1.0%, test_loss: 3.925072960555554e-06, test_accuracy: 0.894%\nEpoch: 16, train_loss: 5.108877354892107e-05, train_accuracy: 1.0%, test_loss: 0.00019007441401481628, test_accuracy: 0.894%\nEpoch: 17, train_loss: 4.7076652536350415e-05, train_accuracy: 1.0%, test_loss: 1.7780134454369546e-05, test_accuracy: 0.888%\nEpoch: 18, train_loss: 4.621231008502768e-05, train_accuracy: 1.0%, test_loss: 4.428032040596008e-05, test_accuracy: 0.89%\nEpoch: 19, train_loss: 4.4482753260015715e-05, train_accuracy: 1.0%, test_loss: 3.4897830337286e-05, test_accuracy: 0.89%\nEpoch: 20, train_loss: 4.28462291999561e-05, train_accuracy: 1.0%, test_loss: 3.27441468834877e-05, test_accuracy: 0.89%\nEpoch: 21, train_loss: 4.370108607406841e-05, train_accuracy: 1.0%, test_loss: 0.00010193941742181777, test_accuracy: 0.89%\nEpoch: 22, train_loss: 4.2217147022644736e-05, train_accuracy: 1.0%, test_loss: 7.306153886020183e-06, test_accuracy: 0.89%\nEpoch: 23, train_loss: 4.2204602860005167e-05, train_accuracy: 1.0%, test_loss: 1.5095522627234458e-05, test_accuracy: 0.89%\nEpoch: 24, train_loss: 4.3904819616639036e-05, train_accuracy: 1.0%, test_loss: 0.00014409585297107697, test_accuracy: 0.89%\nEpoch: 25, train_loss: 4.1854783392897464e-05, train_accuracy: 1.0%, test_loss: 9.19707864522934e-06, test_accuracy: 0.892%\nEpoch: 26, train_loss: 4.1993607358052966e-05, train_accuracy: 1.0%, test_loss: 2.6926299557089806e-05, test_accuracy: 0.892%\nEpoch: 27, train_loss: 
4.275702593121223e-05, train_accuracy: 1.0%, test_loss: 9.08774584531784e-05, test_accuracy: 0.892%\nEpoch: 28, train_loss: 4.498129012063146e-05, train_accuracy: 1.0%, test_loss: 0.0002562724649906158, test_accuracy: 0.892%\nEpoch: 29, train_loss: 4.163291356412739e-05, train_accuracy: 1.0%, test_loss: 3.178300708532333e-05, test_accuracy: 0.892%\n"
],
[
"m = nn.Softmax()",
"_____no_output_____"
],
[
"data = next(iter(testloader))\nimage_batch, text_batch, label_batch, idx_batch = data['image'], data['attribute'], data['label'], data['data_indx']\noutputs = model(image_batch, text_batch)\n\nfor id in range(64):\n plt.imshow(images[idx_batch[id]].cpu().detach().permute(1, 2, 0))\n plt.show()\n print(m(outputs[id]).cpu().topk(3, dim=-1))\n top3 = m(outputs[id]).cpu().topk(3, dim=-1).indices\n print([class_list[i] for i in top3])\n print(attr[idx_batch[id]])",
"_____no_output_____"
]
]
] |
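The final cell of the notebook record above converts raw model logits to probabilities with a softmax and reads off the top-3 predicted classes. A minimal, self-contained sketch of that pattern follows; the logits values and the class_list names here are invented placeholders, not the notebook's actual model outputs.

import torch
import torch.nn as nn

# invented logits for one example over five hypothetical classes
logits = torch.tensor([2.0, 0.5, -1.0, 1.2, 0.1])
class_list = ["cat", "dog", "bird", "fish", "horse"]  # placeholder label names

softmax = nn.Softmax(dim=-1)      # normalise logits into probabilities
probs = softmax(logits)

top3 = probs.topk(3, dim=-1)      # three highest-probability classes
print(top3.values)                # probabilities, largest first
print([class_list[i] for i in top3.indices])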
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a2542b7fc6fa5919468c86c9f9b7eadaea95b16
| 100,232 |
ipynb
|
Jupyter Notebook
|
(G) Analysis - 50 States + DC Models.ipynb
|
rethinkpriorities/rp_2020_benchmarking_poll
|
8e854599c146bd00e047d82242529fc266566ef8
|
[
"MIT"
] | 2 |
2020-10-29T07:10:59.000Z
|
2021-11-22T23:32:08.000Z
|
(G) Analysis - 50 States + DC Models.ipynb
|
rethinkpriorities/rp_2020_benchmarking_poll
|
8e854599c146bd00e047d82242529fc266566ef8
|
[
"MIT"
] | 1 |
2020-10-26T23:13:20.000Z
|
2020-10-29T06:37:10.000Z
|
(G) Analysis - 50 States + DC Models.ipynb
|
rethinkpriorities/rp_2020_benchmarking_poll
|
8e854599c146bd00e047d82242529fc266566ef8
|
[
"MIT"
] | null | null | null | 44.666667 | 379 | 0.490622 |
[
[
[
"## Libraries",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport scipy.stats as stat\n\nfrom math import sqrt\nfrom mlgear.utils import show, display_columns\nfrom surveyweights import normalize_weights, run_weighting_iteration\n\n\ndef margin_of_error(n=None, sd=None, p=None, type='proportion', interval_size=0.95):\n z_lookup = {0.8: 1.28, 0.85: 1.44, 0.9: 1.65, 0.95: 1.96, 0.99: 2.58}\n if interval_size not in z_lookup.keys():\n raise ValueError('{} not a valid `interval_size` - must be {}'.format(interval_size,\n ', '.join(list(z_lookup.keys()))))\n if type == 'proportion':\n se = sqrt(p * (1 - p)) / sqrt(n)\n elif type == 'continuous':\n se = sd / sqrt(n)\n else:\n raise ValueError('{} not a valid `type` - must be proportion or continuous')\n \n z = z_lookup[interval_size]\n return se * z\n\n\ndef print_pct(pct, digits=0):\n pct = pct * 100\n pct = np.round(pct, digits)\n if pct >= 100:\n if digits == 0:\n val = '>99.0%'\n else:\n val = '>99.'\n for d in range(digits - 1):\n val += '9'\n val += '9%'\n elif pct <= 0:\n if digits == 0:\n val = '<0.1%'\n else:\n val = '<0.'\n for d in range(digits - 1):\n val += '0'\n val += '1%'\n else:\n val = '{}%'.format(pct)\n return val\n\n\ndef calc_result(biden_vote, trump_vote, n, interval=0.8):\n GENERAL_POLLING_ERROR = 5.0\n N_SIMS = 100000\n \n biden_moe = margin_of_error(n=n, p=biden_vote/100, interval_size=interval)\n trump_moe = margin_of_error(n=n, p=trump_vote/100, interval_size=interval)\n undecided = (100 - biden_vote - trump_vote) / 2\n\n biden_mean = biden_vote + undecided * 0.25\n biden_raw_moe = biden_moe * 100\n biden_allocate_undecided = undecided * 0.4\n biden_margin = biden_raw_moe + biden_allocate_undecided + GENERAL_POLLING_ERROR\n \n trump_mean = trump_vote + undecided * 0.25\n trump_raw_moe = trump_moe * 100\n trump_allocate_undecided = undecided * 0.4\n trump_margin = trump_raw_moe + trump_allocate_undecided + GENERAL_POLLING_ERROR\n \n cdf_value = 0.5 + 0.5 * interval\n normed_sigma = stat.norm.ppf(cdf_value)\n \n biden_sigma = biden_margin / 100 / normed_sigma\n biden_sims = np.random.normal(biden_mean / 100, biden_sigma, N_SIMS)\n \n trump_sigma = trump_margin / 100 / normed_sigma\n trump_sims = np.random.normal(trump_mean / 100, trump_sigma, N_SIMS)\n \n chance_pass = np.sum([sim[0] > sim[1] for sim in zip(biden_sims, trump_sims)]) / N_SIMS\n \n low, high = np.percentile(biden_sims - trump_sims, [20, 80]) * 100\n \n return {'mean': biden_mean - trump_mean, 'high': high, 'low': low, 'n': n,\n 'raw_moe': biden_raw_moe + trump_raw_moe,\n 'margin': (biden_margin + trump_margin) / 2,\n 'sigma': (biden_sigma + trump_sigma) / 2,\n 'chance_pass': chance_pass}\n\n\ndef print_result(mean, high, low, n, raw_moe, margin, sigma, chance_pass):\n mean = np.round(mean, 1)\n first = np.round(high, 1)\n second = np.round(low, 1)\n sigma = np.round(sigma * 100, 1)\n raw_moe = np.round(raw_moe, 1)\n margin = np.round(margin, 1)\n chance_pass = print_pct(chance_pass, 1)\n if second < first:\n _ = first\n first = second\n second = _\n if second > 100:\n second = 100\n if first < -100:\n first = -100\n print(('Result Biden {} (80% CI: {} to {}) (Weighted N={}) (raw_moe={}pts, margin={}pts, '\n 'sigma={}pts) (Biden {} likely to win)').format(mean,\n first,\n second,\n n,\n raw_moe,\n margin,\n sigma,\n chance_pass))\n print(('Biden {} (80% CI: {} to {}) ({} Biden)').format(mean,\n first,\n second,\n chance_pass))\n print('-')\n\n\ndef calc_result_sen(dem_vote, rep_vote, n, interval=0.8):\n GENERAL_POLLING_ERROR = 5.0\n N_SIMS = 100000\n \n dem_moe = 
margin_of_error(n=n, p=dem_vote/100, interval_size=interval)\n rep_moe = margin_of_error(n=n, p=rep_vote/100, interval_size=interval)\n undecided = 100 - dem_vote - rep_vote\n\n dem_mean = dem_vote + undecided * 0.25\n dem_raw_moe = dem_moe * 100\n dem_allocate_undecided = undecided * 0.4\n dem_margin = dem_raw_moe + dem_allocate_undecided + GENERAL_POLLING_ERROR\n \n rep_mean = rep_vote + undecided * 0.25\n rep_raw_moe = rep_moe * 100\n rep_allocate_undecided = undecided * 0.4\n rep_margin = rep_raw_moe + rep_allocate_undecided + GENERAL_POLLING_ERROR\n \n cdf_value = 0.5 + 0.5 * interval\n normed_sigma = stat.norm.ppf(cdf_value)\n \n dem_sigma = dem_margin / 100 / normed_sigma\n dem_sims = np.random.normal(dem_mean / 100, dem_sigma, N_SIMS)\n \n rep_sigma = rep_margin / 100 / normed_sigma\n rep_sims = np.random.normal(rep_mean / 100, rep_sigma, N_SIMS)\n \n chance_pass = np.sum([sim[0] > sim[1] for sim in zip(dem_sims, rep_sims)]) / N_SIMS\n \n low, high = np.percentile(dem_sims - rep_sims, [20, 80]) * 100\n \n return {'mean': dem_mean - rep_mean, 'high': high, 'low': low, 'n': n,\n 'raw_moe': dem_raw_moe + rep_raw_moe,\n 'margin': (dem_margin + rep_margin) / 2,\n 'sigma': (dem_sigma + rep_sigma) / 2,\n 'chance_pass': chance_pass}\n\n\ndef print_result_sen(mean, high, low, n, raw_moe, margin, sigma, chance_pass):\n mean = np.round(mean, 1)\n first = np.round(high, 1)\n second = np.round(low, 1)\n sigma = np.round(sigma * 100, 1)\n raw_moe = np.round(raw_moe, 1)\n margin = np.round(margin, 1)\n chance_pass = print_pct(chance_pass, 1)\n if second < first:\n _ = first\n first = second\n second = _\n if second > 100:\n second = 100\n if first < -100:\n first = -100\n print(('Result Dem Sen {} (80% CI: {} to {}) (Weighted N={}) (raw_moe={}pts, margin={}pts, '\n 'sigma={}pts) (Dem Sen {} likely to win)').format(mean,\n first,\n second,\n n,\n raw_moe,\n margin,\n sigma,\n chance_pass))\n print(('Dem {} (80% CI: {} to {}) ({} Dem)').format(mean,\n first,\n second,\n chance_pass))\n\n print('-')",
"_____no_output_____"
]
],
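A quick, hedged illustration of how the helper functions defined in the cell above are meant to be called; the poll numbers are invented and the sketch assumes that cell has already been run.

# margin of error for a 52% proportion measured on n = 1,200 respondents,
# at the 95% interval covered by the z_lookup table above
moe = margin_of_error(n=1200, p=0.52, interval_size=0.95)
print(round(moe * 100, 1), 'points')   # roughly +/- 2.8 points

# simulate a hypothetical Biden 49 / Trump 45 poll with a weighted N of 1,200
result = calc_result(biden_vote=49.0, trump_vote=45.0, n=1200)
print_result(**result)                 # same call pattern the notebook uses per state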
[
[
"## Load Processed Data",
"_____no_output_____"
]
],
[
[
"survey = pd.read_csv('responses_processed_national_weighted.csv').fillna('Not presented')",
"/Users/peterhurford/.virtualenvs/dev/lib/python3.8/site-packages/IPython/core/interactiveshell.py:3145: DtypeWarning: Columns (9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,40,41,42,43,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112) have mixed types.Specify dtype option on import or set low_memory=False.\n has_raised = await self.run_ast_nodes(code_ast.body, cell_name,\n"
]
],
[
[
"## State Presidential Models",
"_____no_output_____"
]
],
[
[
"POTUS_CENSUS = {'Alabama': {'Hillary Clinton': 0.3436, 'Donald Trump': 0.6208},\n 'Alaska': {'Hillary Clinton': 0.3655, 'Donald Trump': 0.5128},\n 'Arizona': {'Hillary Clinton': 0.4513, 'Donald Trump': 0.4867},\n 'Arkansas': {'Hillary Clinton': 0.3365, 'Donald Trump': 0.6057},\n 'California': {'Hillary Clinton': 0.6173, 'Donald Trump': 0.3162},\n 'Colorado': {'Hillary Clinton': 0.4816, 'Donald Trump': 0.4325},\n 'Connecticut': {'Hillary Clinton': 0.5457, 'Donald Trump': 0.4093},\n 'Delaware': {'Hillary Clinton': 0.531, 'Donald Trump': 0.417},\n 'Washington DC': {'Hillary Clinton': 0.905, 'Donald Trump': 0.016},\n 'Florida': {'Hillary Clinton': 0.478, 'Donald Trump': 0.490},\n 'Georgia': {'Hillary Clinton': 0.456, 'Donald Trump': 0.508},\n 'Hawaii': {'Hillary Clinton': 0.622, 'Donald Trump': 0.300},\n 'Idaho': {'Hillary Clinton': 0.275, 'Donald Trump': 0.593},\n 'Illinois': {'Hillary Clinton': 0.558, 'Donald Trump': 0.379},\n 'Indiana': {'Hillary Clinton': 0.379, 'Donald Trump': 0.511},\n 'Iowa': {'Hillary Clinton': 0.417, 'Donald Trump': 0.512},\n 'Kansas': {'Hillary Clinton': 0.361, 'Donald Trump': 0.567},\n 'Kentucky': {'Hillary Clinton': 0.327, 'Donald Trump': 0.625},\n 'Louisiana': {'Hillary Clinton': 0.385, 'Donald Trump': 0.581},\n 'Maine': {'Hillary Clinton': 0.478, 'Donald Trump': 0.449},\n 'Maryland': {'Hillary Clinton': 0.603, 'Donald Trump': 0.339},\n 'Massachusetts': {'Hillary Clinton': 0.600, 'Donald Trump': 0.328},\n 'Michigan': {'Hillary Clinton': 0.473, 'Donald Trump': 0.475},\n 'Minnesota': {'Hillary Clinton': 0.464, 'Donald Trump': 0.449},\n 'Mississippi': {'Hillary Clinton': 0.401, 'Donald Trump': 0.579},\n 'Missouri': {'Hillary Clinton': 0.401, 'Donald Trump': 0.579},\n 'Montana': {'Hillary Clinton': 0.381, 'Donald Trump': 0.562},\n 'Nebraska': {'Hillary Clinton': 0.337, 'Donald Trump': 0.588},\n 'Nevada': {'Hillary Clinton': 0.479, 'Donald Trump': 0.455},\n 'New Hampshire': {'Hillary Clinton': 0.470, 'Donald Trump': 0.466},\n 'New Jersey': {'Hillary Clinton': 0.555, 'Donald Trump': 0.414},\n 'New Mexico': {'Hillary Clinton': 0.483, 'Donald Trump': 0.404},\n 'New York': {'Hillary Clinton': 0.590, 'Donald Trump': 0.365},\n 'North Carolina': {'Hillary Clinton': 0.462, 'Donald Trump': 0.498},\n 'North Dakota': {'Hillary Clinton': 0.272, 'Donald Trump': 0.630},\n 'Ohio': {'Hillary Clinton': 0.436, 'Donald Trump': 0.517},\n 'Oklahoma': {'Hillary Clinton': 0.289, 'Donald Trump': 0.653},\n 'Oregon': {'Hillary Clinton': 0.501, 'Donald Trump': 0.391},\n 'Pennsylvania': {'Hillary Clinton': 0.475, 'Donald Trump': 0.481},\n 'Rhode Island': {'Hillary Clinton': 0.544, 'Donald Trump': 0.389},\n 'South Carolina': {'Hillary Clinton': 0.407, 'Donald Trump': 0.549},\n 'South Dakota': {'Hillary Clinton': 0.317, 'Donald Trump': 0.615},\n 'Tennessee': {'Hillary Clinton': 0.347, 'Donald Trump': 0.607},\n 'Texas': {'Hillary Clinton': 0.432, 'Donald Trump': 0.522},\n 'Utah': {'Hillary Clinton': 0.275, 'Donald Trump': 0.454},\n 'Vermont': {'Hillary Clinton': 0.567, 'Donald Trump': 0.303},\n 'Virginia': {'Hillary Clinton': 0.497, 'Donald Trump': 0.444},\n 'Washington': {'Hillary Clinton': 0.525, 'Donald Trump': 0.368},\n 'West Virginia': {'Hillary Clinton': 0.264, 'Donald Trump': 0.685},\n 'Wisconsin': {'Hillary Clinton': 0.465, 'Donald Trump': 0.472},\n 'Wyoming': {'Hillary Clinton': 0.216, 'Donald Trump': 0.674 }}\n\nfor state in POTUS_CENSUS.keys():\n print('## {} ##'.format(state.upper()))\n state_survey = survey.copy()\n potus_census = {'vote2016': POTUS_CENSUS[state].copy()}\n 
potus_census['vote2016']['Other'] = 1 - potus_census['vote2016']['Hillary Clinton'] - potus_census['vote2016']['Donald Trump']\n output = run_weighting_iteration(state_survey, census=potus_census, weigh_on=['vote2016'], verbose=0)\n potus_weights = output['weights']['vote2016']\n potus_weights = state_survey['vote2016'].astype(str).replace(potus_weights)\n state_survey['weight'] = normalize_weights(state_survey['weight'] * potus_weights)\n state_survey['lv_weight'] = normalize_weights(state_survey['weight'] * state_survey['lv_index'])\n\n options = ['Donald Trump', 'Hillary Clinton', 'Other']\n survey_ = state_survey.loc[state_survey['vote2016'].isin(options)].copy()\n survey_['weight'] = normalize_weights(survey_['weight'])\n survey_['rv_weight'] = normalize_weights(survey_['rv_weight'])\n survey_['lv_weight'] = normalize_weights(survey_['lv_weight'])\n lv_weighted_n = int(np.round(survey_['lv_weight'].apply(lambda w: 1 if w > 1 else w).sum()))\n votes = survey_['vote2016'].value_counts(normalize=True) * survey_.groupby('vote2016')['lv_weight'].mean() * 100\n votes = votes[options] * (100 / votes[options].sum())\n raw_result = potus_census['vote2016']['Hillary Clinton'] - potus_census['vote2016']['Donald Trump']\n print('Raw result: {}'.format(np.round(raw_result * 100, 1)))\n print(votes)\n\n options = ['Joe Biden, the Democrat', 'Donald Trump, the Republican', 'Another candidate', 'Not decided']\n survey_ = state_survey.loc[state_survey['vote_trump_biden'].isin(options)].copy()\n survey_['weight'] = normalize_weights(survey_['weight'])\n survey_['lv_weight'] = normalize_weights(survey_['lv_weight'])\n\n votes = survey_['vote_trump_biden'].value_counts(normalize=True) * survey_.groupby('vote_trump_biden')['lv_weight'].mean() * 100\n votes = votes[options] * (100 / votes[options].sum())\n print(votes)\n print('-')\n print_result(**calc_result(biden_vote=votes['Joe Biden, the Democrat'],\n trump_vote=votes['Donald Trump, the Republican'],\n n=lv_weighted_n))\n print('-')",
"## ALABAMA ##\nRaw result: -27.7\nDonald Trump 62.429369\nHillary Clinton 33.968809\nOther 3.601822\ndtype: float64\nJoe Biden, the Democrat 41.582205\nDonald Trump, the Republican 51.858405\nAnother candidate 3.074509\nNot decided 3.484882\ndtype: float64\n-\nResult Biden -10.3 (80% CI: -17.7 to -2.7) (Weighted N=1232) (raw_moe=3.6pts, margin=8.1pts, sigma=6.3pts) (Biden 12.6% likely to win)\nBiden -10.3 (80% CI: -17.7 to -2.7) (12.6% Biden)\n-\n-\n## ALASKA ##\nRaw result: -14.7\nDonald Trump 51.560631\nHillary Clinton 36.128300\nOther 12.311069\ndtype: float64\nJoe Biden, the Democrat 45.297009\nDonald Trump, the Republican 46.205581\nAnother candidate 4.814070\nNot decided 3.683340\ndtype: float64\n-\nResult Biden -0.9 (80% CI: -8.7 to 7.0) (Weighted N=1285) (raw_moe=3.6pts, margin=8.5pts, sigma=6.6pts) (Biden 46.1% likely to win)\nBiden -0.9 (80% CI: -8.7 to 7.0) (46.1% Biden)\n-\n-\n## ARIZONA ##\nRaw result: -3.5\nDonald Trump 49.025809\nHillary Clinton 44.690858\nOther 6.283333\ndtype: float64\nJoe Biden, the Democrat 49.464968\nDonald Trump, the Republican 43.291311\nAnother candidate 3.660801\nNot decided 3.582920\ndtype: float64\n-\nResult Biden 6.2 (80% CI: -1.4 to 13.8) (Weighted N=1312) (raw_moe=3.5pts, margin=8.2pts, sigma=6.4pts) (Biden 75.3% likely to win)\nBiden 6.2 (80% CI: -1.4 to 13.8) (75.3% Biden)\n-\n-\n## ARKANSAS ##\nRaw result: -26.9\nDonald Trump 60.895242\nHillary Clinton 33.258357\nOther 5.846401\ndtype: float64\nJoe Biden, the Democrat 41.690143\nDonald Trump, the Republican 51.262023\nAnother candidate 3.516458\nNot decided 3.531375\ndtype: float64\n-\nResult Biden -9.6 (80% CI: -17.2 to -1.9) (Weighted N=1244) (raw_moe=3.6pts, margin=8.2pts, sigma=6.4pts) (Biden 14.6% likely to win)\nBiden -9.6 (80% CI: -17.2 to -1.9) (14.6% Biden)\n-\n-\n## CALIFORNIA ##\nRaw result: 30.1\nDonald Trump 31.940637\nHillary Clinton 61.301050\nOther 6.758313\ndtype: float64\nJoe Biden, the Democrat 60.703037\nDonald Trump, the Republican 31.804225\nAnother candidate 3.838530\nNot decided 3.654208\ndtype: float64\n-\nResult Biden 28.9 (80% CI: 21.4 to 36.5) (Weighted N=1357) (raw_moe=3.3pts, margin=8.2pts, sigma=6.4pts) (Biden 99.9% likely to win)\nBiden 28.9 (80% CI: 21.4 to 36.5) (99.9% Biden)\n-\n-\n## COLORADO ##\nRaw result: 4.9\nDonald Trump 43.582304\nHillary Clinton 47.709019\nOther 8.708677\ndtype: float64\nJoe Biden, the Democrat 52.117041\nDonald Trump, the Republican 40.078849\nAnother candidate 4.157091\nNot decided 3.647019\ndtype: float64\n-\nResult Biden 12.0 (80% CI: 4.3 to 19.8) (Weighted N=1333) (raw_moe=3.5pts, margin=8.3pts, sigma=6.5pts) (Biden 90.6% likely to win)\nBiden 12.0 (80% CI: 4.3 to 19.8) (90.6% Biden)\n-\n-\n## CONNECTICUT ##\nRaw result: 13.6\nDonald Trump 41.299968\nHillary Clinton 54.131724\nOther 4.568309\ndtype: float64\nJoe Biden, the Democrat 55.338434\nDonald Trump, the Republican 37.713241\nAnother candidate 3.368254\nNot decided 3.580072\ndtype: float64\n-\nResult Biden 17.6 (80% CI: 10.1 to 25.1) (Weighted N=1336) (raw_moe=3.4pts, margin=8.1pts, sigma=6.3pts) (Biden 97.5% likely to win)\nBiden 17.6 (80% CI: 10.1 to 25.1) (97.5% Biden)\n-\n-\n## DELAWARE ##\nRaw result: 11.4\nDonald Trump 42.064564\nHillary Clinton 52.658052\nOther 5.277383\ndtype: float64\nJoe Biden, the Democrat 54.536825\nDonald Trump, the Republican 38.371588\nAnother candidate 3.501424\nNot decided 3.590163\ndtype: float64\n-\nResult Biden 16.2 (80% CI: 8.6 to 23.7) (Weighted N=1336) (raw_moe=3.4pts, margin=8.1pts, sigma=6.4pts) (Biden 96.4% likely to win)\nBiden 
16.2 (80% CI: 8.6 to 23.7) (96.4% Biden)\n-\n-\n## WASHINGTON DC ##\nRaw result: 88.9\nDonald Trump 1.624085\nHillary Clinton 90.308197\nOther 8.067718\ndtype: float64\nJoe Biden, the Democrat 80.407110\nDonald Trump, the Republican 11.560846\nAnother candidate 4.243166\nNot decided 3.788879\ndtype: float64\n-\nResult Biden 68.8 (80% CI: 61.5 to 76.2) (Weighted N=1165) (raw_moe=2.7pts, margin=8.0pts, sigma=6.2pts) (Biden >99.9% likely to win)\nBiden 68.8 (80% CI: 61.5 to 76.2) (>99.9% Biden)\n-\n-\n## FLORIDA ##\nRaw result: -1.2\nDonald Trump 49.389777\nHillary Clinton 47.365139\nOther 3.245084\ndtype: float64\nJoe Biden, the Democrat 50.465195\nDonald Trump, the Republican 42.936881\nAnother candidate 3.071612\nNot decided 3.526312\ndtype: float64\n-\nResult Biden 7.5 (80% CI: -0.0 to 15.0) (Weighted N=1303) (raw_moe=3.5pts, margin=8.1pts, sigma=6.3pts) (Biden 79.9% likely to win)\nBiden 7.5 (80% CI: -0.0 to 15.0) (79.9% Biden)\n-\n-\n## GEORGIA ##\nRaw result: -5.2\nDonald Trump 51.183638\nHillary Clinton 45.167101\nOther 3.649261\ndtype: float64\nJoe Biden, the Democrat 49.098485\nDonald Trump, the Republican 44.233785\nAnother candidate 3.140654\nNot decided 3.527075\ndtype: float64\n-\nResult Biden 4.9 (80% CI: -2.7 to 12.4) (Weighted N=1297) (raw_moe=3.5pts, margin=8.1pts, sigma=6.3pts) (Biden 70.5% likely to win)\nBiden 4.9 (80% CI: -2.7 to 12.4) (70.5% Biden)\n-\n-\n## HAWAII ##\nRaw result: 32.2\nDonald Trump 30.304501\nHillary Clinton 61.768379\nOther 7.927120\ndtype: float64\nJoe Biden, the Democrat 61.319445\nDonald Trump, the Republican 30.926547\nAnother candidate 4.072553\nNot decided 3.681455\ndtype: float64\n-\nResult Biden 30.4 (80% CI: 22.8 to 38.1) (Weighted N=1359) (raw_moe=3.3pts, margin=8.2pts, sigma=6.4pts) (Biden >99.9% likely to win)\nBiden 30.4 (80% CI: 22.8 to 38.1) (>99.9% Biden)\n-\n-\n## IDAHO ##\nRaw result: -31.8\nDonald Trump 59.529129\nHillary Clinton 27.139226\nOther 13.331645\ndtype: float64\nJoe Biden, the Democrat 39.534945\nDonald Trump, the Republican 51.820386\nAnother candidate 4.971968\nNot decided 3.672701\ndtype: float64\n-\nResult Biden -12.3 (80% CI: -20.2 to -4.3) (Weighted N=1224) (raw_moe=3.6pts, margin=8.5pts, sigma=6.7pts) (Biden 9.7% likely to win)\nBiden -12.3 (80% CI: -20.2 to -4.3) (9.7% Biden)\n-\n-\n## ILLINOIS ##\nRaw result: 17.9\nDonald Trump 38.246383\nHillary Clinton 55.357349\nOther 6.396268\ndtype: float64\nJoe Biden, the Democrat 56.633918\nDonald Trump, the Republican 36.004685\nAnother candidate 3.736882\nNot decided 3.624515\ndtype: float64\n-\nResult Biden 20.6 (80% CI: 13.1 to 28.2) (Weighted N=1347) (raw_moe=3.4pts, margin=8.2pts, sigma=6.4pts) (Biden 99.0% likely to win)\nBiden 20.6 (80% CI: 13.1 to 28.2) (99.0% Biden)\n-\n-\n## INDIANA ##\nRaw result: -13.2\nDonald Trump 51.395128\nHillary Clinton 37.474012\nOther 11.130859\ndtype: float64\nJoe Biden, the Democrat 45.891706\nDonald Trump, the Republican 45.859312\nAnother candidate 4.586525\nNot decided 3.662457\ndtype: float64\n-\nResult Biden 0.0 (80% CI: -7.8 to 7.8) (Weighted N=1291) (raw_moe=3.6pts, margin=8.4pts, sigma=6.6pts) (Biden 50.2% likely to win)\nBiden 0.0 (80% CI: -7.8 to 7.8) (50.2% Biden)\n-\n-\n## IOWA ##\nRaw result: -9.5\nDonald Trump 51.541335\nHillary Clinton 41.267835\nOther 7.190830\ndtype: float64\nJoe Biden, the Democrat 47.408286\nDonald Trump, the Republican 45.177885\nAnother candidate 3.823629\nNot decided 3.590201\ndtype: float64\n-\nResult Biden 2.2 (80% CI: -5.4 to 10.0) (Weighted N=1299) (raw_moe=3.5pts, margin=8.3pts, 
sigma=6.4pts) (Biden 59.8% likely to win)\nBiden 2.2 (80% CI: -5.4 to 10.0) (59.8% Biden)\n-\n-\n## KANSAS ##\nRaw result: -20.6\nDonald Trump 57.023269\nHillary Clinton 35.691614\nOther 7.285117\ndtype: float64\nJoe Biden, the Democrat 43.695977\nDonald Trump, the Republican 48.917939\nAnother candidate 3.814294\nNot decided 3.571789\ndtype: float64\n-\nResult Biden -5.2 (80% CI: -13.0 to 2.5) (Weighted N=1268) (raw_moe=3.6pts, margin=8.3pts, sigma=6.5pts) (Biden 28.4% likely to win)\nBiden -5.2 (80% CI: -13.0 to 2.5) (28.4% Biden)\n-\n-\n## KENTUCKY ##\nRaw result: -29.8\nDonald Trump 62.829221\nHillary Clinton 32.316130\nOther 4.854649\ndtype: float64\nJoe Biden, the Democrat 40.800152\nDonald Trump, the Republican 52.378806\nAnother candidate 3.314828\nNot decided 3.506214\ndtype: float64\n-\nResult Biden -11.6 (80% CI: -19.1 to -4.0) (Weighted N=1231) (raw_moe=3.6pts, margin=8.2pts, sigma=6.4pts) (Biden 10.0% likely to win)\nBiden -11.6 (80% CI: -19.1 to -4.0) (10.0% Biden)\n-\n-\n## LOUISIANA ##\n"
]
],
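The state loop above re-weights the national sample so its recalled 2016 vote matches each state's actual 2016 result before reading off 2020 vote intention. run_weighting_iteration and normalize_weights come from the surveyweights package; for a single weighting variable the idea reduces to dividing each target share by the sample share, roughly as in this simplified, self-contained sketch (the tiny DataFrame and the target shares are invented).

import pandas as pd

sample = pd.DataFrame({'vote2016': ['Hillary Clinton', 'Donald Trump', 'Donald Trump',
                                    'Other', 'Hillary Clinton'],
                       'weight': [1.0, 1.0, 1.0, 1.0, 1.0]})

# hypothetical state target: Clinton 48%, Trump 47%, Other 5%
target = {'Hillary Clinton': 0.48, 'Donald Trump': 0.47, 'Other': 0.05}

sample_share = sample['vote2016'].value_counts(normalize=True)
adjust = {k: target[k] / sample_share[k] for k in target}      # raking factor per answer

sample['weight'] = sample['weight'] * sample['vote2016'].map(adjust)
sample['weight'] = sample['weight'] / sample['weight'].mean()  # keep the mean weight at 1

print(sample)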
[
[
"## State Models (Alt Weights, Post-Hoc)",
"_____no_output_____"
]
],
[
[
"for state in POTUS_CENSUS.keys():\n print('## {} ##'.format(state.upper()))\n state_survey = survey.copy()\n potus_census = {'vote2016': POTUS_CENSUS[state].copy()}\n potus_census['vote2016']['Other'] = 1 - potus_census['vote2016']['Hillary Clinton'] - potus_census['vote2016']['Donald Trump']\n output = run_weighting_iteration(state_survey, census=potus_census, weigh_on=['vote2016'], verbose=0)\n potus_weights = output['weights']['vote2016']\n potus_weights = state_survey['vote2016'].astype(str).replace(potus_weights)\n state_survey['weight'] = normalize_weights(state_survey['weight'] * potus_weights)\n state_survey['lv_weight'] = normalize_weights(state_survey['weight'] * state_survey['lv_index'])\n state_survey['lv_weight_alt'] = state_survey['lv_weight']\n state_survey.loc[(~state_survey['voted2016']) & (state_survey['vote_trump_biden'] == 'Donald Trump, the Republican'), 'lv_weight_alt'] *= 1.662\n state_survey['lv_weight_alt'] = normalize_weights(state_survey['lv_weight_alt'])\n options = ['Joe Biden, the Democrat', 'Donald Trump, the Republican', 'Another candidate', 'Not decided']\n survey_ = state_survey.loc[state_survey['vote_trump_biden'].isin(options)].copy()\n survey_['lv_weight_alt'] = normalize_weights(survey_['lv_weight_alt'])\n votes = survey_['vote_trump_biden'].value_counts(normalize=True) * survey_.groupby('vote_trump_biden')['lv_weight_alt'].mean() * 100\n votes = votes[options] * (100 / votes[options].sum())\n print(votes)\n print('-')\n print_result(**calc_result(biden_vote=votes['Joe Biden, the Democrat'],\n trump_vote=votes['Donald Trump, the Republican'],\n n=lv_weighted_n))\n print('-')",
"## ALABAMA ##\nJoe Biden, the Democrat 40.246172\nDonald Trump, the Republican 53.405190\nAnother candidate 2.975725\nNot decided 3.372913\ndtype: float64\n-\nResult Biden -13.2 (80% CI: -20.7 to -5.6) (Weighted N=1164) (raw_moe=3.7pts, margin=8.1pts, sigma=6.3pts) (Biden 7.0% likely to win)\nBiden -13.2 (80% CI: -20.7 to -5.6) (7.0% Biden)\n-\n-\n## ALASKA ##\nJoe Biden, the Democrat 43.841908\nDonald Trump, the Republican 47.933648\nAnother candidate 4.659425\nNot decided 3.565018\ndtype: float64\n-\nResult Biden -4.1 (80% CI: -12.0 to 3.7) (Weighted N=1164) (raw_moe=3.7pts, margin=8.5pts, sigma=6.6pts) (Biden 32.9% likely to win)\nBiden -4.1 (80% CI: -12.0 to 3.7) (32.9% Biden)\n-\n-\n## ARIZONA ##\nJoe Biden, the Democrat 47.873917\nDonald Trump, the Republican 45.115358\nAnother candidate 3.543051\nNot decided 3.467674\ndtype: float64\n-\nResult Biden 2.8 (80% CI: -5.0 to 10.5) (Weighted N=1164) (raw_moe=3.7pts, margin=8.3pts, sigma=6.5pts) (Biden 61.8% likely to win)\nBiden 2.8 (80% CI: -5.0 to 10.5) (61.8% Biden)\n-\n-\n## ARKANSAS ##\nJoe Biden, the Democrat 40.350906\nDonald Trump, the Republican 52.827662\nAnother candidate 3.403497\nNot decided 3.417935\ndtype: float64\n-\nResult Biden -12.5 (80% CI: -20.2 to -4.8) (Weighted N=1164) (raw_moe=3.7pts, margin=8.2pts, sigma=6.4pts) (Biden 8.4% likely to win)\nBiden -12.5 (80% CI: -20.2 to -4.8) (8.4% Biden)\n-\n-\n## CALIFORNIA ##\nJoe Biden, the Democrat 58.746829\nDonald Trump, the Republican 34.001893\nAnother candidate 3.714830\nNot decided 3.536448\ndtype: float64\n-\nResult Biden 24.7 (80% CI: 17.1 to 32.4) (Weighted N=1164) (raw_moe=3.6pts, margin=8.3pts, sigma=6.4pts) (Biden 99.7% likely to win)\nBiden 24.7 (80% CI: 17.1 to 32.4) (99.7% Biden)\n-\n-\n## COLORADO ##\nJoe Biden, the Democrat 50.440311\nDonald Trump, the Republican 42.006656\nAnother candidate 4.023347\nNot decided 3.529686\ndtype: float64\n-\nResult Biden 8.4 (80% CI: 0.6 to 16.2) (Weighted N=1164) (raw_moe=3.7pts, margin=8.4pts, sigma=6.5pts) (Biden 81.9% likely to win)\nBiden 8.4 (80% CI: 0.6 to 16.2) (81.9% Biden)\n-\n-\n## CONNECTICUT ##\nJoe Biden, the Democrat 53.556371\nDonald Trump, the Republican 39.719060\nAnother candidate 3.259786\nNot decided 3.464783\ndtype: float64\n-\nResult Biden 13.8 (80% CI: 6.2 to 21.4) (Weighted N=1164) (raw_moe=3.7pts, margin=8.2pts, sigma=6.4pts) (Biden 93.8% likely to win)\nBiden 13.8 (80% CI: 6.2 to 21.4) (93.8% Biden)\n-\n-\n## DELAWARE ##\nJoe Biden, the Democrat 52.780938\nDonald Trump, the Republican 40.355798\nAnother candidate 3.388691\nNot decided 3.474573\ndtype: float64\n-\nResult Biden 12.4 (80% CI: 4.8 to 20.1) (Weighted N=1164) (raw_moe=3.7pts, margin=8.2pts, sigma=6.4pts) (Biden 91.4% likely to win)\nBiden 12.4 (80% CI: 4.8 to 20.1) (91.4% Biden)\n-\n-\n## WASHINGTON DC ##\nJoe Biden, the Democrat 77.807488\nDonald Trump, the Republican 14.420149\nAnother candidate 4.105981\nNot decided 3.666381\ndtype: float64\n-\nResult Biden 63.4 (80% CI: 55.9 to 70.8) (Weighted N=1164) (raw_moe=2.9pts, margin=8.0pts, sigma=6.2pts) (Biden >99.9% likely to win)\nBiden 63.4 (80% CI: 55.9 to 70.8) (>99.9% Biden)\n-\n-\n## FLORIDA ##\nJoe Biden, the Democrat 48.841219\nDonald Trump, the Republican 44.773179\nAnother candidate 2.972767\nNot decided 3.412835\ndtype: float64\n-\nResult Biden 4.1 (80% CI: -3.5 to 11.6) (Weighted N=1164) (raw_moe=3.7pts, margin=8.1pts, sigma=6.4pts) (Biden 67.3% likely to win)\nBiden 4.1 (80% CI: -3.5 to 11.6) (67.3% Biden)\n-\n-\n## GEORGIA ##\nJoe Biden, the Democrat 47.518922\nDonald Trump, 
the Republican 46.027858\nAnother candidate 3.039615\nNot decided 3.413605\ndtype: float64\n-\nResult Biden 1.5 (80% CI: -6.1 to 9.1) (Weighted N=1164) (raw_moe=3.7pts, margin=8.2pts, sigma=6.4pts) (Biden 56.6% likely to win)\nBiden 1.5 (80% CI: -6.1 to 9.1) (56.6% Biden)\n-\n-\n## HAWAII ##\nJoe Biden, the Democrat 59.343385\nDonald Trump, the Republican 33.152485\nAnother candidate 3.941313\nNot decided 3.562817\ndtype: float64\n-\nResult Biden 26.2 (80% CI: 18.5 to 33.9) (Weighted N=1164) (raw_moe=3.6pts, margin=8.3pts, sigma=6.5pts) (Biden 99.8% likely to win)\nBiden 26.2 (80% CI: 18.5 to 33.9) (99.8% Biden)\n-\n-\n## IDAHO ##\nJoe Biden, the Democrat 38.266328\nDonald Trump, the Republican 53.366399\nAnother candidate 4.812425\nNot decided 3.554849\ndtype: float64\n-\nResult Biden -15.1 (80% CI: -23.0 to -7.2) (Weighted N=1164) (raw_moe=3.7pts, margin=8.5pts, sigma=6.6pts) (Biden 5.4% likely to win)\nBiden -15.1 (80% CI: -23.0 to -7.2) (5.4% Biden)\n-\n-\n## ILLINOIS ##\nJoe Biden, the Democrat 54.810052\nDonald Trump, the Republican 38.065621\nAnother candidate 3.616538\nNot decided 3.507789\ndtype: float64\n-\nResult Biden 16.7 (80% CI: 9.2 to 24.4) (Weighted N=1164) (raw_moe=3.7pts, margin=8.3pts, sigma=6.5pts) (Biden 96.8% likely to win)\nBiden 16.7 (80% CI: 9.2 to 24.4) (96.8% Biden)\n-\n-\n## INDIANA ##\nJoe Biden, the Democrat 44.417183\nDonald Trump, the Republican 47.598878\nAnother candidate 4.439158\nNot decided 3.544781\ndtype: float64\n-\nResult Biden -3.2 (80% CI: -11.1 to 4.6) (Weighted N=1164) (raw_moe=3.7pts, margin=8.5pts, sigma=6.6pts) (Biden 36.6% likely to win)\nBiden -3.2 (80% CI: -11.1 to 4.6) (36.6% Biden)\n-\n-\n## IOWA ##\nJoe Biden, the Democrat 45.884061\nDonald Trump, the Republican 46.940472\nAnother candidate 3.700695\nNot decided 3.474772\ndtype: float64\n-\nResult Biden -1.1 (80% CI: -8.8 to 6.6) (Weighted N=1164) (raw_moe=3.7pts, margin=8.3pts, sigma=6.5pts) (Biden 45.4% likely to win)\nBiden -1.1 (80% CI: -8.8 to 6.6) (45.4% Biden)\n-\n-\n## KANSAS ##\nJoe Biden, the Democrat 42.292016\nDonald Trump, the Republican 50.559217\nAnother candidate 3.691740\nNot decided 3.457027\ndtype: float64\n-\nResult Biden -8.3 (80% CI: -16.0 to -0.6) (Weighted N=1164) (raw_moe=3.7pts, margin=8.3pts, sigma=6.5pts) (Biden 18.4% likely to win)\nBiden -8.3 (80% CI: -16.0 to -0.6) (18.4% Biden)\n-\n-\n## KENTUCKY ##\nJoe Biden, the Democrat 39.489581\nDonald Trump, the Republican 53.908481\nAnother candidate 3.208350\nNot decided 3.393589\ndtype: float64\n-\nResult Biden -14.4 (80% CI: -22.0 to -6.9) (Weighted N=1164) (raw_moe=3.7pts, margin=8.2pts, sigma=6.4pts) (Biden 5.4% likely to win)\nBiden -14.4 (80% CI: -22.0 to -6.9) (5.4% Biden)\n-\n-\n## LOUISIANA ##\nJoe Biden, the Democrat 42.878442\nDonald Trump, the Republican 50.772145\nAnother candidate 2.965267\nNot decided 3.384145\ndtype: float64\n-\nResult Biden -7.9 (80% CI: -15.5 to -0.3) (Weighted N=1164) (raw_moe=3.7pts, margin=8.1pts, sigma=6.3pts) (Biden 18.9% likely to win)\nBiden -7.9 (80% CI: -15.5 to -0.3) (18.9% Biden)\n-\n-\n## MAINE ##\nJoe Biden, the Democrat 49.880409\nDonald Trump, the Republican 42.848277\nAnother candidate 3.770564\nNot decided 3.500750\ndtype: float64\n-\nResult Biden 7.0 (80% CI: -0.7 to 14.8) (Weighted N=1164) (raw_moe=3.7pts, margin=8.3pts, sigma=6.5pts) (Biden 77.9% likely to win)\nBiden 7.0 (80% CI: -0.7 to 14.8) (77.9% Biden)\n-\n-\n## MARYLAND ##\nJoe Biden, the Democrat 57.603025\nDonald Trump, the Republican 35.341933\nAnother candidate 3.541945\nNot decided 
3.513097\ndtype: float64\n-\nResult Biden 22.3 (80% CI: 14.7 to 29.9) (Weighted N=1164) (raw_moe=3.6pts, margin=8.2pts, sigma=6.4pts) (Biden 99.3% likely to win)\nBiden 22.3 (80% CI: 14.7 to 29.9) (99.3% Biden)\n-\n-\n## MASSACHUSETTS ##\nJoe Biden, the Democrat 57.763003\nDonald Trump, the Republican 34.881729\nAnother candidate 3.813186\nNot decided 3.542082\ndtype: float64\n-\nResult Biden 22.9 (80% CI: 15.2 to 30.6) (Weighted N=1164) (raw_moe=3.6pts, margin=8.3pts, sigma=6.5pts) (Biden 99.4% likely to win)\nBiden 22.9 (80% CI: 15.2 to 30.6) (99.4% Biden)\n-\n-\n## MICHIGAN ##\nJoe Biden, the Democrat 49.024569\nDonald Trump, the Republican 44.162022\nAnother candidate 3.359460\nNot decided 3.453948\ndtype: float64\n-\nResult Biden 4.9 (80% CI: -2.9 to 12.5) (Weighted N=1164) (raw_moe=3.7pts, margin=8.2pts, sigma=6.4pts) (Biden 70.2% likely to win)\nBiden 4.9 (80% CI: -2.9 to 12.5) (70.2% Biden)\n-\n-\n## MINNESOTA ##\n"
]
],
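The alt-weight model above multiplies the likely-voter weight by 1.662 for respondents who did not vote in 2016 but now say they will vote for Trump, then renormalizes. A stripped-down sketch of that conditional reweighting, on an invented four-row frame:

import pandas as pd

df = pd.DataFrame({'voted2016': [True, False, False, True],
                   'vote_trump_biden': ['Joe Biden, the Democrat',
                                        'Donald Trump, the Republican',
                                        'Joe Biden, the Democrat',
                                        'Donald Trump, the Republican'],
                   'lv_weight': [1.0, 1.0, 1.0, 1.0]})

mask = (~df['voted2016']) & (df['vote_trump_biden'] == 'Donald Trump, the Republican')
df.loc[mask, 'lv_weight'] *= 1.662                           # boost 2016 non-voters now backing Trump
df['lv_weight'] = df['lv_weight'] / df['lv_weight'].mean()   # renormalize to mean 1

print(df)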
[
[
"## Senate Models",
"_____no_output_____"
]
],
[
[
"SENATE_STATES = ['Alabama', 'Alaska', 'Arizona', 'Arkansas', 'Colorado', 'Delaware', 'Georgia',\n 'Idaho', 'Illinois', 'Iowa', 'Kansas', 'Kentucky', 'Louisiana', 'Maine',\n 'Massachusetts', 'Michigan', 'Minnesota', 'Mississippi', 'Montana', 'Nebraska',\n 'New Hampshire', 'New Jersey', 'New Mexico', 'North Carolina', 'Oklahoma',\n 'Oregon', 'Rhode Island', 'South Carolina', 'South Dakota', 'Tennessee',\n 'Texas', 'Virginia', 'West Virginia', 'Wyoming']\n\nfor state in SENATE_STATES:\n print('## {} ##'.format(state.upper()))\n state_survey = survey.copy()\n potus_census = {'vote2016': POTUS_CENSUS[state].copy()}\n potus_census['vote2016']['Other'] = 1 - potus_census['vote2016']['Hillary Clinton'] - potus_census['vote2016']['Donald Trump']\n output = run_weighting_iteration(state_survey, census=potus_census, weigh_on=['vote2016'], verbose=0)\n potus_weights = output['weights']['vote2016']\n potus_weights = state_survey['vote2016'].astype(str).replace(potus_weights)\n state_survey['weight'] = normalize_weights(state_survey['weight'] * potus_weights)\n state_survey['lv_weight'] = normalize_weights(state_survey['weight'] * state_survey['lv_index'])\n\n options = ['A Democratic candidate', 'A Republican candidate', 'Another candidate', 'Not decided']\n survey_ = state_survey.loc[state_survey['vote_senate'].isin(options)].copy()\n survey_['weight'] = normalize_weights(survey_['weight'])\n survey_['lv_weight'] = normalize_weights(survey_['lv_weight'])\n\n votes = survey_['vote_senate'].value_counts(normalize=True) * survey_.groupby('vote_senate')['lv_weight'].mean() * 100\n votes = votes[options] * (100 / votes[options].sum())\n print(votes)\n print('-')\n print_result_sen(**calc_result_sen(dem_vote=votes['A Democratic candidate'],\n rep_vote=votes['A Republican candidate'],\n n=lv_weighted_n))\n print('-')",
"## ALABAMA ##\nA Democratic candidate 37.243159\nA Republican candidate 51.746655\nAnother candidate 1.699965\nNot decided 9.310221\ndtype: float64\n-\nResult Dem Sen -14.5 (80% CI: -24.9 to -4.0) (Weighted N=1164) (raw_moe=3.7pts, margin=11.2pts, sigma=8.8pts) (Dem Sen 12.2% likely to win)\nDem -14.5 (80% CI: -24.9 to -4.0) (12.2% Dem)\n-\n-\n## ALASKA ##\nA Democratic candidate 40.627153\nA Republican candidate 47.731694\nAnother candidate 2.004306\nNot decided 9.636847\ndtype: float64\n-\nResult Dem Sen -7.1 (80% CI: -17.8 to 3.5) (Weighted N=1164) (raw_moe=3.7pts, margin=11.5pts, sigma=9.0pts) (Dem Sen 28.7% likely to win)\nDem -7.1 (80% CI: -17.8 to 3.5) (28.7% Dem)\n-\n-\n## ARIZONA ##\nA Democratic candidate 44.940957\nA Republican candidate 44.005564\nAnother candidate 1.856676\nNot decided 9.196803\ndtype: float64\n-\nResult Dem Sen 0.9 (80% CI: -9.4 to 11.4) (Weighted N=1164) (raw_moe=3.7pts, margin=11.3pts, sigma=8.8pts) (Dem Sen 53.0% likely to win)\nDem 0.9 (80% CI: -9.4 to 11.4) (53.0% Dem)\n-\n-\n## ARKANSAS ##\nA Democratic candidate 37.267341\nA Republican candidate 51.543264\nAnother candidate 1.769358\nNot decided 9.420038\ndtype: float64\n-\nResult Dem Sen -14.3 (80% CI: -24.7 to -3.7) (Weighted N=1164) (raw_moe=3.7pts, margin=11.3pts, sigma=8.8pts) (Dem Sen 12.8% likely to win)\nDem -14.3 (80% CI: -24.7 to -3.7) (12.8% Dem)\n-\n-\n## COLORADO ##\nA Democratic candidate 47.532175\nA Republican candidate 41.275127\nAnother candidate 1.957387\nNot decided 9.235311\ndtype: float64\n-\nResult Dem Sen 6.3 (80% CI: -4.3 to 16.8) (Weighted N=1164) (raw_moe=3.7pts, margin=11.3pts, sigma=8.8pts) (Dem Sen 69.2% likely to win)\nDem 6.3 (80% CI: -4.3 to 16.8) (69.2% Dem)\n-\n-\n## DELAWARE ##\nA Democratic candidate 50.001303\nA Republican candidate 39.139307\nAnother candidate 1.874066\nNot decided 8.985324\ndtype: float64\n-\nResult Dem Sen 10.9 (80% CI: 0.5 to 21.3) (Weighted N=1164) (raw_moe=3.7pts, margin=11.2pts, sigma=8.7pts) (Dem Sen 81.1% likely to win)\nDem 10.9 (80% CI: 0.5 to 21.3) (81.1% Dem)\n-\n-\n## GEORGIA ##\nA Democratic candidate 44.639146\nA Republican candidate 44.512385\nAnother candidate 1.772305\nNot decided 9.076164\ndtype: float64\n-\nResult Dem Sen 0.1 (80% CI: -10.2 to 10.6) (Weighted N=1164) (raw_moe=3.7pts, margin=11.2pts, sigma=8.7pts) (Dem Sen 50.5% likely to win)\nDem 0.1 (80% CI: -10.2 to 10.6) (50.5% Dem)\n-\n-\n## IDAHO ##\nA Democratic candidate 34.841580\nA Republican candidate 53.305284\nAnother candidate 1.980606\nNot decided 9.872530\ndtype: float64\n-\nResult Dem Sen -18.5 (80% CI: -29.3 to -7.8) (Weighted N=1164) (raw_moe=3.7pts, margin=11.6pts, sigma=9.0pts) (Dem Sen 7.4% likely to win)\nDem -18.5 (80% CI: -29.3 to -7.8) (7.4% Dem)\n-\n-\n## ILLINOIS ##\nA Democratic candidate 52.079625\nA Republican candidate 37.016897\nAnother candidate 1.928842\nNot decided 8.974636\ndtype: float64\n-\nResult Dem Sen 15.1 (80% CI: 4.7 to 25.5) (Weighted N=1164) (raw_moe=3.7pts, margin=11.2pts, sigma=8.7pts) (Dem Sen 88.9% likely to win)\nDem 15.1 (80% CI: 4.7 to 25.5) (88.9% Dem)\n-\n-\n## IOWA ##\nA Democratic candidate 42.877245\nA Republican candidate 45.949897\nAnother candidate 1.865062\nNot decided 9.307796\ndtype: float64\n-\nResult Dem Sen -3.1 (80% CI: -13.6 to 7.5) (Weighted N=1164) (raw_moe=3.7pts, margin=11.3pts, sigma=8.8pts) (Dem Sen 40.3% likely to win)\nDem -3.1 (80% CI: -13.6 to 7.5) (40.3% Dem)\n-\n-\n## KANSAS ##\nA Democratic candidate 39.196681\nA Republican candidate 49.540885\nAnother candidate 1.832563\nNot decided 
9.429871\ndtype: float64\n-\nResult Dem Sen -10.3 (80% CI: -20.8 to 0.3) (Weighted N=1164) (raw_moe=3.7pts, margin=11.4pts, sigma=8.9pts) (Dem Sen 20.7% likely to win)\nDem -10.3 (80% CI: -20.8 to 0.3) (20.7% Dem)\n-\n-\n## KENTUCKY ##\nA Democratic candidate 36.429020\nA Republican candidate 52.442459\nAnother candidate 1.730684\nNot decided 9.397837\ndtype: float64\n-\nResult Dem Sen -16.0 (80% CI: -26.5 to -5.5) (Weighted N=1164) (raw_moe=3.7pts, margin=11.3pts, sigma=8.8pts) (Dem Sen 10.1% likely to win)\nDem -16.0 (80% CI: -26.5 to -5.5) (10.1% Dem)\n-\n-\n## LOUISIANA ##\nA Democratic candidate 39.919507\nA Republican candidate 49.142924\nAnother candidate 1.720718\nNot decided 9.216851\ndtype: float64\n-\nResult Dem Sen -9.2 (80% CI: -19.7 to 1.1) (Weighted N=1164) (raw_moe=3.7pts, margin=11.2pts, sigma=8.8pts) (Dem Sen 22.4% likely to win)\nDem -9.2 (80% CI: -19.7 to 1.1) (22.4% Dem)\n-\n-\n## MAINE ##\nA Democratic candidate 46.979619\nA Republican candidate 41.921812\nAnother candidate 1.911116\nNot decided 9.187452\ndtype: float64\n-\nResult Dem Sen 5.1 (80% CI: -5.6 to 15.5) (Weighted N=1164) (raw_moe=3.7pts, margin=11.3pts, sigma=8.8pts) (Dem Sen 65.6% likely to win)\nDem 5.1 (80% CI: -5.6 to 15.5) (65.6% Dem)\n-\n-\n## MASSACHUSETTS ##\nA Democratic candidate 55.130056\nA Republican candidate 33.959808\nAnother candidate 1.986903\nNot decided 8.923233\ndtype: float64\n-\nResult Dem Sen 21.2 (80% CI: 10.7 to 31.6) (Weighted N=1164) (raw_moe=3.6pts, margin=11.2pts, sigma=8.7pts) (Dem Sen 95.5% likely to win)\nDem 21.2 (80% CI: 10.7 to 31.6) (95.5% Dem)\n-\n-\n## MICHIGAN ##\nA Democratic candidate 46.143288\nA Republican candidate 42.911608\nAnother candidate 1.836853\nNot decided 9.108251\ndtype: float64\n-\nResult Dem Sen 3.2 (80% CI: -7.2 to 13.7) (Weighted N=1164) (raw_moe=3.7pts, margin=11.2pts, sigma=8.8pts) (Dem Sen 60.5% likely to win)\nDem 3.2 (80% CI: -7.2 to 13.7) (60.5% Dem)\n-\n-\n## MINNESOTA ##\nA Democratic candidate 46.382774\nA Republican candidate 42.390116\nAnother candidate 1.949715\nNot decided 9.277395\ndtype: float64\n-\nResult Dem Sen 4.0 (80% CI: -6.6 to 14.5) (Weighted N=1164) (raw_moe=3.7pts, margin=11.4pts, sigma=8.9pts) (Dem Sen 62.3% likely to win)\nDem 4.0 (80% CI: -6.6 to 14.5) (62.3% Dem)\n-\n-\n## MISSISSIPPI ##\nA Democratic candidate 40.652436\nA Republican candidate 48.539188\nAnother candidate 1.684344\nNot decided 9.124031\ndtype: float64\n-\nResult Dem Sen -7.9 (80% CI: -18.3 to 2.5) (Weighted N=1164) (raw_moe=3.7pts, margin=11.2pts, sigma=8.7pts) (Dem Sen 26.0% likely to win)\nDem -7.9 (80% CI: -18.3 to 2.5) (26.0% Dem)\n-\n-\n## MONTANA ##\nA Democratic candidate 40.176366\nA Republican candidate 48.705292\nAnother candidate 1.794946\nNot decided 9.323396\ndtype: float64\n-\nResult Dem Sen -8.5 (80% CI: -19.1 to 2.0) (Weighted N=1164) (raw_moe=3.7pts, margin=11.3pts, sigma=8.8pts) (Dem Sen 24.8% likely to win)\nDem -8.5 (80% CI: -19.1 to 2.0) (24.8% Dem)\n-\n-\n## NEBRASKA ##\nA Democratic candidate 37.681466\nA Republican candidate 50.998075\nAnother candidate 1.827321\nNot decided 9.493138\ndtype: float64\n-\nResult Dem Sen -13.3 (80% CI: -23.9 to -2.8) (Weighted N=1164) (raw_moe=3.7pts, margin=11.4pts, sigma=8.9pts) (Dem Sen 14.2% likely to win)\nDem -13.3 (80% CI: -23.9 to -2.8) (14.2% Dem)\n-\n-\n## NEW HAMPSHIRE ##\nA Democratic candidate 46.230763\nA Republican candidate 42.727965\nAnother candidate 1.875446\nNot decided 9.165826\ndtype: float64\n-\nResult Dem Sen 3.5 (80% CI: -6.9 to 14.0) (Weighted N=1164) (raw_moe=3.7pts, 
margin=11.3pts, sigma=8.8pts) (Dem Sen 61.0% likely to win)\nDem 3.5 (80% CI: -6.9 to 14.0) (61.0% Dem)\n-\n-\n## NEW JERSEY ##\nA Democratic candidate 51.080987\nA Republican candidate 38.254872\nAnother candidate 1.818585\nNot decided 8.845556\ndtype: float64\n-\nResult Dem Sen 12.8 (80% CI: 2.5 to 23.1) (Weighted N=1164) (raw_moe=3.7pts, margin=11.1pts, sigma=8.7pts) (Dem Sen 85.2% likely to win)\nDem 12.8 (80% CI: 2.5 to 23.1) (85.2% Dem)\n-\n-\n## NEW MEXICO ##\nA Democratic candidate 48.287843\nA Republican candidate 40.311042\nAnother candidate 2.051326\nNot decided 9.349788\ndtype: float64\n-\nResult Dem Sen 8.0 (80% CI: -2.6 to 18.6) (Weighted N=1164) (raw_moe=3.7pts, margin=11.4pts, sigma=8.9pts) (Dem Sen 73.7% likely to win)\nDem 8.0 (80% CI: -2.6 to 18.6) (73.7% Dem)\n-\n-\n## NORTH CAROLINA ##\nA Democratic candidate 45.129845\nA Republican candidate 44.000173\nAnother candidate 1.789510\nNot decided 9.080472\ndtype: float64\n-\nResult Dem Sen 1.1 (80% CI: -9.3 to 11.5) (Weighted N=1164) (raw_moe=3.7pts, margin=11.2pts, sigma=8.7pts) (Dem Sen 53.5% likely to win)\nDem 1.1 (80% CI: -9.3 to 11.5) (53.5% Dem)\n-\n-\n## OKLAHOMA ##\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a25536727405615e8dd2159fc4e012235c0ef2e
| 38,872 |
ipynb
|
Jupyter Notebook
|
Live-Twitter-Sentiments-for-Cryptocurrencies.ipynb
|
muntisa/Twitter-Sentiment-Analysis-for-CoinMarketCap
|
f5249b158790b4df49d1a57c69aee777362e58d7
|
[
"Apache-2.0"
] | 11 |
2018-11-04T16:58:08.000Z
|
2022-01-29T12:54:05.000Z
|
Live-Twitter-Sentiments-for-Cryptocurrencies.ipynb
|
ult-processor/Twitter-Sentiment-Analysis-for-CoinMarketCap
|
f5249b158790b4df49d1a57c69aee777362e58d7
|
[
"Apache-2.0"
] | null | null | null |
Live-Twitter-Sentiments-for-Cryptocurrencies.ipynb
|
ult-processor/Twitter-Sentiment-Analysis-for-CoinMarketCap
|
f5249b158790b4df49d1a57c69aee777362e58d7
|
[
"Apache-2.0"
] | 4 |
2020-10-29T12:23:17.000Z
|
2021-12-29T16:03:49.000Z
| 142.911765 | 28,848 | 0.860697 |
[
[
[
"# Live Twitter Sentiments for Cryptocurrencies\n\nPlot the evolution in time of the tweets sentiment for a cryptocurrency. We will use the *tweepy*'s streaming to see the live evolution of the Twitter sentiments for the cryptocurrencies.\n\n* *Inputs*: currency keywords to seach in Twitter, number of tweets to analyse the sentiement, plot update interval in seconds (default = 1.0 seconds).\n* *Output*: Plot with sentiment analysis and the mean in time for a specific cryptocurrency.\n* *Note*: The free Twitter plan lets you download *100 Tweets per search*, and you can search Tweets from the previous seven days. *Please check the limits of getting tweets per day or month before to use this script!*\n\n### Requirements\n* *Language*: Python 3.*\n* *Dependencies*: tweepy = retrieve tweets using APIs; json = handling the API results, textblob = text operations and sentiment analysis, re = text processing, matplotlib = plots, numpy = numerical calculations, IPython = interactive plots into notebooks\n* *Other tools*: Textblog Corpora for text processing: *python -m textblob.download_corpora*\n\n## How to use\nComplete your twitter API credential and your crypto keywords, number of tweets and run the entire notebook.\n\n## Step 1: Import the python dependencies",
"_____no_output_____"
]
],
[
[
"import time, json, re\nfrom tweepy import Stream\nfrom tweepy import OAuthHandler\nfrom tweepy.streaming import StreamListener \nfrom textblob import TextBlob\nimport matplotlib.pyplot as plt \nimport numpy as np\nfrom IPython.display import clear_output\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Step 2: Define your data\n\nYou need to define the keywords, number of tweets, the update interval, and your twitter API keys. Your can define the key here or read them from a JSON file.",
"_____no_output_____"
]
],
[
[
"# YOUR preference (to complete)\nkeywords = [\"Bitcoin\", 'BTC'] # a set of keywords for a crypto\nnoTweets = 10 # number of tweets/connections\nsecUpdate = 1.0 # update interval in seconds \n\n# YOUR Twitter API information (to complete)\n# if you have a local file with your info, ommit these lines\nCONSUMER_KEY = 'YOUR DATA'\nCONSUMER_SECRET = 'YOUR DATA'\nACCESS_TOKEN = 'YOUR DATA'\nACCESS_SECRET = 'YOUR DATA'\n\n# Setting a JSON of your credentials (to complete)\ncreds = {\"CONSUMER_KEY\": CONSUMER_KEY, \"CONSUMER_SECRET\": CONSUMER_SECRET,\n \"ACCESS_TOKEN\": ACCESS_TOKEN, \"ACCESS_SECRET\": ACCESS_SECRET}\n\n# If you didnt define above, load credentials from json file\n# (overwrite creds with data from file if available)\ntry:\n print('-> Reading Twitter API credentials from file ... ')\n with open(\"twitter_credentials.json\", \"r\") as file:\n creds = json.load(file)\n print('Done!')\nexcept:\n print('! There is no twitter API credential file! Using the information you defined above!')",
"-> Reading Twitter API credentials from file ... \nDone!\n"
]
],
[
[
"## Step 3: Define a custom class for Twitter streaming\n\nWe will use some variables as globals in order to input parameters from the main code (currency keywords to seach in Twitter, number of tweets to analyse the sentiement, plot refresh time) and to fill list with tweets sentiment, times of the sentiment analysis and means of the sentiments at a specific time. These list will be used to interactivelly plot the evolution of the sentiment and the mean of sentiments.",
"_____no_output_____"
]
],
[
[
"class listener(StreamListener):\n def on_data(self,data):\n global initime # to calculate the time of analysis\n global inidatetime # to print the initial datetime\n global count # counting the tweets\n global t # list with the time of sentiment analysis\n global sent # list with sentiments at moments t\n global sentMeans # list of sentiment means at different time\n \n global keywords # external - list with keywords for a crypto\n global noTweets # external - number of tweets to get with your twitter API \n global secUpdate # external - number of seconds to update the plot\n \n # update the list for analysis time\n currTime = int(time.time()-initime)\n t.append(currTime)\n \n # get the tweet data\n all_data=json.loads(data)\n \n # encode to unicode for different types of characters\n tweet=all_data[\"text\"].encode(\"utf-8\")\n \n # remove URLs from tweets\n tweet = re.sub(r\"http\\S+\", \"\", str(tweet))\n # remove strange characters from the tweet\n tweet=\" \".join(re.findall(\"[a-zA-Z]+\", str(tweet)))\n \n # strip the spaces from the tweet\n blob=TextBlob(tweet.strip())\n \n # count the tweets\n count=count+1\n \n # update the list for sentiments and the means at different time \n sent.append(blob.sentiment.polarity)\n sentMeans.append(np.mean(sent))\n \n # Plotting sentiment analysis in time for a cryptocurrency\n # clear the plot\n clear_output(wait=True)\n # set axis, labels\n plt.xlabel('Time')\n plt.ylabel('Twitter sentiment')\n # set grid\n plt.grid()\n \n # print the current mean of sentiments\n print('Live Twitter sentiment analysis for cryptocurrencies')\n print('**********************************************************************')\n print('From: '+str(inidatetime)+' To: '+str(time.ctime()))\n print('Sentiment Mean for '+str(keywords)+': '+str(np.mean(sent)))\n \n # plot sentiments and means in time\n plt.plot(t,sent, t,sentMeans)\n \n # add legend\n plt.legend(['Sentiment', 'Sentiment Mean'],loc='center left', bbox_to_anchor=(1, 0.5))\n # plotting\n plt.show()\n \n # wait for update\n plt.pause(secUpdate) # wait 1 sec!\n \n # if we have the number of tweets, end the script\n if count==noTweets:\n return False\n else:\n return True\n \n def on_error(self,status):\n print(status)",
"_____no_output_____"
]
],
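Inside on_data each tweet is stripped of URLs and non-alphabetic characters with regular expressions and then scored with TextBlob's polarity. The same cleaning-and-scoring step in isolation, on an invented tweet string (requires the textblob package and its downloaded corpora):

import re
from textblob import TextBlob

tweet = "Bitcoin is looking great today! https://example.com #BTC"

tweet = re.sub(r"http\S+", "", tweet)                  # drop URLs
tweet = " ".join(re.findall("[a-zA-Z]+", tweet))       # keep alphabetic tokens only

polarity = TextBlob(tweet.strip()).sentiment.polarity  # -1.0 (negative) .. +1.0 (positive)
print(tweet, polarity)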
[
[
"## Step 4: Run the Twitter stream for sentiment analysis\n\nInitialize all the variables and use the tweets stream for sentiment analysis plotting:",
"_____no_output_____"
]
],
[
[
"# Define external variables to be used inside the streaming class\nt = [0] # list with time\nsent = [0] # list with tweets sentiment in time\nsentMeans = [0] # list with means of sentiment in time\ncount=0 # curent number of tweet\ninitime=time.time() # to calculate the time\ninidatetime = time.ctime() # initial date time in readable format\n\n# setup the twitter screaming\nauth=OAuthHandler(creds['CONSUMER_KEY'],creds['CONSUMER_SECRET'])\nauth.set_access_token(creds['ACCESS_TOKEN'],creds['ACCESS_SECRET'])\n\n# start the stream with tweets using your keyworks\ntwitterStream = Stream(auth, listener(count))\ntwitterStream.filter(track=keywords)",
"_____no_output_____"
]
],
[
[
"### Hint\nYou can use this notebook for any twitter search, not limited to the cryptocurrencies!\n\nHf!\n\n2018@muntisa",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a2553783be0b441a70fc302ab8921da861d2a65
| 34,851 |
ipynb
|
Jupyter Notebook
|
guided_examples/cnn_mnist.ipynb
|
akram1026/ieee_ml
|
3d351e3e1162b0a5a0e153a647d26911365fc873
|
[
"MIT"
] | null | null | null |
guided_examples/cnn_mnist.ipynb
|
akram1026/ieee_ml
|
3d351e3e1162b0a5a0e153a647d26911365fc873
|
[
"MIT"
] | null | null | null |
guided_examples/cnn_mnist.ipynb
|
akram1026/ieee_ml
|
3d351e3e1162b0a5a0e153a647d26911365fc873
|
[
"MIT"
] | null | null | null | 109.59434 | 2,207 | 0.68219 |
[
[
[
"# CNN MNIST",
"_____no_output_____"
]
],
[
[
"#importing functions from python3 to python2\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n#importing numpy and tensorflow\nimport numpy as np\nimport tensorflow as tf\n\n#ignore all the warnings and don't show them in the notebook\nimport warnings\nwarnings.filterwarnings('ignore')\n\n#treshold on what messages are to be logged\ntf.logging.set_verbosity(tf.logging.INFO)\n#importing debug library\nfrom tensorflow.python import debug as tf_debug",
"_____no_output_____"
]
],
[
[
"## Debugger\n\n### Uncomment the below line and execute the code to run the debugger.\n\n### Go to the link once you start execution \t\t\thttp://localhost:6006/",
"_____no_output_____"
]
],
[
[
"#Uncomment the below line to run the debugger\n#Add monitor=[hook] as a parameter to the estimators below\nhook = tf_debug.TensorBoardDebugHook(\"localhost:6064\")\n#RPCs have a max payload size of 4194304 bytes (by default). \n#Your program is raising an exception when debugger logic (within TensorBoardDebugHook) tries to send the graph\n#of the model to TensorBoard. Perhaps the graph is large.For now, it is OK to prevent the debugger from \n#sending the graph (and python tracebacks) to TensorBoard? Via setting send_traceback_and_source_code to False.",
"_____no_output_____"
],
[
"def cnn_model_fn(features, labels, mode):\n \"\"\"Model function for CNN.\"\"\"\n # Input Layer\n # Reshape X to 4-D tensor: [batch_size, width, height, channels]\n # MNIST images are 28x28 pixels, and have one color channel\n input_layer = tf.reshape(features[\"x\"], [-1, 28, 28, 1])\n\n # Convolutional Layer #1\n # Computes 32 features using a 5x5 filter with ReLU activation.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 28, 28, 1]\n # Output Tensor Shape: [batch_size, 28, 28, 32]\n conv1 = tf.layers.conv2d(\n inputs=input_layer,\n filters=32,\n kernel_size=[5, 5],\n padding=\"same\",\n activation=tf.nn.relu)\n\n # Pooling Layer #1\n # First max pooling layer with a 2x2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 28, 28, 32]\n # Output Tensor Shape: [batch_size, 14, 14, 32]\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)\n\n # Convolutional Layer #2\n # Computes 64 features using a 5x5 filter.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 14, 14, 32]\n # Output Tensor Shape: [batch_size, 14, 14, 64]\n conv2 = tf.layers.conv2d(\n inputs=pool1,\n filters=64,\n kernel_size=[5, 5],\n padding=\"same\",\n activation=tf.nn.relu)\n\n # Pooling Layer #2\n # Second max pooling layer with a 2x2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 14, 14, 64]\n # Output Tensor Shape: [batch_size, 7, 7, 64]\n pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n\n # Flatten tensor into a batch of vectors\n # Input Tensor Shape: [batch_size, 7, 7, 64]\n # Output Tensor Shape: [batch_size, 7 * 7 * 64]\n pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])\n\n # Dense Layer\n # Densely connected layer with 1024 neurons\n # Input Tensor Shape: [batch_size, 7 * 7 * 64]\n # Output Tensor Shape: [batch_size, 1024]\n dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)\n\n # Add dropout operation; 0.6 probability that element will be kept\n dropout = tf.layers.dropout(\n inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)\n\n # Logits layer\n # Input Tensor Shape: [batch_size, 1024]\n # Output Tensor Shape: [batch_size, 10]\n logits = tf.layers.dense(inputs=dropout, units=10)\n\n predictions = {\n # Generate predictions (for PREDICT and EVAL mode)\n \"classes\": tf.argmax(input=logits, axis=1),\n # Add `softmax_tensor` to the graph. It is used for PREDICT and by the\n # `logging_hook`.\n \"probabilities\": tf.nn.softmax(logits, name=\"softmax_tensor\")\n }\n #if predict mode, run the estimator and return the values\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n # Calculate Loss (for both TRAIN and EVAL modes)\n loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\n\n # Configure the Training Op (for TRAIN mode)\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)\n train_op = optimizer.minimize(\n loss=loss,\n global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n # Add evaluation metrics (for EVAL mode)\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels, predictions=predictions[\"classes\"])}\n return tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)",
"_____no_output_____"
],
[
"def main(unused_argv):\n # Load training data\n mnist = tf.contrib.learn.datasets.load_dataset(\"mnist\")\n train_data = mnist.train.images # Returns np.array\n #load the the train data labels\n train_labels = np.asarray(mnist.train.labels, dtype=np.int32)\n #load the test data\n eval_data = mnist.test.images # Returns np.array\n #load the test data labels\n eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)\n \n # Create the Estimator\n mnist_classifier = tf.estimator.Estimator(\n model_fn=cnn_model_fn, model_dir=\"/tmp/mnist_convnet_model\")\n\n # Set up logging for predictions\n # Log the values in the \"Softmax\" tensor with label \"probabilities\"\n tensors_to_log = {\"probabilities\": \"softmax_tensor\"}\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=50)\n\n # Train the model\n train_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": train_data},\n y=train_labels,\n batch_size=100,\n num_epochs=None,\n shuffle=True)\n mnist_classifier.train(\n input_fn=train_input_fn,\n steps=200,\n hooks=[hook]) #[logging_hook]\n\n # Evaluate the model and print results\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": eval_data},\n y=eval_labels,\n num_epochs=1,\n shuffle=False)\n eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)\n print(eval_results)\n",
"_____no_output_____"
],
[
"#Run the model, steps set to 200 instead of 20000 as the execution time was large\n#Changing steps back to 20000 in model training results in an accuracy of 97%\nif __name__ == \"__main__\":\n tf.app.run()\n",
"Extracting MNIST-data\\train-images-idx3-ubyte.gz\nExtracting MNIST-data\\train-labels-idx1-ubyte.gz\nExtracting MNIST-data\\t10k-images-idx3-ubyte.gz\nExtracting MNIST-data\\t10k-labels-idx1-ubyte.gz\nINFO:tensorflow:Using default config.\nINFO:tensorflow:Using config: {'_model_dir': '/tmp/mnist_convnet_model', '_tf_random_seed': None, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': None, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 100, '_train_distribute': None, '_device_fn': None, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x000002BE8826BEB8>, '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\nINFO:tensorflow:Calling model_fn.\nINFO:tensorflow:Done calling model_fn.\nINFO:tensorflow:Create CheckpointSaverHook.\nINFO:tensorflow:Graph was finalized.\nINFO:tensorflow:Restoring parameters from /tmp/mnist_convnet_model\\model.ckpt-200\nINFO:tensorflow:Running local_init_op.\nINFO:tensorflow:Done running local_init_op.\nINFO:tensorflow:Saving checkpoints for 200 into /tmp/mnist_convnet_model\\model.ckpt.\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4a25575275f5c26c316cd27732124017da805a29
| 114,926 |
ipynb
|
Jupyter Notebook
|
assignments/.ipynb_checkpoints/ad470-assignment-7-checkpoint.ipynb
|
RealDogDad/ad450-data-science
|
3108610ecc2f902286fca49bf7af91e3203872ed
|
[
"Apache-2.0"
] | 1 |
2021-06-03T01:40:17.000Z
|
2021-06-03T01:40:17.000Z
|
assignments/.ipynb_checkpoints/ad470-assignment-7-checkpoint.ipynb
|
RealDogDad/exoplanetary-extrapolation
|
3108610ecc2f902286fca49bf7af91e3203872ed
|
[
"Apache-2.0"
] | null | null | null |
assignments/.ipynb_checkpoints/ad470-assignment-7-checkpoint.ipynb
|
RealDogDad/exoplanetary-extrapolation
|
3108610ecc2f902286fca49bf7af91e3203872ed
|
[
"Apache-2.0"
] | 2 |
2021-01-27T04:21:52.000Z
|
2021-05-06T17:12:31.000Z
| 91.428799 | 24,416 | 0.770183 |
[
[
[
"### AD470 - Module 7 Introduction to Deep LearningProgramming Assignment\n#### Andrew Boyer\n#### Brandan Owens",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport scipy.io\nfrom sklearn.preprocessing import StandardScaler\n\nimport tensorflow\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom sklearn.model_selection import train_test_split",
"_____no_output_____"
]
],
[
[
"#### Q.1(a) Use pandas to read in the dataset “Churn_Modelling.csv”",
"_____no_output_____"
]
],
[
[
"churn_df = pd.read_csv(\"../dataFiles/Churn_Modelling.csv\")\nchurn_df.columns",
"_____no_output_____"
]
],
[
[
"#### (b) Create the following bar plots.",
"_____no_output_____"
]
],
[
[
"sns.countplot(data = churn_df, x = 'Exited' )",
"_____no_output_____"
],
[
"sns.countplot(data = churn_df , x = 'Geography', hue = 'Exited')",
"_____no_output_____"
],
[
"sns.barplot(data=churn_df , x= 'Geography', y= 'Balance')",
"_____no_output_____"
]
],
[
[
"#### (c) From the dataframe, find the percentage of people who exited, and the percentage of people who did not exit.",
"_____no_output_____"
]
],
[
[
"churn_df['Exited'].value_counts()/churn_df['Exited'].count()*100",
"_____no_output_____"
]
],
[
[
"#### (d) Check for any missing values in the dataframe.",
"_____no_output_____"
]
],
[
[
"churn_df.isnull().values.any()",
"_____no_output_____"
]
],
[
[
"#### (e) Define X and y",
"_____no_output_____"
]
],
[
[
"X = churn_df.drop(['RowNumber', 'CustomerId', 'Surname', 'Exited'], axis=1)\ny = churn_df['Exited']",
"_____no_output_____"
]
],
[
[
"#### (f) Get dummies for all categorical variables of X, remember to set drop_first = True.",
"_____no_output_____"
]
],
[
[
"X = pd.get_dummies(X, drop_first = True)\nX",
"_____no_output_____"
]
],
[
[
"#### (g) Split the dataset into training set and test set. test_size=0.2, random_state=0",
"_____no_output_____"
]
],
[
[
"X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)",
"_____no_output_____"
]
],
[
[
"#### (h) Use the following codes to do the feature scaling on the training and test sets. (Standardize all numerical variables by subtracting the means and dividing each variable by its standard deviation.)",
"_____no_output_____"
]
],
[
[
"sc_x = StandardScaler()\nX_train = pd.DataFrame(sc_x.fit_transform(X_train), columns=X.columns.values)\nX_test = pd.DataFrame(sc_x.transform(X_test), columns=X.columns.values)",
"_____no_output_____"
]
],
[
[
"#### (i) Build a 4-layer neural network.",
"_____no_output_____"
]
],
[
[
"#model = keras.Sequential([\n# layers.Dense(6, activation='relu', input_shape=[11]),\n# layers.Dense(12, activation='relu'),\n# layers.Dense(24, activation='relu'),\n# layers.Dense(1, activation='sigmoid'),\n#])\n\nmodel = Sequential()\nmodel.add(Dense(6, input_shape=(11,), activation='relu'))\nmodel.add(Dense(12, activation='relu'))\nmodel.add(Dense(24, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\n",
"_____no_output_____"
],
[
"model.summary()",
"Model: \"sequential\"\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\ndense (Dense) (None, 6) 72 \n_________________________________________________________________\ndense_1 (Dense) (None, 12) 84 \n_________________________________________________________________\ndense_2 (Dense) (None, 24) 312 \n_________________________________________________________________\ndense_3 (Dense) (None, 1) 25 \n=================================================================\nTotal params: 493\nTrainable params: 493\nNon-trainable params: 0\n_________________________________________________________________\n"
]
],
[
[
"#### (j) Compile the neural network.",
"_____no_output_____"
]
],
[
[
"model.compile(optimizer='adam', \n loss = 'binary_crossentropy',\n metrics=['accuracy'])",
"_____no_output_____"
],
[
"#model.summary()\n#x_partial_train = X_train[:100]\n#y_partial_train = y_train[:100]\n#x_val = X_train[100:]\n#y_val = y_train[100:]",
"_____no_output_____"
]
],
[
[
"#### (k) Fit the model on training set. Set the batch_size =10, run for 100 epochs.",
"_____no_output_____"
]
],
[
[
"history = model.fit(\n X_train, y_train,\n validation_data=(X_test,y_test),\n epochs=100, \n batch_size =10,\n)",
"Epoch 1/100\n800/800 [==============================] - 2s 2ms/step - loss: 0.5089 - accuracy: 0.7848 - val_loss: 0.4201 - val_accuracy: 0.8200\nEpoch 2/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.4052 - accuracy: 0.8314 - val_loss: 0.4056 - val_accuracy: 0.8250\nEpoch 3/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.4029 - accuracy: 0.8297 - val_loss: 0.4043 - val_accuracy: 0.8255\nEpoch 4/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.4110 - accuracy: 0.8226 - val_loss: 0.3945 - val_accuracy: 0.8285\nEpoch 5/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3967 - accuracy: 0.8232 - val_loss: 0.3874 - val_accuracy: 0.8290\nEpoch 6/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3913 - accuracy: 0.8284 - val_loss: 0.3812 - val_accuracy: 0.8370\nEpoch 7/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3864 - accuracy: 0.8353 - val_loss: 0.3735 - val_accuracy: 0.8430\nEpoch 8/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3680 - accuracy: 0.8423 - val_loss: 0.3656 - val_accuracy: 0.8485\nEpoch 9/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3714 - accuracy: 0.8442 - val_loss: 0.3589 - val_accuracy: 0.8540\nEpoch 10/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3636 - accuracy: 0.8450 - val_loss: 0.3535 - val_accuracy: 0.8540\nEpoch 11/100\n800/800 [==============================] - 1s 2ms/step - loss: 0.3670 - accuracy: 0.8436 - val_loss: 0.3516 - val_accuracy: 0.8560\nEpoch 12/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3589 - accuracy: 0.8525 - val_loss: 0.3469 - val_accuracy: 0.8585\nEpoch 13/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3586 - accuracy: 0.8488 - val_loss: 0.3562 - val_accuracy: 0.8475\nEpoch 14/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3575 - accuracy: 0.8528 - val_loss: 0.3469 - val_accuracy: 0.8620\nEpoch 15/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3565 - accuracy: 0.8578 - val_loss: 0.3444 - val_accuracy: 0.8595\nEpoch 16/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3665 - accuracy: 0.8505 - val_loss: 0.3502 - val_accuracy: 0.8530\nEpoch 17/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3424 - accuracy: 0.8631 - val_loss: 0.3432 - val_accuracy: 0.8615\nEpoch 18/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3373 - accuracy: 0.8644 - val_loss: 0.3408 - val_accuracy: 0.8670\nEpoch 19/100\n800/800 [==============================] - 1s 2ms/step - loss: 0.3647 - accuracy: 0.8489 - val_loss: 0.3410 - val_accuracy: 0.8660\nEpoch 20/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3406 - accuracy: 0.8617 - val_loss: 0.3432 - val_accuracy: 0.8655\nEpoch 21/100\n800/800 [==============================] - 1s 2ms/step - loss: 0.3499 - accuracy: 0.8568 - val_loss: 0.3440 - val_accuracy: 0.8650\nEpoch 22/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3457 - accuracy: 0.8592 - val_loss: 0.3419 - val_accuracy: 0.8605\nEpoch 23/100\n800/800 [==============================] - 1s 2ms/step - loss: 0.3403 - accuracy: 0.8589 - val_loss: 0.3458 - val_accuracy: 0.8615\nEpoch 24/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3508 - accuracy: 0.8539 - val_loss: 0.3408 - val_accuracy: 0.8620\nEpoch 25/100\n800/800 [==============================] - 
1s 2ms/step - loss: 0.3258 - accuracy: 0.8684 - val_loss: 0.3505 - val_accuracy: 0.8535\nEpoch 26/100\n800/800 [==============================] - 1s 2ms/step - loss: 0.3356 - accuracy: 0.8625 - val_loss: 0.3460 - val_accuracy: 0.8620\nEpoch 27/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3419 - accuracy: 0.8601 - val_loss: 0.3388 - val_accuracy: 0.8630\nEpoch 28/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3385 - accuracy: 0.8620 - val_loss: 0.3421 - val_accuracy: 0.8615\nEpoch 29/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3301 - accuracy: 0.8627 - val_loss: 0.3419 - val_accuracy: 0.8610\nEpoch 30/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3301 - accuracy: 0.8656 - val_loss: 0.3409 - val_accuracy: 0.8595\nEpoch 31/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3342 - accuracy: 0.8614 - val_loss: 0.3367 - val_accuracy: 0.8640\nEpoch 32/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3215 - accuracy: 0.8673 - val_loss: 0.3509 - val_accuracy: 0.8570\nEpoch 33/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3187 - accuracy: 0.8715 - val_loss: 0.3501 - val_accuracy: 0.8575\nEpoch 34/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3387 - accuracy: 0.8651 - val_loss: 0.3403 - val_accuracy: 0.8650\nEpoch 35/100\n800/800 [==============================] - 1s 2ms/step - loss: 0.3292 - accuracy: 0.8657 - val_loss: 0.3387 - val_accuracy: 0.8640\nEpoch 36/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3402 - accuracy: 0.8601 - val_loss: 0.3380 - val_accuracy: 0.8650\nEpoch 37/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3389 - accuracy: 0.8641 - val_loss: 0.3364 - val_accuracy: 0.8635\nEpoch 38/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3362 - accuracy: 0.8640 - val_loss: 0.3479 - val_accuracy: 0.8560\nEpoch 39/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3293 - accuracy: 0.8687 - val_loss: 0.3377 - val_accuracy: 0.8655\nEpoch 40/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3385 - accuracy: 0.8617 - val_loss: 0.3427 - val_accuracy: 0.8605\nEpoch 41/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3340 - accuracy: 0.8635 - val_loss: 0.3378 - val_accuracy: 0.8635\nEpoch 42/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3371 - accuracy: 0.8593 - val_loss: 0.3460 - val_accuracy: 0.8575\nEpoch 43/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3400 - accuracy: 0.8548 - val_loss: 0.3388 - val_accuracy: 0.8605\nEpoch 44/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3309 - accuracy: 0.8654 - val_loss: 0.3374 - val_accuracy: 0.8610\nEpoch 45/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3183 - accuracy: 0.8682 - val_loss: 0.3374 - val_accuracy: 0.8580\nEpoch 46/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3263 - accuracy: 0.8642 - val_loss: 0.3388 - val_accuracy: 0.8620\nEpoch 47/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3313 - accuracy: 0.8655 - val_loss: 0.3373 - val_accuracy: 0.8600\nEpoch 48/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3286 - accuracy: 0.8650 - val_loss: 0.3379 - val_accuracy: 0.8600\nEpoch 49/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3246 - accuracy: 0.8697 - 
val_loss: 0.3399 - val_accuracy: 0.8600\nEpoch 50/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3251 - accuracy: 0.8675 - val_loss: 0.3372 - val_accuracy: 0.8640\nEpoch 51/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3191 - accuracy: 0.8715 - val_loss: 0.3399 - val_accuracy: 0.8640\nEpoch 52/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3391 - accuracy: 0.8590 - val_loss: 0.3396 - val_accuracy: 0.8630\nEpoch 53/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3246 - accuracy: 0.8673 - val_loss: 0.3405 - val_accuracy: 0.8600\nEpoch 54/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3349 - accuracy: 0.8649 - val_loss: 0.3385 - val_accuracy: 0.8595\nEpoch 55/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3183 - accuracy: 0.8691 - val_loss: 0.3359 - val_accuracy: 0.8620\nEpoch 56/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3265 - accuracy: 0.8676 - val_loss: 0.3388 - val_accuracy: 0.8565\nEpoch 57/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3384 - accuracy: 0.8635 - val_loss: 0.3411 - val_accuracy: 0.8585\nEpoch 58/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3296 - accuracy: 0.8617 - val_loss: 0.3370 - val_accuracy: 0.8645\nEpoch 59/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3274 - accuracy: 0.8673 - val_loss: 0.3382 - val_accuracy: 0.8620\nEpoch 60/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3178 - accuracy: 0.8717 - val_loss: 0.3389 - val_accuracy: 0.8610\nEpoch 61/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3116 - accuracy: 0.8739 - val_loss: 0.3377 - val_accuracy: 0.8615\nEpoch 62/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3270 - accuracy: 0.8672 - val_loss: 0.3398 - val_accuracy: 0.8595\nEpoch 63/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3261 - accuracy: 0.8685 - val_loss: 0.3346 - val_accuracy: 0.8615\nEpoch 64/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3289 - accuracy: 0.8647 - val_loss: 0.3382 - val_accuracy: 0.8615\nEpoch 65/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3214 - accuracy: 0.8700 - val_loss: 0.3377 - val_accuracy: 0.8640\nEpoch 66/100\n800/800 [==============================] - 1s 2ms/step - loss: 0.3406 - accuracy: 0.8553 - val_loss: 0.3381 - val_accuracy: 0.8635\nEpoch 67/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3239 - accuracy: 0.8679 - val_loss: 0.3348 - val_accuracy: 0.8620\nEpoch 68/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3311 - accuracy: 0.8654 - val_loss: 0.3414 - val_accuracy: 0.8610\nEpoch 69/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3308 - accuracy: 0.8632 - val_loss: 0.3398 - val_accuracy: 0.8585\nEpoch 70/100\n800/800 [==============================] - 1s 2ms/step - loss: 0.3194 - accuracy: 0.8685 - val_loss: 0.3391 - val_accuracy: 0.8595\nEpoch 71/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3251 - accuracy: 0.8672 - val_loss: 0.3358 - val_accuracy: 0.8650\nEpoch 72/100\n800/800 [==============================] - 1s 2ms/step - loss: 0.3361 - accuracy: 0.8619 - val_loss: 0.3396 - val_accuracy: 0.8595\nEpoch 73/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3292 - accuracy: 0.8641 - val_loss: 0.3368 - val_accuracy: 0.8615\nEpoch 
74/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3189 - accuracy: 0.8705 - val_loss: 0.3362 - val_accuracy: 0.8595\nEpoch 75/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3287 - accuracy: 0.8660 - val_loss: 0.3417 - val_accuracy: 0.8585\nEpoch 76/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3328 - accuracy: 0.8640 - val_loss: 0.3384 - val_accuracy: 0.8620\nEpoch 77/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3221 - accuracy: 0.8669 - val_loss: 0.3396 - val_accuracy: 0.8615\nEpoch 78/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3268 - accuracy: 0.8670 - val_loss: 0.3364 - val_accuracy: 0.8615\nEpoch 79/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3308 - accuracy: 0.8636 - val_loss: 0.3379 - val_accuracy: 0.8665\nEpoch 80/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3298 - accuracy: 0.8636 - val_loss: 0.3408 - val_accuracy: 0.8625\nEpoch 81/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3262 - accuracy: 0.8666 - val_loss: 0.3442 - val_accuracy: 0.8550\nEpoch 82/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3285 - accuracy: 0.8684 - val_loss: 0.3413 - val_accuracy: 0.8570\nEpoch 83/100\n800/800 [==============================] - 1s 2ms/step - loss: 0.3247 - accuracy: 0.8639 - val_loss: 0.3356 - val_accuracy: 0.8630\nEpoch 84/100\n800/800 [==============================] - 1s 2ms/step - loss: 0.3279 - accuracy: 0.8677 - val_loss: 0.3404 - val_accuracy: 0.8580\nEpoch 85/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3239 - accuracy: 0.8673 - val_loss: 0.3405 - val_accuracy: 0.8595\nEpoch 86/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3264 - accuracy: 0.8699 - val_loss: 0.3516 - val_accuracy: 0.8560\nEpoch 87/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3157 - accuracy: 0.8732 - val_loss: 0.3408 - val_accuracy: 0.8610\nEpoch 88/100\n800/800 [==============================] - 1s 2ms/step - loss: 0.3256 - accuracy: 0.8681 - val_loss: 0.3388 - val_accuracy: 0.8660\nEpoch 89/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3248 - accuracy: 0.8679 - val_loss: 0.3446 - val_accuracy: 0.8630\nEpoch 90/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3313 - accuracy: 0.8604 - val_loss: 0.3397 - val_accuracy: 0.8590\nEpoch 91/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3259 - accuracy: 0.8649 - val_loss: 0.3411 - val_accuracy: 0.8640\nEpoch 92/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3315 - accuracy: 0.8625 - val_loss: 0.3388 - val_accuracy: 0.8640\nEpoch 93/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3259 - accuracy: 0.8656 - val_loss: 0.3391 - val_accuracy: 0.8605\nEpoch 94/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3235 - accuracy: 0.8704 - val_loss: 0.3410 - val_accuracy: 0.8590\nEpoch 95/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3252 - accuracy: 0.8658 - val_loss: 0.3410 - val_accuracy: 0.8610\nEpoch 96/100\n800/800 [==============================] - 1s 2ms/step - loss: 0.3226 - accuracy: 0.8682 - val_loss: 0.3396 - val_accuracy: 0.8580\nEpoch 97/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3281 - accuracy: 0.8639 - val_loss: 0.3421 - val_accuracy: 0.8595\nEpoch 98/100\n800/800 [==============================] - 
1s 1ms/step - loss: 0.3144 - accuracy: 0.8709 - val_loss: 0.3395 - val_accuracy: 0.8605\nEpoch 99/100\n800/800 [==============================] - 1s 1ms/step - loss: 0.3234 - accuracy: 0.8728 - val_loss: 0.3363 - val_accuracy: 0.8645\nEpoch 100/100\n800/800 [==============================] - 1s 2ms/step - loss: 0.3178 - accuracy: 0.8741 - val_loss: 0.3393 - val_accuracy: 0.8615\n"
]
],
[
[
"#### (l) Evaluate the model on test set.",
"_____no_output_____"
]
],
[
[
"test_loss, test_acc = model.evaluate(X_test, y_test, verbose=2)",
"63/63 - 0s - loss: 0.3393 - accuracy: 0.8615\n"
],
[
"history_dict = history.history\nloss_values = history_dict['loss']\nval_loss_values = history_dict['val_loss']\n\nepochs = range(1, len(loss_values) + 1)\n\nplt.plot(epochs, loss_values, 'bo', label='Training Loss')\nplt.plot(epochs, val_loss_values, 'b', label='Validation Loss')\nplt.title('Training and Validation Loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"acc = history_dict['accuracy']\nval_acc = history_dict['val_accuracy']\n\nplt.plot(epochs, acc, 'bo', label='Training Accuracy')\nplt.plot(epochs, val_acc, 'b', label='Validation Accuracy')\nplt.title('Training and Validation Accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### (m) Finally, predict the probability of y = Exited on the test set.",
"_____no_output_____"
]
],
[
[
"prediction = model.predict(X_test)\nprint(prediction)",
"[[0.22871271]\n [0.29181826]\n [0.16714689]\n ...\n [0.15074047]\n [0.18459535]\n [0.23527709]]\n"
],
[
"new_pred = (prediction > 0.6)\ntrue_count = np.count_nonzero(new_pred)\nprint(true_count/new_pred.size)\nprint(\"% of employees that have a 60% or greater chance of leaving the company\")\n\n",
"0.1225\n% of employees that have a 60% or greater chance of leaving the company\n"
]
],
[
[
"#### Q.2 (a) Download the file 'natural_images.zip', and extra the files.",
"_____no_output_____"
]
],
[
[
"import zipfile\nlocal_zip = \"../dataFiles/natural_images.zip\"\nzip_ref = zipfile.ZipFile(local_zip, 'r')\nzip_ref.extractall('natural_images')",
"_____no_output_____"
]
],
[
[
"#### (b) Use os.listdir to create a list of labels.",
"_____no_output_____"
]
],
[
[
"os.listdir(\"natural_images\")",
"_____no_output_____"
]
],
[
[
"#### (c) Display the first 5 images of each class. ",
"_____no_output_____"
]
],
[
[
"from IPython.display import Image, display\ndisplay(Image( image file))",
"_____no_output_____"
]
],
[
[
"#### (d) Create the following barplot.",
"_____no_output_____"
],
[
"#### (e) Use cv2.imread() to convert images into numpy array (X). Then, use cv2.resize(), so that each image has the size (32,32) Create an array which contains the label of each image (Y).",
"_____no_output_____"
],
[
"#### (f) Print the shape of images (X) and shape of labels (Y).",
"_____no_output_____"
],
[
"#### (g) Standardize X by dividing X by 255.",
"_____no_output_____"
],
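[
"The remaining parts were left unanswered; below is a possible (unverified) sketch for (e)-(g). It assumes the `natural_images` folder extracted in (a) and that OpenCV (`cv2`) is installed; the `X_img`/`Y_img` names are illustrative, not from the original notebook.",
"_____no_output_____"
],
[
"# Hedged sketch for (e)-(g): load, resize and scale the images (assumes cv2 is available).\nimport cv2\n\nX_list, Y_list = [], []\nfor label in os.listdir(\"natural_images\"):\n    folder = os.path.join(\"natural_images\", label)\n    for fname in os.listdir(folder):\n        img = cv2.resize(cv2.imread(os.path.join(folder, fname)), (32, 32))  # (e)\n        X_list.append(img)\n        Y_list.append(label)\n\nX_img = np.array(X_list)\nY_img = np.array(Y_list)\nprint(X_img.shape, Y_img.shape)  # (f) shapes of the images and labels\nX_img = X_img / 255.0            # (g) standardize",
"_____no_output_____"
],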
[
"#### (h) Use LabelEncoder() to encode Y. Use to_categorical() covert Y into categorical numpy array.",
"_____no_output_____"
],
[
"#### (i) Split the data into training set and test set. test_size = 0.33, random_state = 46.",
"_____no_output_____"
],
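[
"A possible sketch for (h)-(i); it assumes the `X_img`/`Y_img` arrays from the sketch above.",
"_____no_output_____"
],
[
"# Hedged sketch for (h)-(i): encode the labels and split the data.\nfrom sklearn.preprocessing import LabelEncoder\nfrom tensorflow.keras.utils import to_categorical\n\nY_enc = to_categorical(LabelEncoder().fit_transform(Y_img))  # (h)\nX_train_img, X_test_img, y_train_img, y_test_img = train_test_split(\n    X_img, Y_enc, test_size=0.33, random_state=46)  # (i)",
"_____no_output_____"
],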
[
"#### (j) But a CNN model- first layer is Conv2D, filters =32, kernel_size = (5,5), activation = relu.- second layer is MaxPool2D, pool_size = (2,2)- third layer is Conv2D, filters =64, kernel_size = (3,3), activation = relu.- fourth layer is MaxPool2D, pool_size = (2,2)- fifth layer to flatten the tensors.- sixth layer is Dense, output shape = 256, activation = relu.- seventh layer is Dense, output shape = 8, activation = softmax.",
"_____no_output_____"
],
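[
"A possible sketch of the CNN described in (j), assuming 32x32 RGB inputs and 8 classes; the `cnn_model` name is illustrative.",
"_____no_output_____"
],
[
"# Hedged sketch for (j): the CNN architecture described above.\nfrom tensorflow.keras.layers import Conv2D, MaxPool2D, Flatten\n\ncnn_model = Sequential()\ncnn_model.add(Conv2D(32, kernel_size=(5, 5), activation='relu', input_shape=(32, 32, 3)))\ncnn_model.add(MaxPool2D(pool_size=(2, 2)))\ncnn_model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))\ncnn_model.add(MaxPool2D(pool_size=(2, 2)))\ncnn_model.add(Flatten())\ncnn_model.add(Dense(256, activation='relu'))\ncnn_model.add(Dense(8, activation='softmax'))",
"_____no_output_____"
],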
[
"#### (k) Compile the modelloss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']",
"_____no_output_____"
],
[
"#### (l) fit the model, epochs = 25, validation_split = 0.2",
"_____no_output_____"
],
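[
"A possible sketch for (k)-(l), assuming the `cnn_model` and the `*_img` arrays defined in the sketches above.",
"_____no_output_____"
],
[
"# Hedged sketch for (k)-(l): compile and fit the CNN.\ncnn_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\ncnn_history = cnn_model.fit(X_train_img, y_train_img, epochs=25, validation_split=0.2)",
"_____no_output_____"
],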
[
"#### (m)Plot the change in loss score on training set and validation set over epochs.",
"_____no_output_____"
],
[
"#### (n) Plot the change in accuracy on training set and validation set over epochs.",
"_____no_output_____"
],
[
"#### (o) Retrain the model using the entire training set and set epochs = 5. Evaluate the model on the test set.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a2558f880f40dd4ad4d3c076252310ac188a5bd
| 12,735 |
ipynb
|
Jupyter Notebook
|
Machine learning/Heart Diseases - Classification with Random Forest Classifier.ipynb
|
olgarozhdestvina/Data-Science-and-Machine-Learning
|
3d5b6ed5d20056458af540091aa5ac58cb5b2e44
|
[
"MIT"
] | null | null | null |
Machine learning/Heart Diseases - Classification with Random Forest Classifier.ipynb
|
olgarozhdestvina/Data-Science-and-Machine-Learning
|
3d5b6ed5d20056458af540091aa5ac58cb5b2e44
|
[
"MIT"
] | null | null | null |
Machine learning/Heart Diseases - Classification with Random Forest Classifier.ipynb
|
olgarozhdestvina/Data-Science-and-Machine-Learning
|
3d5b6ed5d20056458af540091aa5ac58cb5b2e44
|
[
"MIT"
] | null | null | null | 25.419162 | 95 | 0.409501 |
[
[
[
"## Intro to Scikit-learn",
"_____no_output_____"
]
],
[
[
"# import packages\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pickle \nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score",
"_____no_output_____"
],
[
"# Get the data ready\nheart = pd.read_csv('../Data/heart.csv')\nheart.head()",
"_____no_output_____"
],
[
"# create features matrix\nX = heart.drop('target', axis =1)\n\n# create labels\ny = heart.target",
"_____no_output_____"
],
[
"# choose the right model and hyperparameters\nclf = RandomForestClassifier()\n\n# keep the default hyperparameters\nclf.get_params()",
"_____no_output_____"
],
[
"# fit the model to the data\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=49)",
"_____no_output_____"
],
[
"# fit it\nclf.fit(X_train, y_train);\n\n# make a predition\ny_pred = clf.predict(X_test)\ny_pred",
"_____no_output_____"
],
[
"# Evaluate the model on the training and test data\nclf.score(X_train, y_train), clf.score(X_test, y_test)",
"_____no_output_____"
],
[
"print(classification_report(y_test, y_pred))",
" precision recall f1-score support\n\n 0 0.94 0.85 0.89 34\n 1 0.83 0.93 0.88 27\n\n accuracy 0.89 61\n macro avg 0.88 0.89 0.88 61\nweighted avg 0.89 0.89 0.89 61\n\n"
],
[
"confusion_matrix(y_test, y_pred)",
"_____no_output_____"
],
[
"accuracy_score(y_test, y_pred)",
"_____no_output_____"
],
[
"# Improve the performance of the model\n# Try different amount of n-estimators\nnp.random.seed(42)\nfor i in range(10, 140, 20):\n print(f'Trying model with {i} estimators')\n clf = RandomForestClassifier(i)\n clf.fit(X_train, y_train)\n print(f'Model accuracy on test set: {clf.score(X_test, y_test)*100:.2f}%\\n')",
"Trying model with 10 estimators\nModel accuracy on test set: 78.69%\n\nTrying model with 30 estimators\nModel accuracy on test set: 85.25%\n\nTrying model with 50 estimators\nModel accuracy on test set: 81.97%\n\nTrying model with 70 estimators\nModel accuracy on test set: 85.25%\n\nTrying model with 90 estimators\nModel accuracy on test set: 83.61%\n\nTrying model with 110 estimators\nModel accuracy on test set: 88.52%\n\nTrying model with 130 estimators\nModel accuracy on test set: 85.25%\n\n"
],
[
"# redo the model and save it \nclf = RandomForestClassifier(110)\nclf.fit(X_train, y_train)\npickle.dump(clf, open('random_forest_model_1.pkl', 'wb'))",
"_____no_output_____"
],
[
"# reload the model\nloaded_model = pickle.load(open('random_forest_model_1.pkl', 'rb'))\nprint(f'Model accuracy on test set: {loaded_model.score(X_test, y_test)*100:.2f}%\\n')",
"Model accuracy on test set: 85.25%\n\n"
]
],
[
[
"### Getting the data ready\n\nThree main steps:\n1. Split the data into features and labels\n2. Filling (imputing) or disregarding missing values\n3. Converting non-numerical values to numerical (feature encoding)",
"_____no_output_____"
],
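[
"A minimal, hypothetical illustration of steps 2 and 3 (the `demo` frame is made up; the heart dataset itself has no missing values and is already numeric):",
"_____no_output_____"
],
[
"# Hedged sketch of imputing and encoding on a made-up frame.\nfrom sklearn.impute import SimpleImputer\n\ndemo = pd.DataFrame({'age': [40, np.nan, 61], 'sex_label': ['male', 'female', 'male']})\ndemo['age'] = SimpleImputer(strategy='mean').fit_transform(demo[['age']])  # 2. fill missing values\ndemo = pd.get_dummies(demo, columns=['sex_label'])                         # 3. encode categories\ndemo",
"_____no_output_____"
],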
[
"<br>\n\n___\n#### End.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
4a255f5be880edcb9758016fce97cfa6eb5cec8c
| 19,182 |
ipynb
|
Jupyter Notebook
|
example-sudoku.ipynb
|
leowalkling/qpth
|
84a2c650d60a553f45984549c3c1f021f961b874
|
[
"Apache-2.0"
] | null | null | null |
example-sudoku.ipynb
|
leowalkling/qpth
|
84a2c650d60a553f45984549c3c1f021f961b874
|
[
"Apache-2.0"
] | null | null | null |
example-sudoku.ipynb
|
leowalkling/qpth
|
84a2c650d60a553f45984549c3c1f021f961b874
|
[
"Apache-2.0"
] | null | null | null | 31.343137 | 243 | 0.524659 |
[
[
[
"# OptNet/qpth Example Sudoku Notebook\n\n*By [Brandon Amos](https://bamos.github.io) and [J. Zico Kolter](http://zicokolter.com/).*\n\n---\n\nThis notebook is released along with our paper\n[OptNet: Differentiable Optimization as a Layer in Neural Networks](https://arxiv.org/abs/1703.00443).\n\nThis notebook shows an example of constructing an\nOptNet layer in PyTorch with our [qpth library](https://github.com/locuslab/qpth)\nto solve [the game Sudoku](https://en.wikipedia.org/wiki/Sudoku)\nas a prediction problem from data.\nSee [our qpth documentation page](https://locuslab.github.io/qpth/)\nfor more details on how to use `qpth`.\nThe experiments for our paper that use this library are in\n[this repo](https://github.com/locuslab/optnet).\nSpecifically [here](https://github.com/locuslab/optnet/tree/master/sudoku)\nis the full source code for the publihsed version of Sudoku.\n\n\n## Setup and Dependencies\n\n+ Python/numpy/[PyTorch](https://pytorch.org)\n+ [qpth](https://github.com/locuslab/qpth):\n *Our fast QP solver for PyTorch released in conjunction with this paper.*\n+ [bamos/block](https://github.com/bamos/block):\n *Our intelligent block matrix library for numpy, PyTorch, and beyond.*\n+ Optional: [bamos/setGPU](https://github.com/bamos/setGPU):\n A small library to set `CUDA_VISIBLE_DEVICES` on multi-GPU systems.",
"_____no_output_____"
]
],
[
[
"import os\n\nimport sys\n\nimport numpy as np\nimport torch\n\nimport torch.nn as nn\nfrom torch.autograd import Function, Variable\nfrom torch.nn.parameter import Parameter\nimport torch.nn.functional as F\n\nfrom qpth.qp import QPFunction\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nplt.style.use('bmh')\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"# Setup: Download the data and pretrained model\n\n+ The pre-trained model is for later.\n The following command should download everything to a tmp directory for you\n if you have the `wget` and `tar` commands installed.\n+ (Sorry for the bad form here)",
"_____no_output_____"
]
],
[
[
"tmpDir = \"/tmp/optnet.sudoku\"\ncmd = ('mkdir {}; cd {} &&'\n 'wget \"http://joule.isr.cs.cmu.edu:11235/optnet/arxiv.v1.sudoku.tgz\" && '\n 'tar xf arxiv.v1.sudoku.tgz').format(*[tmpDir]*2)\ndataDir = os.path.join(tmpDir, 'arxiv.v1.sudoku')\nassert os.system(cmd) == 0\n\nsys.path.append(tmpDir+'/arxiv.v1.sudoku')\nimport models # From /tmp/optnet.sudoku/arxiv.v1.sudoku/models.py",
"_____no_output_____"
],
[
"testPct = 0.1\n\nwith open('{}/2/features.pt'.format(dataDir), 'rb') as f:\n X = torch.load(f)\nwith open('{}/2/labels.pt'.format(dataDir), 'rb') as f:\n Y = torch.load(f)\n\nN, nFeatures = X.size(0), int(np.prod(X.size()[1:]))\n\nnTrain = int(N*(1.-testPct))\nnTest = N-nTrain\n\ntrainX = X[:nTrain]\ntrainY = Y[:nTrain]\ntestX = X[nTrain:]\ntestY = Y[nTrain:]",
"_____no_output_____"
]
],
[
[
"## What the data for the Sudoku task looks like\n\nThe inputs are incomplete boards and the outputs\nare the completed boards. Here's what the first\ninput and output in the test set looks like.",
"_____no_output_____"
]
],
[
[
"def decode_onehot(encoded_board):\n \"\"\"Take the unique argmax of the one-hot encoded board.\"\"\"\n v,I = torch.max(encoded_board, 0)\n return ((v>0).long()*(I+1)).squeeze()\n\nprint(\"First testing example input (unsolved Sudoku board): \", decode_onehot(testX[0]))\nprint(\"First testing example output (solved Sudoku board): \", decode_onehot(testY[0]))",
"First testing example input (unsolved Sudoku board): \n 0 0 2 4\n 3 0 0 1\n 0 4 0 0\n 0 0 3 0\n[torch.LongTensor of size 4x4]\n\nFirst testing example output (solved Sudoku board): \n 1 3 2 4\n 3 2 4 1\n 2 4 1 3\n 4 1 3 2\n[torch.LongTensor of size 4x4]\n\n"
]
],
[
[
"You may have noticed that we had to decode those examples.\nThat's because they're actually *one-hot encoded* for how\nwe're going to model the task.\nThat means that instead of representing the values as \nsomething between 1 and 4, they're represented\nas a 4-dimensional vector with a 1 in the index of the value.\nHere's what the same first example from the test set\nactually looks like:",
"_____no_output_____"
]
],
[
[
"print(\"First test example input one-hot encoded (unsolved Sudoku board): \", testX[0])\nprint(\"First test example output one-hot encoded (solved Sudoku board): \", testY[0])",
"First test example input one-hot encoded (unsolved Sudoku board): \n(0 ,.,.) = \n 0 0 0 0\n 0 0 0 1\n 0 0 0 0\n 0 0 0 0\n\n(1 ,.,.) = \n 0 0 1 0\n 0 0 0 0\n 0 0 0 0\n 0 0 0 0\n\n(2 ,.,.) = \n 0 0 0 0\n 1 0 0 0\n 0 0 0 0\n 0 0 1 0\n\n(3 ,.,.) = \n 0 0 0 1\n 0 0 0 0\n 0 1 0 0\n 0 0 0 0\n[torch.FloatTensor of size 4x4x4]\n\nFirst test example output one-hot encoded (solved Sudoku board): \n(0 ,.,.) = \n 1 0 0 0\n 0 0 0 1\n 0 0 1 0\n 0 1 0 0\n\n(1 ,.,.) = \n 0 0 1 0\n 0 1 0 0\n 1 0 0 0\n 0 0 0 1\n\n(2 ,.,.) = \n 0 1 0 0\n 1 0 0 0\n 0 0 0 1\n 0 0 1 0\n\n(3 ,.,.) = \n 0 0 0 1\n 0 0 1 0\n 0 1 0 0\n 1 0 0 0\n[torch.FloatTensor of size 4x4x4]\n\n"
]
],
[
[
"# Defining a model for this task\n\nWe've now turned (mini-)Sudoku into a machine learning task that\nyou can apply any model and learning algorithm to. \nIn this notebook, we'll just show how to initialize and train\nan OptNet model for this task.\nHowever you can play around and swap this out for any\nmodel you want!\nCheck out [our baseline models](https://github.com/locuslab/optnet/blob/master/sudoku/models.py)\nif you're interested.\n\nSudoku is actually an integer programming problem but\nwe can relax it to an LP (or LP with a small ridge term,\nwhich we'll actually use) that can be expressed as:\n\n```\ny* = argmin_y 0.5 eps z^T z - p^T y\n s.t. Ay = b\n y >= 0\n```\n\nTo quickly explain this, the quadratic term `0.5 eps z^T z`\nis a small ridge term so we can use `qpth`,\n`p` is the (flattened) one-hot encoded input,\nthe `-p^T y` term constrains the solution to contain\nthe same pieces as the unsolved board,\nand the linear equality constraints `Ay = b`\nencode the constraints of Sudoku (the row, columns,\nand sub-blocks must contain all of the digits).\n\nIf you want to check your understanding of this:\n\n1. What do some example constraints `a_i^T y = b_i` look like?\n2. What happens if we remove the linear equality constraint?\n\nImplementing this model is just a few lines of PyTorch with our qpth library.\nNote that in this notebook we'll just execute this on the CPU,\nbut for performance reasons you should use a GPU for serious\nexperiments:",
"_____no_output_____"
]
],
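[
[
"To make question 1 above concrete, here is a small illustrative construction (not from the original notebook) of a few equality-constraint rows for the 4x4 board: each row below says \"cell (i,j) contains exactly one digit\", i.e. the four one-hot entries of that cell sum to 1. For simplicity it flattens the board cell-by-cell with the digit index last, which differs from the (digit, row, col) layout of the data above; the full `A` would also need row, column and sub-block constraints.",
"_____no_output_____"
]
],
[
[
"# Hedged sketch: per-cell equality constraints a_i^T y = 1 for the 4x4 (n=2) case.\nn2 = 4\nA_cells = np.zeros((n2 * n2, n2 * n2 * n2))\nfor i in range(n2):\n    for j in range(n2):\n        for k in range(n2):\n            A_cells[i * n2 + j, (i * n2 + j) * n2 + k] = 1.0\nb_cells = np.ones(n2 * n2)\nprint(A_cells[0])  # first constraint: the 4 entries of cell (0,0) sum to 1",
"_____no_output_____"
]
],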
[
[
"class OptNet(nn.Module):\n def __init__(self, n, Qpenalty):\n super().__init__()\n nx = (n**2)**3\n self.Q = Variable(Qpenalty*torch.eye(nx).double())\n self.G = Variable(-torch.eye(nx).double())\n self.h = Variable(torch.zeros(nx).double())\n A_shape = (40, 64) # Somewhat magic, it's from the true solution.\n self.A = Parameter(torch.rand(A_shape).double())\n self.b = Variable(torch.ones(A_shape[0]).double())\n\n def forward(self, puzzles):\n nBatch = puzzles.size(0)\n\n p = -puzzles.view(nBatch, -1)\n\n return QPFunction(verbose=-1)(\n self.Q, p.double(), self.G, self.h, self.A, self.b\n ).float().view_as(puzzles)",
"_____no_output_____"
]
],
[
[
"That's it! Let's randomly initialize this model and see what it does on the first test set example. What do you expect?",
"_____no_output_____"
]
],
[
[
"model = OptNet(2, 0.1)\npred = model(Variable(testX[0].unsqueeze(0))).squeeze().data\n\nprint(\"First test example input (unsolved Sudoku board): \", decode_onehot(testX[0]))\nprint(\"First test example output (TRUE solved Sudoku board): \", decode_onehot(testY[0]))\nprint(\"First test example prediction: \", decode_onehot(pred))",
"First test example input (unsolved Sudoku board): \n 0 0 2 4\n 3 0 0 1\n 0 4 0 0\n 0 0 3 0\n[torch.LongTensor of size 4x4]\n\nFirst test example output (TRUE solved Sudoku board): \n 1 3 2 4\n 3 2 4 1\n 2 4 1 3\n 4 1 3 2\n[torch.LongTensor of size 4x4]\n\nFirst test example prediction: \n 3 4 1 2\n 2 3 3 2\n 3 1 2 3\n 1 1 2 2\n[torch.LongTensor of size 4x4]\n\n"
]
],
[
[
"Wow that prediction is way off!! That's expected since the model was randomly initialized. Note that at this point, some of the constraints actually make it impossible to match the unsolved board (like the `4` at the top right corner)\n\nLet's look a random nonsense constraint that the model just satisfied. Here are the coefficients in the first row, `a_1` and `b_1`. The last line here\nshows that the constraint is acutally satisfied (up to machine precision).",
"_____no_output_____"
]
],
[
[
"np.set_printoptions(precision=2)\na0 = model.A[0].data.numpy()\nb0 = model.b.data[0]\nz = pred.numpy().ravel()\nprint('First row of A:\\n', a0)\nprint('-'*30)\nprint('First entry of b: ', b0)\nprint('-'*30)\nprint('a0^T z - b: ', np.dot(a0, z) - b0)",
"First row of A:\n [ 0.49 0.74 0.66 0.42 0.95 0.73 0.83 0.2 0.39 0.63 0.36 0.25\n 0.4 0.26 0.81 0.88 0.98 0.61 0.89 0.9 0.51 0.15 0.11 0.82\n 0.14 0.22 0.56 0.13 0.64 0.82 0.92 0.25 0.2 0.04 0.25 0.63\n 0.07 0.68 0.78 0.34 0.62 0.01 0.72 0.54 0.07 0.41 0.43 0.18\n 0.02 0.21 0.62 0.81 0.3 0.97 0.29 0.51 0.87 0.43 0.6 0.14\n 0.15 0.16 0.15 0.69]\n------------------------------\nFirst entry of b: 1.0\n------------------------------\na0^T z - b: -5.92086668583e-09\n"
]
],
[
[
"# Training the model\n\nLet's start training this model my comparing the predictions\nto the true solutions and taking gradient steps.\nThis takes a while to run (overnight on a GPU), so here\nwe'll just take 10 steps through the first 10 training examples\nto illustrate what the full training would look like.",
"_____no_output_____"
]
],
[
[
"loss_fn = torch.nn.MSELoss()\n\n# Initialize the optimizer.\nlearning_rate = 1e-3\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\nfor t in range(10):\n x_batch = Variable(trainX[t].unsqueeze(0))\n y_batch = Variable(trainY[t].unsqueeze(0))\n \n # Forward pass: compute predicted y by passing x to the model.\n y_pred = model(x_batch)\n\n # Compute and print loss.\n loss = loss_fn(y_pred, y_batch)\n print('Iteration {}, loss = {:.2f}'.format(t, loss.data[0]))\n\n # Before the backward pass, use the optimizer object to zero all of the\n # gradients for the variables it will update (which are the learnable weights\n # of the model)\n optimizer.zero_grad()\n\n # Backward pass: compute gradient of the loss with respect to model\n # parameters\n loss.backward()\n\n # Calling the step function on an Optimizer makes an update to its\n # parameters\n optimizer.step()",
"Iteration 0, loss = 0.24\nIteration 1, loss = 0.24\nIteration 2, loss = 0.24\nIteration 3, loss = 0.24\nIteration 4, loss = 0.24\nIteration 5, loss = 0.23\nIteration 6, loss = 0.24\nIteration 7, loss = 0.24\nIteration 8, loss = 0.22\nIteration 9, loss = 0.24\n"
]
],
[
[
"# Looking at a pre-trained model\n\nImagine you kept that running for a while.\nLet's load my pre-trained model we downloaded earlier and\nsee the predictions on the first test example again:",
"_____no_output_____"
]
],
[
[
"A_file = os.path.join(tmpDir, 'arxiv.v1.sudoku', 'pretrained-optnet-A.pth')\ntrainedA = torch.load(A_file)\n\ntrainedModel = OptNet(2, 0.2)\ntrainedModel.A.data = trainedA\n\npred = trainedModel(Variable(testX[0].unsqueeze(0))).data.squeeze()\n\nprint(\"First test example input (unsolved Sudoku board): \", decode_onehot(testX[0]))\nprint(\"First test example output (TRUE solved Sudoku board): \", decode_onehot(testY[0]))\nprint(\"First test example prediction: \", decode_onehot(pred))",
"First test example input (unsolved Sudoku board): \n 0 0 2 4\n 3 0 0 1\n 0 4 0 0\n 0 0 3 0\n[torch.LongTensor of size 4x4]\n\nFirst test example output (TRUE solved Sudoku board): \n 1 3 2 4\n 3 2 4 1\n 2 4 1 3\n 4 1 3 2\n[torch.LongTensor of size 4x4]\n\nFirst test example prediction: \n 1 3 2 4\n 3 2 4 1\n 2 4 1 3\n 4 1 3 2\n[torch.LongTensor of size 4x4]\n\n"
]
],
[
[
"We did it! With just a few lines of code we've trained\nan intuitive model that solves Sudoku.\n\nAs a closing note, what does the trained `A` matrix look like?\nWith this formulation, we don't expect it to be the nice,\nsparse coefficient matrix encoding the rules we typically\nthink of Sudoku as since any row-transformed\nversion of this matrix is an equivalent valid solution:",
"_____no_output_____"
]
],
[
[
"trainedA",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a256c7206b52523773412b84b6132477c3b6934
| 42,703 |
ipynb
|
Jupyter Notebook
|
jupyter/BLOOMBERG/SektorHargaInflasi/script/SektorHargaInflasi3_3.ipynb
|
langpp/bappenas
|
f780607192bb99b9bc8fbe29412b4c6c49bf15ae
|
[
"Apache-2.0"
] | 1 |
2021-03-17T03:10:49.000Z
|
2021-03-17T03:10:49.000Z
|
jupyter/BLOOMBERG/SektorHargaInflasi/script/SektorHargaInflasi3_3.ipynb
|
langpp/bappenas
|
f780607192bb99b9bc8fbe29412b4c6c49bf15ae
|
[
"Apache-2.0"
] | null | null | null |
jupyter/BLOOMBERG/SektorHargaInflasi/script/SektorHargaInflasi3_3.ipynb
|
langpp/bappenas
|
f780607192bb99b9bc8fbe29412b4c6c49bf15ae
|
[
"Apache-2.0"
] | 1 |
2021-03-17T03:12:34.000Z
|
2021-03-17T03:12:34.000Z
| 85.065737 | 2,221 | 0.645482 |
[
[
[
"#IMPORT SEMUA LIBARARY",
"_____no_output_____"
],
[
"#IMPORT LIBRARY PANDAS\nimport pandas as pd\n#IMPORT LIBRARY UNTUK POSTGRE\nfrom sqlalchemy import create_engine\nimport psycopg2\n#IMPORT LIBRARY CHART\nfrom matplotlib import pyplot as plt\nfrom matplotlib import style\n#IMPORT LIBRARY BASE PATH\nimport os\nimport io\n#IMPORT LIBARARY PDF\nfrom fpdf import FPDF\n#IMPORT LIBARARY CHART KE BASE64\nimport base64\n#IMPORT LIBARARY EXCEL\nimport xlsxwriter ",
"_____no_output_____"
],
[
"#FUNGSI UNTUK MENGUPLOAD DATA DARI CSV KE POSTGRESQL",
"_____no_output_____"
],
[
"def uploadToPSQL(columns, table, filePath, engine):\n #FUNGSI UNTUK MEMBACA CSV\n df = pd.read_csv(\n os.path.abspath(filePath),\n names=columns,\n keep_default_na=False\n )\n #APABILA ADA FIELD KOSONG DISINI DIFILTER\n df.fillna('')\n #MENGHAPUS COLUMN YANG TIDAK DIGUNAKAN\n del df['kategori']\n del df['jenis']\n del df['pengiriman']\n del df['satuan']\n \n #MEMINDAHKAN DATA DARI CSV KE POSTGRESQL\n df.to_sql(\n table, \n engine,\n if_exists='replace'\n )\n \n #DIHITUNG APABILA DATA YANG DIUPLOAD BERHASIL, MAKA AKAN MENGEMBALIKAN KELUARAN TRUE(BENAR) DAN SEBALIKNYA\n if len(df) == 0:\n return False\n else:\n return True",
"_____no_output_____"
],
[
"#FUNGSI UNTUK MEMBUAT CHART, DATA YANG DIAMBIL DARI DATABASE DENGAN MENGGUNAKAN ORDER DARI TANGGAL DAN JUGA LIMIT\n#DISINI JUGA MEMANGGIL FUNGSI MAKEEXCEL DAN MAKEPDF",
"_____no_output_____"
],
[
"def makeChart(host, username, password, db, port, table, judul, columns, filePath, name, subjudul, limit, negara, basePath):\n #TEST KONEKSI DATABASE\n try:\n #KONEKSI KE DATABASE\n connection = psycopg2.connect(user=username,password=password,host=host,port=port,database=db)\n cursor = connection.cursor()\n #MENGAMBL DATA DARI TABLE YANG DIDEFINISIKAN DIBAWAH, DAN DIORDER DARI TANGGAL TERAKHIR\n #BISA DITAMBAHKAN LIMIT SUPAYA DATA YANG DIAMBIL TIDAK TERLALU BANYAK DAN BERAT\n postgreSQL_select_Query = \"SELECT * FROM \"+table+\" ORDER BY tanggal ASC LIMIT \" + str(limit)\n \n cursor.execute(postgreSQL_select_Query)\n mobile_records = cursor.fetchall() \n uid = []\n lengthx = []\n lengthy = []\n #MELAKUKAN LOOPING ATAU PERULANGAN DARI DATA YANG SUDAH DIAMBIL\n #KEMUDIAN DATA TERSEBUT DITEMPELKAN KE VARIABLE DIATAS INI\n for row in mobile_records:\n uid.append(row[0])\n lengthx.append(row[1])\n if row[2] == \"\":\n lengthy.append(float(0))\n else:\n lengthy.append(float(row[2]))\n\n #FUNGSI UNTUK MEMBUAT CHART\n #bar\n style.use('ggplot')\n \n fig, ax = plt.subplots()\n #MASUKAN DATA ID DARI DATABASE, DAN JUGA DATA TANGGAL\n ax.bar(uid, lengthy, align='center')\n #UNTUK JUDUL CHARTNYA\n ax.set_title(judul)\n ax.set_ylabel('Total')\n ax.set_xlabel('Tanggal')\n \n ax.set_xticks(uid)\n #TOTAL DATA YANG DIAMBIL DARI DATABASE, DIMASUKAN DISINI\n ax.set_xticklabels((lengthx))\n b = io.BytesIO()\n #CHART DISIMPAN KE FORMAT PNG\n plt.savefig(b, format='png', bbox_inches=\"tight\")\n #CHART YANG SUDAH DIJADIKAN PNG, DISINI DICONVERT KE BASE64\n barChart = base64.b64encode(b.getvalue()).decode(\"utf-8\").replace(\"\\n\", \"\")\n #CHART DITAMPILKAN\n plt.show()\n \n #line\n #MASUKAN DATA DARI DATABASE\n plt.plot(lengthx, lengthy)\n plt.xlabel('Tanggal')\n plt.ylabel('Total')\n #UNTUK JUDUL CHARTNYA\n plt.title(judul)\n plt.grid(True)\n l = io.BytesIO()\n #CHART DISIMPAN KE FORMAT PNG\n plt.savefig(l, format='png', bbox_inches=\"tight\")\n #CHART YANG SUDAH DIJADIKAN PNG, DISINI DICONVERT KE BASE64\n lineChart = base64.b64encode(l.getvalue()).decode(\"utf-8\").replace(\"\\n\", \"\")\n #CHART DITAMPILKAN\n plt.show()\n \n #pie\n #UNTUK JUDUL CHARTNYA\n plt.title(judul)\n #MASUKAN DATA DARI DATABASE\n plt.pie(lengthy, labels=lengthx, autopct='%1.1f%%', \n shadow=True, startangle=180)\n \n plt.axis('equal')\n p = io.BytesIO()\n #CHART DISIMPAN KE FORMAT PNG\n plt.savefig(p, format='png', bbox_inches=\"tight\")\n #CHART YANG SUDAH DIJADIKAN PNG, DISINI DICONVERT KE BASE64\n pieChart = base64.b64encode(p.getvalue()).decode(\"utf-8\").replace(\"\\n\", \"\")\n #CHART DITAMPILKAN\n plt.show()\n \n #MENGAMBIL DATA DARI CSV YANG DIGUNAKAN SEBAGAI HEADER DARI TABLE UNTUK EXCEL DAN JUGA PDF\n header = pd.read_csv(\n os.path.abspath(filePath),\n names=columns,\n keep_default_na=False\n )\n #MENGHAPUS COLUMN YANG TIDAK DIGUNAKAN\n header.fillna('')\n del header['tanggal']\n del header['total']\n #MEMANGGIL FUNGSI EXCEL\n makeExcel(mobile_records, header, name, limit, basePath)\n #MEMANGGIL FUNGSI PDF\n makePDF(mobile_records, header, judul, barChart, lineChart, pieChart, name, subjudul, limit, basePath) \n \n #JIKA GAGAL KONEKSI KE DATABASE, MASUK KESINI UNTUK MENAMPILKAN ERRORNYA\n except (Exception, psycopg2.Error) as error :\n print (error)\n\n #KONEKSI DITUTUP\n finally:\n if(connection):\n cursor.close()\n connection.close()",
"_____no_output_____"
],
[
"#FUNGSI MAKEEXCEL GUNANYA UNTUK MEMBUAT DATA YANG BERASAL DARI DATABASE DIJADIKAN FORMAT EXCEL TABLE F2\n#PLUGIN YANG DIGUNAKAN ADALAH XLSXWRITER",
"_____no_output_____"
],
[
"def makeExcel(datarow, dataheader, name, limit, basePath):\n #MEMBUAT FILE EXCEL\n workbook = xlsxwriter.Workbook(basePath+'jupyter/BLOOMBERG/SektorHargaInflasi/excel/'+name+'.xlsx')\n #MENAMBAHKAN WORKSHEET PADA FILE EXCEL TERSEBUT\n worksheet = workbook.add_worksheet('sheet1')\n #SETINGAN AGAR DIBERIKAN BORDER DAN FONT MENJADI BOLD\n row1 = workbook.add_format({'border': 2, 'bold': 1})\n row2 = workbook.add_format({'border': 2})\n #MENJADIKAN DATA MENJADI ARRAY\n data=list(datarow)\n isihead=list(dataheader.values)\n header = []\n body = []\n \n #LOOPING ATAU PERULANGAN, KEMUDIAN DATA DITAMPUNG PADA VARIABLE DIATAS\n for rowhead in dataheader:\n header.append(str(rowhead))\n \n for rowhead2 in datarow:\n header.append(str(rowhead2[1]))\n \n for rowbody in isihead[1]:\n body.append(str(rowbody))\n \n for rowbody2 in data:\n body.append(str(rowbody2[2]))\n \n #MEMASUKAN DATA DARI VARIABLE DIATAS KE DALAM COLUMN DAN ROW EXCEL\n for col_num, data in enumerate(header):\n worksheet.write(0, col_num, data, row1)\n \n for col_num, data in enumerate(body):\n worksheet.write(1, col_num, data, row2)\n \n #FILE EXCEL DITUTUP\n workbook.close()",
"_____no_output_____"
],
[
"#FUNGSI UNTUK MEMBUAT PDF YANG DATANYA BERASAL DARI DATABASE DIJADIKAN FORMAT EXCEL TABLE F2\n#PLUGIN YANG DIGUNAKAN ADALAH FPDF",
"_____no_output_____"
],
[
"def makePDF(datarow, dataheader, judul, bar, line, pie, name, subjudul, lengthPDF, basePath):\n #FUNGSI UNTUK MENGATUR UKURAN KERTAS, DISINI MENGGUNAKAN UKURAN A4 DENGAN POSISI LANDSCAPE\n pdf = FPDF('L', 'mm', [210,297])\n #MENAMBAHKAN HALAMAN PADA PDF\n pdf.add_page()\n #PENGATURAN UNTUK JARAK PADDING DAN JUGA UKURAN FONT\n pdf.set_font('helvetica', 'B', 20.0)\n pdf.set_xy(145.0, 15.0)\n #MEMASUKAN JUDUL KE DALAM PDF\n pdf.cell(ln=0, h=2.0, align='C', w=10.0, txt=judul, border=0)\n \n #PENGATURAN UNTUK UKURAN FONT DAN JUGA JARAK PADDING\n pdf.set_font('arial', '', 14.0)\n pdf.set_xy(145.0, 25.0)\n #MEMASUKAN SUB JUDUL KE PDF\n pdf.cell(ln=0, h=2.0, align='C', w=10.0, txt=subjudul, border=0)\n #MEMBUAT GARIS DI BAWAH SUB JUDUL\n pdf.line(10.0, 30.0, 287.0, 30.0)\n pdf.set_font('times', '', 10.0)\n pdf.set_xy(17.0, 37.0)\n \n #PENGATURAN UNTUK UKURAN FONT DAN JUGA JARAK PADDING\n pdf.set_font('Times','',10.0) \n #MENGAMBIL DATA HEADER PDF YANG SEBELUMNYA SUDAH DIDEFINISIKAN DIATAS\n datahead=list(dataheader.values)\n pdf.set_font('Times','B',12.0) \n pdf.ln(0.5)\n \n th1 = pdf.font_size\n \n #MEMBUAT TABLE PADA PDF, DAN MENAMPILKAN DATA DARI VARIABLE YANG SUDAH DIKIRIM\n pdf.cell(100, 2*th1, \"Kategori\", border=1, align='C')\n pdf.cell(177, 2*th1, datahead[0][0], border=1, align='C')\n pdf.ln(2*th1)\n pdf.cell(100, 2*th1, \"Jenis\", border=1, align='C')\n pdf.cell(177, 2*th1, datahead[0][1], border=1, align='C')\n pdf.ln(2*th1)\n pdf.cell(100, 2*th1, \"Pengiriman\", border=1, align='C')\n pdf.cell(177, 2*th1, datahead[0][2], border=1, align='C')\n pdf.ln(2*th1)\n pdf.cell(100, 2*th1, \"Satuan\", border=1, align='C')\n pdf.cell(177, 2*th1, datahead[0][3], border=1, align='C')\n pdf.ln(2*th1)\n \n #PENGATURAN PADDING\n pdf.set_xy(17.0, 75.0)\n \n #PENGATURAN UNTUK UKURAN FONT DAN JUGA JARAK PADDING\n pdf.set_font('Times','B',11.0) \n data=list(datarow)\n epw = pdf.w - 2*pdf.l_margin\n col_width = epw/(lengthPDF+1)\n \n #PENGATURAN UNTUK JARAK PADDING\n pdf.ln(0.5)\n th = pdf.font_size\n \n #MEMASUKAN DATA HEADER YANG DIKIRIM DARI VARIABLE DIATAS KE DALAM PDF\n pdf.cell(50, 2*th, str(\"Negara\"), border=1, align='C')\n for row in data:\n pdf.cell(40, 2*th, str(row[1]), border=1, align='C')\n pdf.ln(2*th)\n \n #MEMASUKAN DATA ISI YANG DIKIRIM DARI VARIABLE DIATAS KE DALAM PDF\n pdf.set_font('Times','B',10.0)\n pdf.set_font('Arial','',9)\n pdf.cell(50, 2*th, negara, border=1, align='C')\n for row in data:\n pdf.cell(40, 2*th, str(row[2]), border=1, align='C')\n pdf.ln(2*th)\n \n #MENGAMBIL DATA CHART, KEMUDIAN CHART TERSEBUT DIJADIKAN PNG DAN DISIMPAN PADA DIRECTORY DIBAWAH INI\n #BAR CHART\n bardata = base64.b64decode(bar)\n barname = basePath+'jupyter/BLOOMBERG/SektorHargaInflasi/img/'+name+'-bar.png'\n with open(barname, 'wb') as f:\n f.write(bardata)\n \n #LINE CHART\n linedata = base64.b64decode(line)\n linename = basePath+'jupyter/BLOOMBERG/SektorHargaInflasi/img/'+name+'-line.png'\n with open(linename, 'wb') as f:\n f.write(linedata)\n \n #PIE CHART\n piedata = base64.b64decode(pie)\n piename = basePath+'jupyter/BLOOMBERG/SektorHargaInflasi/img/'+name+'-pie.png'\n with open(piename, 'wb') as f:\n f.write(piedata)\n \n #PENGATURAN UNTUK UKURAN FONT DAN JUGA JARAK PADDING\n pdf.set_xy(17.0, 75.0)\n col = pdf.w - 2*pdf.l_margin\n widthcol = col/3\n #MEMANGGIL DATA GAMBAR DARI DIREKTORY DIATAS\n pdf.image(barname, link='', type='',x=8, y=100, w=widthcol)\n pdf.set_xy(17.0, 75.0)\n col = pdf.w - 2*pdf.l_margin\n pdf.image(linename, link='', type='',x=103, y=100, w=widthcol)\n 
pdf.set_xy(17.0, 75.0)\n col = pdf.w - 2*pdf.l_margin\n pdf.image(piename, link='', type='',x=195, y=100, w=widthcol)\n pdf.ln(2*th)\n \n #MEMBUAT FILE PDF\n pdf.output(basePath+'jupyter/BLOOMBERG/SektorHargaInflasi/pdf/'+name+'.pdf', 'F')",
"_____no_output_____"
],
[
"#DISINI TEMPAT AWAL UNTUK MENDEFINISIKAN VARIABEL VARIABEL SEBELUM NANTINYA DIKIRIM KE FUNGSI\n#PERTAMA MANGGIL FUNGSI UPLOADTOPSQL DULU, KALAU SUKSES BARU MANGGIL FUNGSI MAKECHART\n#DAN DI MAKECHART MANGGIL FUNGSI MAKEEXCEL DAN MAKEPDF",
"_____no_output_____"
],
[
"#DEFINISIKAN COLUMN BERDASARKAN FIELD CSV\ncolumns = [\n \"kategori\",\n \"jenis\",\n \"tanggal\",\n \"total\",\n \"pengiriman\",\n \"satuan\",\n]\n\n#UNTUK NAMA FILE\nname = \"SektorHargaInflasi3_3\"\n#VARIABLE UNTUK KONEKSI KE DATABASE\nhost = \"localhost\"\nusername = \"postgres\"\npassword = \"1234567890\"\nport = \"5432\"\ndatabase = \"bloomberg_SektorHargaInflasi\"\ntable = name.lower()\n#JUDUL PADA PDF DAN EXCEL\njudul = \"Data Sektor Harga Inflasi\"\nsubjudul = \"Badan Perencanaan Pembangunan Nasional\"\n#LIMIT DATA UNTUK SELECT DI DATABASE\nlimitdata = int(8)\n#NAMA NEGARA UNTUK DITAMPILKAN DI EXCEL DAN PDF\nnegara = \"Indonesia\"\n#BASE PATH DIRECTORY\nbasePath = 'C:/Users/ASUS/Documents/bappenas/'\n#FILE CSV\nfilePath = basePath+ 'data mentah/BLOOMBERG/SektorHargaInflasi/' +name+'.csv';\n#KONEKSI KE DATABASE\nengine = create_engine('postgresql://'+username+':'+password+'@'+host+':'+port+'/'+database)\n\n#MEMANGGIL FUNGSI UPLOAD TO PSQL\ncheckUpload = uploadToPSQL(columns, table, filePath, engine)\n#MENGECEK FUNGSI DARI UPLOAD PSQL, JIKA BERHASIL LANJUT MEMBUAT FUNGSI CHART, JIKA GAGAL AKAN MENAMPILKAN PESAN ERROR\nif checkUpload == True:\n makeChart(host, username, password, database, port, table, judul, columns, filePath, name, subjudul, limitdata, negara, basePath)\nelse:\n print(\"Error When Upload CSV\")",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a256f08e3960e2a3e980f9575d698885cfb1f47
| 66,457 |
ipynb
|
Jupyter Notebook
|
_posts/python/pandas/4. Index对象的创建,查、改、增、删和使用.ipynb
|
Acemyzoe/acemyzoe.github.io
|
4229ab253b6f3244ee835e6460c5297bd7ec3bd1
|
[
"MIT"
] | null | null | null |
_posts/python/pandas/4. Index对象的创建,查、改、增、删和使用.ipynb
|
Acemyzoe/acemyzoe.github.io
|
4229ab253b6f3244ee835e6460c5297bd7ec3bd1
|
[
"MIT"
] | 2 |
2020-04-24T01:52:41.000Z
|
2020-05-27T05:52:11.000Z
|
_posts/python/pandas/4. Index对象的创建,查、改、增、删和使用.ipynb
|
Acemyzoe/acemyzoe.github.io
|
4229ab253b6f3244ee835e6460c5297bd7ec3bd1
|
[
"MIT"
] | null | null | null | 22.900414 | 104 | 0.371458 |
[
[
[
"# Index对象的创建,、查、改、增、删和使用\n想要用好pandas,必须了解其核心对象之一的**索引**。\n- 索引类似于元组,其本身是不能赋值修改的;\n- 其在数据进行整体运算时,辅助自动对齐,这是pandas不同于其他数据处理库的一大特征;\n- 多层索引可以帮助改变表的形态,如透视表等。\n\n所以,这一章要仔细学习。",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd",
"_____no_output_____"
]
],
[
[
"# 1. 单层索引",
"_____no_output_____"
],
[
"## 1.1 创建\n##### `pd.Index(data, dtype=Object, name=None)`\n- name:一维列表\n- dtype:索引元素的类型,默认为object型\n- name:索引的名字,类似于列的名字",
"_____no_output_____"
]
],
[
[
"data = ['a','b','c']\nindex = pd.Index(data, name = 'name1')\nindex",
"_____no_output_____"
]
],
[
[
"**从返回值可以看到,index由三部分组成,可以分别查看。**",
"_____no_output_____"
]
],
[
[
"index.name",
"_____no_output_____"
],
[
"index.values",
"_____no_output_____"
],
[
"index.dtype",
"_____no_output_____"
]
],
[
[
"## 1.2 查\n- 查询方式和一维ndarray或Series的.iloc[]完全一样。",
"_____no_output_____"
]
],
[
[
"index[0] # scalar,返回值",
"_____no_output_____"
],
[
"index[0:2] # 范围,返回index",
"_____no_output_____"
],
[
"index[[0, 2]] # 列表,返回index",
"_____no_output_____"
],
[
"mask = [True, False, True] # mask,返回index\nindex[mask]",
"_____no_output_____"
]
],
[
[
"## 1.3 改索引名\n虽然索引的值是不能修改的,但是名字确是可以修改的。",
"_____no_output_____"
],
[
"### 1.3.1 直接改",
"_____no_output_____"
]
],
[
[
"index.name = 'new_name' \nindex",
"_____no_output_____"
],
[
"index.set_names('new_name')",
"_____no_output_____"
]
],
[
[
"## 1.4 增",
"_____no_output_____"
],
[
"### 1.4.1 按位置添加一行\n##### `Index.insert(loc, value)`\n- loc:位置编号\n- value:值",
"_____no_output_____"
]
],
[
[
"index",
"_____no_output_____"
],
[
"index.insert(1,'d')",
"_____no_output_____"
]
],
[
[
"### 1.4.2 尾部添加多行\n##### `Index.append(other)`\n- other:其他索引对象",
"_____no_output_____"
]
],
[
[
"index1 = index.copy()\nindex1",
"_____no_output_____"
],
[
"index1.append(index)",
"_____no_output_____"
]
],
[
[
"### 1.4.2 并\n##### `Index.union(other)`",
"_____no_output_____"
]
],
[
[
"index2 = pd.Index(['b','c','d'])\nindex2",
"_____no_output_____"
],
[
"index1.union(index2)",
"_____no_output_____"
]
],
[
[
"## 1.5 删",
"_____no_output_____"
],
[
"### 1.5.1 按位置删除一行\n##### `Index.delete(loc)`\n- loc:位置编号",
"_____no_output_____"
]
],
[
[
"index1.delete(1)",
"_____no_output_____"
]
],
[
[
"### 1.5.2 按索引删除多行\n##### `Index.drop(labels)`\n- labels:索引列表",
"_____no_output_____"
]
],
[
[
"index1.drop(['a','b'])",
"_____no_output_____"
]
],
[
[
"### 1.5.3 取交\n##### `Index.intersection(other)`",
"_____no_output_____"
]
],
[
[
"index1.intersection(index2) ",
"_____no_output_____"
]
],
[
[
"# 2. 多层索引",
"_____no_output_____"
],
[
"## 2.1 创建\n##### `pd.MultiIndex.from_tuples(labels, names=None)`\n- labels:元组或列表的列表;\n- names:名字的列表。",
"_____no_output_____"
]
],
[
[
"# data = [['a','one'],['a','two'],['b','one']]\ndata = [('a','one'),('a','two'),('b','one')]\nindex = pd.MultiIndex.from_tuples(data, names=['name1','name2'])\nindex",
"_____no_output_____"
],
[
"s = pd.Series([1,2,3], index = index)\ns",
"_____no_output_____"
]
],
[
[
"## 2.2 查\n- 查询方法和单层索引完全一致。",
"_____no_output_____"
]
],
[
[
"index[0] # scalar,返回值",
"_____no_output_____"
],
[
"index[0:2] # 范围,返回MultiIndex",
"_____no_output_____"
],
[
"index[[0,2]] # 列表,返回MultiIndex",
"_____no_output_____"
],
[
"mask = [True, False, True] # mask,返回MultiIndex\nindex[mask]",
"_____no_output_____"
]
],
[
[
"##### 获取某一层索引 MultiIndex.get_level_values(level)\n- level:int,选中的那一层",
"_____no_output_____"
]
],
[
[
"index.get_level_values(0)",
"_____no_output_____"
],
[
"index.get_level_values(1)",
"_____no_output_____"
]
],
[
[
"## 2.3 改",
"_____no_output_____"
],
[
"### 2.3.1 改索引名\n##### `MultiIndex.set_names(names, level=None, inplace=False)`\n- names:要设置的名字,可以为名字的列表;\n- level:多层索引需要设置修改的索引层次,可以为列表,要与names匹配;\n- inplace:是否原地修改。",
"_____no_output_____"
]
],
[
[
"index.set_names('new_name_1',level=0)",
"_____no_output_____"
]
],
[
[
"### 2.3.2 改索引层次顺序\n##### `MultiIndex.swaplevel(i=-2, j=-1)`\n- 改变level i 和level j的次序",
"_____no_output_____"
]
],
[
[
"index.swaplevel()",
"_____no_output_____"
]
],
[
[
"##### `Series.swaplevel(i=-2, j=-1)`\n##### `DataFrame.swaplevel(i=-2, j=-1, axis=1)`\n- axis:0-行索引,1-列索引。\n这两个函数更实用一些。",
"_____no_output_____"
]
],
[
[
"s.swaplevel()",
"_____no_output_____"
],
[
"columns = index.copy()\ncolumns.set_names( names = ['name3','name4'], level = [0,1], inplace = True) #列索引取和行索引相同,只是改了名字\ndf = pd.DataFrame([[1,2,3],[4,5,6],[7,8,9]], index= index, columns = columns)\ndf",
"_____no_output_____"
],
[
"df.swaplevel(axis=1) # 交换列索引顺序",
"_____no_output_____"
]
],
[
[
"# 3. 多层索引使用方法\n当对values进行查看时,多层索引可以分开使用。",
"_____no_output_____"
]
],
[
[
"df1 = df.copy()\ndf1",
"_____no_output_____"
]
],
[
[
"**索引为空不代表缺失,缺省写法,意思是之前的索引一致。**",
"_____no_output_____"
],
[
"## 3.1 对于外层索引\n**记住:**\n- 无论是Series还是DataFrame,外层索引都是可以直接使用,也就是说可以认为只有这一层索引;\n- **用法和第二篇查、改、增、删提到的方法完全相同**。",
"_____no_output_____"
],
[
"### 3.1.1 []\n快捷操作,还是四种用法。",
"_____no_output_____"
]
],
[
[
"df1",
"_____no_output_____"
],
[
"df1['b'] # 列外层",
"_____no_output_____"
],
[
"df1[['a','b']] # 列外层",
"_____no_output_____"
],
[
"df1[0:2] # 行外层",
"_____no_output_____"
],
[
"mask =[True, False, True] # 行外层\ndf1[mask]",
"_____no_output_____"
]
],
[
[
"### 3.1.2 .loc[]\n行和列索引都使用索引形式。\n\n**下面都以第一维度为例,第二维可以类比。**",
"_____no_output_____"
]
],
[
[
"df1.loc['a','b'] # 单行索引'a' ",
"_____no_output_____"
],
[
"df1.loc['a':'b', 'b'] #范围'a': 'b'",
"_____no_output_____"
],
[
"df1.loc[['a','b'], 'b'] #列表",
"_____no_output_____"
],
[
"mask = [True, False, True] # mask\ndf1.loc[mask,'b']",
"_____no_output_____"
]
],
[
[
"### 3.1.3 .iloc[]\n这个简单,可以把索引都忽略掉,行和列都使用位置形式。",
"_____no_output_____"
]
],
[
[
"df1.iloc[0,0:2] ",
"_____no_output_____"
],
[
"df1.iloc[0:2, 0:2] ",
"_____no_output_____"
],
[
"df1.iloc[[0,1],0:2] ",
"_____no_output_____"
],
[
"mask = [True, False, True]\ndf1.iloc[mask,0:2]",
"_____no_output_____"
]
],
[
[
"## 3.2 对于内层索引\n- **内层索引不可直接使用,必须先外层、再内层,直接使用会报错;**\n- 内层只能使用单索引形式,其他形式报错。",
"_____no_output_____"
],
[
"### 3.2.1 [ , ]\n快捷操作,只有一种用法,取出一列。",
"_____no_output_____"
]
],
[
[
"df1",
"_____no_output_____"
],
[
"df1['a','one'] #取一列,先外层单列索引,再内层单列索引,其他形式都报错",
"_____no_output_____"
]
],
[
[
"### 3.2.2 .loc[ , ]",
"_____no_output_____"
]
],
[
[
"df1.loc['a','one'] # 取一行,先外层单行索引,再内层单列索引,其他形式都报错",
"_____no_output_____"
]
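As a small additional sketch (the frame is rebuilt here so the snippet is self-contained), both index levels can also be addressed at once with tuples, which sidesteps the outer-then-inner restriction:

```python
import pandas as pd

# Rebuild the same 3x3 frame used above, with two-level row and column indexes.
idx = pd.MultiIndex.from_tuples([('a', 'one'), ('a', 'two'), ('b', 'one')],
                                names=['name1', 'name2'])
cols = idx.set_names(['name3', 'name4'])
df1 = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=idx, columns=cols)

# Tuple-based selection addresses both index levels in one go.
df1.loc[('a', 'one'), ('b', 'one')]        # a single scalar value
df1.loc[[('a', 'one'), ('b', 'one')], :]   # several fully specified rows
```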
],
[
[
"### 3.2.3 .iloc[ , ]\n这种方法不受影响,因为 .iloc[] 无视索引,只按照位置定位,所以和3.1.3节外层索引部分完全相同。",
"_____no_output_____"
],
[
"## 3.3 xs直接选取法\n适合在单层level选取,不能行列同时操作。\n##### `Series.xs(key, level=None, drop_level=True)`\n##### `DataFrame.xs(key, axis=0, level=None, drop_level=True)`\n- key: 要选取的索引值或其列表;\n- axis:0-行索引,1-列索引;\n- level:索引层次;\n- drop_level:True or False,是否显示用于选取的level索引,默认不显示。",
"_____no_output_____"
]
],
[
[
"df1 = df.copy()\ndf1",
"_____no_output_____"
],
[
"df1.xs('one', axis=0, level=1) # 行索引的level 1, 有两行",
"_____no_output_____"
],
[
"df1.xs('two', axis=1, level=1) # 列索引的level 1,有一列",
"_____no_output_____"
]
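One more hedged example on top of the cells above (it re-uses the `df1` defined there): passing `drop_level=False` keeps the level used for selection in the result instead of dropping it.

```python
# Keep the level used for selection in the result (the default drop_level=True removes it).
df1.xs('one', axis=0, level=1, drop_level=False)
df1.xs('two', axis=1, level=1, drop_level=False)
```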
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a257d7e1c7e0059603bbb15cf206aa205882f67
| 16,946 |
ipynb
|
Jupyter Notebook
|
Mission_to_Mars/mission_to_mars.ipynb
|
manny-p/web-scraping-challenge
|
d1993f94a53180bd20b83b0113f4b627fef2072f
|
[
"ADSL"
] | null | null | null |
Mission_to_Mars/mission_to_mars.ipynb
|
manny-p/web-scraping-challenge
|
d1993f94a53180bd20b83b0113f4b627fef2072f
|
[
"ADSL"
] | null | null | null |
Mission_to_Mars/mission_to_mars.ipynb
|
manny-p/web-scraping-challenge
|
d1993f94a53180bd20b83b0113f4b627fef2072f
|
[
"ADSL"
] | null | null | null | 32.278095 | 979 | 0.458515 |
[
[
[
"# Dependencies\nimport pandas as pd\nfrom bs4 import BeautifulSoup as bs\nfrom splinter import Browser\nfrom webdriver_manager.chrome import ChromeDriverManager",
"_____no_output_____"
],
[
"executable_path = {'executable_path': ChromeDriverManager().install()}\nbrowser = Browser('chrome', **executable_path, headless=False)",
"\n\n====== WebDriver manager ======\nCurrent google-chrome version is 93.0.4577\nGet LATEST driver version for 93.0.4577\nDriver [/Users/manuelparra/.wdm/drivers/chromedriver/mac64/93.0.4577.63/chromedriver] found in cache\n"
],
[
"# URL of page to be scraped\nurl = 'https://redplanetscience.com/'\nbrowser.visit(url)\nhtml = browser.html\nsoup = bs(html, 'html.parser')",
"_____no_output_____"
],
[
"# Retrieve content title\nnews_title = soup.find_all('div', class_='content_title')[0].text\nnews_title",
"_____no_output_____"
],
[
"# Retrieve latest news paragraph\nnews_p = soup.find_all('div', class_='article_teaser_body')[0].text\nnews_p",
"_____no_output_____"
],
[
"# JPL Mars Space Image\njpl_url = 'https://spaceimages-mars.com/'\njpl_img_url = 'https://spaceimages-mars.com/image/featured/mars2.jpg'\nbrowser.visit(jpl_img_url)\n\nhtml = browser.html\n\nsoup = bs(html, 'html.parser')",
"_____no_output_____"
],
[
"# Retrieve image link\nimg_path = soup.find_all('img')[0]['src']\nimg_path",
"_____no_output_____"
],
[
"# Scrape Mars Facts\nurl = 'https://galaxyfacts-mars.com/'\ntable = pd.read_html(url)\ntable",
"_____no_output_____"
],
[
"mars_facts = table[0]\nmars_facts = mars_facts.rename(columns={0: \"Mars - Earth Comparison\", 1: \"Mars\", 2: \"Earth\"})\nmars_facts.drop(index=mars_facts.index[0],\n axis=0,\n inplace=True)\nmars_facts.set_index(\"Mars - Earth Comparison\", inplace=True)\nmars_facts",
"_____no_output_____"
],
[
"html_table = mars_facts.to_html()\nhtml_table",
"_____no_output_____"
],
[
"html_table.replace('\\n', '')\nprint(html_table)",
"<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>Mars</th>\n <th>Earth</th>\n </tr>\n <tr>\n <th>Mars - Earth Comparison</th>\n <th></th>\n <th></th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>Diameter:</th>\n <td>6,779 km</td>\n <td>12,742 km</td>\n </tr>\n <tr>\n <th>Mass:</th>\n <td>6.39 × 10^23 kg</td>\n <td>5.97 × 10^24 kg</td>\n </tr>\n <tr>\n <th>Moons:</th>\n <td>2</td>\n <td>1</td>\n </tr>\n <tr>\n <th>Distance from Sun:</th>\n <td>227,943,824 km</td>\n <td>149,598,262 km</td>\n </tr>\n <tr>\n <th>Length of Year:</th>\n <td>687 Earth days</td>\n <td>365.24 days</td>\n </tr>\n <tr>\n <th>Temperature:</th>\n <td>-87 to -5 °C</td>\n <td>-88 to 58°C</td>\n </tr>\n </tbody>\n</table>\n"
],
[
"# Mars Hemispheres\nurl = 'https://marshemispheres.com/'\nbrowser.visit(url)\nhtml = browser.html\nsoup = bs(html, 'html.parser')",
"_____no_output_____"
],
[
"# Extract hemispheres item elements\nmars_hems = soup.find('div', class_='collapsible results')\nmars_item = mars_hems.find_all('div', class_='item')\nhemisphere_img_urls = []",
"_____no_output_____"
],
[
"for i in mars_item:\n \n hemisphere = i.find('div', class_=\"description\")\n title = hemisphere.h3.text\n\n hems_link = hemisphere.a[\"href\"]\n browser.visit(url + hems_link)\n image_html = browser.html\n image_soup = bs(image_html, 'html.parser')\n image_link = image_soup.find('div', class_='wide-image-wrapper')\n image_url = image_link.find('img', class_='wide-image')['src']\n\n image_dict = {'title': title, 'img_url': image_url}\n hemisphere_img_urls.append(image_dict)\n\nprint(hemisphere_img_urls)\n",
"[{'title': 'Cerberus Hemisphere Enhanced', 'img_url': 'images/f5e372a36edfa389625da6d0cc25d905_cerberus_enhanced.tif_full.jpg'}, {'title': 'Schiaparelli Hemisphere Enhanced', 'img_url': 'images/3778f7b43bbbc89d6e3cfabb3613ba93_schiaparelli_enhanced.tif_full.jpg'}, {'title': 'Syrtis Major Hemisphere Enhanced', 'img_url': 'images/555e6403a6ddd7ba16ddb0e471cadcf7_syrtis_major_enhanced.tif_full.jpg'}, {'title': 'Valles Marineris Hemisphere Enhanced', 'img_url': 'images/b3c7c6c9138f57b4756be9b9c43e3a48_valles_marineris_enhanced.tif_full.jpg'}]\n"
],
[
"mars_dict = {\n \"news_title\": news_title,\n \"news_p\": news_p,\n \"featured_image_url\": img_path,\n \"fact_table\": table,\n \"hemisphere_images\": hemisphere_img_urls\n}",
"_____no_output_____"
],
[
"mars_dict",
"_____no_output_____"
]
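The scraped `img_url` values above are relative paths. A small follow-up sketch (an assumption about the desired output, not part of the original notebook) that turns them into absolute URLs using the same hemisphere base URL:

```python
# Prefix the relative image paths with the site base URL (assumed to be the same
# https://marshemispheres.com/ visited above) so the links resolve outside the site.
base_url = 'https://marshemispheres.com/'
for hemi in hemisphere_img_urls:
    if not hemi['img_url'].startswith('http'):
        hemi['img_url'] = base_url + hemi['img_url']
hemisphere_img_urls
```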
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a258b91d1753436bcddddf548d6880e58bd315f
| 116,114 |
ipynb
|
Jupyter Notebook
|
notebooks/3.0-fb-gas_stations_along_route.ipynb
|
WGierke/informatiCup2018
|
a07c21f6e092e516028c2f45594efbb2071d9b79
|
[
"MIT"
] | null | null | null |
notebooks/3.0-fb-gas_stations_along_route.ipynb
|
WGierke/informatiCup2018
|
a07c21f6e092e516028c2f45594efbb2071d9b79
|
[
"MIT"
] | 12 |
2018-02-10T14:37:14.000Z
|
2021-06-01T21:52:44.000Z
|
notebooks/3.0-fb-gas_stations_along_route.ipynb
|
WGierke/informatiCup2018
|
a07c21f6e092e516028c2f45594efbb2071d9b79
|
[
"MIT"
] | 1 |
2018-01-27T11:20:40.000Z
|
2018-01-27T11:20:40.000Z
| 153.184697 | 24,503 | 0.776125 |
[
[
[
"%matplotlib inline\nimport numpy as np\nimport pandas as pd\nimport os\nimport sys\nsys.path.append('..')\nimport geopandas as gpd\nfrom shapely.geometry import Point\nfrom shapely.geometry import LineString\nfrom shapely.geometry import MultiPoint",
"_____no_output_____"
],
[
"GAS_STATIONS_PATH = os.path.join('..', 'data', 'raw', 'input_data', 'Eingabedaten', 'Tankstellen.csv')\ngas_stations_df = pd.read_csv(GAS_STATIONS_PATH, sep=';', names=['id', 'Name', 'Company', 'Street', 'House_Number', 'Postalcode', 'City', 'Lat', 'Long'],index_col='id')",
"_____no_output_____"
],
[
"gas_stations_df['Position'] = gas_stations_df.apply(lambda row: Point(row.Long, row.Lat), axis=1)",
"_____no_output_____"
],
[
"gas_stations_df.head(3)",
"_____no_output_____"
],
[
"positions = gpd.GeoSeries(gas_stations_df['Position'])",
"_____no_output_____"
]
],
[
[
"### Bounding Box Approach ",
"_____no_output_____"
]
],
[
[
"def get_gas_stations_in_area(bounding_box):\n \"\"\" bounding box is a (minx, miny, maxx, maxy) tuple\"\"\" # x = long, y = lat\n min_long, min_lat, max_long, max_lat = bounding_box\n assert min_long < max_long\n assert min_lat < max_lat\n return set(positions.cx[min_long:max_long,min_lat:max_lat].index)",
"_____no_output_____"
],
[
"def get_gas_stations_in_boxes(bounding_boxes):\n ids = [get_gas_stations_in_area(box) for box in bounding_boxes]\n return list(set.union(*ids))",
"_____no_output_____"
],
[
"boxes_potsdam_berlin = [((52.34416775186111, 13.092272842330203), (52.35864093016666, 13.187254280776756)),((52.35864093016666, 13.044782123106984), (52.37311410847222, 13.187254280776756)),((52.37311410847222, 13.021036763495317), (52.38758728677778, 13.210999640388309)),((52.38758728677778, 13.021036763495317), (52.41653364338889, 13.234744999999975)),((52.41653364338889, 13.139763561553536), (52.431006821694446, 13.234744999999975)),((52.431006821694446, 13.16350892116509), (52.44548, 13.258490359611642)),((52.44548, 13.16350892116509), (52.459953178305554, 13.282235719223195)),((52.459953178305554, 13.16350892116509), (52.474426356611126, 13.305981078834861)),((52.474426356611126, 13.187254280776756), (52.48889953491667, 13.305981078834861)),((52.48889953491667, 13.210999640388309), (52.503372713222234, 13.424707876892967)),((52.503372713222234, 13.234744999999975), (52.53231906983335, 13.448453236504633)),((52.53231906983335, 13.35347179805808), (52.5467922481389, 13.448453236504633))]\nboxes_small_potsdam_berlin = [((52.380350697625, 13.044782123106984), (52.40206046508334, 13.068527482718537)),((52.37311410847222, 13.068527482718537), (52.40206046508334, 13.080400162524484)),((52.36587751931945, 13.080400162524484), (52.40206046508334, 13.104145522136037)),((52.35864093016666, 13.104145522136037), (52.394823875930555, 13.11601820194187)),((52.35864093016666, 13.11601820194187), (52.38758728677778, 13.127890881747703)),((52.35864093016666, 13.127890881747703), (52.394823875930555, 13.139763561553536)),((52.35864093016666, 13.139763561553536), (52.40206046508334, 13.16350892116509)),((52.35864093016666, 13.16350892116509), (52.41653364338889, 13.175381600970923)),((52.380350697625, 13.175381600970923), (52.45271658915278, 13.187254280776756)),((52.380350697625, 13.187254280776756), (52.459953178305554, 13.19912696058259)),((52.394823875930555, 13.19912696058259), (52.467189767458336, 13.210999640388309)),((52.431006821694446, 13.210999640388309), (52.4816629457639, 13.222872320194142)),((52.43824341084722, 13.222872320194142), (52.48889953491667, 13.234744999999975)),((52.44548, 13.234744999999975), (52.49613612406946, 13.246617679805809)),((52.459953178305554, 13.246617679805809), (52.51060930237501, 13.258490359611642)),((52.467189767458336, 13.258490359611642), (52.517845891527784, 13.270363039417362)),((52.474426356611126, 13.270363039417362), (52.52508248068057, 13.282235719223195)),((52.48889953491667, 13.282235719223195), (52.52508248068057, 13.294108399029028)),((52.49613612406946, 13.294108399029028), (52.52508248068057, 13.305981078834861)),((52.503372713222234, 13.305981078834861), (52.52508248068057, 13.377217157669747)),((52.503372713222234, 13.377217157669747), (52.53231906983335, 13.412835197087134)),((52.51060930237501, 13.412835197087134), (52.53231906983335, 13.424707876892967))]",
"_____no_output_____"
],
[
"def js_box_2_python_box(js_boxes):\n return [(min_long, min_lat, max_long, max_lat) for ((min_lat,min_long),(max_lat,max_long)) in js_boxes]",
"_____no_output_____"
],
[
"boxes_potsdam_berlin_nice = js_box_2_python_box(boxes_potsdam_berlin)\nres = get_gas_stations_in_boxes(boxes_potsdam_berlin_nice)\ngpd.GeoSeries(gas_stations_df.loc[res]['Position']).plot()",
"_____no_output_____"
],
[
"boxes_potsdam_berlin_nice = js_box_2_python_box(boxes_small_potsdam_berlin)\nres = get_gas_stations_in_boxes(boxes_potsdam_berlin_nice)\ngpd.GeoSeries(gas_stations_df.loc[res]['Position']).plot();",
"_____no_output_____"
]
],
[
[
"### Buffer Approach ",
"_____no_output_____"
]
],
[
[
"path_potsdam_berlin = [(52.390530000000005, 13.064540000000001),(52.39041, 13.065890000000001),(52.39025, 13.06723),(52.39002000000001, 13.068810000000001),(52.389970000000005, 13.069350000000002),(52.38998, 13.06948),(52.389860000000006, 13.07028),(52.38973000000001, 13.07103),(52.38935000000001, 13.07352),(52.3892, 13.07463),(52.38918, 13.075120000000002),(52.389210000000006, 13.07553),(52.389300000000006, 13.0759),(52.3894, 13.076130000000001),(52.389520000000005, 13.07624),(52.38965, 13.07638),(52.389880000000005, 13.0767),(52.390100000000004, 13.077110000000001),(52.390330000000006, 13.077770000000001),(52.390440000000005, 13.078660000000001),(52.39052, 13.079400000000001),(52.390570000000004, 13.08004),(52.39056000000001, 13.08037),(52.390550000000005, 13.0806),(52.390530000000005, 13.080990000000002),(52.390420000000006, 13.083100000000002),(52.390440000000005, 13.083400000000001),(52.39038000000001, 13.083430000000002),(52.39011000000001, 13.0836),(52.38853, 13.084660000000001),(52.38801, 13.0851),(52.38774, 13.085410000000001),(52.38754, 13.085730000000002),(52.38729000000001, 13.086300000000001),(52.38689, 13.087610000000002),(52.386500000000005, 13.088960000000002),(52.38611, 13.09026),(52.38602, 13.090700000000002),(52.3858, 13.09121),(52.385290000000005, 13.092300000000002),(52.38477, 13.09331),(52.384040000000006, 13.094650000000001),(52.383500000000005, 13.095670000000002),(52.38302, 13.096580000000001),(52.37538000000001, 13.110970000000002),(52.37485, 13.112020000000001),(52.37471000000001, 13.112340000000001),(52.37436, 13.113220000000002),(52.373990000000006, 13.114300000000002),(52.37379000000001, 13.11494),(52.373580000000004, 13.11578),(52.37304, 13.11809),(52.37266, 13.119740000000002),(52.37252, 13.120540000000002),(52.37238000000001, 13.121540000000001),(52.37227000000001, 13.122710000000001),(52.37225, 13.12311),(52.372220000000006, 13.12376),(52.372220000000006, 13.124830000000001),(52.372260000000004, 13.128100000000002),(52.37229000000001, 13.131340000000002),(52.37234, 13.1369),(52.37232, 13.13785),(52.37228, 13.13859),(52.37220000000001, 13.13958),(52.37216, 13.140500000000001),(52.372150000000005, 13.141950000000001),(52.37218000000001, 13.14399),(52.37228, 13.147120000000001),(52.3723, 13.14906),(52.37232, 13.151140000000002),(52.37228, 13.15149),(52.37225, 13.151850000000001),(52.37219, 13.152070000000002),(52.372130000000006, 13.152210000000002),(52.372040000000005, 13.152360000000002),(52.371930000000006, 13.15248),(52.37181, 13.152560000000001),(52.37167, 13.152600000000001),(52.37153000000001, 13.152600000000001),(52.3714, 13.152550000000002),(52.371300000000005, 13.15248),(52.3712, 13.152370000000001),(52.37106000000001, 13.152130000000001),(52.37098, 13.151840000000002),(52.37095000000001, 13.151560000000002),(52.370960000000004, 13.15136),(52.371, 13.151090000000002),(52.37109, 13.150830000000001),(52.3712, 13.15066),(52.37129, 13.15056),(52.371460000000006, 13.15046),(52.37163, 13.150430000000002),(52.37181, 13.150400000000001),(52.37322, 13.150360000000001),(52.373670000000004, 13.150350000000001),(52.37375, 13.15032),(52.37451, 13.150310000000001),(52.375710000000005, 13.15028),(52.37670000000001, 13.150250000000002),(52.376960000000004, 13.150250000000002),(52.37715000000001, 13.150220000000001),(52.37742, 13.150160000000001),(52.377720000000004, 13.15013),(52.378040000000006, 13.150120000000001),(52.37812, 13.15009),(52.37825, 13.15004),(52.378800000000005, 13.15004),(52.379270000000005, 13.15009),(52.37962, 
13.150150000000002),(52.380010000000006, 13.150240000000002),(52.380370000000006, 13.150360000000001),(52.380990000000004, 13.150620000000002),(52.38165000000001, 13.15098),(52.383500000000005, 13.152170000000002),(52.38440000000001, 13.15277),(52.3858, 13.153670000000002),(52.387080000000005, 13.1545),(52.38745, 13.154760000000001),(52.38768, 13.15496),(52.38794000000001, 13.155190000000001),(52.388380000000005, 13.155660000000001),(52.38891, 13.156350000000002),(52.38927, 13.156920000000001),(52.38965, 13.15755),(52.38984000000001, 13.15792),(52.39011000000001, 13.158520000000001),(52.390460000000004, 13.15943),(52.39074, 13.160380000000002),(52.392900000000004, 13.169300000000002),(52.39408, 13.1742),(52.39439, 13.175370000000001),(52.394830000000006, 13.176800000000002),(52.395320000000005, 13.17805),(52.39578, 13.179070000000001),(52.39621, 13.17993),(52.39678000000001, 13.18092),(52.39714000000001, 13.18148),(52.3975, 13.181970000000002),(52.398340000000005, 13.183000000000002),(52.39922000000001, 13.184000000000001),(52.399530000000006, 13.18438),(52.40012, 13.18504),(52.400940000000006, 13.185910000000002),(52.40171, 13.186750000000002),(52.402260000000005, 13.187420000000001),(52.403830000000006, 13.18917),(52.407830000000004, 13.193690000000002),(52.40982, 13.19593),(52.410230000000006, 13.19631),(52.41085, 13.19678),(52.411280000000005, 13.197030000000002),(52.41158000000001, 13.197180000000001),(52.41223, 13.197420000000001),(52.412620000000004, 13.197510000000001),(52.413030000000006, 13.19757),(52.413880000000006, 13.19757),(52.41407, 13.197560000000001),(52.41452, 13.197470000000001),(52.41536000000001, 13.19729),(52.41561, 13.197210000000002),(52.416720000000005, 13.19697),(52.417570000000005, 13.196760000000001),(52.41827000000001, 13.196610000000002),(52.42042000000001, 13.196130000000002),(52.4217, 13.195850000000002),(52.422740000000005, 13.19561),(52.423030000000004, 13.195500000000001),(52.42322000000001, 13.195390000000002),(52.423410000000004, 13.195260000000001),(52.42360000000001, 13.195120000000001),(52.42381, 13.194930000000001),(52.42409000000001, 13.194640000000001),(52.42443, 13.194170000000002),(52.424820000000004, 13.1935),(52.425160000000005, 13.19293),(52.42549, 13.192450000000001),(52.425720000000005, 13.192160000000001),(52.42607, 13.191820000000002),(52.426300000000005, 13.191640000000001),(52.42649, 13.19152),(52.42685, 13.191350000000002),(52.427310000000006, 13.191230000000001),(52.427530000000004, 13.191210000000002),(52.427890000000005, 13.191230000000001),(52.42887, 13.191460000000001),(52.43121000000001, 13.19204),(52.43244000000001, 13.192340000000002),(52.43292, 13.19246),(52.433400000000006, 13.1926),(52.43365000000001, 13.19269),(52.43403000000001, 13.192870000000001),(52.434470000000005, 13.193150000000001),(52.43478, 13.19339),(52.43506000000001, 13.193650000000002),(52.435340000000004, 13.19396),(52.43573000000001, 13.194440000000002),(52.43797000000001, 13.197270000000001),(52.438610000000004, 13.198080000000001),(52.44021000000001, 13.2001),(52.44169, 13.20198),(52.44489, 13.206010000000001),(52.446180000000005, 13.207640000000001),(52.45031, 13.212860000000001),(52.47092000000001, 13.238930000000002),(52.472350000000006, 13.240730000000001),(52.47289000000001, 13.24136),(52.474680000000006, 13.243440000000001),(52.47838, 13.247610000000002),(52.48109, 13.250670000000001),(52.48225000000001, 13.25201),(52.482800000000005, 13.2527),(52.48602, 13.25679),(52.48906, 13.260610000000002),(52.491670000000006, 13.26392),(52.49271, 
13.26524),(52.49497, 13.268040000000001),(52.495160000000006, 13.268360000000001),(52.495760000000004, 13.26917),(52.496280000000006, 13.26984),(52.497170000000004, 13.27105),(52.497840000000004, 13.27194),(52.49857, 13.272870000000001),(52.49895000000001, 13.273460000000002),(52.49916, 13.273930000000002),(52.49929, 13.27434),(52.499390000000005, 13.274840000000001),(52.499460000000006, 13.275440000000001),(52.49949, 13.275970000000001),(52.49956, 13.277550000000002),(52.49963, 13.27838),(52.49969, 13.278830000000001),(52.499770000000005, 13.27918),(52.499900000000004, 13.279630000000001),(52.500060000000005, 13.28002),(52.500220000000006, 13.280330000000001),(52.50027000000001, 13.28035),(52.500370000000004, 13.28049),(52.50054, 13.280690000000002),(52.5007, 13.28082),(52.50085000000001, 13.280880000000002),(52.501020000000004, 13.2809),(52.50117, 13.280880000000002),(52.50155, 13.280740000000002),(52.50173, 13.280690000000002),(52.501960000000004, 13.28068),(52.502210000000005, 13.280780000000002),(52.502390000000005, 13.28086),(52.503310000000006, 13.28194),(52.50368, 13.282330000000002),(52.503930000000004, 13.282520000000002),(52.50423000000001, 13.28269),(52.504560000000005, 13.28279),(52.50522, 13.282820000000001),(52.50553000000001, 13.28284),(52.50583, 13.282890000000002),(52.50598, 13.282940000000002),(52.506350000000005, 13.283100000000001),(52.506620000000005, 13.28326),(52.508250000000004, 13.284370000000001),(52.509620000000005, 13.28527),(52.51070000000001, 13.28592),(52.511100000000006, 13.286100000000001),(52.511210000000005, 13.286150000000001),(52.51158, 13.286230000000002),(52.511700000000005, 13.286380000000001),(52.511810000000004, 13.286420000000001),(52.51239, 13.28658),(52.512570000000004, 13.28668),(52.512800000000006, 13.28687),(52.5129, 13.286890000000001),(52.51297, 13.286890000000001),(52.51299, 13.28706),(52.51301, 13.28738),(52.51308, 13.28842),(52.51274, 13.288520000000002),(52.51194, 13.288760000000002),(52.511300000000006, 13.288960000000001),(52.510560000000005, 13.289200000000001),(52.510380000000005, 13.289240000000001),(52.51043000000001, 13.289950000000001),(52.510510000000004, 13.291240000000002),(52.51066, 13.293750000000001),(52.51122, 13.30202),(52.51147, 13.30563),(52.51184000000001, 13.31169),(52.512080000000005, 13.315150000000001),(52.51239, 13.320010000000002),(52.51241, 13.320640000000001),(52.51234, 13.32089),(52.512280000000004, 13.320950000000002),(52.51218, 13.321090000000002),(52.51207, 13.32136),(52.51203, 13.3215),(52.51202000000001, 13.321800000000001),(52.51203, 13.322030000000002),(52.512060000000005, 13.322260000000002),(52.512150000000005, 13.322560000000001),(52.512280000000004, 13.32277),(52.512350000000005, 13.322840000000001),(52.51240000000001, 13.322880000000001),(52.51249000000001, 13.323070000000001),(52.512530000000005, 13.32314),(52.512550000000005, 13.32319),(52.512600000000006, 13.32333),(52.51263, 13.32342),(52.51265000000001, 13.323550000000001),(52.512950000000004, 13.32801),(52.513180000000006, 13.33182),(52.513470000000005, 13.33604),(52.5142, 13.346560000000002),(52.51433, 13.348690000000001),(52.51429, 13.34889),(52.51415, 13.349290000000002),(52.51404, 13.349480000000002),(52.513960000000004, 13.349680000000001),(52.51393, 13.349810000000002),(52.51391, 13.350100000000001),(52.51393, 13.35035),(52.513980000000004, 13.350570000000001),(52.514050000000005, 13.350740000000002),(52.514190000000006, 13.350950000000001),(52.51424, 13.350990000000001),(52.51444000000001, 13.351400000000002),(52.51453000000001, 
13.351650000000001),(52.5146, 13.352200000000002),(52.51512, 13.36029),(52.51549000000001, 13.36617),(52.51567000000001, 13.369250000000001),(52.515950000000004, 13.37339),(52.51612, 13.376000000000001),(52.51615, 13.376740000000002),(52.51603000000001, 13.37682),(52.51596000000001, 13.376920000000002),(52.51585000000001, 13.37719),(52.51578000000001, 13.37733),(52.515710000000006, 13.37742),(52.515600000000006, 13.37747),(52.515480000000004, 13.37747),(52.51491000000001, 13.37738),(52.51458, 13.377360000000001),(52.514630000000004, 13.378250000000001),(52.514680000000006, 13.379040000000002),(52.51485, 13.379980000000002),(52.515150000000006, 13.381620000000002),(52.51521, 13.3823),(52.515350000000005, 13.38447),(52.515460000000004, 13.386030000000002),(52.51586, 13.38597),(52.51628, 13.385900000000001),(52.51668, 13.385860000000001),(52.51675, 13.38733),(52.51682, 13.388470000000002),(52.51688000000001, 13.3892),(52.51690000000001, 13.389650000000001),(52.51699000000001, 13.39024),(52.517010000000006, 13.3907),(52.51711, 13.392230000000001),(52.51717000000001, 13.392970000000002),(52.51724, 13.39333),(52.51731, 13.39413),(52.517340000000004, 13.394860000000001),(52.517430000000004, 13.39628),(52.517500000000005, 13.397430000000002),(52.51762, 13.398850000000001),(52.517720000000004, 13.39943),(52.517790000000005, 13.39971),(52.517900000000004, 13.400020000000001),(52.51796, 13.400260000000001),(52.51803, 13.400490000000001),(52.518640000000005, 13.4021),(52.51887000000001, 13.40262),(52.519000000000005, 13.40295),(52.51939, 13.4037),(52.519890000000004, 13.404660000000002),(52.520010000000006, 13.404950000000001)]",
"_____no_output_____"
],
[
"pb = LineString([(x,y) for y,x in path_potsdam_berlin])",
"_____no_output_____"
],
[
"# 1 grad sind ca 111km => entfernung von 1km = 0.01\npb.buffer(.02)",
"_____no_output_____"
],
[
"m = MultiPoint(list(zip(gas_stations_df['Long'],gas_stations_df['Lat'])))",
"_____no_output_____"
],
[
"pb.buffer(.02).intersection(m)",
"_____no_output_____"
]
],
[
[
"Keep a data set that is indexed by postion",
"_____no_output_____"
]
],
[
[
"def hash_pos(lat,long):\n return str(lat) + ':' + str(long)",
"_____no_output_____"
],
[
"gas_station_pos_index = gas_stations_df.copy()",
"_____no_output_____"
],
[
"gas_station_pos_index['str_pos'] = gas_station_pos_index.apply(lambda row: hash_pos(row.Lat,row.Long), axis=1)\ngas_station_pos_index = gas_station_pos_index.reset_index().set_index('str_pos')",
"_____no_output_____"
],
[
"gas_stations_near_path = [hash_pos(point.y,point.x) for point in pb.buffer(.02).intersection(m) ]",
"_____no_output_____"
],
[
"gas_station_pos_index.loc[gas_stations_near_path]['id']",
"_____no_output_____"
]
],
[
[
"### Find the point on the path closest to a gas station",
"_____no_output_____"
]
],
[
[
"gas_stations = pb.buffer(.02).intersection(m) ",
"_____no_output_____"
],
[
"gas_stations[0].union(pb)",
"_____no_output_____"
],
[
"def closest_point_on_path(path,point):\n return path.interpolate(path.project(point))",
"_____no_output_____"
],
[
"def length_on_line(path,point):\n return path.project(point,normalized=True)",
"_____no_output_____"
],
[
"closest_point_on_path(pb,gas_stations[0])",
"_____no_output_____"
],
[
"length_on_line(pb,gas_stations[0])",
"_____no_output_____"
],
[
"gas_stations[-1].union(pb)",
"_____no_output_____"
],
[
"MultiPoint([closest_point_on_path(pb,p) for p in gas_stations])",
"_____no_output_____"
],
[
"pb.length * 111",
"_____no_output_____"
],
[
"[length_on_line(pb,p) for p in gas_stations]",
"_____no_output_____"
]
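A short follow-up sketch (re-using `pb`, `gas_stations` and `length_on_line` from the cells above) that combines the normalized projections with the rough 111 km per degree conversion to get an approximate distance, in km from the start of the route, for each station's closest point:

```python
# Approximate km from the start of the route to each station's projection point.
# Uses the same rough "1 degree is about 111 km" conversion as above.
route_km = pb.length * 111
station_km = [length_on_line(pb, p) * route_km for p in gas_stations]
station_km
```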
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a259bae3d20e06654ab3faf7a1e207f180893eb
| 12,352 |
ipynb
|
Jupyter Notebook
|
2019-02-13-Wine-Dataset.ipynb
|
brianspiering/PyDataSIG
|
6817949877cfb6706923136275ef345e17f8ce71
|
[
"Apache-2.0"
] | 2 |
2019-04-18T05:32:51.000Z
|
2019-05-15T21:59:36.000Z
|
2019-02-13-Wine-Dataset.ipynb
|
brianspiering/PyDataSIG
|
6817949877cfb6706923136275ef345e17f8ce71
|
[
"Apache-2.0"
] | null | null | null |
2019-02-13-Wine-Dataset.ipynb
|
brianspiering/PyDataSIG
|
6817949877cfb6706923136275ef345e17f8ce71
|
[
"Apache-2.0"
] | 5 |
2019-02-21T04:21:30.000Z
|
2021-12-13T11:15:52.000Z
| 29.270142 | 1,008 | 0.5034 |
[
[
[
"Welcome to PyData Special Interest Group @ SF Python Project Night\n-----\n\nThe goal is to have a sample dataset to explore together. \n\nWe are going to explore the Wine recognition dataset 🍷\n\nIt is a choose-your-own-adventure. If you are interested in visualization, do that. If you are interested in statistical modeling, explore that. If you are interested in machine learning or deep learning, try that.",
"_____no_output_____"
]
],
[
[
"# Here are common imports to get you started\nimport keras\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport sklearn\n\n%matplotlib inline",
"_____no_output_____"
],
[
"# Let's load the data\nfrom sklearn.datasets import load_wine\n\ndata = load_wine()",
"_____no_output_____"
],
[
"print(data.DESCR)",
".. _wine_dataset:\n\nWine recognition dataset\n------------------------\n\n**Data Set Characteristics:**\n\n :Number of Instances: 178 (50 in each of three classes)\n :Number of Attributes: 13 numeric, predictive attributes and the class\n :Attribute Information:\n \t\t- Alcohol\n \t\t- Malic acid\n \t\t- Ash\n\t\t- Alcalinity of ash \n \t\t- Magnesium\n\t\t- Total phenols\n \t\t- Flavanoids\n \t\t- Nonflavanoid phenols\n \t\t- Proanthocyanins\n\t\t- Color intensity\n \t\t- Hue\n \t\t- OD280/OD315 of diluted wines\n \t\t- Proline\n\n - class:\n - class_0\n - class_1\n - class_2\n\t\t\n :Summary Statistics:\n \n ============================= ==== ===== ======= =====\n Min Max Mean SD\n ============================= ==== ===== ======= =====\n Alcohol: 11.0 14.8 13.0 0.8\n Malic Acid: 0.74 5.80 2.34 1.12\n Ash: 1.36 3.23 2.36 0.27\n Alcalinity of Ash: 10.6 30.0 19.5 3.3\n Magnesium: 70.0 162.0 99.7 14.3\n Total Phenols: 0.98 3.88 2.29 0.63\n Flavanoids: 0.34 5.08 2.03 1.00\n Nonflavanoid Phenols: 0.13 0.66 0.36 0.12\n Proanthocyanins: 0.41 3.58 1.59 0.57\n Colour Intensity: 1.3 13.0 5.1 2.3\n Hue: 0.48 1.71 0.96 0.23\n OD280/OD315 of diluted wines: 1.27 4.00 2.61 0.71\n Proline: 278 1680 746 315\n ============================= ==== ===== ======= =====\n\n :Missing Attribute Values: None\n :Class Distribution: class_0 (59), class_1 (71), class_2 (48)\n :Creator: R.A. Fisher\n :Donor: Michael Marshall (MARSHALL%[email protected])\n :Date: July, 1988\n\nThis is a copy of UCI ML Wine recognition datasets.\nhttps://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\n\nThe data is the results of a chemical analysis of wines grown in the same\nregion in Italy by three different cultivators. There are thirteen different\nmeasurements taken for different constituents found in the three types of\nwine.\n\nOriginal Owners: \n\nForina, M. et al, PARVUS - \nAn Extendible Package for Data Exploration, Classification and Correlation. \nInstitute of Pharmaceutical and Food Analysis and Technologies,\nVia Brigata Salerno, 16147 Genoa, Italy.\n\nCitation:\n\nLichman, M. (2013). UCI Machine Learning Repository\n[http://archive.ics.uci.edu/ml]. Irvine, CA: University of California,\nSchool of Information and Computer Science. \n\n.. topic:: References\n\n (1) S. Aeberhard, D. Coomans and O. de Vel, \n Comparison of Classifiers in High Dimensional Settings, \n Tech. Rep. no. 92-02, (1992), Dept. of Computer Science and Dept. of \n Mathematics and Statistics, James Cook University of North Queensland. \n (Also submitted to Technometrics). \n\n The data was used with many others for comparing various \n classifiers. The classes are separable, though only RDA \n has achieved 100% correct classification. \n (RDA : 100%, QDA 99.4%, LDA 98.9%, 1NN 96.1% (z-transformed data)) \n (All results using the leave-one-out technique) \n\n (2) S. Aeberhard, D. Coomans and O. de Vel, \n \"THE CLASSIFICATION PERFORMANCE OF RDA\" \n Tech. Rep. no. 92-01, (1992), Dept. of Computer Science and Dept. of \n Mathematics and Statistics, James Cook University of North Queensland. \n (Also submitted to Journal of Chemometrics).\n\n"
]
],
[
[
"You can also learn more [here](https://archive.ics.uci.edu/ml/datasets/wine).",
"_____no_output_____"
]
],
[
[
"# This is a classification problem. Try to predict one of these categories.\nlist(data.target_names)",
"_____no_output_____"
],
[
"# Use these features / columns\ndata.feature_names",
"_____no_output_____"
],
[
"# Here is the raw data\ndata.data",
"_____no_output_____"
],
[
"# Now it is your turn to find something interesting",
"_____no_output_____"
]
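As one possible starting point (a minimal baseline sketch, not the only way to explore the data; it re-uses the `data` bunch loaded above):

```python
# Quick classification baseline on the wine features.
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

X_train, X_test, y_train, y_test = train_test_split(
    data.data, data.target, test_size=0.3, random_state=42, stratify=data.target)

clf = RandomForestClassifier(n_estimators=100, random_state=42)
clf.fit(X_train, y_train)
print(accuracy_score(y_test, clf.predict(X_test)))
```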
],
[
[
" ",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
4a25a9e4c2667f7346a95c3e56f4d4e32c3225d3
| 2,416 |
ipynb
|
Jupyter Notebook
|
01a_matplotlib_intro_part1_aside1_figure-Copy1.ipynb
|
portiafrances/matplotlib_oop_tutorial
|
717c3c893bed27b1fbf37bb73b710069bf062aac
|
[
"BSD-3-Clause"
] | null | null | null |
01a_matplotlib_intro_part1_aside1_figure-Copy1.ipynb
|
portiafrances/matplotlib_oop_tutorial
|
717c3c893bed27b1fbf37bb73b710069bf062aac
|
[
"BSD-3-Clause"
] | null | null | null |
01a_matplotlib_intro_part1_aside1_figure-Copy1.ipynb
|
portiafrances/matplotlib_oop_tutorial
|
717c3c893bed27b1fbf37bb73b710069bf062aac
|
[
"BSD-3-Clause"
] | null | null | null | 18.728682 | 117 | 0.526076 |
[
[
[
"# Aside 1 - Creating a Matplotlib-Figure using OOP",
"_____no_output_____"
]
],
[
[
"import matplotlib as mpl",
"_____no_output_____"
]
],
[
[
"If we try to create a figure after importing Matplotlib, using only the OOP-interface, we get an error-message.",
"_____no_output_____"
]
],
[
[
"fig = mpl.figure.Figure()",
"_____no_output_____"
]
],
[
[
"However, if we specify a backend, the command succeeds.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline",
"_____no_output_____"
],
[
"fig = mpl.figure.Figure()",
"_____no_output_____"
],
[
"fig",
"_____no_output_____"
]
],
[
[
"This is because matplotlib's submodule *figure* needs to be imported explicitely before it can be used.\nWhen we call the matplotlib inlie magic functions, it does this for us. But we can also do this ourself.",
"_____no_output_____"
]
],
[
[
"import matplotlib.figure",
"_____no_output_____"
],
[
"fig = matplotlib.figure.Figure()",
"_____no_output_____"
]
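A minimal OOP-only round trip, as a sketch of where this leads: attach an Agg canvas to the Figure, draw on an Axes, and save to a file without ever importing pyplot.

```python
# Pure OOP usage: the canvas plays the role of the backend the notebook magic set up.
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure

fig = Figure()
canvas = FigureCanvasAgg(fig)      # attach a concrete canvas/backend to the Figure
ax = fig.add_subplot(1, 1, 1)
ax.plot([0, 1, 2], [0, 1, 4])
fig.savefig("oop_figure.png")      # works because a canvas is attached
```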
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a25aacbbca7165b4246233c42322a2eb6d1a46a
| 7,885 |
ipynb
|
Jupyter Notebook
|
0021/confidence interval of median.ipynb
|
genkuroki/public
|
339ea5dfd424492a6b21d1df299e52d48902de18
|
[
"MIT"
] | 10 |
2021-06-06T00:33:49.000Z
|
2022-01-24T06:56:08.000Z
|
0021/confidence interval of median.ipynb
|
genkuroki/public
|
339ea5dfd424492a6b21d1df299e52d48902de18
|
[
"MIT"
] | null | null | null |
0021/confidence interval of median.ipynb
|
genkuroki/public
|
339ea5dfd424492a6b21d1df299e52d48902de18
|
[
"MIT"
] | 3 |
2021-08-02T11:58:34.000Z
|
2021-12-11T11:46:05.000Z
| 19.614428 | 100 | 0.4591 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a25b4856bc9b49ddd81a6fc07130f8422b11ae2
| 28,186 |
ipynb
|
Jupyter Notebook
|
examples/speaker_recognition/notebooks/Speaker_Recognition_hi-mia.ipynb
|
jevenzh/NeMo
|
60993bc152973c44e8e94d74e1a757ddd543d99c
|
[
"Apache-2.0"
] | null | null | null |
examples/speaker_recognition/notebooks/Speaker_Recognition_hi-mia.ipynb
|
jevenzh/NeMo
|
60993bc152973c44e8e94d74e1a757ddd543d99c
|
[
"Apache-2.0"
] | null | null | null |
examples/speaker_recognition/notebooks/Speaker_Recognition_hi-mia.ipynb
|
jevenzh/NeMo
|
60993bc152973c44e8e94d74e1a757ddd543d99c
|
[
"Apache-2.0"
] | null | null | null | 36.989501 | 695 | 0.627368 |
[
[
[
"\"\"\"\nYou can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n\nInstructions for setting up Colab are as follows:\n1. Open a new Python 3 notebook.\n2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n4. Run this cell to set up dependencies.\n\"\"\"\n# If you're using Google Colab and not running locally, run this cell.\nimport os\n!pip install wget\n!apt-get install sox\n\n!git clone https://github.com/NVIDIA/NeMo.git\nos.chdir('NeMo')\n!bash reinstall.sh\n\n!pip install unidecode",
"_____no_output_____"
]
],
[
[
"# **SPEAKER RECOGNITION** \n\nSpeaker Recognition (SR) is an broad research area which solves two major tasks: speaker identification (who is speaking?) and speaker verification (is the speaker who she claims to be?). In this work, we focmus on the far-field, text-independent speaker recognition when the identity of the speaker is based on how speech is spoken, not necessarily in what is being said. Typically such SR systems operate on unconstrained speech utterances, \nwhich are converted into vectors of fixed length, called speaker embeddings. Speaker embeddings are also used in automatic speech recognition (ASR) and speech synthesis.\n\nAs the goal of most speaker related systems is to get good speaker level embeddings that could help distinguish from other speakers, we shall first train these embeddings in end-to-end manner optimizing the [QuatzNet](https://arxiv.org/abs/1910.10261) based encoder model on cross-entropy loss. We modify the original quartznet based decoder to get these fixed size embeddings irrespective of the length of the input audio. We employ mean and variance based statistics pooling method to grab these embeddings.",
"_____no_output_____"
],
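As a rough illustration of what the mean-and-variance statistics pooling mentioned above does (a NumPy toy sketch, not NeMo's actual implementation): a frame sequence of any length is reduced to one fixed-size vector.

```python
import numpy as np

def stats_pool(frames):
    # frames: (T, D) frame-level features; T varies from utterance to utterance.
    mean = frames.mean(axis=0)
    var = frames.var(axis=0)
    return np.concatenate([mean, var])    # fixed size 2*D, independent of T

emb = stats_pool(np.random.randn(137, 512))   # -> shape (1024,)
```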
[
"In this tutorial we shall first train these embeddings on speaker related datasets and then get speaker embeddings from a pretrained network for a new dataset. Since Google Colab has very slow read-write speeds, Please run this locally for training on [hi-mia](https://arxiv.org/abs/1912.01231). \n\nWe use the [get_hi-mia-data.py](https://github.com/NVIDIA/NeMo/blob/master/scripts/get_hi-mia_data.py) script to download the necessary files, extract them, also re-sample to 16Khz if any of these samples are not at 16Khz. We do also provide scripts to score these embeddings for a speaker-verification task like hi-mia dataset at the end. ",
"_____no_output_____"
]
],
[
[
"data_dir = 'scripts/data/'\n!mkdir $data_dir\n\n# Download and process dataset. This will take a few moments...\n!python scripts/get_hi-mia_data.py --data_root=$data_data",
"_____no_output_____"
]
],
[
[
"After download and conversion, your `data` folder should contain directories with manifest files as:\n\n* `data/<set>/train.json`\n* `data/<set>/dev.json` \n* `data/<set>/{set}_all.json` \n\nAlso for each set we also create utt2spk files, these files later would be used in PLDA training.\n\nEach line in manifest file describes a training sample - `audio_filepath` contains path to the wav file, `duration` it's duration in seconds, and `label` is the speaker class label:\n\n`{\"audio_filepath\": \"<absolute path to dataset>/data/train/SPEECHDATA/wav/SV0184/SV0184_6_04_N3430.wav\", \"duration\": 1.22, \"label\": \"SV0184\"}` \n\n`{\"audio_filepath\": \"<absolute path to dataset>/data/train/SPEECHDATA/wav/SV0184/SV0184_5_03_F2037.wav\", duration\": 1.375, \"label\": \"SV0184\"}`\n\n",
"_____no_output_____"
],
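A hedged sketch of how one such manifest line could be produced for a single wav file (illustration only; the download script above already writes these files, and the `soundfile` dependency here is an assumption):

```python
import json
import soundfile as sf   # assumed helper for reading the wav duration

def manifest_line(wav_path, speaker_label):
    samples, sr = sf.read(wav_path)
    return json.dumps({"audio_filepath": wav_path,
                       "duration": len(samples) / sr,
                       "label": speaker_label})
```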
[
"Import necessary packages",
"_____no_output_____"
]
],
[
[
"from ruamel.yaml import YAML\n\nimport nemo\nimport nemo.collections.asr as nemo_asr\nimport copy\nfrom functools import partial",
"_____no_output_____"
]
],
[
[
"# Building Training and Evaluation DAGs with NeMo\nBuilding a model using NeMo consists of \n\n1. Instantiating the neural modules we need\n2. specifying the DAG by linking them together.\n\nIn NeMo, the training and inference pipelines are managed by a NeuralModuleFactory, which takes care of checkpointing, callbacks, and logs, along with other details in training and inference. We set its log_dir argument to specify where our model logs and outputs will be written, and can set other training and inference settings in its constructor. For instance, if we were resuming training from a checkpoint, we would set the argument checkpoint_dir=`<path_to_checkpoint>`.\n\nAlong with logs in NeMo, you can optionally view the tensorboard logs with the create_tb_writer=True argument to the NeuralModuleFactory. By default all the tensorboard log files will be stored in {log_dir}/tensorboard, but you can change this with the tensorboard_dir argument. One can load tensorboard logs through tensorboard by running tensorboard --logdir=`<path_to_tensorboard dir>` in the terminal.",
"_____no_output_____"
]
],
[
[
"exp_name = 'quartznet3x2_hi-mia'\nwork_dir = './myExps/'\nneural_factory = nemo.core.NeuralModuleFactory(\n log_dir=work_dir+\"/hi-mia_logdir/\",\n checkpoint_dir=\"./myExps/checkpoints/\" + exp_name,\n create_tb_writer=True,\n random_seed=42,\n tensorboard_dir=work_dir+'/tensorboard/',\n)",
"_____no_output_____"
]
],
[
[
"Now that we have our neural module factory, we can specify our **neural modules and instantiate them**. Here, we load the parameters for each module from the configuration file. ",
"_____no_output_____"
]
],
[
[
"from nemo.utils import logging\nyaml = YAML(typ=\"safe\")\nwith open('examples/speaker_recognition/configs/quartznet_spkr_3x2x512_xvector.yaml') as f:\n spkr_params = yaml.load(f)\n\nsample_rate = spkr_params[\"sample_rate\"]\ntime_length = spkr_params.get(\"time_length\", 8)\nlogging.info(\"max time length considered for each file is {} sec\".format(time_length))",
"_____no_output_____"
]
],
[
[
"Instantiating train data_layer using config arguments. `labels = None` automatically creates output labels from manifest files, if you would like to pass those speaker names you can use the labels option. So while instantiating eval data_layer, we can use pass labels to the class in order to match same the speaker output labels as we have in the training data layer. This comes in handy while training on multiple datasets with more than one manifest file. ",
"_____no_output_____"
]
],
[
[
"train_dl_params = copy.deepcopy(spkr_params[\"AudioToSpeechLabelDataLayer\"])\ntrain_dl_params.update(spkr_params[\"AudioToSpeechLabelDataLayer\"][\"train\"])\ndel train_dl_params[\"train\"]\ndel train_dl_params[\"eval\"]\n\nbatch_size=64\ndata_layer_train = nemo_asr.AudioToSpeechLabelDataLayer(\n manifest_filepath=data_dir+'/train/train.json',\n labels=None,\n batch_size=batch_size,\n time_length=time_length,\n **train_dl_params,\n )\n\neval_dl_params = copy.deepcopy(spkr_params[\"AudioToSpeechLabelDataLayer\"])\neval_dl_params.update(spkr_params[\"AudioToSpeechLabelDataLayer\"][\"eval\"])\ndel eval_dl_params[\"train\"]\ndel eval_dl_params[\"eval\"]\n\ndata_layer_eval = nemo_asr.AudioToSpeechLabelDataLayer(\n manifest_filepath=data_dir+'/train/dev.json\",\n labels=data_layer_train.labels,\n batch_size=batch_size,\n time_length=time_length,\n **eval_dl_params,\n)\n\ndata_preprocessor = nemo_asr.AudioToMelSpectrogramPreprocessor(\n sample_rate=sample_rate, **spkr_params[\"AudioToMelSpectrogramPreprocessor\"],\n )\nencoder = nemo_asr.JasperEncoder(**spkr_params[\"JasperEncoder\"],)\n\ndecoder = nemo_asr.JasperDecoderForSpkrClass(\n feat_in=spkr_params[\"JasperEncoder\"][\"jasper\"][-1][\"filters\"],\n num_classes=data_layer_train.num_classes,\n pool_mode=spkr_params[\"JasperDecoderForSpkrClass\"]['pool_mode'],\n emb_sizes=spkr_params[\"JasperDecoderForSpkrClass\"][\"emb_sizes\"].split(\",\"),\n )\n\nxent_loss = nemo_asr.CrossEntropyLossNM(weight=None)",
"_____no_output_____"
]
],
[
[
"The next step is to assemble our training DAG by specifying the inputs to each neural module.",
"_____no_output_____"
]
],
[
[
"audio_signal, audio_signal_len, label, label_len = data_layer_train()\nprocessed_signal, processed_signal_len = data_preprocessor(input_signal=audio_signal, length=audio_signal_len)\nencoded, encoded_len = encoder(audio_signal=processed_signal, length=processed_signal_len)\nlogits, _ = decoder(encoder_output=encoded)\nloss = xent_loss(logits=logits, labels=label)",
"_____no_output_____"
]
],
[
[
"We would like to be able to evaluate our model on the dev set, as well, so let's set up the evaluation DAG.\n\nOur evaluation DAG will reuse most of the parts of the training DAG with the exception of the data layer, since we are loading the evaluation data from a different file but evaluating on the same model. Note that if we were using data augmentation in training, we would also leave that out in the evaluation DAG.",
"_____no_output_____"
]
],
[
[
"audio_signal_test, audio_len_test, label_test, _ = data_layer_eval()\nprocessed_signal_test, processed_len_test = data_preprocessor(\n input_signal=audio_signal_test, length=audio_len_test\n )\nencoded_test, encoded_len_test = encoder(audio_signal=processed_signal_test, length=processed_len_test)\nlogits_test, _ = decoder(encoder_output=encoded_test)\nloss_test = xent_loss(logits=logits_test, labels=label_test)",
"_____no_output_____"
]
],
[
[
"# Creating CallBacks\n\nWe would like to be able to monitor our model while it's training, so we use callbacks. In general, callbacks are functions that are called at specific intervals over the course of training or inference, such as at the start or end of every n iterations, epochs, etc. The callbacks we'll be using for this are the SimpleLossLoggerCallback, which reports the training loss (or another metric of your choosing, such as \\% accuracy for speaker recognition tasks), and the EvaluatorCallback, which regularly evaluates the model on the dev set. Both of these callbacks require you to pass in the tensors to be evaluated--these would be the final outputs of the training and eval DAGs above.\n\nAnother useful callback is the CheckpointCallback, for saving checkpoints at set intervals. We create one here just to demonstrate how it works.",
"_____no_output_____"
]
],
[
[
"from nemo.collections.asr.helpers import (\n monitor_classification_training_progress,\n process_classification_evaluation_batch,\n process_classification_evaluation_epoch,\n)\nfrom nemo.utils.lr_policies import CosineAnnealing\n\ntrain_callback = nemo.core.SimpleLossLoggerCallback(\n tensors=[loss, logits, label],\n print_func=partial(monitor_classification_training_progress, eval_metric=[1]),\n step_freq=1000,\n get_tb_values=lambda x: [(\"train_loss\", x[0])],\n tb_writer=neural_factory.tb_writer,\n )\n\ncallbacks = [train_callback]\n\nchpt_callback = nemo.core.CheckpointCallback(\n folder=\"./myExps/checkpoints/\" + exp_name,\n load_from_folder=\"./myExps/checkpoints/\" + exp_name,\n step_freq=1000,\n )\ncallbacks.append(chpt_callback)\n\ntagname = \"hi-mia_dev\"\neval_callback = nemo.core.EvaluatorCallback(\n eval_tensors=[loss_test, logits_test, label_test],\n user_iter_callback=partial(process_classification_evaluation_batch, top_k=1),\n user_epochs_done_callback=partial(process_classification_evaluation_epoch, tag=tagname),\n eval_step=1000, # How often we evaluate the model on the test set\n tb_writer=neural_factory.tb_writer,\n )\n\ncallbacks.append(eval_callback)",
"_____no_output_____"
]
],
[
[
"Now that we have our model and callbacks set up, how do we run it?\n\nOnce we create our neural factory and the callbacks for the information that we want to see, we can start training by simply calling the train function on the tensors we want to optimize and our callbacks! Since this notebook is for you to get started, by an4 as dataset is small it would quickly get higher accuracies. For better models use bigger datasets",
"_____no_output_____"
]
],
[
[
"# train model\nnum_epochs=25\nN = len(data_layer_train)\nsteps_per_epoch = N // batch_size\n\nlogging.info(\"Number of steps per epoch {}\".format(steps_per_epoch))\n\nneural_factory.train(\n tensors_to_optimize=[loss],\n callbacks=callbacks,\n lr_policy=CosineAnnealing(\n num_epochs * steps_per_epoch, warmup_steps=0.1 * num_epochs * steps_per_epoch,\n ),\n optimizer=\"novograd\",\n optimization_params={\n \"num_epochs\": num_epochs,\n \"lr\": 0.02,\n \"betas\": (0.95, 0.5),\n \"weight_decay\": 0.001,\n \"grad_norm_clip\": None,\n }\n )",
"_____no_output_____"
]
],
[
[
"Now that we trained our embeddings, we shall extract these embeddings using our pretrained checkpoint present at `checkpoint_dir`. As we can see from the neural architecture, we extract the embeddings after the `emb1` layer. \n",
"_____no_output_____"
],
[
"Now use the test manifest to get the embeddings. As we saw before, let's create a new `data_layer` for test. Use previously instiated models and attach the DAGs",
"_____no_output_____"
]
],
[
[
"eval_dl_params = copy.deepcopy(spkr_params[\"AudioToSpeechLabelDataLayer\"])\neval_dl_params.update(spkr_params[\"AudioToSpeechLabelDataLayer\"][\"eval\"])\ndel eval_dl_params[\"train\"]\ndel eval_dl_params[\"eval\"]\neval_dl_params['shuffle'] = False # To grab the file names without changing data_layer\n\ntest_dataset = data_dir+'/test/test_all.json',\ndata_layer_test = nemo_asr.AudioToSpeechLabelDataLayer(\n manifest_filepath=test_dataset,\n labels=None,\n batch_size=batch_size,\n **eval_dl_params,\n )\n\naudio_signal_test, audio_len_test, label_test, _ = data_layer_test()\nprocessed_signal_test, processed_len_test = data_preprocessor(\n input_signal=audio_signal_test, length=audio_len_test)\nencoded_test, _ = encoder(audio_signal=processed_signal_test, length=processed_len_test)\n_, embeddings = decoder(encoder_output=encoded_test)",
"_____no_output_____"
]
],
[
[
"Now get the embeddings using neural_factor infer command, that just does forward pass of all our modules. And save our embeddings in `<work_dir>/embeddings`",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport json\neval_tensors = neural_factory.infer(tensors=[embeddings, label_test], checkpoint_dir=\"./myExps/checkpoints/\" + exp_name)\n\ninf_emb, inf_label = eval_tensors\nwhole_embs = []\nwhole_labels = []\nmanifest = open(test_dataset, 'r').readlines()\n\nfor line in manifest:\n line = line.strip()\n dic = json.loads(line)\n filename = dic['audio_filepath'].split('/')[-1]\n whole_labels.append(filename)\n\nfor idx in range(len(inf_label)):\n whole_embs.extend(inf_emb[idx].numpy())\n\nembedding_dir = './myExps/embeddings/'\nif not os.path.exists(embedding_dir):\n os.mkdir(embedding_dir)\n\nfilename = os.path.basename(test_dataset).split('.')[0]\nname = embedding_dir + filename\n\nnp.save(name + '.npy', np.asarray(whole_embs))\nnp.save(name + '_labels.npy', np.asarray(whole_labels))\nlogging.info(\"Saved embedding files to {}\".format(embedding_dir))\n",
"_____no_output_____"
],
[
"!ls $embedding_dir",
"_____no_output_____"
]
],
[
[
"# Cosine Similarity Scoring\n\nHere we provide a script scoring on hi-mia whose trial file has structure `<speaker_name1> <speaker_name2> <target/nontarget>` . First copy the `trails_1m` file present in test folder to our embeddings directory",
"_____no_output_____"
]
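A rough sketch of what this kind of cosine scoring does with a trial list of the form `<name1> <name2> <target/nontarget>` (illustrative names and helper only; the actual scoring is performed by the hi-mia_eval.py script used below):

```python
import numpy as np

def cosine(a, b):
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

def score_trials(trial_path, emb_by_name):
    # emb_by_name: dict mapping an utterance/speaker name to its embedding vector.
    scores, labels = [], []
    for line in open(trial_path):
        name1, name2, target = line.split()
        scores.append(cosine(emb_by_name[name1], emb_by_name[name2]))
        labels.append(1 if target == "target" else 0)
    return np.array(scores), np.array(labels)
```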
],
[
[
"!cp $data_dir/test/trails_1m $embedding_dir/",
"_____no_output_____"
]
],
[
[
"the below command would output the EER% based on cosine similarity score",
"_____no_output_____"
]
],
[
[
"!python examples/speaker_recognition/hi-mia_eval.py --data_root $embedding_dir --emb $embedding_dir/test_all.npy --emb_labels $embedding_dir/test_all_labels.npy --emb_size 1024\n",
"_____no_output_____"
]
],
[
[
"# PLDA Backend\nTo finetune our speaker embeddings further, we used kaldi PLDA scripts to train PLDA and evaluate as well. so from this point going forward, please make sure you installed kaldi and was added to your path as KALDI_ROOT.\n\nTo train PLDA, we can either use dev set or training set. Let's use the training set embeddings to train PLDA and further use this trained PLDA model to score in test embeddings. in order to do that we should get embeddings for our training data as well. As similar to above steps, generate the train embeddings",
"_____no_output_____"
]
],
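[
[
"An optional quick sanity check that the environment variable is visible from the notebook before running the Kaldi-based steps:\n\n```python\nimport os\n\n# Prints the Kaldi root if it is set, otherwise a reminder to set it.\nprint(os.environ.get('KALDI_ROOT', 'KALDI_ROOT is not set -- please install Kaldi and export KALDI_ROOT'))\n```",
"_____no_output_____"
]
],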
[
[
"test_dataset = data_dir+'/train/train.json',\n\ndata_layer_test = nemo_asr.AudioToSpeechLabelDataLayer(\n manifest_filepath=test_dataset,\n labels=None,\n batch_size=batch_size,\n **eval_dl_params,\n )\n\naudio_signal_test, audio_len_test, label_test, _ = data_layer_test()\nprocessed_signal_test, processed_len_test = data_preprocessor(\n input_signal=audio_signal_test, length=audio_len_test)\nencoded_test, _ = encoder(audio_signal=processed_signal_test, length=processed_len_test)\n_, embeddings = decoder(encoder_output=encoded_test)\n\neval_tensors = neural_factory.infer(tensors=[embeddings, label_test], checkpoint_dir=\"./myExps/checkpoints/\" + exp_name)\n\ninf_emb, inf_label = eval_tensors\nwhole_embs = []\nwhole_labels = []\nmanifest = open(test_dataset, 'r').readlines()\n\nfor line in manifest:\n line = line.strip()\n dic = json.loads(line)\n filename = dic['audio_filepath'].split('/')[-1]\n whole_labels.append(filename)\n\nfor idx in range(len(inf_label)):\n whole_embs.extend(inf_emb[idx].numpy())\n\nif not os.path.exists(embedding_dir):\n os.mkdir(embedding_dir)\n\nfilename = os.path.basename(test_dataset).split('.')[0]\nname = embedding_dir + filename\n\nnp.save(name + '.npy', np.asarray(whole_embs))\nnp.save(name + '_labels.npy', np.asarray(whole_labels))\nlogging.info(\"Saved embedding files to {}\".format(embedding_dir))\n",
"_____no_output_____"
]
],
[
[
"As part of kaldi necessary files we need `utt2spk` \\& `spk2utt` file to get ark file for PLDA training. to do that, copy the generated utt2spk file from `data_dir` train folder to create spk2utt file using \n\n`utt2spk_to_spk2utt.pl $data_dir/train/utt2spk > $embedding_dir/spk2utt`\n\nThen run the below python script to get EER score using PLDA backend scoring. This script does both data preparation for kaldi followed by PLDA scoring. ",
"_____no_output_____"
]
],
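[
[
"For reference, an equivalent sketch in plain Python (assuming the usual Kaldi `utt2spk` format of one `<utt-id> <spk-id>` pair per line); the `utt2spk_to_spk2utt.pl` one-liner above is the standard way to do this:\n\n```python\nfrom collections import defaultdict\n\n# Group utterances by speaker: spk2utt lines look like \"<spk-id> <utt-id> <utt-id> ...\".\nspk2utt = defaultdict(list)\nwith open(data_dir + '/train/utt2spk') as f:\n    for line in f:\n        parts = line.split()\n        if len(parts) != 2:\n            continue  # skip malformed or empty lines\n        utt, spk = parts\n        spk2utt[spk].append(utt)\n\nwith open(embedding_dir + 'spk2utt', 'w') as f:\n    for spk, utts in sorted(spk2utt.items()):\n        f.write('{} {}\\n'.format(spk, ' '.join(sorted(utts))))\n```",
"_____no_output_____"
]
],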
[
[
"!python examples/speaker_recognition/kaldi_plda.py --root $embedding_dir --train_embs $embedding_dir/train.npy --train_labels $embedding_dir/train_labels.npy \n--eval_embs $embedding_dir/all_embs_himia.npy --eval_labels $embedding_dir/all_ids_himia.npy --stage=1",
"_____no_output_____"
]
],
[
[
"Here `--stage = 1` trains PLDA model but if you already have a trained PLDA then you can directly evaluate on it by `--stage=2` option.\n\nThis should output an EER of 6.32% with minDCF: 0.455",
"_____no_output_____"
],
[
"# Performance Improvement\n\nTo improve your embeddings performance:\n \n* Add more data and Train longer (100 epochs)\n\n* Try adding the augmentation –see config file\n\n* Use larger model\n\n* Train on several GPUs and use mixed precision (on NVIDIA Volta and Turing GPUs)\n\n* Start with pre-trained checkpoints",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a25d194ac0421e25e3d15d02f7606c531288eeb
| 289,899 |
ipynb
|
Jupyter Notebook
|
surveys/2015-12-notebook-ux/analysis/prep/1_ux_survey_review.ipynb
|
Zsailer/surveys
|
952fd3bfc44fb3b63b911b7ffe785fd09b84e4e9
|
[
"CC0-1.0"
] | 21 |
2016-08-14T22:59:50.000Z
|
2021-10-23T01:32:04.000Z
|
surveys/2015-12-notebook-ux/analysis/prep/1_ux_survey_review.ipynb
|
Zsailer/surveys
|
952fd3bfc44fb3b63b911b7ffe785fd09b84e4e9
|
[
"CC0-1.0"
] | 7 |
2017-08-28T16:43:30.000Z
|
2022-01-13T03:42:40.000Z
|
surveys/2015-12-notebook-ux/analysis/prep/1_ux_survey_review.ipynb
|
Zsailer/surveys
|
952fd3bfc44fb3b63b911b7ffe785fd09b84e4e9
|
[
"CC0-1.0"
] | 31 |
2016-08-23T14:46:57.000Z
|
2021-03-02T20:27:41.000Z
| 125.497403 | 14,502 | 0.313775 |
[
[
[
"# Jupyter UX Survey 2015 - Initial Sandbox\n\n* Goal: Start looking at how we can surface insights from the data.\n* Description: https://github.com/jupyter/surveys/tree/master/surveys/2015-12-notebook-ux\n* Data: https://raw.githubusercontent.com/jupyter/surveys/master/surveys/2015-12-notebook-ux/20160115235816-SurveyExport.csv\n\n## Initial Questions\n\n### To what audiences is the Jupyter Community trying to cater?\n\n* New to the practice of \"data science\"\n* Experienced audience not using jupyter\n* Existing audience\n\n### How can we boil down the free text to \"themes\"?\n\n* Remove stop words and find key terms to do some frequency counts\n* Read and tag everything manually, then analyze the tags\n* Overlap between responses to the various questions\n* Apply coccurence grouping to the text\n* Throw text at the alchemy Keyword Extraction API and see what it pulls out\n* Bin short vs long and analyze separately\n\n### What roles do the survey respondant fill? And in what fields / industries do they fill those roles?\n\nSee the [Roles]() section down below.\n\n### Generally, what themes do we see across the free-text responses?\n\nSee the [Themes]() in hinderances section for an initial approach on how to find and expand on sets of themes for one particular question. We think we can apply this to the other questions as well.\n\n### What themes do we see across the free-text responses but within the role/industry categories?\n\ne.g., Is it always software developers that are asking for IDE features vs hard scientists asking for collaboration features?\n\nWe took an initial approach on rolling up the roles into a smaller set of categories. We then looked at mapping the requests for vim/emacs and ide feature to the software engineering related roles. It turns out that these requests seem to cross roles, and are not specific to software engineers. More of the responses for emacs/vim, in fact, came from respondants from the hard sciences (e.g. physicist, computational biologist, etc.)\n\nThis led us to believe that we should not assume certain roles are certain hinderances, but rather try to visualize if there are any hot-spots between roles and hinderance themes. It may turn out, we hypothesize, that the roles have little to do with the hinderances and that the themes are cross cutting. Or not.\n\nWe plan to create heatmap-like plots, one per question. On one axis we will have the role categories and on the other we will have the themes we identify within the responses for that question. After creating these plots for all questions, we'll also create similar plots where we substitute industry, years in role, # of notebook consumers, frequency of use, etc. on one of the axes and keep the themes on the other.\n\n### What shortcodes can we use to refer to the questions?\n\nAssume we roll up the answers into single columns:\n\n* how_often\n* how_long\n* hinderance\n* integrated\n* how_run\n* workflow_needs_addressed\n* workflow_needs_not_addressed\n* pleasant_aspects\n* difficult_aspects\n* features_changes\n* first_experience_enhancements\n* keywords\n* role\n* years_in_role\n* industry\n* notebook_consumers",
"_____no_output_____"
]
],
[
[
"%matplotlib inline",
"_____no_output_____"
],
[
"import warnings\nwarnings.simplefilter('ignore')",
"_____no_output_____"
],
[
"import pandas as pd\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"pd.set_option('max_colwidth', 1000)",
"_____no_output_____"
],
[
"df = pd.read_csv('../20160115235816-SurveyExport.csv')",
"_____no_output_____"
],
[
"df.columns",
"_____no_output_____"
]
],
[
[
"## Themes in the hinderances",
"_____no_output_____"
],
[
"Let's start with the hinderances question and figure out the process first. Then we can apply it to the other free form text responses (we think).",
"_____no_output_____"
]
],
[
[
"hinder = df['What, if anything, hinders you from making Jupyter Notebook an even more regular part of your workflow?']",
"_____no_output_____"
]
],
[
[
"How many non-null responses are there?",
"_____no_output_____"
]
],
[
[
"hinder.isnull().value_counts()",
"_____no_output_____"
]
],
[
[
"Clear out the nulls.",
"_____no_output_____"
]
],
[
[
"hinder = hinder.dropna()",
"_____no_output_____"
]
],
[
[
"How much did people write?",
"_____no_output_____"
]
],
[
[
"char_lengths = hinder.apply(lambda response: len(response))",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(figsize=(11, 7))\nchar_lengths.hist(bins=100, ax=ax)\nax.set_title('Character count histogram')",
"_____no_output_____"
]
],
[
[
"We should definitely look at the longest responses. These are people who might have felt very strongly about what they were writing.",
"_____no_output_____"
]
],
[
[
"for row in hinder[char_lengths > 1400]:\n print(row)\n print()",
"It's starting to become a regular tool. I run the notebook server on a VPS and use the notebook through SSH. I run the notebook server on code in a Git master branch and I sync that with a local branch periodically. I have mostly just used Idle in the past because that is very flexible and I get a complete transcript of my programming session. But support for Idle and improvements to Idle have mostly stopped. I like the general idea of the notebook but the format doesn't support some things very well - testing for example. I would like to have my tests in a separate notebook that is linked to the one I'm currently developing (same kernel I guess). I would like my tests to be separate because if they are in the same notebook as my normal code then it is distracting. I could migrate out my tests to a separate file, but then they are not in the same notebook format as the regular code which is very convenient. Some things in Jupyter are frustrating: 1. the kernel cannot be reliably interrupted. 2. kernel crashes. This means that datasets must be reloaded which can take a long time in my case. 3. code is not reloaded easily: I have to manually use reload(module) if the code is in a file. The notebook should reliably reload modules: I realize that this is a long-standing problem in Python generally that has still not been resolved after all these years. 4. code reload in a cell is very buggy. I've had old code remain in a cell after making a change that has caused some strange results. A reload of the code in a cell should clean out all of the old definitions and variables in that cell so that there can't be any strange behaviour due to old references. \n\nInteractive plot exploration (through matplotlib) is a bit clunky and much worse than in Matlab, for example. You basically need to modify the code that you used to create a plot and rerun its cell in order to modify the plot. Embedded plots are not interactive and regular matplotlib figures are not that dynamic to begin with. You can zoom and pan but you cannot modify them in any other way (e.g. you cannot select a plot element and modify its color, delete it or add a fitting curve as in Matlab). Some of these are limitations of matplotlib of course but that is part of the whole \"Jupyter package\". Creating interactive plots and widgets is too complicated at the moment. It requires too much programming. It is hard to generate a nice report that you can share with non technical people from a Jupyter notebook because there is no way (AFAIK) to hide the code cells from the output. It would be cool to be able to mark a cell as \"hidden code\" which would be excluded from the generated PDF for example. The UI is a bit clunky and editing code is a bit painful when there is a lot of code in a cell. Being able to run cells out of order is cool but it also makes it hard to reproduce what you did later. This makes it particularly hard to turn a notebook into a proper Python script sometimes. It would be great if you could somehow replay the notebook in the order that you executed it and export that in the proper order. Maybe there should be a way to programmatically execute a cell from another cell (maybe this is already possible and Injust don't know about it).\n\nInability to easily navigate within modules/structured code directories, so that I can develop my code simultaneously as a package/module and within a notebook. 
Prototyping in the notebook is great, but then making sure everything works appropriately when outside of the notebook environment is more difficult without a lot more infrastructure. Furthermore, once it is ported to an external module, its essentially impossible to either bring the code back into the notebook efficiently (i.e., not just copying and pasting into cells) and or to maintain a coherent version control stream for the changes then made in the notebook. So currently software development in a notebook to external module is a 1-way street, and not a particularly easy one to navigate. Also, inconsistency in relative import statements make developing software packages in a notebook/module. Given the portable nature of the notebook, it's nice to be able to work from the same source files on a variety of computers. But absolute paths can't be used across machines (unless they follow the same convention) and its not clear how one would specify the absolute path relative to the location of the notebook server and vs. notebook itself. Editing documentation in the notebook (especially with executable examples) would make it easier to jump between source code and explicit documentation. Linking a particular cell out to a particular docstring for a particular function/class/method would be great.\n\n"
]
],
[
[
"Now just to get the constrast, let's look at some of the shortest responses.",
"_____no_output_____"
]
],
[
[
"hinder[char_lengths < 100].sample(20)",
"_____no_output_____"
]
],
[
[
"From reading a bunch of random samples of the shortest responses, we've got a list of ideas that we think we can search for across all of the responses in order to judge how common the themes are.\n \n* Nothing\n* UX / navigation / mobile / paradigm\n* IDE / debug / editor familiarity / comfort zone / keys\n* Setup / learning / getting started / perceived lack of skills\n* Inertia\n* Colleagues / peer pressure\n* Version control / git / history / tracking / provenance\n* Collaboration / export / sharing\n* Integration / missing languages / extensibility\n\nBefore we do, let's look at a few \"medium-length\" responses too for good measure.",
"_____no_output_____"
]
],
[
[
"for x in list(hinder[(char_lengths < 300) & (char_lengths > 100)].sample(20)):\n print(x)\n print()",
" * Lack of interconnection between notebooks. Loading notebooks as modules is still non-trivial * non-trivial UI customization * No native spell-checker * No \"templates\" * No more user profiles or read-only mode\n\n* No spell checker * Non-trivial to use notebooks as modules * Non-trivial customization of notebooks * No more read-only access \n\nThe requirement for sequential execution of cells and no concepts of 'modulazation' (call cell x in notebook y) means it is always somewhat cumbersome to use if you are doing anything complicated. But mostly the reluctance of others to use it. \n\nEditing capabilities for text. Ability to convert from notebook form to plain text and back again? Working on multiple notebooks.\n\nThe lack of version control (tracking changes using git does not work well) My own knowledge about how to use functions in other notebooks, etc.\n\nThe only time I don't use jupyter is when collaborating with others who don't. Usually the collaborators are stuck on Matlab and unwilling to change languages.\n\n- tighter integration with version control - cross project/directory search of notebooks - deployable notebooks - persistent widgets \n\nIt doesn't work well with standard version control tools because one line of code doesn't correspond to one line in the notebook file It throws away 30 years of text editor development by replacing a real editor like vim or emacs with a HTML text area.\n\nMost of the time I prefer working with the ipython shell directly. I use the notebook when I intend to share with others. \n\n1. For advanced terminal users, the lack of full emacs (or vi I guess) editing modes is annoying. Maybe it is just that the IPython terminal is too awesome already! 2. Version controlling ipynb files is annoying (but possible and I do it for some notebooks).\n\nExporting 'finished' reports is one aspect. Yes, there are workarounds to code it, but adding simple features that allow you to export analyzed data / diagrams / charts in high fidelity formats is very important.\n\nannoying to start the notebook server when I want to qucikly look at notebook content versioning notebooks (git) is a pain\n\nMostly, the fact that not many colleagues, collaborators and customers use it or are familiar with it.\n\nWe still need to have \"idiot-proof\" documentation for serving up your own Jupyter notebooks on a personal website. If we had that, I'd guarantee we'd see a surge in the number of notebook-enabled textbooks, demos, etc.\n\nCross language compatibility, i.e. running jupyter on different language kernels, such as scala, spark, groovy, clojure, etc.\n\n1. Difficulties diffing notebooks in version control. 2. Interface issues on mobile devices 3. Slide show usage is still tricky\n\nI don't actually use it for work, but for classes. If I did, thought, Jupyter would prove itself a great tool.\n\nalready a part of my workflow. Being able to convert from notebook to script is very important but I wish I could just run my notebook in production mode.\n\nNot clear what needs to be installed and what versions, particularly WRT widgets & extensions. I'm very excited about dashboards and can appreciate how fluid things are. Finding it difficult figuring out where & how to jump in.\n\n- development of modules with multiple files - development of notebooks that render lots of mathjax (it gets slow) -(painless) integration with distributed computing backends (this isn't really about the Notebook itself, but I had to say it)\n\n"
]
],
[
[
"We can add a few themes to the list we created above (which we'll replicate here to keep growing it as we go, because, history):\n\n* Nothing\n* UX / navigation / mobile / paradigm\n* IDE / debug / editor familiarity / comfort zone / keys\n* Setup / learning / getting started / perceived lack of skills / community / documentation\n* Inertia\n* Colleagues / peer pressure\n* Version control / git / history / tracking / provenance\n* Collaboration / export / sharing / dirt simple deploy\n* Customization / personalization\n* Reuse / modularization\n* Integration / missing languages / extensibility",
"_____no_output_____"
]
],
[
[
"keywords = ['git', 'version control', 'history', 'track', 'checkpoint', 'save']",
"_____no_output_____"
],
[
"def keywords_or(text):\n for keyword in keywords:\n if keyword in text: \n return text\n return None",
"_____no_output_____"
],
[
"results = hinder.map(keywords_or)",
"_____no_output_____"
],
[
"len(results.dropna())",
"_____no_output_____"
],
[
"results.dropna()",
"_____no_output_____"
]
],
[
[
"Moving forward, here's a semi-automatic procedure we can follow for identifying themes across questions:\n\n1. Take a random sample of question responses\n2. Write down common theme keywords\n3. Search back through the responses using the theme keywords\n4. Expand the set of keywords with other words seen in the search results\n5. Repeat for all themes and questions\n\nLater, we can use a fully automated topic modeling approach to validate our manually generated themes.",
"_____no_output_____"
],
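[
"As a rough sketch of step 3, assuming a hand-built theme-to-keywords mapping (the themes and keywords below are illustrative placeholders, not the final set; short keywords like 'ide' would need word-boundary matching in a real pass):\n\n```python\nfrom collections import Counter\n\n# Illustrative mapping only; the real mapping comes from reading the responses.\ntheme_keywords = {\n    'version_control': ['git', 'version control', 'history', 'track'],\n    'ide_features': ['ide', 'debug', 'vim', 'emacs'],\n    'sharing': ['share', 'collaborat', 'export', 'deploy'],\n}\n\ndef tag_themes(text):\n    text = text.lower()\n    return [theme for theme, kws in theme_keywords.items()\n            if any(kw in text for kw in kws)]\n\n# Count how often each theme shows up across the hindrance responses.\ntheme_counts = Counter(theme for themes in hinder.map(tag_themes) for theme in themes)\ntheme_counts\n```",
"_____no_output_____"
],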
[
"## Roles\n\nWe want to pull out the major roles that people self-identified as filling when they use Jupyter Notebook.",
"_____no_output_____"
]
],
[
[
"roles_df = df[['What is your primary role when using Jupyter Notebook (e.g., student,\\xa0astrophysicist, financial modeler, business manager, etc.)?']]",
"_____no_output_____"
],
[
"roles_df = roles.dropna()",
"_____no_output_____"
]
],
[
[
"We're renaming the column for brevity only.",
"_____no_output_____"
]
],
[
[
"roles_df.columns = ['role']",
"_____no_output_____"
]
],
[
[
"Some basic normalization. TODO: do more later.",
"_____no_output_____"
]
],
[
[
"roles_df['role_norm'] = roles_df.role.str.lower()",
"_____no_output_____"
]
],
[
[
"For now, we're going to look at the top 20 and see what industries they support from the other columns",
"_____no_output_____"
]
],
[
[
"roles_df.role_norm.value_counts()",
"_____no_output_____"
]
],
[
[
"## Industry vs Role",
"_____no_output_____"
]
],
[
[
"len(df['Industry #1:What industries does your role and analytical work support (e.g., Journalism, IT, etc.)?'].dropna())",
"_____no_output_____"
],
[
"len(df['Industry #2:What industries does your role and analytical work support (e.g., Journalism, IT, etc.)?'].dropna())",
"_____no_output_____"
],
[
"len(df['Industry #3:What industries does your role and analytical work support (e.g., Journalism, IT, etc.)?'].dropna())",
"_____no_output_____"
],
[
"industry_df = df[\n ['Industry #1:What industries does your role and analytical work support (e.g., Journalism, IT, etc.)?',\n 'Industry #2:What industries does your role and analytical work support (e.g., Journalism, IT, etc.)?',\n 'Industry #3:What industries does your role and analytical work support (e.g., Journalism, IT, etc.)?',\n 'What is your primary role when using Jupyter Notebook (e.g., student,\\xa0astrophysicist, financial modeler, business manager, etc.)?'\n ]\n]",
"_____no_output_____"
],
[
"industry_df.columns = ['industry1', 'industry2', 'industry3', 'role']",
"_____no_output_____"
],
[
"industry_df = industry_df.dropna(how='all')",
"_____no_output_____"
],
[
"top_roles = roles_df.role_norm.value_counts()[:20]",
"_____no_output_____"
],
[
"top_industry_df = industry_df[industry_df.role.isin(top_roles.index)]",
"_____no_output_____"
],
[
"top_industry_df[top_industry_df.role == 'data analyst']",
"_____no_output_____"
]
],
[
[
"## Example: Software Engineering Role\n\nWe want to see if software engineers (or related roles) are the ones asking about IDE-like features.",
"_____no_output_____"
]
],
[
[
"software_roles = ['engineer', 'software engineer', 'developer', 'software developer', 'programmer']",
"_____no_output_____"
],
[
"role_hinder_df = pd.merge(roles_df, hinder_df, left_index=True, right_index=True)",
"_____no_output_____"
],
[
"role_hinder_df = role_hinder_df.ix[:, 1:]",
"_____no_output_____"
],
[
"role_hinder_df[role_hinder_df.role_norm.isin(software_roles)]",
"_____no_output_____"
],
[
"tmp_df = role_hinder_df.dropna()",
"_____no_output_____"
],
[
"tmp_df[tmp_df.ix[:, 1].str.contains('emacs|vim', case=False)]",
"_____no_output_____"
],
[
"tmp_df[tmp_df.ix[:, 1].str.contains('\\W+ide\\W+', case=False)]",
"_____no_output_____"
]
],
[
[
"## Years in Role vs Role Name",
"_____no_output_____"
]
],
[
[
"years_in_role = df.ix[:, 32]",
"_____no_output_____"
],
[
"years_in_role.value_counts()",
"_____no_output_____"
],
[
"how_long = df.ix[:, 5]",
"_____no_output_____"
],
[
"how_long.value_counts()",
"_____no_output_____"
],
[
"using_vs_role = df[[5, 32]]",
"_____no_output_____"
],
[
"using_vs_role.columns = ['how_long_using', 'how_long_role']",
"_____no_output_____"
],
[
"pd.crosstab(using_vs_role.how_long_role, using_vs_role.how_long_using)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a25d54ad94a6bf3f27bdd1b7c7b0ef3f5cb8a6d
| 31,411 |
ipynb
|
Jupyter Notebook
|
PitStop_Predictor.ipynb
|
norberte/F1-End-of-Stint-or-NOT
|
04f0a0dd51689d8b2935009a299e13fc5da1cf79
|
[
"MIT"
] | null | null | null |
PitStop_Predictor.ipynb
|
norberte/F1-End-of-Stint-or-NOT
|
04f0a0dd51689d8b2935009a299e13fc5da1cf79
|
[
"MIT"
] | null | null | null |
PitStop_Predictor.ipynb
|
norberte/F1-End-of-Stint-or-NOT
|
04f0a0dd51689d8b2935009a299e13fc5da1cf79
|
[
"MIT"
] | null | null | null | 31.069238 | 525 | 0.519691 |
[
[
[
"# Objective\nBuild a binary classifier that given a sequence of lap times will predict if a pit-stop will happen or not the next lap .. in other words I call this project End-of-Stint-or-NOT\n\nData Source:\n- Ergast Developer API: https://ergast.com/mrd/\n\n## Table of Content:\n* [Data Preparation](#Section1)\n * [Import data](#section_1_1)\n * [Pit Stop Table Transformation](#section_1_2)\n * [Lap Times Table Transformation](#section_1_3)\n * [Left Join New Pit-Stop with New Lap-Times](#section_1_4)\n * [TBC](#section_1_5)\n * [TBC](#section_1_6)",
"_____no_output_____"
],
[
"## Data Preparation <a class=\"anchor\" id=\"Section1\"></a>",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"### Import Data <a class=\"anchor\" id=\"section_1_1\"></a>",
"_____no_output_____"
]
],
[
[
"laps_master = pd.read_csv('data/lap_times.csv')\nraces_master = pd.read_csv('data/races.csv')\nquali_master = pd.read_csv('data/qualifying.csv')\ndrivers_master = pd.read_csv('data/drivers.csv')\nconstructors_master = pd.read_csv('data/constructors.csv')\nresults_master = pd.read_csv('data/results.csv')\ncircuits_master = pd.read_csv('data/circuits.csv')\npits_master = pd.read_csv('data/pit_stops.csv')",
"_____no_output_____"
],
[
"pits_master",
"_____no_output_____"
]
],
[
[
"### Pit Stop Table Transformation <a class=\"anchor\" id=\"section_1_2\"></a>",
"_____no_output_____"
],
[
"Create new data frame with a list of laps when a pit stop was occuring for each driver, for each race",
"_____no_output_____"
]
],
[
[
"pits_df_new = pits_master.groupby(['raceId', 'driverId'])['lap'].apply(list).reset_index(name='laps_when_pitstop')\npits_df_new",
"_____no_output_____"
]
],
[
[
"#### Preview the lap times table\nLet's take a look at a random race, and random driver, and see how the lap times look .. just to better understand what transformation needs to be done on the data",
"_____no_output_____"
]
],
[
[
"laps_master[laps_master.raceId == 841][laps_master.driverId == 17]",
"_____no_output_____"
]
],
[
[
"### Lap Times Table Transformation <a class=\"anchor\" id=\"section_1_3\"></a>",
"_____no_output_____"
],
[
"Create a new data frame containing a list of all the lap times in one row, for an entire race, for each driver",
"_____no_output_____"
]
],
[
[
"laps_df_new = laps_master.groupby(['raceId', 'driverId'])['milliseconds'].apply(list).reset_index(name='race_lap_times')\nlaps_df_new",
"_____no_output_____"
]
],
[
[
"### Left Join New Pit-Stop Table with New Lap-Times Table <a class=\"anchor\" id=\"section_1_4\"></a>",
"_____no_output_____"
]
],
[
[
"merged = pd.merge(pits_df_new, laps_df_new, on=['raceId', 'driverId'], how='left')\nmerged",
"_____no_output_____"
]
],
[
[
"### Lap Times Before Pit-Stop Sequence Partitioning <a class=\"anchor\" id=\"section_1_5\"></a>",
"_____no_output_____"
]
],
[
[
"def partition_lapTime_into_sequences(pitStop_laps, race_lapTimes):\n # NOTE: no need to return the last stint, since it is not followed by a pit stop... \n # only return sequence of lap times that are followed by a pit stop\n\n # returns: list of lap time sequences (which as lists) ... so list of lists\n \n # remove pit stops from first lap... those occur because of a collision, so they should not be looked at when predicting the end of the stint\n if 1 in pitStop_laps:\n pitStop_laps = pitStop_laps[1:] # remove first lap pit stop, as it was not a regular, planned one\n race_lapTimes = race_lapTimes[1:] # remove the first lap time, since the stint was \"corrupted\" by the emergency pitstop \n pitStop_laps[:] = [x - 1 for x in pitStop_laps] # subtract one lap from the pit-stop lap count, to account for the first lap being removed\n \n if len(pitStop_laps) < 1:\n return np.nan # no real stints have occured. Pitted on lap 1, then never pitted again during the race.\n \n sequences = []\n prev_pit = pitStop_laps[0]\n \n if len(pitStop_laps) == 1: # if the race is a one-stop race \n sequences.append(race_lapTimes[:prev_pit-1]) # the off-by-one accounts for not taking into consideration the lap with the pit-stop as part of the sequence\n else: # multi-stop race as\n \n for current_pit in pitStop_laps:\n if current_pit == prev_pit: # this is only true when prev_pit = pitStop_laps[0]\n sequences.append(race_lapTimes[:current_pit-1]) # create first stint\n # the off-by-one accounts for not taking into consideration the lap with the pit-stop as part of the sequence\n else:\n sequences.append(race_lapTimes[prev_pit:current_pit-1]) # create next sequence from (prev-pit-lap, current_pit-lap)\n prev_pit = current_pit # update pointer to previous pit ... this will be needed for the next pit\n return sequences",
"_____no_output_____"
]
],
[
[
"### Sequencing Function Test cases",
"_____no_output_____"
]
],
[
[
"sample_input_pits = merged.iloc[13, :].laps_when_pitstop\nsample_input_lapTimes = merged.iloc[13, :].race_lap_times\n\nprint(\"input pits: \", sample_input_pits)\nprint(\"input laps: \", sample_input_lapTimes)\n\nprint(\"output: \", partition_lapTime_into_sequences(sample_input_pits, sample_input_lapTimes))",
"_____no_output_____"
]
],
[
[
"To DO: write test cases",
"_____no_output_____"
],
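[
"A minimal sketch of what such test cases might look like (assert-based, using small hand-made inputs against the function defined above; the lap-time values are arbitrary):\n\n```python\nlaps = [100, 101, 102, 103, 104, 105, 106, 107, 108, 109]  # times for laps 1..10 (arbitrary)\n\n# Two-stop race: pits on laps 3 and 6 -> two stints, pit laps excluded.\nassert partition_lapTime_into_sequences([3, 6], laps) == [[100, 101], [103, 104]]\n\n# One-stop race: pit on lap 4 -> a single stint covering laps 1-3.\nassert partition_lapTime_into_sequences([4], laps) == [[100, 101, 102]]\n\n# Emergency pit on lap 1 followed by a pit on lap 5 -> lap 1 dropped, one stint remains.\nassert partition_lapTime_into_sequences([1, 5], laps) == [[101, 102, 103]]\n\n# Only an emergency lap-1 pit -> no real stints, NaN is returned.\nassert np.isnan(partition_lapTime_into_sequences([1], laps))\nprint('all partition checks passed')\n```",
"_____no_output_____"
],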
[
"### Get Lap Times of Final Stint (as a non-pit-stint)",
"_____no_output_____"
]
],
[
[
"def get_last_stint_lap_times(pitStop_laps, race_lapTimes):\n # returns the last stint's lap times, since it is not followed by a pit stop .. so it is non-pit-stop stint \n\n last_pit = pitStop_laps[-1]\n return race_lapTimes[last_pit:]",
"_____no_output_____"
]
],
[
[
"### Test get_last_stint_lap_times function ",
"_____no_output_____"
]
],
[
[
"sample_input_pits = merged.iloc[13, :].laps_when_pitstop\nsample_input_lapTimes = merged.iloc[13, :].race_lap_times\n\nprint(\"input pits: \", sample_input_pits)\nprint(\"input laps: \", sample_input_lapTimes)\n\nprint(\"output: \", get_last_stint_lap_times(sample_input_pits, sample_input_lapTimes))",
"_____no_output_____"
],
[
"sample_input_pits = merged.iloc[1, :].laps_when_pitstop\nsample_input_lapTimes = merged.iloc[1, :].race_lap_times\n\nprint(\"input pits: \", sample_input_pits)\nprint(\"input laps: \", sample_input_lapTimes)\n\nprint(\"output: \", get_last_stint_lap_times(sample_input_pits, sample_input_lapTimes))",
"_____no_output_____"
]
],
[
[
"### Apply sequence partitioning function the merged data set",
"_____no_output_____"
]
],
[
[
"merged['stints'] = merged.apply(lambda x: partition_lapTime_into_sequences(x.laps_when_pitstop, x.race_lap_times), axis=1)\nmerged['last_stint'] = merged.apply(lambda x: get_last_stint_lap_times(x.laps_when_pitstop, x.race_lap_times), axis=1)\nmerged",
"_____no_output_____"
]
],
[
[
"Check if there are any missing stints",
"_____no_output_____"
]
],
[
[
"merged.isnull().sum()",
"_____no_output_____"
]
],
[
[
"There are some missing values based on the sequence partitioning transformation that we have just applied. Let's see where they are.",
"_____no_output_____"
]
],
[
[
"merged[merged.isnull().any(axis=1)]",
"_____no_output_____"
]
],
[
[
"As I have thought, all cases are just races when there was only one pit stop, on lap 1, so for the scope of this end-of-stint classifier we can safely remove theses cases, as they do not affect the task at hand",
"_____no_output_____"
]
],
[
[
"merged = merged.dropna()\nmerged",
"_____no_output_____"
],
[
"end_of_stint_sequences = merged['stints']\nend_of_stint_sequences[0]",
"_____no_output_____"
],
[
"last_stint_sequences = merged['last_stint']\nlast_stint_sequences[0]",
"_____no_output_____"
]
],
[
[
"We need to flatten the structure of the data. We need a list of lists, not a Pandas Series of lists of lists",
"_____no_output_____"
]
],
[
[
"temp = end_of_stint_sequences.tolist() # lists of lists of lists\nprint(\"Before:\", temp[0:3])\nprint()",
"_____no_output_____"
],
[
"# Use list.extend() to convert a a 3D list to a 2D lists\nend_of_stint_sequences = []\nfor elem in temp:\n end_of_stint_sequences.extend(elem) # this will make it lists of lists\n\nprint(\"After:\", end_of_stint_sequences[0:3])\nprint(\"Sample Size = \", len(end_of_stint_sequences))",
"_____no_output_____"
]
],
[
[
"### Generate not end of stint sequences --- this method did not work",
"_____no_output_____"
],
[
"My logic here is the following: Don't generate random laptimes, nor stints with random length.\n\nWhat I propose is: remove the last n laps from a real stint, and label it as a 'not-end-of-stint' kind of a sequence.\n\nThe parameter n needs to be experimented with: we need to figure out what kind of experiment setup works best for our binary classifier. \n\n - Initially what I am thinking is that I will remove the last lap, and create some fake samples... then remove the last 2 laps, and the last 4 laps, and create samples out of those too.\n - What I want to make sure is to not create a very unbalanced data set. What I am aiming for is 20-25% end-of-stint data, with 75-80% not-end-of stint data to comprise my data set which I will use to train my binary classifier.\n",
"_____no_output_____"
]
],
[
[
"def remove_lastN_elements(arr, N):\n return arr[:-N]",
"_____no_output_____"
],
[
"print(end_of_stint_sequences[0])\nprint()\nprint(remove_lastN_elements(end_of_stint_sequences[0], 2))",
"_____no_output_____"
],
[
"NOT_end_of_stint_sequences = []\n\n# N needs to be experimented with. Initially I chose N=1, N=2, and N=4\nfor lst in end_of_stint_sequences:\n temp_list = remove_lastN_elements(lst, N = 1) # remove last lap from each stint\n NOT_end_of_stint_sequences.append(temp_list)\n \n temp_list = remove_lastN_elements(lst, N = 2) # remove last 2 laps from each stint\n NOT_end_of_stint_sequences.append(temp_list)\n \n temp_list = remove_lastN_elements(lst, N = 4) # remove last 4 laps from each stint\n NOT_end_of_stint_sequences.append(temp_list)",
"_____no_output_____"
],
[
"#print(len(NOT_end_of_stint_sequences))\nprint(len(end_of_stint_sequences))",
"_____no_output_____"
]
],
[
[
"RESULT = 3:1 ratio between not-end-of-stint and end-os-stint data\n\nLet's create the labels:",
"_____no_output_____"
]
],
[
[
"end_of_stint_labels = [1] * len(end_of_stint_sequences)\nNOT_end_of_stint_labels = [0] * len(NOT_end_of_stint_sequences)",
"_____no_output_____"
]
],
[
[
"### Get NOT-end-of-stint sequences & Create final data set",
"_____no_output_____"
]
],
[
[
"NOT_end_of_stint_sequences = last_stint_sequences.tolist() \n\nNOT_end_of_stint_labels = [0] * len(NOT_end_of_stint_sequences)\nend_of_stint_labels = [1] * len(end_of_stint_sequences)\n\nprint(\"Labels:\")\nprint(len(NOT_end_of_stint_labels))\nprint(len(end_of_stint_labels))\n\nprint(\"\\nSequences:\")\nprint(len(NOT_end_of_stint_sequences))\nprint(len(end_of_stint_sequences))",
"_____no_output_____"
],
[
"stint_sequences = end_of_stint_sequences + NOT_end_of_stint_sequences\nstint_labels = end_of_stint_labels + NOT_end_of_stint_labels\n\nprint(len(stint_sequences))\nprint(len(stint_labels))",
"_____no_output_____"
]
],
[
[
"## Binary Classifier\n\nI view this task as a Sequence Classification task, where deep learning approaches have been widely used in practice for similar tasks such as: \n\n - DNA Sequence Classification: Given a DNA sequence of ACGT values, predict whether the sequence codes for a coding or non-coding region.\n - Anomaly Detection: Given a sequence of observations, predict whether the sequence is anomalous or not.\n - Sentiment Analysis: Given a sequence of text such as a review or a tweet, predict whether sentiment of the text is positive or negative.\n \nReference: https://machinelearningmastery.com/sequence-prediction/\n \nI have done some research on the problem and the most common approaches seem to be using LSTM (Long-Short-Term-Memory) Recurrent Neural Networks. In the upcoming subsections I will test various architectures of different LSTM and maybe even non-LSTM Recurrent Neural Networks to see some results, then evaluate if we need to use some other binary classifier, or we actually just need more data, or better data, or we need to apply some techniques used when working with imbalaced data (undersampling, oversampling).",
"_____no_output_____"
],
[
"### Split data into train and test sets",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(stint_sequences, stint_labels, test_size=0.20, random_state=7)\n\nprint(\"Train set:\", len(X_train))\nprint(\"Test set:\", len(X_test))\nprint(\"Train labels:\", len(y_train))\nprint(\"Test labels:\", len(y_test))",
"_____no_output_____"
]
],
[
[
"### Pad Input Sequences ",
"_____no_output_____"
]
],
[
[
"# find out what's the longest stint in our data set\n#max_stint_length = max(map(len, end_of_stint_sequences))\n#print(\"Max stint-length =\", max_stint_length)\n\nmax_stint_length = 30 ",
"_____no_output_____"
],
[
"from keras.preprocessing import sequence\n\nX_train = sequence.pad_sequences(X_train, maxlen=max_stint_length, padding=\"pre\", truncating='pre')\nX_test = sequence.pad_sequences(X_test, maxlen=max_stint_length, padding=\"pre\", truncating='pre')",
"_____no_output_____"
]
],
[
[
"Wrap every list into numpy arrays, so Keras can process the input",
"_____no_output_____"
]
],
[
[
"X_train = np.array(X_train)\nX_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)\n\nX_test = np.array(X_test)\nX_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)\n\ny_train = np.array(y_train)\ny_train = y_train.reshape(y_train.shape[0], 1)\n\ny_test = np.array(y_test)\ny_test = y_test.reshape(y_test.shape[0], 1)",
"_____no_output_____"
]
],
[
[
"testing the ratio of 0 to 1 in the train and test set",
"_____no_output_____"
]
],
[
[
"unique, frequency = np.unique(y_test, \n return_counts = True) \n# print unique values array \nprint(\"Unique Values:\", \n unique) \n \n# print frequency array \nprint(\"Frequency Values:\", \n frequency)",
"_____no_output_____"
],
[
"unique, frequency = np.unique(y_train, \n return_counts = True) \n# print unique values array \nprint(\"Unique Values:\", \n unique) \n \n# print frequency array \nprint(\"Frequency Values:\", \n frequency)",
"_____no_output_____"
]
],
[
[
"### Approach 1: Simple LSTM for Sequence Classification",
"_____no_output_____"
]
],
[
[
"from keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\n\n# fix random seed for reproducibility\nnp.random.seed(7)",
"_____no_output_____"
],
[
"model = Sequential()\nmodel.add(LSTM(100, input_shape=(max_stint_length, 1)))\nmodel.add(Dense(1, activation='sigmoid'))\n\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nprint(model.summary())\n\nmodel.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=128)",
"_____no_output_____"
],
[
"model = Sequential()\nmodel.add(LSTM(64, input_shape=(max_stint_length, 1), return_sequences=True))\nmodel.add(LSTM(32))\nmodel.add(Dense(1, activation='sigmoid'))\n\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nprint(model.summary())\n\nmodel.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=128)",
"_____no_output_____"
]
],
[
[
"### Approach 2: Time Distributed LSTM",
"_____no_output_____"
]
],
[
[
"from keras.models import Sequential\nfrom keras.layers import Dense, TimeDistributed, LSTM, Dropout\n\n# fix random seed for reproducibility\nnp.random.seed(7)",
"_____no_output_____"
],
[
"model = Sequential()\nmodel.add(LSTM(512, input_shape=(max_stint_length, 1), return_sequences=True))\n#model.add(Dropout(0.3))\nmodel.add(LSTM(256, return_sequences=True))\nmodel.add(LSTM(128, return_sequences=True))\nmodel.add(LSTM(64, return_sequences=True))\nmodel.add(TimeDistributed(Dense(1, activation='sigmoid')))\n\n\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nprint(model.summary())\n\nmodel.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=64)",
"_____no_output_____"
]
],
[
[
"### Approach 3: Bidirectional LSTMs",
"_____no_output_____"
]
],
[
[
"from keras.layers import Bidirectional\n\n# define LSTM model\nmodel = Sequential()\nmodel.add(Bidirectional(LSTM(128, return_sequences=True), input_shape=(max_stint_length, 1)))\n#model.add(Dropout(0.2))\nmodel.add(Bidirectional(LSTM(64, return_sequences=True)))\n#model.add(Dropout(0.2))\n#model.add(Bidirectional(LSTM(64, return_sequences=True)))\n#model.add(Bidirectional(LSTM(64)))\n#model.add(Dropout(0.2))\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dense(32, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\n\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\nprint(model.summary())\n\nmodel.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=64)",
"_____no_output_____"
]
],
[
[
"### Approach 4: LSTM and CNNs combined",
"_____no_output_____"
]
],
[
[
"from keras.layers.convolutional import Conv1D\nfrom keras.layers.convolutional import MaxPooling1D\n\nmodel = Sequential()\nmodel.add(Conv1D(filters=256, kernel_size=3, padding='same', activation='relu', input_shape=(max_stint_length, 1)))\nmodel.add(MaxPooling1D(pool_size=2))\nmodel.add(Conv1D(filters=128, kernel_size=3, padding='same', activation='relu'))\nmodel.add(MaxPooling1D(pool_size=2))\nmodel.add(Bidirectional(LSTM(64, return_sequences=True)))\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dense(32, activation='relu'))\nmodel.add(Dense(1, activation='sigmoid'))\n\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\n\nprint(model.summary())\n\nmodel.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=128)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a25db228026476e85bc5f07b9253901bb84b125
| 689 |
ipynb
|
Jupyter Notebook
|
behavior Planning/cost_function_1/cost.h.ipynb
|
enesdoruk/Autonom-Vehicles-Projects
|
0cf4de4fe10eb0f5c886eca8fb5117b0121ae53b
|
[
"MIT"
] | 2 |
2022-01-20T14:34:28.000Z
|
2022-02-10T22:12:37.000Z
|
behavior Planning/cost_function_1/cost.h.ipynb
|
enesdoruk/Autonom-Vehicles-Projects
|
0cf4de4fe10eb0f5c886eca8fb5117b0121ae53b
|
[
"MIT"
] | null | null | null |
behavior Planning/cost_function_1/cost.h.ipynb
|
enesdoruk/Autonom-Vehicles-Projects
|
0cf4de4fe10eb0f5c886eca8fb5117b0121ae53b
|
[
"MIT"
] | null | null | null | 19.138889 | 85 | 0.506531 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a25e118cdd51c70420b81c1f9163bb5a563ed5d
| 8,117 |
ipynb
|
Jupyter Notebook
|
python_materials/learn-python3/notebooks/beginner/notebooks/for_loops.ipynb
|
NguyenCuuNguyen/CS488S21
|
7c585a40af791f34090ad6979941e23bcf1ddfa1
|
[
"MIT"
] | null | null | null |
python_materials/learn-python3/notebooks/beginner/notebooks/for_loops.ipynb
|
NguyenCuuNguyen/CS488S21
|
7c585a40af791f34090ad6979941e23bcf1ddfa1
|
[
"MIT"
] | null | null | null |
python_materials/learn-python3/notebooks/beginner/notebooks/for_loops.ipynb
|
NguyenCuuNguyen/CS488S21
|
7c585a40af791f34090ad6979941e23bcf1ddfa1
|
[
"MIT"
] | null | null | null | 21.878706 | 106 | 0.381668 |
[
[
[
"# [`for` loops](https://docs.python.org/3/tutorial/controlflow.html#for-statements)",
"_____no_output_____"
],
[
"## Looping lists",
"_____no_output_____"
]
],
[
[
"my_list = [1, 2, 3, 4, 'Python', 'is', 'neat']\nfor item in my_list:\n print(item)",
"1\n2\n3\n4\nPython\nis\nneat\n"
]
],
[
[
"### `break`\nStop the execution of the loop.",
"_____no_output_____"
]
],
[
[
"for item in my_list:\n if item == 'Python':\n break\n print(item)",
"1\n2\n3\n4\n"
]
],
[
[
"### `continue`\nContinue to the next item without executing the lines occuring after `continue` inside the loop.",
"_____no_output_____"
]
],
[
[
"for item in my_list:\n if item == 1:\n continue\n print(item)",
"2\n3\n4\nPython\nis\nneat\n"
]
],
[
[
"### `enumerate()`\nIn case you need to also know the index:",
"_____no_output_____"
]
],
[
[
"for idx, val in enumerate(my_list):\n print('idx: {}, value: {}'.format(idx, val))",
"idx: 0, value: 1\nidx: 1, value: 2\nidx: 2, value: 3\nidx: 3, value: 4\nidx: 4, value: Python\nidx: 5, value: is\nidx: 6, value: neat\n"
]
],
[
[
"## Looping dictionaries",
"_____no_output_____"
]
],
[
[
"my_dict = {'hacker': True, 'age': 72, 'name': 'John Doe'}\nfor val in my_dict:\n print(val)",
"hacker\nage\nname\n"
],
[
"for key, val in my_dict.items():\n print('{}={}'.format(key, val))",
"hacker=True\nage=72\nname=John Doe\n"
]
],
[
[
"## `range()`",
"_____no_output_____"
]
],
[
[
"for number in range(5):\n print(number)",
"0\n1\n2\n3\n4\n"
],
[
"for number in range(2, 5):\n print(number)",
"2\n3\n4\n"
]
],
[
[
"Print 2 3 4 and stop at 4, BEFORE 5",
"_____no_output_____"
]
],
[
[
"for number in range(0, 10, 2): # last one is step\n print(number)",
"0\n2\n4\n6\n8\n"
],
[
"Print from 0 to 8 and stop at 5",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a25e70c179eda02331499a876959df63499b462
| 314,537 |
ipynb
|
Jupyter Notebook
|
W1_D1_ML_HeuristicModel.ipynb
|
Nadda1004/Intro_Machine_learning
|
1f8df4215ae6d6350bea39d239f0a46fc5cea687
|
[
"Apache-2.0"
] | 1 |
2021-02-07T09:33:43.000Z
|
2021-02-07T09:33:43.000Z
|
W1_D1_ML_HeuristicModel.ipynb
|
Nadda1004/Intro_Machine_learning
|
1f8df4215ae6d6350bea39d239f0a46fc5cea687
|
[
"Apache-2.0"
] | null | null | null |
W1_D1_ML_HeuristicModel.ipynb
|
Nadda1004/Intro_Machine_learning
|
1f8df4215ae6d6350bea39d239f0a46fc5cea687
|
[
"Apache-2.0"
] | null | null | null | 272.326407 | 143,310 | 0.897583 |
[
[
[
"<a href=\"https://colab.research.google.com/github/Nadda1004/Intro_Machine_learning/blob/main/W1_D1_ML_HeuristicModel.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Predicting Rain in Seattle\r\nSeattle is one of the rainiest places in the world. Even so, it is worth asking the question 'will it rain tomorrow.' Imagine you are headed to sleep at a hotel in downtown Seattle. \r\n\r\nThe next days activities are supposed to include walking around outside most of the day. You want to know if it will rain or not (you don't really care how much rain just a simple yes or no will do), which will greatly impact what you choose to wear and carry around (like an umbrella). \r\n\r\n\r\nBuild a heuristic model to predict if it will rain tomorrow.\r\n\r\n",
"_____no_output_____"
],
[
"## Our Data",
"_____no_output_____"
]
],
[
[
"import pandas as pd\r\n\r\ndf = pd.read_csv('https://raw.githubusercontent.com/gumdropsteve/datasets/master/seattle_weather_1948-2017.csv')\r\n\r\ndf",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 25551 entries, 0 to 25550\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 ds 25551 non-null object \n 1 prcp 25548 non-null float64\n 2 tmax 25551 non-null int64 \n 3 tmin 25551 non-null int64 \n 4 rain 25548 non-null object \ndtypes: float64(1), int64(2), object(2)\nmemory usage: 998.2+ KB\n"
],
[
"#since the ds is representing the date but its not in date time format i will convert it to datetime format\r\ndf.ds = pd.to_datetime(df['ds'])",
"_____no_output_____"
],
[
"df.info()",
"<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 25551 entries, 0 to 25550\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 ds 25551 non-null datetime64[ns]\n 1 prcp 25548 non-null float64 \n 2 tmax 25551 non-null int64 \n 3 tmin 25551 non-null int64 \n 4 rain 25548 non-null object \ndtypes: datetime64[ns](1), float64(1), int64(2), object(1)\nmemory usage: 998.2+ KB\n"
],
[
"df.head()",
"_____no_output_____"
],
[
"import numpy as np\r\n\r\n# what % of days did it rain?\r\nrainy = (df.rain.value_counts()[1] / df.shape[0]) * 100\r\nprint('The Percenatge of Rained Days {:.3f}%'.format(rainy))",
"The Percenatge of Rained Days 42.660%\n"
],
[
"# what values are seen in the prcp column\r\ndf.prcp.value_counts()",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\r\nplt.figure(figsize=(15,7))\r\ndf.prcp.plot.hist(bins = 20).set(title = 'Values Range in Prcp');",
"_____no_output_____"
],
[
"# show me a histogram of prcp < 2\r\nplt.figure(figsize=(15,7))\r\ndf.loc[df.prcp < 2].prcp.plot.hist(bins = 20).set(title = 'Values < 2 in Prcp');",
"_____no_output_____"
]
],
[
[
"#### Check for Missing Values and Outliers",
"_____no_output_____"
]
],
[
[
"# how many null values does each column have?\r\ndf.isnull().sum()",
"_____no_output_____"
],
[
"# show me the null rows\r\ndf.loc[df.isnull().any(axis=1)]",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"# drop the null rows and update the dataframe\r\ndf1 = df.dropna()\r\ndf1",
"_____no_output_____"
],
[
"import seaborn as sns\r\n\r\n# make a box plot\r\nplt.figure(figsize=(15,7))\r\nsns.boxplot(data=df1).set(title = 'Boxplot for all columns');",
"_____no_output_____"
],
[
"# show me some outler values from tmax or tmin\r\nplt.figure(figsize=(15,7))\r\nsns.boxplot(data=[df1.tmin , df1.tmax]).set(title = 'Boxplot for minimum temperature and maximum temperature' );\r\nplt.xlabel('Min and Max Temp');",
"_____no_output_____"
],
[
"# make an sns pairplot with hue='rain'\r\nsns.pairplot(data = df1 , hue = 'rain');",
"/usr/local/lib/python3.6/dist-packages/seaborn/distributions.py:306: UserWarning: Dataset has 0 variance; skipping density estimate.\n warnings.warn(msg, UserWarning)\n"
],
[
"# bonus challenge\r\n# plot prcp by day (ds on x axis)\r\nplt.figure(figsize=(40,10))\r\nsns.lineplot(x = df1.ds , y = df1.prcp).set(title = 'Prcp Values Over The Years');",
"_____no_output_____"
]
],
[
[
"## Set up a basic model to make predicitons\r\n\r\nFirst, split the data...",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split\r\n\r\nX = df1[['prcp', 'tmax', 'tmin']] # all the values you want to help predict the target value\r\ny = df1.rain.astype(np.int32) # the target value\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)",
"_____no_output_____"
]
],
[
[
"Bring in a model now...",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import LogisticRegression\r\n\r\n# logistic regression is a classifier, for our case, True (1) or False (0)\r\nlr = LogisticRegression()\r\n\r\nlr",
"_____no_output_____"
],
[
"lr.fit(X=X_train, y=y_train)",
"_____no_output_____"
],
[
"# predict the y values from X test data\r\nlr.predict(X_test)",
"_____no_output_____"
],
[
"preds = lr.predict(X_test)",
"_____no_output_____"
],
[
"# how'd your model score?\r\nfrom sklearn.metrics import accuracy_score\r\naccuracy_score(y_test, preds) * 100",
"_____no_output_____"
]
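,
[
"For comparison, here is a sketch of a dead-simple heuristic baseline (an assumption of what a \"heuristic model\" could mean here, not part of the original notebook): predict that tomorrow's weather repeats today's, i.e. shift the rain column by one day and score it with the same accuracy metric.\n\n```python\n# Heuristic sketch: tomorrow's rain = today's rain (persistence forecast).\ndaily = df1.sort_values('ds').rain.astype(np.int32).reset_index(drop=True)\nheuristic_pred = daily.shift(1).fillna(0).astype(np.int32)  # yesterday's observation as today's guess\nprint('heuristic accuracy: {:.2f}%'.format(accuracy_score(daily, heuristic_pred) * 100))\n```",
"_____no_output_____"
]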
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a25eb849c0e03b947751a26d1156fe53f7e2707
| 47,917 |
ipynb
|
Jupyter Notebook
|
Mathematics/Statistics/Statistics and Probability Python Notebooks/General Stats Notebooks/7_pymc.ipynb
|
okara83/Becoming-a-Data-Scientist
|
f09a15f7f239b96b77a2f080c403b2f3e95c9650
|
[
"MIT"
] | null | null | null |
Mathematics/Statistics/Statistics and Probability Python Notebooks/General Stats Notebooks/7_pymc.ipynb
|
okara83/Becoming-a-Data-Scientist
|
f09a15f7f239b96b77a2f080c403b2f3e95c9650
|
[
"MIT"
] | null | null | null |
Mathematics/Statistics/Statistics and Probability Python Notebooks/General Stats Notebooks/7_pymc.ipynb
|
okara83/Becoming-a-Data-Scientist
|
f09a15f7f239b96b77a2f080c403b2f3e95c9650
|
[
"MIT"
] | 2 |
2022-02-09T15:41:33.000Z
|
2022-02-11T07:47:40.000Z
| 211.088106 | 40,912 | 0.908675 |
[
[
[
"# 7.7. Fitting a Bayesian model by sampling from a posterior distribution with a Markov Chain Monte Carlo method",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport pymc3 as pm\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"Collecting pymc3\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/42/c2/86e8be42b99d64932fa12611b502882a5f4d834b6d1d126bf3f956ad6428/pymc3-3.7-py3-none-any.whl (856kB)\n\u001b[K |████████████████████████████████| 860kB 5.4MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: tqdm>=4.8.4 in /Users/okara/anaconda3/lib/python3.7/site-packages (from pymc3) (4.31.1)\nRequirement already satisfied: h5py>=2.7.0 in /Users/okara/anaconda3/lib/python3.7/site-packages (from pymc3) (2.9.0)\nCollecting theano>=1.0.4 (from pymc3)\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/7d/c4/6341148ad458b6cd8361b774d7ee6895c38eab88f05331f22304c484ed5d/Theano-1.0.4.tar.gz (2.8MB)\n\u001b[K |████████████████████████████████| 2.8MB 8.2MB/s eta 0:00:01\n\u001b[?25hRequirement already satisfied: pandas>=0.18.0 in /Users/okara/anaconda3/lib/python3.7/site-packages (from pymc3) (0.24.2)\nRequirement already satisfied: numpy>=1.13.0 in /Users/okara/anaconda3/lib/python3.7/site-packages (from pymc3) (1.16.3)\nRequirement already satisfied: scipy>=0.18.1 in /Users/okara/anaconda3/lib/python3.7/site-packages (from pymc3) (1.2.1)\nRequirement already satisfied: patsy>=0.4.0 in /Users/okara/anaconda3/lib/python3.7/site-packages (from pymc3) (0.5.1)\nRequirement already satisfied: six in /Users/okara/anaconda3/lib/python3.7/site-packages (from h5py>=2.7.0->pymc3) (1.12.0)\nRequirement already satisfied: pytz>=2011k in /Users/okara/anaconda3/lib/python3.7/site-packages (from pandas>=0.18.0->pymc3) (2018.9)\nRequirement already satisfied: python-dateutil>=2.5.0 in /Users/okara/anaconda3/lib/python3.7/site-packages (from pandas>=0.18.0->pymc3) (2.8.0)\nBuilding wheels for collected packages: theano\n Building wheel for theano (setup.py) ... \u001b[?25ldone\n\u001b[?25h Stored in directory: /Users/okara/Library/Caches/pip/wheels/88/fb/be/483910ff7e9f703f30a10605ad7605f3316493875c86637014\nSuccessfully built theano\nInstalling collected packages: theano, pymc3\nSuccessfully installed pymc3-3.7 theano-1.0.4\n"
],
[
"# www.ncdc.noaa.gov/ibtracs/index.php?name=wmo-data\ndf = pd.read_csv('https://github.com/ipython-books/'\n 'cookbook-2nd-data/blob/master/'\n 'Allstorms.ibtracs_wmo.v03r05.csv?'\n 'raw=true',\n delim_whitespace=False)",
"_____no_output_____"
],
[
"cnt = df[df['Basin'] == ' NA'].groupby(\n 'Season')['Serial_Num'].nunique()\n# The years from 1851 to 2012.\nyears = cnt.index\ny0, y1 = years[0], years[-1]\narr = cnt.values",
"_____no_output_____"
],
[
"# Plot the annual number of storms.\nfig, ax = plt.subplots(1, 1, figsize=(8, 4))\nax.plot(years, arr, '-o')\nax.set_xlim(y0, y1)\nax.set_xlabel(\"Year\")\nax.set_ylabel(\"Number of storms\")",
"_____no_output_____"
],
[
"# We define our model.\nwith pm.Model() as model:\n # We define our three variables.\n switchpoint = pm.DiscreteUniform(\n 'switchpoint', lower=y0, upper=y1)\n early_rate = pm.Exponential('early_rate', 1)\n late_rate = pm.Exponential('late_rate', 1)\n # The rate of the Poisson process is a piecewise\n # constant function.\n rate = pm.math.switch(switchpoint >= years,\n early_rate, late_rate)\n # The annual number of storms per year follows\n # a Poisson distribution.\n storms = pm.Poisson('storms', rate, observed=arr)",
"_____no_output_____"
],
[
"with model:\n trace = pm.sample(10000)",
"_____no_output_____"
],
[
"pm.traceplot(trace)",
"_____no_output_____"
],
[
"s = trace['switchpoint'].mean()\nem = trace['early_rate'].mean()\nlm = trace['late_rate'].mean()\ns, em, lm",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(1, 1, figsize=(8, 4))\nax.plot(years, arr, '-o')\nax.axvline(s, color='k', ls='--')\nax.plot([y0, s], [em, em], '-', lw=3)\nax.plot([s, y1], [lm, lm], '-', lw=3)\nax.set_xlim(y0, y1)\nax.set_xlabel(\"Year\")\nax.set_ylabel(\"Number of storms\")",
"_____no_output_____"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a260cf464e7c273e72c9c7e89d96c5828a1a02c
| 25,165 |
ipynb
|
Jupyter Notebook
|
C4-Applied_Text_Mining_in_Python/week_3/Case+Study+-+Sentiment+Analysis.ipynb
|
urbanclimatefr/Coursera-Applied-Data-Science-with-Python
|
85a74505c97849dc1bb27c139f1281831362b15d
|
[
"MIT"
] | null | null | null |
C4-Applied_Text_Mining_in_Python/week_3/Case+Study+-+Sentiment+Analysis.ipynb
|
urbanclimatefr/Coursera-Applied-Data-Science-with-Python
|
85a74505c97849dc1bb27c139f1281831362b15d
|
[
"MIT"
] | null | null | null |
C4-Applied_Text_Mining_in_Python/week_3/Case+Study+-+Sentiment+Analysis.ipynb
|
urbanclimatefr/Coursera-Applied-Data-Science-with-Python
|
85a74505c97849dc1bb27c139f1281831362b15d
|
[
"MIT"
] | null | null | null | 29.295693 | 294 | 0.460759 |
[
[
[
"---\n\n_You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-text-mining/resources/d9pwm) course resource._\n\n---",
"_____no_output_____"
],
[
"*Note: Some of the cells in this notebook are computationally expensive. To reduce runtime, this notebook is using a subset of the data.*",
"_____no_output_____"
],
[
"# Case Study: Sentiment Analysis",
"_____no_output_____"
],
[
"### Data Prep",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\n\n# Read in the data\ndf = pd.read_csv('Amazon_Unlocked_Mobile.csv')\n\n# Sample the data to speed up computation\n# Comment out this line to match with lecture\ndf = df.sample(frac=0.1, random_state=10)\n\ndf.head()",
"_____no_output_____"
],
[
"# Drop missing values\ndf.dropna(inplace=True)\n\n# Remove any 'neutral' ratings equal to 3\ndf = df[df['Rating'] != 3]\n\n# Encode 4s and 5s as 1 (rated positively)\n# Encode 1s and 2s as 0 (rated poorly)\ndf['Positively Rated'] = np.where(df['Rating'] > 3, 1, 0)\ndf.head(10)",
"_____no_output_____"
],
[
"# Most ratings are positive\ndf['Positively Rated'].mean()",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\n\n# Split data into training and test sets\nX_train, X_test, y_train, y_test = train_test_split(df['Reviews'], \n df['Positively Rated'], \n random_state=0)",
"_____no_output_____"
],
[
"print('X_train first entry:\\n\\n', X_train.iloc[0])\nprint('\\n\\nX_train shape: ', X_train.shape)",
"X_train first entry:\n\n Everything about it is awesome!\n\n\nX_train shape: (23052,)\n"
]
],
[
[
"# CountVectorizer",
"_____no_output_____"
]
],
[
[
"from sklearn.feature_extraction.text import CountVectorizer\n\n# Fit the CountVectorizer to the training data\nvect = CountVectorizer().fit(X_train)",
"_____no_output_____"
],
[
"vect.get_feature_names()[::2000]",
"_____no_output_____"
],
[
"len(vect.get_feature_names())",
"_____no_output_____"
],
[
"# transform the documents in the training data to a document-term matrix\nX_train_vectorized = vect.transform(X_train)\n\nX_train_vectorized",
"_____no_output_____"
],
[
"from sklearn.linear_model import LogisticRegression\n\n# Train the model\nmodel = LogisticRegression()\nmodel.fit(X_train_vectorized, y_train)",
"_____no_output_____"
],
[
"from sklearn.metrics import roc_auc_score\n\n# Predict the transformed test documents\npredictions = model.predict(vect.transform(X_test))\n\nprint('AUC: ', roc_auc_score(y_test, predictions))",
"AUC: 0.897433277667\n"
],
[
"# get the feature names as numpy array\nfeature_names = np.array(vect.get_feature_names())\n\n# Sort the coefficients from the model\nsorted_coef_index = model.coef_[0].argsort()\n\n# Find the 10 smallest and 10 largest coefficients\n# The 10 largest coefficients are being indexed using [:-11:-1] \n# so the list returned is in order of largest to smallest\nprint('Smallest Coefs:\\n{}\\n'.format(feature_names[sorted_coef_index[:10]]))\nprint('Largest Coefs: \\n{}'.format(feature_names[sorted_coef_index[:-11:-1]]))",
"Smallest Coefs:\n['worst' 'terrible' 'slow' 'junk' 'poor' 'sucks' 'horrible' 'useless'\n 'waste' 'disappointed']\n\nLargest Coefs: \n['excelent' 'excelente' 'excellent' 'perfectly' 'love' 'perfect' 'exactly'\n 'great' 'best' 'awesome']\n"
]
],
[
[
"# Tfidf",
"_____no_output_____"
]
],
[
[
"from sklearn.feature_extraction.text import TfidfVectorizer\n\n# Fit the TfidfVectorizer to the training data specifiying a minimum document frequency of 5\nvect = TfidfVectorizer(min_df=5).fit(X_train)\nlen(vect.get_feature_names())",
"_____no_output_____"
],
[
"X_train_vectorized = vect.transform(X_train)\n\nmodel = LogisticRegression()\nmodel.fit(X_train_vectorized, y_train)\n\npredictions = model.predict(vect.transform(X_test))\n\nprint('AUC: ', roc_auc_score(y_test, predictions))",
"AUC: 0.889951006492\n"
],
[
"feature_names = np.array(vect.get_feature_names())\n\nsorted_tfidf_index = X_train_vectorized.max(0).toarray()[0].argsort()\n\nprint('Smallest tfidf:\\n{}\\n'.format(feature_names[sorted_tfidf_index[:10]]))\nprint('Largest tfidf: \\n{}'.format(feature_names[sorted_tfidf_index[:-11:-1]]))",
"Smallest tfidf:\n['61' 'printer' 'approach' 'adjustment' 'consequences' 'length' 'emailing'\n 'degrees' 'handsfree' 'chipset']\n\nLargest tfidf: \n['unlocked' 'handy' 'useless' 'cheat' 'up' 'original' 'exelent' 'exelente'\n 'exellent' 'satisfied']\n"
],
[
"sorted_coef_index = model.coef_[0].argsort()\n\nprint('Smallest Coefs:\\n{}\\n'.format(feature_names[sorted_coef_index[:10]]))\nprint('Largest Coefs: \\n{}'.format(feature_names[sorted_coef_index[:-11:-1]]))",
"Smallest Coefs:\n['not' 'slow' 'disappointed' 'worst' 'terrible' 'never' 'return' 'doesn'\n 'horrible' 'waste']\n\nLargest Coefs: \n['great' 'love' 'excellent' 'good' 'best' 'perfect' 'price' 'awesome' 'far'\n 'perfectly']\n"
],
[
"# These reviews are treated the same by our current model\nprint(model.predict(vect.transform(['not an issue, phone is working',\n 'an issue, phone is not working'])))",
"[0 0]\n"
]
],
[
[
"# n-grams",
"_____no_output_____"
]
],
[
[
"# Fit the CountVectorizer to the training data specifiying a minimum \n# document frequency of 5 and extracting 1-grams and 2-grams\nvect = CountVectorizer(min_df=5, ngram_range=(1,2)).fit(X_train)\n\nX_train_vectorized = vect.transform(X_train)\n\nlen(vect.get_feature_names())",
"_____no_output_____"
],
[
"model = LogisticRegression()\nmodel.fit(X_train_vectorized, y_train)\n\npredictions = model.predict(vect.transform(X_test))\n\nprint('AUC: ', roc_auc_score(y_test, predictions))",
"AUC: 0.91106617946\n"
],
[
"feature_names = np.array(vect.get_feature_names())\n\nsorted_coef_index = model.coef_[0].argsort()\n\nprint('Smallest Coefs:\\n{}\\n'.format(feature_names[sorted_coef_index[:10]]))\nprint('Largest Coefs: \\n{}'.format(feature_names[sorted_coef_index[:-11:-1]]))",
"Smallest Coefs:\n['no good' 'junk' 'poor' 'slow' 'worst' 'broken' 'not good' 'terrible'\n 'defective' 'horrible']\n\nLargest Coefs: \n['excellent' 'excelente' 'excelent' 'perfect' 'great' 'love' 'awesome'\n 'no problems' 'good' 'best']\n"
],
[
"# These reviews are now correctly identified\nprint(model.predict(vect.transform(['not an issue, phone is working',\n 'an issue, phone is not working'])))",
"[1 0]\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4a260d2474e4aa235fd9cff9d99ae1e416135bad
| 28,428 |
ipynb
|
Jupyter Notebook
|
01_sargassum_detection_coast.ipynb
|
octo-willy/Sargassum-Sensing
|
1fca7dcabf2bddc0c931de196f7f287adb626b75
|
[
"MIT"
] | null | null | null |
01_sargassum_detection_coast.ipynb
|
octo-willy/Sargassum-Sensing
|
1fca7dcabf2bddc0c931de196f7f287adb626b75
|
[
"MIT"
] | null | null | null |
01_sargassum_detection_coast.ipynb
|
octo-willy/Sargassum-Sensing
|
1fca7dcabf2bddc0c931de196f7f287adb626b75
|
[
"MIT"
] | null | null | null | 39.703911 | 369 | 0.606339 |
[
[
[
"<b>Detection of Sargassum on the coast and coastal waters</b> \nNotebook for classifying and analyzing Sargassum in Bonaire with Sentinel-2 images\n\n* Decision Tree Classifier (DTC) and Maximum Likelihood Classifier (MLC) are employed\n* Training sites covering 8 different classes are used to extract pixel values (training samples) over all Sentinel-2 bands\n* 12 Sentinel bands and 8 spectral indices evaluated using Jeffries-Matusita distance (selected: NDVI, REP, B05 and B11) \n* 80:20 train-test ratio for splitting the training samples\n* K-Fold cross-validation performed for tuning the DTC model\n* MLC model developed with 4 different chi-square thresholds: 0% (base), 10%,20%,50%\n",
"_____no_output_____"
]
],
[
[
"import os\nimport re\nimport pandas as pd\nimport numpy as np\nimport rasterio as rio\nfrom rasterio import Affine\nfrom rasterio.mask import mask\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom glob import glob\nimport geopandas as gpd\nfrom joblib import dump,load\nfrom rasterstats import zonal_stats\nfrom tqdm import tqdm,tqdm_notebook\n\n#custom functions\nfrom Python.prep_raster import stack_bands,clip_raster,pixel_sample,computeIndexStack\nfrom Python.data_treat import balance_sample,down_sample\nfrom Python.spec_analysis import transpose_df,jmd2df\nfrom Python.data_viz import specsign_plot,jmd_heatmap,ridgePlot,validation_curve_plot\nfrom Python.mlc import mlClassifier\nfrom Python.calc_acc import calc_acc\nfrom Python.pred_raster import stack2pred, dtc_pred_stack\nfrom Python.misc import get_feat_layer_order\n\n#sklearn functions\nfrom sklearn.model_selection import train_test_split,validation_curve\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.tree import DecisionTreeClassifier\n\n#setup IO directories\nparent_dir = os.path.join(os.path.abspath('..'),'objective1') #change according to preference\nsub_dirs = ['fullstack','clippedstack','indexstack','predicted','stack2pred']\nmake_dirs = [os.makedirs(os.path.join(parent_dir,name),exist_ok=True) for name in sub_dirs]",
"_____no_output_____"
]
],
[
[
"<b>Sentinel-2 data preparation</b>\n* Resample coarse bands to 10m resolution\n* Stack multiband images \n* Calculate spectral indices",
"_____no_output_____"
]
],
[
[
"#dates considered for classification and analysis \ndates = [20180304,20180309,20180314,20180319,20190108,20190128,20190212,20190304,20190309, \n 20190314,20190319,20190508,20190513,20190518,20190523,20190821,20191129]\n\n#band names\nbands = ['B01_60m','B02_10m','B03_10m','B04_10m','B05_20m','B06_20m',\n 'B07_20m','B08_10m','B8A_20m','B09_60m','B11_20m','B12_20m']\n\n#get product file paths according to dates and tile ID T19PEP (covers Bonaire)\nlevel2_dir = '...' #change according to preference\nlevel2_files = glob(level2_dir+\"/*.SAFE\")\nscene_paths=[file for date in dates for file in level2_files if str(date) in file and 'T19PEP' in file]\n\n#sort multiband image paths according to date\nimage_collection ={}\n\nfor scene in scene_paths:\n date = re.findall(r\"(\\d{8})T\", scene)[0]\n \n #collect all .jp2 band images in SAFE directory\n all_images = [f for f in glob(scene + \"*/**/*.jp2\", recursive=True)]\n img_paths = [img_path for band in bands for img_path in all_images if band in img_path]\n image_collection[date] = img_paths\n\n#check nr. of images per date\nfor key in image_collection.keys():print(f'Date: {key} Images: {len(image_collection[key])}')",
"_____no_output_____"
],
[
"#stack multiband images to a geotiff (!computationaly intensive)\nfor date in tqdm(image_collection.keys(),position=0, leave=True):\n ref10m= image_collection[date][1] #use band B02 (10m) as reference metadata\n outfile = os.path.join(parent_dir,'fullstack',f'stack_{date}.tif')\n stack_bands(image_collection[date],ref10m,outfile)\n",
"_____no_output_____"
],
[
"#crop multiband image stack and compute spectral indices\nroi_file = './data/boundaries/coastline_lacbay.geojson' #polygon for cropping image\nindices = ['NDVI','REP','FAI','GNDVI','NDVI_B8A','VB_FAH','SEI','SABI'] #list of indices used in the study\n\nstack_files = glob(parent_dir + \"/fullstack/*.tif\")\nfor stack_file in tqdm(stack_files,position=0, leave=True):\n filename = os.path.basename(stack_file).split('.')[0]\n \n #cropping\n clip_outfile = os.path.join(parent_dir,'clippedstack',filename+\"_clipped.tif\")\n clip_raster(stack_file,roi_file,clip_outfile,fill=True,nodat=0)\n \n #compute spectral indices\n index_outfile = os.path.join(index_dir,filename+\"_index.tif\")\n computeIndexStack(clip_outfile,indices,index_outfile)\n ",
"_____no_output_____"
]
],
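The helpers stack_bands, clip_raster and computeIndexStack above come from the local Python.prep_raster module, which is not included in this notebook. As an illustration of the index step only, here is a minimal sketch of computing NDVI from the clipped stack with rasterio; the band positions (B04 as band 4, B08 as band 8 in the 12-band order above) and the function itself are assumptions for the sketch, not the module's actual implementation:

import numpy as np
import rasterio as rio

def compute_ndvi(stack_path, out_path, red_band=4, nir_band=8):
    # red_band/nir_band are 1-based positions in the stack (B04 and B08) -- an assumption
    with rio.open(stack_path) as src:
        red = src.read(red_band).astype('float32')
        nir = src.read(nir_band).astype('float32')
        profile = src.profile.copy()

    den = nir + red
    den[den == 0] = np.nan                      # avoid division by zero on fill pixels
    ndvi = np.nan_to_num((nir - red) / den)     # nodata pixels -> 0

    profile.update(count=1, dtype=rio.float32)
    with rio.open(out_path, 'w', **profile) as dst:
        dst.write(ndvi, 1)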
[
[
"<b>Sample pixel values from multiband images based on training sites</b> \n* Training scenes from 4,9,14 and 19 March 2019",
"_____no_output_____"
]
],
[
[
"#get training sites and corresponding images\ntrain_sites = [f for f in glob(r\".\\data\\training_input\\objective1\\*_coast.geojson\")] \ndates = [20190304,20190309,20190314,20190319] \nstack_bands = [f for date in dates for f in glob(parent_dir+'/clipped*/*_clipped.tif') if str(date) in f] \nindex_bands = [f for date in dates for f in glob(parent_dir+'/index*/*_index.tif') if str(date) in f] \n\n#bands and indices to be sampled\nband_names = ['B01','B02','B03','B04','B05','B06','B07','B08','B8A','B09','B11','B12']\nindices = ['NDVI','REP','FAI','GNDVI','NDVI-B8A','VB-FAH','SEI','SABI']\n\ndataset = []\nfor i in range(len(train_sites)):\n \n #sample multibands and spectral indices\n df_bands= pixel_sample(stack_bands[i],train_sites[i],band_names)\n df_indices= pixel_sample(index_bands[i],train_sites[i],indices)\n df_sample = pd.concat([df_bands,df_indices],axis=1)\n df_sample = df_sample.loc[:,~df_sample.columns.duplicated()]\n \n #downsample based on floating Sargassum (Sf)\n df_downsampled = down_sample(df_sample,'C','Sf')\n dataset.append(df_downsampled)\n \n#final dataset\ndataset=pd.concat(dataset,sort=False).reset_index(drop=True) \ndataset.to_csv(r'./data/training_input/csv/training_samples_20190304_20190319_sargassum.csv',index=False)",
"_____no_output_____"
]
],
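pixel_sample and down_sample are also local helpers (Python.prep_raster and Python.data_treat) whose code is not shown. A minimal sketch of what sampling raster values at training locations could look like with rasterio and geopandas, assuming point geometries and a class column 'C'; the actual helper may differ (the training sites here appear to be polygons):

import geopandas as gpd
import pandas as pd
import rasterio as rio

def sample_points(raster_path, points_path, column_names, class_col='C'):
    # read training locations and collect their (x, y) coordinates
    pts = gpd.read_file(points_path)
    coords = [(geom.x, geom.y) for geom in pts.geometry]

    # rasterio's sample() yields one array of band values per coordinate
    with rio.open(raster_path) as src:
        values = [list(v) for v in src.sample(coords)]

    df = pd.DataFrame(values, columns=column_names)
    df[class_col] = pts[class_col].values
    return df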
[
[
"<b>Expore spectral signature</b> \n* Jeffries-Matusita distance (JMD) used for feature selection ([reference](https://books.google.nl/books?id=RxHbb3enITYC&pg=PA52&lpg=PA52&dq=for+one+feature+and+two+classes+the+Bhattacharyya+distance+is+given+by&source=bl&ots=sTKLGl1POo&sig=ACfU3U2s7tv0LT9vfSUat98l4L9_dyUgeg&hl=nl&sa=X&ved=2ahUKEwiKgeHYwI7lAhWIIlAKHZfJAC0Q6AEwBnoECAkQAQ#v=onepage&q&f=false))\n* NDVI, REP, B05 and B11 are selected as input features for the classifiers",
"_____no_output_____"
]
],
[
[
"#load training sample\ndf = pd.read_csv('./data/training_input/csv/training_samples_20190304_20190319_sargassum.csv')\n\n#plot spectral signature focused on 4 subclasses\nspecsign_plot(df,df.columns[4:16],classtype='C')\n\n#plot JMD heatmap for each band\njmd_bands = [jmd2df(transpose_df(df,'C',band)) for band in df.columns[4:16]]\njmd_heatmap(jmd_bands)\n\n#plot JMD heatmap for each spectral index\njmd_indices = [jmd2df(transpose_df(df,'C',band)) for band in df.columns[16:]]\njmd_heatmap(jmd_indices)\n\n#plot distribution of selected input features\nsns.set_style('white')\nridgePlot(df[['C','NDVI','REP','B05','B11']],'C')",
"_____no_output_____"
]
],
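jmd2df and transpose_df are local helpers (Python.spec_analysis), so the distance computation itself is not visible here. For reference, a sketch of the univariate Jeffries-Matusita distance between two classes, using the common Bhattacharyya-based form JM = 2(1 - exp(-B)); this mirrors the single-feature formulation in the cited reference but is not the module's actual code:

import numpy as np

def jm_distance(x1, x2):
    # assumes each class is roughly Gaussian in the chosen band/index
    m1, m2 = np.mean(x1), np.mean(x2)
    v1, v2 = np.var(x1), np.var(x2)

    # univariate Bhattacharyya distance
    b = (0.25 * (m1 - m2) ** 2 / (v1 + v2)
         + 0.5 * np.log((v1 + v2) / (2.0 * np.sqrt(v1 * v2))))

    # Jeffries-Matusita distance, bounded to [0, 2]; values near 2 = well separable
    return 2.0 * (1.0 - np.exp(-b))

For example, jm_distance(df.loc[df['C'] == 'Sf', 'NDVI'], df.loc[df['C'] == 'Sl', 'NDVI']) would give the Sf-vs-Sl separability in NDVI.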
[
[
"<b>Build classifiers</b> ",
"_____no_output_____"
]
],
[
[
"#load training sample\ndf = pd.read_csv('./data/training_input/csv/training_samples_20190304_20190319_sargassum.csv')\npredictors = ['NDVI','REP','B05','B11']\nsubset_df = df[['C']+predictors]\n\n#split into train and test datasets 80:20\ntrain,test = train_test_split(subset_df, train_size = 0.8,random_state=1,shuffle=True,stratify=np.array(subset_df['C']))\ntrain = train.sort_values(by='C',ascending=True) #sort labels\n\n#split pedictors from labels (for DTC)\nle = LabelEncoder()\nX_train,y_train = train[predictors],le.fit_transform(train['C'])\nX_test,y_test = test[predictors],le.fit_transform(test['C'])",
"_____no_output_____"
]
],
[
[
"* Decision Tree Classifier",
"_____no_output_____"
]
],
[
[
"#perform k-fold (=10) cross-validation \n\n#parameters considered in this step\nmax_depth = np.arange(1,40,2) \nmin_samples_split = list(range(2, 100,10)) \nmax_leaf_nodes= list(range(2, 50,5)) \nmin_samples_leaf= list(range(1, 100,10)) \nmin_impurity_decrease=[0,0.00005,0.0001,0.0002,0.0005,0.001,0.0015,0.002,0.005,0.01,0.02,0.05,0.08] \ncriterion = ['gini','entropy']\n\n#assign parameters to a dictionary\nparams = {'max_depth':max_depth,'min_samples_split':min_samples_split,\n 'max_leaf_nodes':max_leaf_nodes,'min_samples_leaf':min_samples_leaf,\n 'min_impurity_decrease':min_impurity_decrease,'criterion':criterion}\n\n#plot validation curve\nfig,axs = plt.subplots(3,2,figsize=(10,8))\naxs = axs.ravel()\ndtc = DecisionTreeClassifier(random_state=1,criterion='entropy') #default model\n\nfor (param_name,param_range),i in zip(params.items(),range(len(params.items()))):\n train_scores,test_scores = validation_curve(dtc,X_train.values,y_train,cv=10,scoring='accuracy',\n n_jobs=-1,param_range=param_range,param_name=param_name)\n validation_curve_plot(train_scores,test_scores,param_range,param_name,axs[i])\nplt.show()",
"_____no_output_____"
],
[
"#train dtc model based on best parameters\ndtc = DecisionTreeClassifier(max_depth=5,random_state=2,criterion='entropy',min_samples_split=70,\n max_leaf_nodes=15,min_samples_leaf=40,min_impurity_decrease=0.01,max_features=4)\ndtc = dtc.fit(X_train,y_train)\n\n#export model as joblib file\ndump(dtc,r\".\\data\\models\\dtc_model_sargassum.joblib\")",
"_____no_output_____"
]
],
[
[
"* Maximum Likelihood Classifier",
"_____no_output_____"
]
],
[
[
"#train mlc model\nmlc = mlClassifier(train,'C')\n\n#export model as joblib file\ndump(mlc,r\".\\data\\models\\mlc_model_sargassum.joblib\")",
"_____no_output_____"
]
],
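The mlClassifier class comes from the local Python.mlc module and its internals are not shown here. Below is a generic sketch of the Gaussian maximum-likelihood rule such a classifier typically implements: fit a mean vector and covariance per class, assign each pixel to the class with the highest log-likelihood, and optionally leave pixels unclassified when the squared Mahalanobis distance to the best class exceeds a chi-square threshold (the thresholds used in the next cells). This is an illustration under those assumptions, not the module's code:

import numpy as np

def fit_class_gaussians(train_df, label_col, feature_cols):
    # per-class mean vector and covariance matrix
    params = {}
    for cls, grp in train_df.groupby(label_col):
        X = grp[feature_cols].values
        params[cls] = (X.mean(axis=0), np.cov(X, rowvar=False))
    return params

def mlc_predict(X, params, threshold=None, reject_label='unclassified'):
    # score every sample against every class with the Gaussian log-likelihood
    labels = sorted(params)
    scores, d2s = [], []
    for cls in labels:
        mu, cov = params[cls]
        inv = np.linalg.inv(cov)
        _, logdet = np.linalg.slogdet(cov)
        diff = X - mu
        d2 = np.einsum('ij,jk,ik->i', diff, inv, diff)   # squared Mahalanobis distance
        scores.append(-0.5 * (logdet + d2))
        d2s.append(d2)
    scores, d2s = np.array(scores), np.array(d2s)

    best = scores.argmax(axis=0)
    pred = np.array(labels, dtype=object)[best]
    if threshold is not None:
        # chi-square rejection: leave unlikely pixels unclassified
        too_far = d2s[best, np.arange(X.shape[0])] > threshold
        pred[too_far] = reject_label
    return pred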
[
[
"* Compute model accuracies (based on test split)",
"_____no_output_____"
]
],
[
[
"#load models\ndtc = load(r\".\\data\\models\\dtc_model_sargassum.joblib\")\nmlc = load(r\".\\data\\models\\mlc_model_sargassum.joblib\")\n\n#DTC model accuracy\ndtc_y_pred = dtc.predict(X_test)\ncon_mat_dtc = calc_acc(le.inverse_transform(y_test),le.inverse_transform(dtc_y_pred))\ncon_mat_dtc['classifier'] = 'DTC'\n\n#MLC model accuracies with chi-square threshold\nchi_table = {'MLC base':None,'MLC 10%':7.78,'MLC 20%':5.99,'MLC 50%':3.36}\n\nmlc_conmats = []\nfor key,value in chi_table.items():\n con_mat_mlc = mlc.classify_testdata(test,'C',threshold=value)\n con_mat_mlc['classifier'] = key\n mlc_conmats.append(con_mat_mlc)\n\n#export model accuracies\nmlc_conmats = pd.concat(mlc_conmats)\nmodel_acc = pd.concat([con_mat_dtc,mlc_conmats])\nmodel_acc.to_csv('./data/output/objective1/dtc_mlc_model_acc_obj1.csv')",
"_____no_output_____"
]
],
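The values in chi_table look like chi-square critical values for 4 degrees of freedom (one per input feature), with the percentage giving the tail of pixels that would be left unclassified; that interpretation is an assumption based on the numbers rather than something stated in the notebook. A quick check with scipy:

from scipy.stats import chi2

# chi-square critical values with df = 4 (four predictors)
for tail, table_value in [(0.10, 7.78), (0.20, 5.99), (0.50, 3.36)]:
    crit = chi2.ppf(1 - tail, df=4)
    print(f'{int(tail * 100)}% tail -> {crit:.2f} (chi_table uses {table_value})')
# prints 7.78, 5.99 and 3.36, matching the thresholds above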
[
[
"<b>Classification</b> \n* create an image stack for prediction (stack2pred) for all scenes in objective1 folder\n* classify each stack2pred image with the DTC and MLC models",
"_____no_output_____"
]
],
[
[
"#get all multiband and spectral index images\nstack_bands = glob(parent_dir+'/clipped*/*_clipped.tif')\nindex_bands = glob(parent_dir+'/index*/*_index.tif')\n\n#get the order of the selected predictors in the multiband and spectral index images\npredictors = ['NDVI','REP','B05','B11']\nused_indices, used_bands = get_feat_layer_order(predictors)\n\nstack2pred_paths = []\n\n#create stack2pred rasters\nfor band_image,index_image in zip(stack_bands,index_bands):\n date = re.findall(r\"(\\d{8})\", band_image)[0]\n outfile = os.path.join(f'{parent_dir}\\stack2pred',f'stack2pred_{date}.tif')\n stack2pred_paths.append(outfile)\n\n stack2pred(index_image,band_image,used_indices,used_bands,outfile)\n ",
"_____no_output_____"
],
[
"#load models\ndtc = load(r\".\\data\\models\\dtc_model_sargassum.joblib\")\nmlc = load(r\".\\data\\models\\mlc_model_sargassum.joblib\")\n\n#stack2pred image paths\nstack2pred_paths = glob(parent_dir+'*/stack2pred/stack2pred_*.tif')\n\n#classify all stack2pred images\nfor path in stack2pred_paths:\n \n date = re.findall(r\"(\\d{8})\", path)[0]\n \n #predict multiple mlc with thresholds\n mlc_out = f'{parent_dir}/predicted/mlc/mlc_{date}_multi.tif'\n os.makedirs(os.path.dirname(mlc_out),exist_ok=True)\n if not os.path.exists(mlc_out):\n chi_probs = [None,7.78,5.99,3.36]\n mlc_preds = np.array([mlc.classify_raster_gx(path,out_file=None,threshold=prob) for prob in chi_probs])\n\n #export multilayer mlc image\n with rio.open(path) as src:\n profile = src.profile.copy()\n profile.update({'dtype': rio.uint16})\n with rio.open(mlc_out ,'w',**profile) as dst:\n dst.write(mlc_preds.astype(rio.uint16))\n \n #predict and export DTC raster\n dtc_out = f'{parent_dir}/predicted/dtc/dtc_{date}.tif'\n os.makedirs(os.path.dirname(dtc_out),exist_ok=True)\n if not os.path.exists(dtc_out):\n dtc_pred_stack(dtc,path,dtc_out)",
"_____no_output_____"
]
],
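stack2pred and dtc_pred_stack (Python.pred_raster) are further local helpers. The usual pattern for applying a fitted scikit-learn classifier to a multi-band raster is to reshape the stack to (pixels, features), predict on the valid pixels and reshape back; a sketch under the assumptions that the stack2pred band order matches the predictors and that 0 marks nodata:

import numpy as np
import rasterio as rio

def classify_raster(clf, stack_path, out_path, nodata=0):
    with rio.open(stack_path) as src:
        stack = src.read()                       # shape: (bands, rows, cols)
        profile = src.profile.copy()

    bands, rows, cols = stack.shape
    X = stack.reshape(bands, -1).T               # shape: (pixels, features)
    valid = ~np.all(X == nodata, axis=1)         # skip fill pixels

    pred = np.zeros(X.shape[0], dtype=np.uint16)
    if valid.any():
        # +1 shifts the encoded classes so 0 stays reserved for nodata
        # (the offset is an assumption about the output coding)
        pred[valid] = clf.predict(X[valid]) + 1

    profile.update(count=1, dtype=rio.uint16)
    with rio.open(out_path, 'w', **profile) as dst:
        dst.write(pred.reshape(rows, cols), 1)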
[
[
"* MLC class posterior probability raster",
"_____no_output_____"
]
],
[
[
"#stack2pred image paths\nstack2pred_paths = glob(parent_dir+'*/stack2pred/stack2pred_*.tif')\n\n#compute probabality raster\nfor path in stack2pred_paths:\n mlc_prob_out = f'{parent_dir}/predicted/mlc/mlc_{date}_prob.tif'\n os.makedirs(os.path.dirname(mlc_out),exist_ok=True)\n mlc.prob_rasters(path,mlc_prob_out)",
"_____no_output_____"
]
],
[
[
"<b>External validity</b> \n* Classify DTC and MLC results for a scene taken on 2019-05-18\n* Validation samples only covers Non-Floating Sargassum (Non-Sf) and Floating Sargassum (Sf)\n* Floating Sargassum (Sf) pixel value = 3 in the DTC and MLC rasters ",
"_____no_output_____"
]
],
[
[
"#get file paths\nval_samples = gpd.read_file(r'./data/training_input/objective1/sf_validation_20190518.geojson')\ndtc_file = glob(parent_dir+'/predicted*/dtc/dtc*20190518*.tif')[0]\nmlc_file = glob(parent_dir+'/predicted*/mlc/mlc*20190518*.tif')[0]\n\ncoords = [(val_samples.geometry[i][0].x,val_samples.geometry[i][0].y) for i in range(len(val_samples))]\nwith rio.open(dtc_file) as dtc_src, rio.open(mlc_file) as mlc_src:\n \n #sample from dtc raster\n val_samples['DTC'] = [pt[0] for pt in dtc_src.sample(coords)]\n \n #sample from multilayer mlc raster\n mlc_multi = pd.concat([pd.DataFrame(pt).T for pt in mlc_src.sample(coords)],ignore_index=True)\n val_samples[['MLC base','MLC 10%','MLC 20%','MLC 50%']] = mlc_multi\n \n#convert pixel values to 1 if Sf, else to 0 for others\nval_samples[val_samples.columns[-5:]] = (val_samples[val_samples.columns[-5:]]==3).astype(int)\n\n#compute classification (validation) accuracy \ndf_val = pd.DataFrame(val_samples.drop(columns='geometry'))\n\nacc_val_dfs = []\nfor pred in df_val.columns[df_val.columns!='label']:\n acc = calc_acc(df_val['label'].values, df_val[pred].values)\n acc['classifier'] = pred\n acc_val_dfs.append(acc)\nacc_val_dfs = pd.concat(acc_val_dfs)\nacc_val_dfs.to_csv('./data/output/objective1/dtc_mlc_external_val_obj1.csv')",
"_____no_output_____"
]
],
[
[
"* Plot model and validation accuracies",
"_____no_output_____"
]
],
[
[
"model_df = pd.read_csv('./data/output/objective1/dtc_mlc_model_acc_obj1.csv').set_index('Model')\nval_df = pd.read_csv('./data/output/objective1/dtc_mlc_external_val_obj1.csv').set_index('Observed')\n\nacc2plot = {'Model accuracy (8 classes)':model_df.loc['PA','UA'].str[:4].astype(float),\n 'Model F1-score (Sf)':model_df.loc['Sf','F1-score'].astype(float),\n 'Validation accuracy (2 classes)':val_df.loc['PA','UA'].str[:4].astype(float),\n 'Validation F1-score (Sf)':val_df.loc['1','F1-score'].astype(float)}\n\n[plt.plot(val_df['classifier'].unique(),value,label=key) for key,value in acc2plot.items()]\nplt.legend()",
"_____no_output_____"
]
],
[
[
"<b>Comparative analysis</b> \n* Compare Sargassum (Sf and Sl) classified area across different scenes for each model\n* Persisting missclassification occur between the two Sargassum classes and other coastal features, hence a mask was applied.",
"_____no_output_____"
]
],
[
[
"#get classification result paths\ndtc_paths = glob(parent_dir+'/predicted*/dtc/dtc*.tif')\nmlc_paths = glob(parent_dir+'/predicted*/mlc/mlc*.tif')\n\n#load mask\nsl_mask = [gpd.read_file('./data/boundaries/sf_sl_mask.geojson').__geo_interface__['features'][0]['geometry']]\nsf_mask = [gpd.read_file('./data/boundaries/sf_sl_mask.geojson').__geo_interface__['features'][1]['geometry']]\n\n#collection of Sargassum classification results\ndata = dict.fromkeys(['Date','Sl MLC Base','Sl MLC 10%','Sl MLC 20%','Sl MLC 50%','Sl DTC',\n 'Sf MLC Base','Sf MLC 10%','Sf MLC 20%','Sf MLC 50%','Sf DTC'], [])\n\nfor i in range(len(mlc_paths)):\n date = re.findall(r\"(\\d{8})\", mlc_paths[i])\n data['Date'] = data['Date']+ [str(pd.to_datetime(date)[0].date())]\n \n with rio.open(dtc_paths[i]) as dtc_src, rio.open(mlc_paths[i]) as mlc_src:\n \n #sf pixel count\n dtc_img= mask(dataset=dtc_src,shapes=sf_mask,nodata=dtc_src.nodata,invert=True)[0]\n data['Sf DTC'] = data['Sf DTC']+[np.unique(dtc_img, return_counts=True)[1][2]]\n \n mlc_imgs= mask(dataset=mlc_src,shapes=sf_mask,nodata=mlc_src.nodata,invert=True)[0]\n for k,sf_mlc_key in enumerate(list(data.keys())[6:-1]): \n data[sf_mlc_key] = data[sf_mlc_key]+ [[np.unique(mlc_img, return_counts=True)[1][2] for mlc_img in mlc_imgs][k]]\n \n #sl pixel count\n dtc_img= mask(dataset=dtc_src,shapes=sl_mask,nodata=dtc_src.nodata,invert=False)[0]\n data['Sl DTC'] = data['Sl DTC']+[np.unique(dtc_img, return_counts=True)[1][3]]\n \n mlc_imgs= mask(dataset=mlc_src,shapes=sl_mask,nodata=mlc_src.nodata,invert=False)[0]\n for j,sl_mlc_key in enumerate(list(data.keys())[1:5]): \n data[sl_mlc_key] = data[sl_mlc_key]+[[np.unique(mlc_img, return_counts=True)[1][3] for mlc_img in mlc_imgs][j]]\n\n#export data\ndata = pd.DataFrame(data)\ndata.to_csv('./data/output/objective1/classified_area_obj1.csv',index=False)",
"_____no_output_____"
]
],
[
[
"* Plot Sargassum classified area in 2019",
"_____no_output_____"
]
],
[
[
"#load data and subset only the 2019 results\ndata = pd.read_csv('./data/output/objective1/classified_area_obj1.csv',index_col='Date')[4:]\n\n#plot Floating Sargassum (Sf) and Sargassum on land (Sl)\nfig,axs = plt.subplots(1,2,figsize=(20,8))\naxs[0].set_ylabel('Classified area (ha)')\nplt.tight_layout()\nfig.autofmt_xdate()\n\nplots = [axs[0].plot(data[col]/100) if 'Sf' in col else axs[1].plot(data[col]/100) for col in data.columns]\nlegends = axs[0].legend(data.columns[:5],loc='upper right'),axs[1].legend(data.columns[5:],loc='upper right')",
"_____no_output_____"
]
],
[
[
"<b>Sargassum coverage maps</b> \n* Compute Sargassum coverage maps for the invasions in March and May 2019 and March 2018\n* A 20mx20m grid was used to calculate the coverage for each scene\n* MLC 20% results were used for Floating Sargassum (Sf) coverage map\n* MLC 50% results were used for Sargassum on land (Sl) coverage map\n* Note that code below takes about 10 minutes to run (due to small grid tile size)\n",
"_____no_output_____"
]
],
[
[
"#get classification result paths\nmlc_paths = glob(parent_dir+'/predicted*/mlc/mlc*03*.tif')+glob(parent_dir+'/predicted*/mlc/mlc*05*.tif')\n\n#load mask and grid data\nmask_data = gpd.read_file('./data/boundaries/objective1/sf_sl_mask.geojson').__geo_interface__['features']\ngrid_file = gpd.read_file(r'./data/boundaries/objective1/20mgrid.geojson')\n\n#collect geodataframes\ndata = []\n\nfor mlc_file in mlc_paths:\n date = re.findall(r\"(\\d{8})\", mlc_file)[0]\n with rio.open(mlc_file) as src:\n \n #iterate according to mask data (first item = sl, second item = sf)\n #count number of pixel in each grid tile (computationaly intensive!)\n for feat,label,val,inv,model in zip(mask_data,['sl','sf'],[4,3],[False,True],[3,2]):\n img = mask(dataset=src,shapes=[feat['geometry']],nodata=src.nodata,invert=inv)[0][model]\n zs = zonal_stats(grid_file,np.where(img==val,1,0),affine=src.transform,\n prefix=f'{label}_{date}_',stats='count',geojson_out=True,nodata=0)\n zs_filter = list(filter(lambda x: x['properties'][f'{label}_{date}_count']!=0, zs))\n data.append(gpd.GeoDataFrame.from_features(zs_filter,crs=grid_file.crs))\n\n#merge with grid file based on id\ngrid_file_copy = grid_file.copy()\nfor i in range(len(data)):\n grid_file_copy = gpd.GeoDataFrame(grid_file_copy.merge(data[i][data[i].columns[1:]],on='id',how='outer'),\n crs=grid_file.crs,geometry=grid_file.geometry).replace(np.nan,0)\n\n#calculate coverage for each grid tile \nsf_split = np.array_split(grid_file_copy[[i for i in grid_file_copy.columns if 'sf' in i ]],3,axis=1)\nsl_split = np.array_split(grid_file_copy[[i for i in grid_file_copy.columns if 'sl' in i ]],3,axis=1)\nscale_factor = (100/4/400) #(relative coverage of Sentinel-2 pixels in a 20x20m tile over 4 dates)\nsf_covr = [sf_split[i].sum(1)*scale_factor for i in range(len(sf_split))]\nsl_covr = [sl_split[i].sum(1)*scale_factor for i in range(len(sl_split))]\n\n#export coverage maps\ngdf_out = pd.concat([grid_file_copy[['geometry']]]+sf_covr+sl_covr,axis=1)\ngdf_out.columns = ['geometry','sf_mar2018','sf_mar2019','sf_may2019','sl_mar2018','sl_mar2019','sl_may2019']\ngdf_out = gdf_out[gdf_out[gdf_out.columns[1:]].sum(1)!=0]\ngdf_out.to_file(r'./data/output/objective1/sargassum_coverage_coast.geojson',driver='GeoJSON')",
"_____no_output_____"
]
]
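The scale_factor = 100/4/400 above can be read as: each classified 10 m Sentinel-2 pixel accounts for 100 m² of a 400 m² (20 m x 20 m) grid tile, and counts are summed over the 4 dates of an invasion period, so a fully covered tile (16 counts) maps to a coverage of 1.0. A small sanity check of that reading (the interpretation is an assumption):

pixel_area = 10 * 10    # m^2 covered by one Sentinel-2 pixel
tile_area = 20 * 20     # m^2 covered by one grid tile
n_dates = 4             # scenes summed per invasion period

scale_factor = pixel_area / n_dates / tile_area          # 0.0625, as in the cell above
full_tile = (tile_area // pixel_area) * n_dates          # 4 pixels x 4 dates = 16 counts
print(scale_factor, full_tile * scale_factor)            # 0.0625 1.0 (fully covered tile)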
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a261570e47ba94747a06c652f06833a2467f24f
| 615,272 |
ipynb
|
Jupyter Notebook
|
ML_model/tables.ipynb
|
harisaisundhar/climate-malaria-canal
|
b28da722c8ce72534b2ae706afa1ec089b77bab0
|
[
"MIT"
] | null | null | null |
ML_model/tables.ipynb
|
harisaisundhar/climate-malaria-canal
|
b28da722c8ce72534b2ae706afa1ec089b77bab0
|
[
"MIT"
] | null | null | null |
ML_model/tables.ipynb
|
harisaisundhar/climate-malaria-canal
|
b28da722c8ce72534b2ae706afa1ec089b77bab0
|
[
"MIT"
] | null | null | null | 481.810493 | 122,838 | 0.686376 |
[
[
[
"import pandas as pd\nand_data = pd.read_csv('ANDHRA_PD.csv')\nand_data.head()",
"_____no_output_____"
],
[
"del_data = pd.read_csv('DELHI.csv')\ndel_data.head()",
"_____no_output_____"
],
[
"kar_data = pd.read_csv('KARNATAKA.csv')\nkar_data.head()",
"_____no_output_____"
],
[
"mah_data = pd.read_csv('MAHARASHTRA.csv')\nmah_data.head()",
"_____no_output_____"
],
[
"tam_data = pd.read_csv('TAMIL_NADU.csv')\ntam_data.head()",
"_____no_output_____"
],
[
"utt_data = pd.read_csv('UTTAR_PD.csv')\nutt_data.head()",
"_____no_output_____"
],
[
"def Label(data):\n inc=[]\n means = data['Malaria_Incidence'].mean()\n stds = data['Malaria_Incidence'].std()\n thresh = means + (0.45*stds)\n for i in data['Malaria_Incidence']:\n if i < thresh:\n inc.append(-1)\n else:\n inc.append(1)\n print(inc)\n return inc",
"_____no_output_____"
],
[
"s_data = [del_data,kar_data,mah_data,tam_data,utt_data,and_data]\nfor i in s_data:\n hh = Label(i)\n i['M_inc'] = hh\n print(i)",
"[-1, -1, 1, 1, -1]\n Year AQI CO Temp Humidity Precipitation \\\n0 2015 297.024658 5.255151 26.665183 38.453311 0.032158 \n1 2014 301.035519 1.610082 27.001366 37.858379 0.016393 \n2 2013 252.247945 0.697753 27.483105 35.196689 0.017146 \n3 2012 249.158904 1.407068 28.009132 36.552055 0.031678 \n4 2011 232.104110 1.371616 30.222603 37.326941 0.086370 \n\n Population Malaria_Cases Malaria_Death Dengue_Cases Kalaazar_Cases \\\n0 17818108 169 0 847 0 \n1 17554786 251 0 5574 6 \n2 17295356 413 0 2093 11 \n3 17039760 382 0 1131 19 \n4 16787941 303 0 6259 92 \n\n Rainfall Malaria_Incidence M_inc \n0 435.3 0.948473 -1 \n1 305.5 1.429810 -1 \n2 461.2 2.387924 1 \n3 391.2 2.241816 1 \n4 512.5 1.804867 -1 \n[1, 1, -1, -1, -1]\n Year AQI CO Temp Humidity Precipitation \\\n0 2015 106.497260 5.482808 23.849087 65.878425 0.061587 \n1 2014 104.532787 1.301858 23.949226 63.342668 0.050979 \n2 2013 87.149315 1.052904 24.145434 63.423059 0.081233 \n3 2012 86.419178 0.945671 24.631050 60.087557 0.074178 \n4 2011 91.602740 0.901753 25.017466 62.811986 0.226941 \n\n Population Malaria_Cases Malaria_Death Dengue_Cases Kalaazar_Cases \\\n0 64844321 36859 0 2842 354 \n1 63886031 44319 11 6408 595 \n2 62941902 24237 0 3924 995 \n3 62011726 16466 0 405 1962 \n4 61095297 10170 5 2285 1482 \n\n Rainfall Malaria_Incidence M_inc \n0 1112.5 56.842295 1 \n1 1184.2 69.371973 1 \n2 1110.7 38.506939 -1 \n3 877.8 26.553043 -1 \n4 1087.2 16.646126 -1 \n[-1, 1, -1, -1, -1]\n Year AQI CO Temp Humidity Precipitation Population \\\n0 2015 75.551104 0.00000 27.694406 66.351484 0.115354 119270021 \n1 2014 74.654538 0.00000 27.136043 67.408698 0.181432 117507410 \n2 2013 73.757972 0.00000 27.697374 66.217580 0.125240 115770847 \n3 2012 92.953771 1.57526 28.235616 64.683219 0.083447 114059948 \n4 2011 107.950685 1.27174 28.382763 67.288242 0.346090 112374333 \n\n Malaria_Cases Malaria_Death Dengue_Cases Kalaazar_Cases Rainfall \\\n0 93818 58 6485 0 644.5 \n1 139198 96 5610 0 838.0 \n2 96577 118 2931 0 836.0 \n3 58517 200 1138 0 689.8 \n4 37068 227 1485 0 852.6 \n\n Malaria_Incidence M_inc \n0 78.660169 -1 \n1 118.458912 1 \n2 83.420829 -1 \n3 51.303723 -1 \n4 32.986180 -1 \n[-1, -1, 1, 1, -1]\n Year AQI CO Temp Humidity Precipitation \\\n0 2015 161.479452 2.380151 24.734703 55.760274 0.048790 \n1 2014 136.199454 1.129003 24.312500 57.611758 0.054474 \n2 2013 104.343836 0.220877 24.746918 57.843579 0.060502 \n3 2012 105.490411 0.870411 25.424886 56.199429 0.055639 \n4 2011 102.942466 0.864027 26.176256 53.920776 0.204041 \n\n Population Malaria_Cases Malaria_Death Dengue_Cases Kalaazar_Cases \\\n0 76574228 14988 0 2074 1 \n1 75442589 17086 0 6122 4 \n2 74327674 22171 0 12826 6 \n3 73229235 18869 3 2501 5 \n4 72147030 13075 1 2051 12 \n\n Rainfall Malaria_Incidence M_inc \n0 1204.6 19.573165 -1 \n1 913.0 22.647685 -1 \n2 741.9 29.828728 1 \n3 636.1 25.767031 1 \n4 926.5 18.122714 -1 \n[-1, 1, 1, -1, -1]\n Year AQI CO Temp Humidity Precipitation \\\n0 2015 308.276013 12.862882 27.936073 46.130251 0.063881 \n1 2014 323.836867 13.326556 27.604850 48.134677 0.067953 \n2 2013 283.153320 91.218036 27.779452 45.797146 0.056644 \n3 2012 622.550685 33.369493 28.118379 44.859817 0.062089 \n4 2011 519.804110 25.995068 28.867009 49.020890 0.275445 \n\n Population Malaria_Cases Malaria_Death Dengue_Cases Kalaazar_Cases \\\n0 212073536 55437 0 119 11 \n1 208939444 64606 0 1414 11 \n2 205851669 56968 0 342 5 \n3 202809526 47400 0 155 11 \n4 199812341 41891 0 960 14 \n\n Rainfall Malaria_Incidence M_inc \n0 603.3 26.140461 -1 \n1 701.8 30.920921 1 
\n2 1041.4 27.674296 1 \n3 602.4 23.371683 -1 \n4 781.2 20.965172 -1 \n[-1, 1, 1, -1, -1]\n Year AQI CO Temp Humidity Precipitation \\\n0 2015 127.690608 0.748287 26.819635 51.720091 0.039897 \n1 2014 121.851093 0.821107 26.253301 54.376480 0.045048 \n2 2013 112.439726 0.254301 27.177968 47.469749 0.041450 \n3 2012 97.556164 0.621726 27.814384 47.174087 0.047454 \n4 2011 93.980822 0.570110 28.015639 55.852740 0.235377 \n\n Population Malaria_Cases Malaria_Death Dengue_Cases Kalaazar_Cases \\\n0 52618984 25152 3 805 4 \n1 51841364 33393 20 910 0 \n2 51075235 34949 5 2299 7 \n3 50320429 24699 2 1209 0 \n4 49576777 13351 0 776 1 \n\n Rainfall Malaria_Incidence M_inc \n0 857.3 47.800239 -1 \n1 746.4 64.413814 1 \n2 1348.5 68.426508 1 \n3 1008.6 49.083445 -1 \n4 753.1 26.929947 -1 \n"
],
[
"df = and_data.append([del_data,kar_data,mah_data,tam_data,utt_data])\ndf.head(10)",
"_____no_output_____"
],
[
"from texttable import Texttable\nfull_headers = df.columns\nvalues = list(df.isnull().sum())\nnullList = []\nnullList.append(['Feature','Null Values count'])\nfor i in range(len(full_headers)):\n nullList.append([full_headers[i],values[i]])\n\ntable = Texttable()\ntable.add_rows(nullList)\nprint(table.draw()) \nprint(\"\\n\")",
"+-------------------+-------------------+\n| Feature | Null Values count |\n+===================+===================+\n| Year | 0 |\n+-------------------+-------------------+\n| AQI | 0 |\n+-------------------+-------------------+\n| CO | 0 |\n+-------------------+-------------------+\n| Temp | 0 |\n+-------------------+-------------------+\n| Humidity | 0 |\n+-------------------+-------------------+\n| Precipitation | 0 |\n+-------------------+-------------------+\n| Population | 0 |\n+-------------------+-------------------+\n| Malaria_Cases | 0 |\n+-------------------+-------------------+\n| Malaria_Death | 0 |\n+-------------------+-------------------+\n| Dengue_Cases | 0 |\n+-------------------+-------------------+\n| Kalaazar_Cases | 0 |\n+-------------------+-------------------+\n| Rainfall | 0 |\n+-------------------+-------------------+\n| Malaria_Incidence | 0 |\n+-------------------+-------------------+\n| M_inc | 0 |\n+-------------------+-------------------+\n\n\n"
],
[
"import matplotlib.pyplot as plotting\nprint(\"The histograms of the attributes are given below:\")\ndf.hist(bins=5,grid=False,layout=[6,6],figsize=[20,20])\nplotting.show()\nprint(\"\\n\")",
"The histograms of the attributes are given below:\n"
],
[
"#k-mean-clustering\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.cluster import KMeans\n\n\nx = df.iloc[:, [1, 2, 3, 4, 5, 6, 11]].values",
"_____no_output_____"
],
[
"wcss = []\n\nfor i in range(1, 11):\n kmeans = KMeans(n_clusters = i, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0)\n kmeans.fit(x)\n wcss.append(kmeans.inertia_)\n \n#Plotting the results onto a line graph, allowing us to observe 'The elbow'\nplt.plot(range(1, 11), wcss)\nplt.title('The elbow method')\nplt.xlabel('Number of clusters')\nplt.ylabel('WCSS') #within cluster sum of squares\nplt.show()",
"_____no_output_____"
],
[
"kmeans = KMeans(n_clusters = 2, init = 'k-means++', max_iter = 300, n_init = 10, random_state = 0)\ny_kmeans = kmeans.fit_predict(x)\n",
"_____no_output_____"
],
[
"\nplt.scatter(x[y_kmeans == 0, 0], x[y_kmeans == 0, 1], s = 100, c = 'blue', label = '1')\nplt.scatter(x[y_kmeans == 1, 0], x[y_kmeans == 1, 1], s = 100, c = 'green', label = '-1')\n\n#Plotting the centroids of the clusters\nplt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:,1], s = 100, c = 'yellow', label = 'Centroids')\n\nplt.legend()\n",
"_____no_output_____"
],
[
"#K-nn\nx = df.iloc[:,1:7]\ny = df.iloc[:,-1]\n#print(x,y)\n",
"_____no_output_____"
],
[
"import numpy as np\npredictors = ['AQI','CO','Temp','Humidity','Precipitation','Population','Rainfall']\ncrux = \"M_inc\"\n\nX = df.loc[:,predictors]\nY = np.ravel(df.loc[:,[crux]])",
"_____no_output_____"
],
[
"print(Y)",
"[-1 1 1 -1 -1 -1 -1 1 1 -1 1 1 -1 -1 -1 -1 1 -1 -1 -1 -1 -1 1 1\n -1 -1 1 1 -1 -1]\n"
],
[
"from sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import cross_val_score, cross_val_predict\n\nrf = RandomForestClassifier()\nsc = cross_val_score(rf, X, Y, scoring='accuracy').mean()\nprint(\"Benchmark-> Accuracy before Norm and PCA:- %s\"%round(sc*100,2))",
"Benchmark-> Accuracy before Norm and PCA:- 56.67\n"
],
[
"import seaborn as sns\nsns.swarmplot(y='Rainfall',x='M_inc', data=df)\nplotting.show()\nprint(\"\\n\")",
"_____no_output_____"
],
[
"sns.swarmplot(y='Temp',x='M_inc', data=df)\nplotting.show()\nprint(\"\\n\")",
"_____no_output_____"
],
[
"sns.swarmplot(y='AQI',x='M_inc', data=df)\nplotting.show()\nprint(\"\\n\")",
"_____no_output_____"
],
[
"import seaborn as sns\nb = df\nb_corr = b.drop(['Year','Malaria_Cases','Malaria_Death','Dengue_Cases','Kalaazar_Cases','Malaria_Incidence','M_inc'],axis=1)\ncorrelation = b_corr.corr()\nheatmap = sns.heatmap(correlation, cbar=True, annot=True, cmap=\"bwr\", linewidths=.75)\nheatmap.set_title(\"Correlation heatmap\\n\")\nplotting.show()",
"_____no_output_____"
],
[
"print(df)",
" Year AQI CO Temp Humidity Precipitation \\\n0 2015 127.690608 0.748287 26.819635 51.720091 0.039897 \n1 2014 121.851093 0.821107 26.253301 54.376480 0.045048 \n2 2013 112.439726 0.254301 27.177968 47.469749 0.041450 \n3 2012 97.556164 0.621726 27.814384 47.174087 0.047454 \n4 2011 93.980822 0.570110 28.015639 55.852740 0.235377 \n0 2015 297.024658 5.255151 26.665183 38.453311 0.032158 \n1 2014 301.035519 1.610082 27.001366 37.858379 0.016393 \n2 2013 252.247945 0.697753 27.483105 35.196689 0.017146 \n3 2012 249.158904 1.407068 28.009132 36.552055 0.031678 \n4 2011 232.104110 1.371616 30.222603 37.326941 0.086370 \n0 2015 106.497260 5.482808 23.849087 65.878425 0.061587 \n1 2014 104.532787 1.301858 23.949226 63.342668 0.050979 \n2 2013 87.149315 1.052904 24.145434 63.423059 0.081233 \n3 2012 86.419178 0.945671 24.631050 60.087557 0.074178 \n4 2011 91.602740 0.901753 25.017466 62.811986 0.226941 \n0 2015 75.551104 0.000000 27.694406 66.351484 0.115354 \n1 2014 74.654538 0.000000 27.136043 67.408698 0.181432 \n2 2013 73.757972 0.000000 27.697374 66.217580 0.125240 \n3 2012 92.953771 1.575260 28.235616 64.683219 0.083447 \n4 2011 107.950685 1.271740 28.382763 67.288242 0.346090 \n0 2015 161.479452 2.380151 24.734703 55.760274 0.048790 \n1 2014 136.199454 1.129003 24.312500 57.611758 0.054474 \n2 2013 104.343836 0.220877 24.746918 57.843579 0.060502 \n3 2012 105.490411 0.870411 25.424886 56.199429 0.055639 \n4 2011 102.942466 0.864027 26.176256 53.920776 0.204041 \n0 2015 308.276013 12.862882 27.936073 46.130251 0.063881 \n1 2014 323.836867 13.326556 27.604850 48.134677 0.067953 \n2 2013 283.153320 91.218036 27.779452 45.797146 0.056644 \n3 2012 622.550685 33.369493 28.118379 44.859817 0.062089 \n4 2011 519.804110 25.995068 28.867009 49.020890 0.275445 \n\n Population Malaria_Cases Malaria_Death Dengue_Cases Kalaazar_Cases \\\n0 52618984 25152 3 805 4 \n1 51841364 33393 20 910 0 \n2 51075235 34949 5 2299 7 \n3 50320429 24699 2 1209 0 \n4 49576777 13351 0 776 1 \n0 17818108 169 0 847 0 \n1 17554786 251 0 5574 6 \n2 17295356 413 0 2093 11 \n3 17039760 382 0 1131 19 \n4 16787941 303 0 6259 92 \n0 64844321 36859 0 2842 354 \n1 63886031 44319 11 6408 595 \n2 62941902 24237 0 3924 995 \n3 62011726 16466 0 405 1962 \n4 61095297 10170 5 2285 1482 \n0 119270021 93818 58 6485 0 \n1 117507410 139198 96 5610 0 \n2 115770847 96577 118 2931 0 \n3 114059948 58517 200 1138 0 \n4 112374333 37068 227 1485 0 \n0 76574228 14988 0 2074 1 \n1 75442589 17086 0 6122 4 \n2 74327674 22171 0 12826 6 \n3 73229235 18869 3 2501 5 \n4 72147030 13075 1 2051 12 \n0 212073536 55437 0 119 11 \n1 208939444 64606 0 1414 11 \n2 205851669 56968 0 342 5 \n3 202809526 47400 0 155 11 \n4 199812341 41891 0 960 14 \n\n Rainfall Malaria_Incidence M_inc \n0 857.3 47.800239 -1 \n1 746.4 64.413814 1 \n2 1348.5 68.426508 1 \n3 1008.6 49.083445 -1 \n4 753.1 26.929947 -1 \n0 435.3 0.948473 -1 \n1 305.5 1.429810 -1 \n2 461.2 2.387924 1 \n3 391.2 2.241816 1 \n4 512.5 1.804867 -1 \n0 1112.5 56.842295 1 \n1 1184.2 69.371973 1 \n2 1110.7 38.506939 -1 \n3 877.8 26.553043 -1 \n4 1087.2 16.646126 -1 \n0 644.5 78.660169 -1 \n1 838.0 118.458912 1 \n2 836.0 83.420829 -1 \n3 689.8 51.303723 -1 \n4 852.6 32.986180 -1 \n0 1204.6 19.573165 -1 \n1 913.0 22.647685 -1 \n2 741.9 29.828728 1 \n3 636.1 25.767031 1 \n4 926.5 18.122714 -1 \n0 603.3 26.140461 -1 \n1 701.8 30.920921 1 \n2 1041.4 27.674296 1 \n3 602.4 23.371683 -1 \n4 781.2 20.965172 -1 \n"
],
[
"from sklearn.datasets import make_classification\nfrom sklearn.feature_selection import RFE\nfrom sklearn.tree import DecisionTreeClassifier\n# define dataset\ny = df.drop(columns=['Year','Malaria_Cases','Malaria_Death','Dengue_Cases','Kalaazar_Cases','Malaria_Incidence', 'AQI','CO','Temp','Humidity','Precipitation','Rainfall','Population'],axis=1)\n# = x1.values.reshape(1,-1)\nx = df.drop(columns=['Year','Malaria_Cases','Malaria_Death','Dengue_Cases','Kalaazar_Cases','Malaria_Incidence','M_inc'],axis=1)\nprint(x)\nprint(y)\n",
" AQI CO Temp Humidity Precipitation Population \\\n0 127.690608 0.748287 26.819635 51.720091 0.039897 52618984 \n1 121.851093 0.821107 26.253301 54.376480 0.045048 51841364 \n2 112.439726 0.254301 27.177968 47.469749 0.041450 51075235 \n3 97.556164 0.621726 27.814384 47.174087 0.047454 50320429 \n4 93.980822 0.570110 28.015639 55.852740 0.235377 49576777 \n0 297.024658 5.255151 26.665183 38.453311 0.032158 17818108 \n1 301.035519 1.610082 27.001366 37.858379 0.016393 17554786 \n2 252.247945 0.697753 27.483105 35.196689 0.017146 17295356 \n3 249.158904 1.407068 28.009132 36.552055 0.031678 17039760 \n4 232.104110 1.371616 30.222603 37.326941 0.086370 16787941 \n0 106.497260 5.482808 23.849087 65.878425 0.061587 64844321 \n1 104.532787 1.301858 23.949226 63.342668 0.050979 63886031 \n2 87.149315 1.052904 24.145434 63.423059 0.081233 62941902 \n3 86.419178 0.945671 24.631050 60.087557 0.074178 62011726 \n4 91.602740 0.901753 25.017466 62.811986 0.226941 61095297 \n0 75.551104 0.000000 27.694406 66.351484 0.115354 119270021 \n1 74.654538 0.000000 27.136043 67.408698 0.181432 117507410 \n2 73.757972 0.000000 27.697374 66.217580 0.125240 115770847 \n3 92.953771 1.575260 28.235616 64.683219 0.083447 114059948 \n4 107.950685 1.271740 28.382763 67.288242 0.346090 112374333 \n0 161.479452 2.380151 24.734703 55.760274 0.048790 76574228 \n1 136.199454 1.129003 24.312500 57.611758 0.054474 75442589 \n2 104.343836 0.220877 24.746918 57.843579 0.060502 74327674 \n3 105.490411 0.870411 25.424886 56.199429 0.055639 73229235 \n4 102.942466 0.864027 26.176256 53.920776 0.204041 72147030 \n0 308.276013 12.862882 27.936073 46.130251 0.063881 212073536 \n1 323.836867 13.326556 27.604850 48.134677 0.067953 208939444 \n2 283.153320 91.218036 27.779452 45.797146 0.056644 205851669 \n3 622.550685 33.369493 28.118379 44.859817 0.062089 202809526 \n4 519.804110 25.995068 28.867009 49.020890 0.275445 199812341 \n\n Rainfall \n0 857.3 \n1 746.4 \n2 1348.5 \n3 1008.6 \n4 753.1 \n0 435.3 \n1 305.5 \n2 461.2 \n3 391.2 \n4 512.5 \n0 1112.5 \n1 1184.2 \n2 1110.7 \n3 877.8 \n4 1087.2 \n0 644.5 \n1 838.0 \n2 836.0 \n3 689.8 \n4 852.6 \n0 1204.6 \n1 913.0 \n2 741.9 \n3 636.1 \n4 926.5 \n0 603.3 \n1 701.8 \n2 1041.4 \n3 602.4 \n4 781.2 \n M_inc\n0 -1\n1 1\n2 1\n3 -1\n4 -1\n0 -1\n1 -1\n2 1\n3 1\n4 -1\n0 1\n1 1\n2 -1\n3 -1\n4 -1\n0 -1\n1 1\n2 -1\n3 -1\n4 -1\n0 -1\n1 -1\n2 1\n3 1\n4 -1\n0 -1\n1 1\n2 1\n3 -1\n4 -1\n"
],
[
"# define RFE\nrfe = RFE(estimator=DecisionTreeClassifier(), n_features_to_select=3)\n# fit RFE\nrfe.fit(x, y)\n# summarize all features\nfor i in range(x.shape[1]):\n\tprint('Column: %d, Selected %s, Rank: %.3f' % (i, rfe.support_[i], rfe.ranking_[i]))\n\n#Temp, Humidty and Rainfall takes the top place",
"Column: 0, Selected False, Rank: 5.000\nColumn: 1, Selected True, Rank: 1.000\nColumn: 2, Selected False, Rank: 3.000\nColumn: 3, Selected False, Rank: 2.000\nColumn: 4, Selected True, Rank: 1.000\nColumn: 5, Selected False, Rank: 4.000\nColumn: 6, Selected True, Rank: 1.000\n"
],
[
"label = df['M_inc']\ndf = df.drop(columns=['Year','Malaria_Cases','Malaria_Death','Dengue_Cases','Kalaazar_Cases','Malaria_Incidence','M_inc'],axis=1)\nheaders = df.columns\nminimum = list(map(lambda x: round(x,4),df.min()))\nmean = list(map(lambda x: round(x,4),df.mean()))\nmaximum = list(map(lambda x: round(x,4),df.max()))\nstd =list(map(lambda x: round(x,4),df.std()))\n\nbefore_scaling=[]\nbefore_scaling.append(['Feature','Min','Mean','Max','Std. Dev'])\nbefore_scaling.append(['M_inci',label.min(),label.mean(),label.max(),label.std()])\nfor i in range(len(headers)):\n before_scaling.append([headers[i],minimum[i],mean[i],maximum[i],std[i]])\n\nprint(\"\\nBEFORE FEATURE SCALING\")\ntable1 = Texttable()\ntable1.add_rows(before_scaling)\nprint(table1.draw())\nprint(\"\\n\")",
"\nBEFORE FEATURE SCALING\n+---------------+----------+--------------+-----------+--------------+\n| Feature | Min | Mean | Max | Std. Dev |\n+===============+==========+==============+===========+==============+\n| M_inci | -1 | -0.267 | 1 | 0.980 |\n+---------------+----------+--------------+-----------+--------------+\n| AQI | 73.758 | 181.808 | 622.551 | 134.776 |\n+---------------+----------+--------------+-----------+--------------+\n| CO | 0 | 6.938 | 91.218 | 17.707 |\n+---------------+----------+--------------+-----------+--------------+\n| Temp | 23.849 | 26.730 | 30.223 | 1.665 |\n+---------------+----------+--------------+-----------+--------------+\n| Humidity | 35.197 | 53.492 | 67.409 | 10.239 |\n+---------------+----------+--------------+-----------+--------------+\n| Precipitation | 0.016 | 0.096 | 0.346 | 0.083 |\n+---------------+----------+--------------+-----------+--------------+\n| Population | 16787941 | 87896594.933 | 2.121e+08 | 61416452.214 |\n+---------------+----------+--------------+-----------+--------------+\n| Rainfall | 305.500 | 806.837 | 1348.500 | 258.264 |\n+---------------+----------+--------------+-----------+--------------+\n\n\n"
],
[
"df.shape",
"_____no_output_____"
],
[
"from sklearn import preprocessing\ndf = pd.DataFrame(preprocessing.scale(df.iloc[:,0:7]))\n\nminimum = list(map(lambda x: round(x,4),df.min()))\nmean = list(map(lambda x: round(x,4),df.mean()))\nmaximum = list(map(lambda x: round(x,4),df.max()))\nstd =list(map(lambda x: round(x,4),df.std()))\n\nafter_scaling=[]\nafter_scaling.append(['Feature','Min','Mean','Max','Std. Dev'])\nafter_scaling.append(['M_inci',label.min(),label.mean(),label.max(),label.std()])\nfor i in range(len(headers)):\n after_scaling.append([headers[i],minimum[i],mean[i],maximum[i],std[i]])\n\nprint(\"\\nAFTER FEATURE SCALING\")\ntable2 = Texttable()\ntable2.add_rows(after_scaling)\nprint(table2.draw())\nprint(\"\\n\")",
"\nAFTER FEATURE SCALING\n+---------------+--------+--------+-------+----------+\n| Feature | Min | Mean | Max | Std. Dev |\n+===============+========+========+=======+==========+\n| M_inci | -1 | -0.267 | 1 | 0.980 |\n+---------------+--------+--------+-------+----------+\n| AQI | -0.815 | 0 | 3.326 | 1.017 |\n+---------------+--------+--------+-------+----------+\n| CO | -0.399 | 0 | 4.841 | 1.017 |\n+---------------+--------+--------+-------+----------+\n| Temp | -1.760 | 0 | 2.133 | 1.017 |\n+---------------+--------+--------+-------+----------+\n| Humidity | -1.817 | 0 | 1.382 | 1.017 |\n+---------------+--------+--------+-------+----------+\n| Precipitation | -0.981 | 0 | 3.068 | 1.017 |\n+---------------+--------+--------+-------+----------+\n| Population | -1.178 | 0 | 2.056 | 1.017 |\n+---------------+--------+--------+-------+----------+\n| Rainfall | -1.974 | 0 | 2.133 | 1.017 |\n+---------------+--------+--------+-------+----------+\n\n\n"
],
[
"from sklearn.decomposition import PCA\n\npca = PCA(n_components=len(df.columns))\npca.fit_transform(df)\ncomponents = abs(pca.components_)\neigen_values = pca.explained_variance_\nratio_values = pca.explained_variance_ratio_\nplotting.ylabel(\"Eigen values\")\nplotting.xlabel(\"Number of features\")\nplotting.title(\"PCA eigen values\")\nplotting.ylim(0, max(eigen_values))\nplotting.xticks([1,2,3,4,5,6,7,8,9,10,15,20,25,30])\nplotting.style.context('seaborn-whitegrid')\nplotting.axhline(y=1,color='r',linestyle='--')\nplotting.plot(eigen_values)\nplotting.figure(figsize=(500,500))\nplotting.show()\nprint(\"\\n\")",
"_____no_output_____"
],
[
"tableList=[]\ntableList.append([\"NC\",\"SP\",\"EV\",\"CEV\"])\nfor i in range(len(eigen_values)):\n total=0\n for j in range(i+1):\n total+=ratio_values[j]\n tableList.append([i+1,round(eigen_values[i],2),round(ratio_values[i],2),round(total*100,2)])\n\n\nprint(\"\\nPCA Table\")\ntable3 = Texttable()\ntable3.add_rows(tableList)\nprint(table3.draw())\nprint(\"\\n\")\n",
"\nPCA Table\n+----+-------+-------+--------+\n| NC | SP | EV | CEV |\n+====+=======+=======+========+\n| 1 | 2.880 | 0.400 | 39.720 |\n+----+-------+-------+--------+\n| 2 | 1.880 | 0.260 | 65.720 |\n+----+-------+-------+--------+\n| 3 | 1.240 | 0.170 | 82.890 |\n+----+-------+-------+--------+\n| 4 | 0.500 | 0.070 | 89.780 |\n+----+-------+-------+--------+\n| 5 | 0.390 | 0.050 | 95.230 |\n+----+-------+-------+--------+\n| 6 | 0.270 | 0.040 | 98.990 |\n+----+-------+-------+--------+\n| 7 | 0.070 | 0.010 | 100 |\n+----+-------+-------+--------+\n\n\n"
],
[
"pca_new = PCA(n_components=3)\ndf = pca_new.fit_transform(df)\n\nX = pd.DataFrame(df)\nY = pd.DataFrame(label)\nprint(df.shape)\nprint(X.shape)\nprint(Y.shape)",
"(30, 3)\n(30, 3)\n(30, 1)\n"
],
[
"from sklearn.model_selection import train_test_split\n\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3, random_state=0)\nprint('Original Data Set',df.shape)\nprint('Shape of X training set : ',X_train.shape,' || Shape of test set : ',X_test.shape) \nprint('Shape of Y training set : ',Y_train.shape,' || Shape of test set : ',Y_test.shape) ",
"Original Data Set (30, 3)\nShape of X training set : (21, 3) || Shape of test set : (9, 3)\nShape of Y training set : (21, 1) || Shape of test set : (9, 1)\n"
],
[
"table_report=[]\ntable_report.append([\"Model\",\"Acc\",\"Prec\",\"Recall\",\"F1\"])\nglobal count_lis\ncount_lis = 1",
"_____no_output_____"
],
[
"#PRINT FUNCTION\n\nfrom sklearn.metrics import confusion_matrix, accuracy_score, classification_report\n\ndef truncate(f, n):\n return np.floor(f * 10 ** n) / 10 ** n\n\ndef evaluate(sco, model, X_train, X_test, y_train, y_test):\n global count_lis\n y_test_pred = model.predict(X_test)\n y_train_pred = model.predict(X_train)\n\n clf_report = pd.DataFrame(classification_report(y_train, y_train_pred, output_dict=True))\n acc = round(sco*100,2)\n #print(f\"Accuracy %s\" % round(accuracy_score(y_train, y_train_pred)*100,2))\n lisp =[]\n lisp = truncate(clf_report.mean(axis = 1).astype(float),2)\n table_report.append([count_lis,acc,lisp['precision'],lisp['recall'],lisp['f1-score']])\n count_lis = count_lis+1",
"_____no_output_____"
],
[
"import warnings\nwarnings.filterwarnings(\"ignore\")",
"_____no_output_____"
],
[
"#PERCEPTRON\nfrom sklearn.model_selection import cross_val_score, cross_val_predict\nfrom sklearn.linear_model import Perceptron\n\npct_clf = Perceptron()\nsc = cross_val_score(pct_clf, X_train, Y_train, scoring='accuracy' ,cv=10).mean()\n\npct_clf.fit(X_train, Y_train)\nevaluate(sc, pct_clf, X_train, X_test, Y_train, Y_test)\n\nscores = {\n 'Perceptron': {\n 'Train': accuracy_score(Y_train, pct_clf.predict(X_train)),\n 'Test': accuracy_score(Y_test, pct_clf.predict(X_test)),\n },\n}\n\nprint(\"Perceptron Accuracy :- %s\" % round(sc*100,2))\nprint(pct_clf.predict(X_test))",
"Perceptron Accuracy :- 66.67\n[-1 -1 1 1 -1 -1 -1 1 -1]\n"
],
[
"#XGBOOST\n\nfrom xgboost import XGBClassifier\n\nxgb_clf = XGBClassifier(eval_metric='mlogloss')\nsc = cross_val_score(xgb_clf, X_train, Y_train, scoring='accuracy', cv=10).mean()\n\nxgb_clf.fit(X_train, Y_train)\nevaluate(sc, xgb_clf, X_train, X_test, Y_train, Y_test)\n\nscores['xgboost'] = {\n 'Train': accuracy_score(Y_train, xgb_clf.predict(X_train)),\n 'Test': accuracy_score(Y_test, xgb_clf.predict(X_test)),\n }\n\nprint(\"XGBoost Accuracy :- %s\" % round((sc)*100,2))\nprint(xgb_clf.predict(X_test))",
"XGBoost Accuracy :- 65.0\n[-1 -1 -1 -1 -1 -1 -1 -1 -1]\n"
],
[
"#KNN\nfrom sklearn.neighbors import KNeighborsClassifier\n\nknn_clf = KNeighborsClassifier()\nsc = cross_val_score(knn_clf, X_train, Y_train, scoring='accuracy' ,cv=10).mean()\n\nknn_clf.fit(X_train, Y_train)\nevaluate(sc, knn_clf, X_train, X_test, Y_train, Y_test)\n\nscores['KNN'] = {\n 'Train': accuracy_score(Y_train, knn_clf.predict(X_train)),\n 'Test': accuracy_score(Y_test, knn_clf.predict(X_test)),\n }\n\nprint(\"KNN Accuracy after Norm and PCA :- %s\" % round(sc*100,2))\nprint(knn_clf.predict(X_test))",
"KNN Accuracy after Norm and PCA :- 51.67\n[-1 -1 -1 -1 -1 -1 -1 -1 -1]\n"
],
[
"from sklearn.ensemble import VotingClassifier\nfrom sklearn.linear_model import LogisticRegression\nestimators = []\n\nlog_flag = LogisticRegression()\nestimators.append(('Logistic', log_flag))\n\npct_flag = Perceptron()\nestimators.append(('Percept', pct_flag))\n\nxgb_flag = XGBClassifier(eval_metric='mlogloss')\nestimators.append(('XGBboost', xgb_flag))\n\nknn_flag = KNeighborsClassifier()\nestimators.append(('KNN', knn_flag))\n\n#By default its hard voting\nvoting = VotingClassifier(estimators=estimators)\nvoting.fit(X_train, Y_train)\n\n#acc = round(accuracy_score(Y_train, Y_train_pred)*100,2)\nacc = round(cross_val_score(voting, X_train, Y_train ,scoring='accuracy',cv=10).mean()*100,2)\n\nglobal count_lis\nY_test_pred = voting.predict(X_test)\nY_train_pred = voting.predict(X_train)\n\nclf_report = pd.DataFrame(classification_report(Y_train, Y_train_pred, output_dict=True))\n\nlisp =[]\nlisp = truncate(clf_report.mean(axis = 1).astype(float),2)\ntable_report.append([count_lis,acc,lisp['precision'],lisp['recall'],lisp['f1-score']])\n\nscores['Voting'] = {\n 'Train': accuracy_score(Y_train, voting.predict(X_train)),\n 'Test': accuracy_score(Y_test, voting.predict(X_test)),\n }\n",
"_____no_output_____"
],
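The ensemble above relies on the default hard (majority) voting. Below is a hedged sketch of the soft-voting alternative; this is an extension I am assuming rather than something the notebook does, and the `Perceptron` is left out because it does not implement `predict_proba`, which soft voting requires.

```python
from sklearn.ensemble import VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from xgboost import XGBClassifier

# Soft voting averages class probabilities, so every member must expose predict_proba.
soft_voting = VotingClassifier(
    estimators=[('Logistic', LogisticRegression()),
                ('XGBoost', XGBClassifier(eval_metric='mlogloss')),
                ('KNN', KNeighborsClassifier())],
    voting='soft')

soft_voting.fit(X_train, Y_train)
print("Soft-voting test accuracy:", soft_voting.score(X_test, Y_test))
```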
[
"print(\"\\nPREDICTION RESULTS\")\nprint(\" 1. PERCEPTRO\\n 2. XGBOOST\\n 3. KNN (K-NEAREST NEIGHBOR)\\n 4. ENSEMBLE VOTING CLASSIFIER\")\ntable4 = Texttable()\ntable4.add_rows(table_report)\nprint(table4.draw())",
"\nPREDICTION RESULTS\n 1. PERCEPTRO\n 2. XGBOOST\n 3. KNN (K-NEAREST NEIGHBOR)\n 4. ENSEMBLE VOTING CLASSIFIER\n+-------+--------+-------+--------+-------+\n| Model | Acc | Prec | Recall | F1 |\n+=======+========+=======+========+=======+\n| 1 | 66.670 | 0.520 | 0.530 | 0.520 |\n+-------+--------+-------+--------+-------+\n| 2 | 65 | 0.920 | 0.860 | 0.880 |\n+-------+--------+-------+--------+-------+\n| 3 | 51.670 | 0.720 | 0.710 | 0.710 |\n+-------+--------+-------+--------+-------+\n| 4 | 71.670 | 0.840 | 0.650 | 0.630 |\n+-------+--------+-------+--------+-------+\n"
],
[
"scores_df = pd.DataFrame(scores)\n\nscoresList=[]\nscoresList.append([\"Model\",\"Train Acc\",\"Test Acc\",\"Diff Accuracy\"])\nfor i in scores_df:\n li = list(scores_df[i])\n scoresList.append([i,round(li[0],4),round(li[1],4),\"{:.2f}%\".format(round(li[1]/li[0],4)*100)])\n\ntable5 = Texttable() \ntable5.add_rows(scoresList)\nprint(table5.draw())",
"+------------+-----------+----------+---------------+\n| Model | Train Acc | Test Acc | Diff Accuracy |\n+============+===========+==========+===============+\n| Perceptron | 0.619 | 0.556 | 89.74% |\n+------------+-----------+----------+---------------+\n| xgboost | 0.905 | 0.444 | 49.12% |\n+------------+-----------+----------+---------------+\n| KNN | 0.762 | 0.444 | 58.33% |\n+------------+-----------+----------+---------------+\n| Voting | 0.762 | 0.444 | 58.33% |\n+------------+-----------+----------+---------------+\n"
],
[
"scores_df.plot(kind='bar', figsize=(15, 8))\nplotting.show()",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a261bec2241224749b122f7f39ef468bb7df779
| 66,525 |
ipynb
|
Jupyter Notebook
|
notebooks/analysis2.ipynb
|
data301-2021-winter1/project-group41-project
|
bb042f15ed9ab7cf14316c4c490416124df688d1
|
[
"MIT"
] | null | null | null |
notebooks/analysis2.ipynb
|
data301-2021-winter1/project-group41-project
|
bb042f15ed9ab7cf14316c4c490416124df688d1
|
[
"MIT"
] | null | null | null |
notebooks/analysis2.ipynb
|
data301-2021-winter1/project-group41-project
|
bb042f15ed9ab7cf14316c4c490416124df688d1
|
[
"MIT"
] | null | null | null | 65.671273 | 8,496 | 0.698098 |
[
[
[
"# Anisha Parikh",
"_____no_output_____"
],
[
"## Research question/interests\nMy research question is what are the top 10 most remembered songs and the bottom 10 least remembered songs. As well as how does recollection of the songs compare across generations.",
"_____no_output_____"
],
[
"Imports",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns \nimport project_functions2 as pf\nfrom pandas_profiling import ProfileReport",
"_____no_output_____"
],
[
"df = pd.read_csv(\"../data/raw/recognition_by_generation.csv\")\ndf",
"_____no_output_____"
],
[
"df.describe().T",
"_____no_output_____"
],
[
"ProfileReport(df).to_notebook_iframe()",
"_____no_output_____"
]
],
[
[
"## Milestone 3",
"_____no_output_____"
],
[
"Data is trimmed to each generation and the average of generations into three different datasets. Then it is reordered from least recognized to most recognized (top to bottom). ",
"_____no_output_____"
],
[
" steps\n1. remove table\n2. create new column of avg between gen \n3. drop irrelevant columns based on which data sheet\n4. sort the values from greatest to least according to recognition of the cloumn left\n5. reindex values\n6. created new sheets in different orders\n7. create graphs which indicate how many songs were remembered at which ",
"_____no_output_____"
]
],
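The real implementation lives in `project_functions2.py`, which is not shown in this notebook. The following is only a hedged sketch of a helper following the steps above; the column names `mean_millennial_recognition`, `mean_gen_z_recognition` and `avg_across_gen` are taken from the warning output further down, everything else is an assumption.

```python
import pandas as pd

def load_and_process(path):
    """Sketch of the steps above: load, add an average column, return three sorted views."""
    raw = pd.read_csv(path)

    # Step 2: average recognition across the two generations.
    raw['avg_across_gen'] = raw[['mean_millennial_recognition',
                                 'mean_gen_z_recognition']].mean(axis=1)

    # Steps 3-6: keep one recognition column per view, sort by it, and reindex.
    mill = (raw.drop(columns=['mean_gen_z_recognition', 'avg_across_gen'])
               .sort_values('mean_millennial_recognition')
               .reset_index(drop=True))
    gen_z = (raw.drop(columns=['mean_millennial_recognition', 'avg_across_gen'])
                .sort_values('mean_gen_z_recognition')
                .reset_index(drop=True))
    avg = (raw.drop(columns=['mean_millennial_recognition', 'mean_gen_z_recognition'])
              .sort_values('avg_across_gen')
              .reset_index(drop=True))
    return mill, gen_z, avg
```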
[
[
"(Mill, GenZ, Avg) = pf.load_and_process(\"../data/raw/recognition_by_generation.csv\") ",
"C:\\users\\anish\\OneDrive\\Documents\\Year 2 Term 1\\DATA301\\project-group41-project\\notebooks\\project_functions2.py:10: FutureWarning: In a future version of pandas all arguments of DataFrame.drop except for the argument 'labels' will be keyword-only\n nintiesRecData.sort_values(by=['mean_millennial_recognition'])\nC:\\users\\anish\\OneDrive\\Documents\\Year 2 Term 1\\DATA301\\project-group41-project\\notebooks\\project_functions2.py:17: FutureWarning: In a future version of pandas all arguments of DataFrame.drop except for the argument 'labels' will be keyword-only\n GenZ = (nintiesRecData.sort_values(by=['mean_gen_z_recognition'])\nC:\\users\\anish\\OneDrive\\Documents\\Year 2 Term 1\\DATA301\\project-group41-project\\notebooks\\project_functions2.py:24: FutureWarning: In a future version of pandas all arguments of DataFrame.drop except for the argument 'labels' will be keyword-only\n Avg = (nintiesRecData.sort_values(by=['avg_across_gen'])\n"
],
[
"#Songs recognized by millennials\nMill",
"_____no_output_____"
],
[
"#Songs recognized by generation z\nGenZ",
"_____no_output_____"
],
[
"#Average of the songs recognized by both generation\nAvg",
"_____no_output_____"
]
],
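To answer the research question directly (the ten most and ten least remembered songs), a short sketch working on the averaged view; it assumes `Avg` keeps a song-title column alongside `avg_across_gen`, which is an assumption on my part:

```python
# Ten most and ten least recognized songs, averaged across both generations.
top_10 = Avg.nlargest(10, 'avg_across_gen')
bottom_10 = Avg.nsmallest(10, 'avg_across_gen')

print(top_10)
print(bottom_10)
```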
[
[
"# Graphs",
"_____no_output_____"
],
[
"## Distribution Plot",
"_____no_output_____"
]
],
[
[
"sns.displot(data = Mill, bins = 15)",
"_____no_output_____"
],
[
"sns.displot(data = GenZ, bins = 15)",
"_____no_output_____"
],
[
"sns.displot(data = Avg, bins = 15)",
"_____no_output_____"
]
],
[
[
"From the graphs above it can be seen that there is overall low recognition for 90s songs. However, Millenials remember more songs better. This is also shown in the boxplot below.",
"_____no_output_____"
]
],
[
[
"sns.boxplot(data = df)",
"_____no_output_____"
]
],
[
[
"# Analysis",
"_____no_output_____"
],
[
"The charts above, as mentioned, show that mellinnials recognize more songs from the 90s than generaztion Z does. That being said it can also be seen that when the average of both generations is taken, there is a very low recognition of songs in general. ",
"_____no_output_____"
],
[
"The datasets also show which songs were rememered most and least by each generation. ",
"_____no_output_____"
],
[
"For millennials the top most rememebered songs include:\n1. Hit Me Baby One More Time by Britney Spears\n2. Believe by Cher\n3. Wannabe by Spice Girls\n4. All Star by Smash Mouth\n5. Mambo No. 5 by Lou Bega",
"_____no_output_____"
],
[
"The songs least remembered by that same generation are:\n \n 1. Real, Real, Real by Jesus Jones\n 2. I'd Die without you by PM Dawn\n 3. This House by Tracie Spencer\n 4. Hold You Tight by Tara Kemp\n 5. Love Will Lead Back by Taylor Dane",
"_____no_output_____"
],
[
"For generation z the top most rememebered songs include:\n1. My Heart Will Go On by Celine Dion\n2. Hit Me Baby One More Time by Britney Spears\n3. Wannabe by Spice Girls\n4. All Star by Smash Mouth\n5. Mambo No. 5 by Lou Bega",
"_____no_output_____"
],
[
"The songs least remembered by that same generation are:\n \n 1. Kickin' the Boots by H Town\n 2. Changing Faces by Stroke You Up\n 3. I'd Die without you by PM Dawn\n 4. Look into my Eyes by Bones Thugs N Harmony\n 5. Love Will Lead Back by Taylor Dane",
"_____no_output_____"
],
[
"The lists featuring the top songs might have the implication that songs written and performed by female artists might be more successful in order to gain better recognition\n\nHowever, the lists featuring the least most recognized songs discredits the previous claim as it also contains a majority of female artists. That being said it could be implied that the dataset contains more female artists, or the gender identity of the artist does not contribute to their ability to be recognized. ",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
4a262330e1bda2a6e998346fe1266267612b6a6b
| 20,555 |
ipynb
|
Jupyter Notebook
|
tutorial_analysis.ipynb
|
ccbrain/nipype_tutorial
|
2f7f17bf0a902e273839f0c0dc8eaf61eee13351
|
[
"BSD-3-Clause"
] | null | null | null |
tutorial_analysis.ipynb
|
ccbrain/nipype_tutorial
|
2f7f17bf0a902e273839f0c0dc8eaf61eee13351
|
[
"BSD-3-Clause"
] | null | null | null |
tutorial_analysis.ipynb
|
ccbrain/nipype_tutorial
|
2f7f17bf0a902e273839f0c0dc8eaf61eee13351
|
[
"BSD-3-Clause"
] | null | null | null | 37.577697 | 293 | 0.557091 |
[
[
[
"# Hands-on 2: How to create a fMRI analysis workflow\n\nThe purpose of this section is that you setup a fMRI analysis workflow. \n",
"_____no_output_____"
],
[
"# 1st-level Analysis Workflow Structure\n\nIn this notebook we will create a workflow that performs 1st-level analysis and normalizes the resulting beta weights to the MNI template. In concrete steps this means:\n\n 1. Specify 1st-level model parameters\n 2. Specify 1st-level contrasts\n 3. Estimate 1st-level contrasts\n 4. Normalize 1st-level contrasts",
"_____no_output_____"
],
[
"## Imports\n\nIt's always best to have all relevant module imports at the beginning of your script. So let's import what we most certainly need.",
"_____no_output_____"
]
],
[
[
"from nilearn import plotting\n%matplotlib inline\n\n# Get the Node and Workflow object\nfrom nipype import Node, Workflow\n\n# Specify which SPM to use\nfrom nipype.interfaces.matlab import MatlabCommand\nMatlabCommand.set_default_paths('/opt/spm12-dev/spm12_mcr/spm/spm12')",
"_____no_output_____"
]
],
[
[
"## Create Nodes and Workflow connections\n\nLet's create all the nodes that we need! Make sure to specify all relevant inputs and keep in mind which ones you later on need to connect in your pipeline.\n",
"_____no_output_____"
],
[
"### Specify 1st-level model parameters (stimuli onsets, duration, etc.)",
"_____no_output_____"
],
[
"The specify the 1st-level model we need the subject specific onset times and durations of the stimuli. Luckily, as we are working with a BIDS dataset, this information is nicely stored in a `tsv` file:",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\n# Create the workflow here\nanalysis1st = Workflow(name='work_1st', base_dir='/output/')\ntrialinfo = pd.read_table('/data/ds000114/task-fingerfootlips_events.tsv')\ntrialinfo\n",
"_____no_output_____"
],
[
"import pandas as pd\nfrom nipype.interfaces.base import Bunch\nfor group in trialinfo.groupby('trial_type'):\n print(group)\n print(\"\")\ntrialinfo = pd.read_table('/data/ds000114/task-fingerfootlips_events.tsv')\nconditions = []\nonsets = []\ndurations = []\n\nfor group in trialinfo.groupby('trial_type'):\n conditions.append(group[0])\n onsets.append(list(group[1].onset -10)) # subtracting 10s due to removing of 4 dummy scans\n durations.append(group[1].duration.tolist())\n\nsubject_info = [Bunch(conditions=conditions,\n onsets=onsets,\n durations=durations,\n )]\n\nfrom nipype.algorithms.modelgen import SpecifySPMModel\n\n# Initiate the SpecifySPMModel node here\nmodelspec = Node(SpecifySPMModel(concatenate_runs=False,\n input_units='secs',\n output_units='secs',\n time_repetition=2.5,\n high_pass_filter_cutoff=128,\n subject_info=subject_info),\n name=\"modelspec\")",
"_____no_output_____"
]
],
[
[
"This node will also need some additional inputs, such as the preprocessed functional images, the motion parameters etc. We will specify those once we take care of the workflow data input stream.",
"_____no_output_____"
],
[
"### Specify 1st-level contrasts\n\nTo do any GLM analysis, we need to also define the contrasts that we want to investigate. If we recap, we had three different conditions in the **fingerfootlips** task in this dataset:\n\n- **finger**\n- **foot**\n- **lips**\n\nTherefore, we could create the following contrasts (seven T-contrasts and two F-contrasts):",
"_____no_output_____"
]
],
[
[
"# Condition names\ncondition_names = ['Finger', 'Foot', 'Lips']\n\n# Contrasts\ncont01 = ['average', 'T', condition_names, [1/3., 1/3., 1/3.]]\ncont02 = ['Finger', 'T', condition_names, [1, 0, 0]]\ncont03 = ['Foot', 'T', condition_names, [0, 1, 0]]\ncont04 = ['Lips', 'T', condition_names, [0, 0, 1]]\ncont05 = ['Finger > others','T', condition_names, [1, -0.5, -0.5]]\ncont06 = ['Foot > others', 'T', condition_names, [-0.5, 1, -0.5]]\ncont07 = ['Lips > others', 'T', condition_names, [-0.5, -0.5, 1]]\n\ncont08 = ['activation', 'F', [cont02, cont03, cont04]]\ncont09 = ['differences', 'F', [cont05, cont06, cont07]]\n\ncontrast_list = [cont01, cont02, cont03, cont04, cont05, cont06, cont07, cont08, cont09]",
"_____no_output_____"
]
],
[
[
"### Estimate 1st-level contrasts\n\nBefore we can estimate the 1st-level contrasts, we first need to create the 1st-level design. Here you can also specify what kind of basis function you want (HRF, FIR, Fourier, etc.), if you want to use time and dispersion derivatives and how you want to model the serial correlation.\n\nIn this example I propose that you use an HRF basis function, that we model time derivatives and that we model the serial correlation with AR(1).",
"_____no_output_____"
]
],
[
[
"from nipype.interfaces.spm import Level1Design\n# Initiate the Level1Design node here\nlevel1design = Node(Level1Design(bases={'hrf': {'derivs': [1, 0]}},\n timing_units='secs',\n interscan_interval=2.5,\n model_serial_correlations='AR(1)'),\n name=\"level1design\")\n\n\n# Now that we have the Model Specification and 1st-Level Design node, we can connect them to each other:\n# Connect the two nodes here\nanalysis1st.connect([(modelspec, level1design, [('session_info',\n 'session_info')])])\n\n# Now we need to estimate the model. I recommend that you'll use a Classical: 1 method to estimate the model.\nfrom nipype.interfaces.spm import EstimateModel\n# Initiate the EstimateModel node here\nlevel1estimate = Node(EstimateModel(estimation_method={'Classical': 1}),\n name=\"level1estimate\")\n\n# Now we can connect the 1st-Level Design node with the model estimation node.\n# Connect the two nodes here\nanalysis1st.connect([(level1design, level1estimate, [('spm_mat_file',\n 'spm_mat_file')])])\nfrom nipype.interfaces.spm import EstimateContrast\n# Initiate the EstimateContrast node here\nlevel1conest = Node(EstimateContrast(contrasts=contrast_list),\n name=\"level1conest\")\n\n# Now we can connect the model estimation node with the contrast estimation node.\nanalysis1st.connect([(level1estimate, level1conest, [('spm_mat_file',\n 'spm_mat_file'),\n ('beta_images',\n 'beta_images'),\n ('residual_image',\n 'residual_image')])])",
"_____no_output_____"
]
],
[
[
"## Normalize 1st-level contrasts\n\nNow that the contrasts were estimated in subject space we can put them into a common reference space by normalizing them to a specific template. In this case we will be using SPM12's Normalize routine and normalize to the SPM12 tissue probability map `TPM.nii`.\n\nAt this step you can also specify the voxel resolution of the output volumes. If you don't specify it, it will normalize to a voxel resolution of 2x2x2mm. ",
"_____no_output_____"
]
],
[
[
"from nipype.interfaces.spm import Normalize12\n\n# Location of the template\ntemplate = '/opt/spm12-dev/spm12_mcr/spm/spm12/tpm/TPM.nii'\n\n# Initiate the Normalize12 node here\nnormalize = Node(Normalize12(jobtype='estwrite',\n tpm=template,\n write_voxel_sizes=[2, 2, 2]\n ),\n name=\"normalize\")\n\n# Connect the nodes here\nanalysis1st.connect([(level1conest, normalize, [('spmT_images',\n 'apply_to_files')])\n ])",
"_____no_output_____"
]
],
[
[
"## Datainput with `SelectFiles` and `iterables` ",
"_____no_output_____"
]
],
[
[
"# Import the SelectFiles\nfrom nipype import SelectFiles\n\n# String template with {}-based strings\ntemplates = {'anat': '/data/ds000114/sub-{subj_id}/ses-test/anat/sub-{subj_id}_ses-test_T1w.nii.gz',\n 'func': '/output/datasink_handson/preproc/sub-{subj_id}_detrend.nii.gz',\n 'mc_param': '/output/datasink_handson/preproc/sub-{subj_id}.par',\n 'outliers': '/output/datasink_handson/preproc/art.sub-{subj_id}_outliers.txt'\n }\n\n# Create SelectFiles node\nsf = Node(SelectFiles(templates, sort_filelist=True),\n name='selectfiles')\n\n# Now we can specify over which subjects the workflow should iterate. \n# list of subject identifiers\nsubject_list = ['07']\nsf.iterables = [('subj_id', subject_list)]\n\n\n# Gunzip Node\n\nfrom nipype.algorithms.misc import Gunzip\n# Initiate the two Gunzip node here\ngunzip_anat = Node(Gunzip(), name='gunzip_anat')\ngunzip_func = Node(Gunzip(), name='gunzip_func')\n\n\n# And as a final step, we just need to connect this SelectFiles node to the rest of the workflow.\n# Connect SelectFiles node to the other nodes here\nanalysis1st.connect([(sf, gunzip_anat, [('anat', 'in_file')]),\n (sf, gunzip_func, [('func', 'in_file')]),\n (gunzip_anat, normalize, [('out_file', 'image_to_align')]),\n (gunzip_func, modelspec, [('out_file', 'functional_runs')]),\n (sf, modelspec, [('mc_param', 'realignment_parameters'),\n ('outliers', 'outlier_files'),\n ])\n ])\n\n#Data output with DataSink\n#Now, before we run the workflow, let's again specify a Datasink folder to only keep those files that we want to keep.\nfrom nipype.interfaces.io import DataSink\n# Initiate DataSink node here\n# Initiate the datasink node\noutput_folder = 'datasink_handson'\ndatasink = Node(DataSink(base_directory='/output/',\n container=output_folder),\n name=\"datasink\")\n## Use the following substitutions for the DataSink output\nsubstitutions = [('_subj_id_', 'sub-')]\ndatasink.inputs.substitutions = substitutions\n\n# Connect nodes to datasink here\nanalysis1st.connect([(level1conest, datasink, [('spm_mat_file', '1stLevel.@spm_mat'),\n ('spmT_images', '1stLevel.@T'),\n ('spmF_images', '1stLevel.@F'),\n ]),\n (normalize, datasink, [('normalized_files', 'normalized.@files'),\n ('normalized_image', 'normalized.@image'),\n ]),\n ])",
"_____no_output_____"
]
],
[
[
"## Visualize the workflow\n\nNow that the workflow is finished, let's visualize it again.",
"_____no_output_____"
]
],
[
[
"# Create 1st-level analysis output graph\nanalysis1st.write_graph(graph2use='colored', format='png', simple_form=True)\n\n# Visualize the graph\nfrom IPython.display import Image\nImage(filename='/output/work_1st/graph.png')",
"_____no_output_____"
]
],
[
[
"## Run the Workflow\n\nNow that everything is ready, we can run the 1st-level analysis workflow. Change ``n_procs`` to the number of jobs/cores you want to use.",
"_____no_output_____"
]
],
[
[
"analysis1st.run('MultiProc', plugin_args={'n_procs': 4})",
"_____no_output_____"
]
],
[
[
"## Visualize results",
"_____no_output_____"
],
[
"### First, let's look at the 1st-level Design Matrix of subject one, to verify that everything is as it should be.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom scipy.io import loadmat\n\n# Using scipy's loadmat function we can access SPM.mat\nspmmat = loadmat('/output/datasink_handson/1stLevel/sub-07/SPM.mat',\n struct_as_record=False)\ndesignMatrix = spmmat['SPM'][0][0].xX[0][0].X\nnames = [i[0] for i in spmmat['SPM'][0][0].xX[0][0].name[0]]\nnormed_design = designMatrix / np.abs(designMatrix).max(axis=0)\nfig, ax = plt.subplots(figsize=(8, 8))\nplt.imshow(normed_design, aspect='auto', cmap='gray', interpolation='none')\nax.set_ylabel('Volume id')\nax.set_xticks(np.arange(len(names)))\nax.set_xticklabels(names, rotation=90);",
"_____no_output_____"
]
],
[
[
"### Let's look how well the normalization worked.",
"_____no_output_____"
]
],
[
[
"import nibabel as nb\nfrom nilearn.plotting import plot_anat\nfrom nilearn.plotting import plot_glass_brain\n# Load GM probability map of TPM.nii\nimg = nb.load('/opt/spm12-dev/spm12_mcr/spm/spm12/tpm/TPM.nii')\nGM_template = nb.Nifti1Image(img.get_data()[..., 0], img.affine, img.header)\n\n# Plot normalized subject anatomy\ndisplay = plot_anat('/output/datasink_handson/normalized/sub-07/wsub-07_ses-test_T1w.nii',\n dim=-0.1)\n\n# Overlay in edges GM map\ndisplay.add_edges(GM_template)\n\n# Plot raw subject anatomy\ndisplay = plot_anat('/data/ds000114/sub-07/ses-test/anat/sub-07_ses-test_T1w.nii.gz',\n dim=-0.1)\n\n# Overlay in edges GM map\ndisplay.add_edges(GM_template)",
"_____no_output_____"
]
],
[
[
"### Let's look at the contrasts of one subject that we've just computed.",
"_____no_output_____"
]
],
[
[
"from nilearn.plotting import plot_stat_map\nanatimg = '/data/ds000114/sub-07/ses-test/anat/sub-07_ses-test_T1w.nii.gz'\nplot_stat_map('/output/datasink_handson/1stLevel/sub-07/spmT_0001.nii', title='average',\n bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);\nplot_stat_map('/output/datasink_handson/1stLevel/sub-07/spmT_0002.nii', title='finger',\n bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);\nplot_stat_map('/output/datasink_handson/1stLevel/sub-07/spmT_0003.nii', title='foot',\n bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);\nplot_stat_map('/output/datasink_handson/1stLevel/sub-07/spmT_0004.nii', title='lip',\n bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);",
"_____no_output_____"
]
],
[
[
"### We can also check three additional contrasts Finger > others, Foot > others and Lips > others.",
"_____no_output_____"
]
],
[
[
"plot_stat_map('/output/datasink_handson/1stLevel/sub-07/spmT_0005.nii', title='fingers > other',\n bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);\nplot_stat_map('/output/datasink_handson/1stLevel/sub-07/spmT_0006.nii', title='foot > other',\n bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);\nplot_stat_map('/output/datasink_handson/1stLevel/sub-07/spmT_0007.nii', title='lip > other',\n bg_img=anatimg, threshold=3, display_mode='y', cut_coords=(-5, 0, 5, 10, 15), dim=-1);",
"_____no_output_____"
]
],
[
[
"### We can plot the normalized results over a template brain",
"_____no_output_____"
]
],
[
[
"plot_glass_brain('/output/datasink_handson/normalized/sub-07/wspmT_0005.nii',\n colorbar=True, display_mode='lyrz', black_bg=True, threshold=3,\n title='fingers>other');\nplot_glass_brain('/output/datasink_handson/normalized/sub-07/wspmT_0006.nii',\n colorbar=True, display_mode='lyrz', black_bg=True, threshold=3,\n title='foot>other');\nplot_glass_brain('/output/datasink_handson/normalized/sub-07/wspmT_0007.nii',\n colorbar=True, display_mode='lyrz', black_bg=True, threshold=3,\n title='lip>other');",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a262c1b2e0e25a5c9bdb69a076e80756209bbab
| 101,851 |
ipynb
|
Jupyter Notebook
|
examples/notebook/examples/bus_driver_scheduling_sat.ipynb
|
jspricke/or-tools
|
45770b833997f827d322e929b1ed4781c4e60d44
|
[
"Apache-2.0"
] | 1 |
2020-07-18T16:24:09.000Z
|
2020-07-18T16:24:09.000Z
|
examples/notebook/examples/bus_driver_scheduling_sat.ipynb
|
jspricke/or-tools
|
45770b833997f827d322e929b1ed4781c4e60d44
|
[
"Apache-2.0"
] | 1 |
2021-02-23T10:22:55.000Z
|
2021-02-23T13:57:14.000Z
|
examples/notebook/examples/bus_driver_scheduling_sat.ipynb
|
jspricke/or-tools
|
45770b833997f827d322e929b1ed4781c4e60d44
|
[
"Apache-2.0"
] | 1 |
2021-03-16T14:30:59.000Z
|
2021-03-16T14:30:59.000Z
| 51.622402 | 93 | 0.415293 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a26312d77b4256ce09983e297447e83f4348676
| 75,696 |
ipynb
|
Jupyter Notebook
|
facecruncher/FamousFacesCleanup.ipynb
|
siavash9000/famousfaces
|
672c2fe6c6c8406622614a34f1def2c6b08e7a3c
|
[
"MIT"
] | null | null | null |
facecruncher/FamousFacesCleanup.ipynb
|
siavash9000/famousfaces
|
672c2fe6c6c8406622614a34f1def2c6b08e7a3c
|
[
"MIT"
] | 12 |
2020-07-18T01:17:51.000Z
|
2022-02-18T07:55:47.000Z
|
facecruncher/FamousFacesCleanup.ipynb
|
siavash9000/famousfaces
|
672c2fe6c6c8406622614a34f1def2c6b08e7a3c
|
[
"MIT"
] | null | null | null | 257.469388 | 45,457 | 0.894565 |
[
[
[
"import scipy.io as sio\nmat = sio.loadmat(\"./imdb/imdb.mat\")",
"_____no_output_____"
],
[
"from IPython.core.display import Image \nidx = 11114\npath ='./imdb_crop/' + mat['imdb'].item()[2][0][idx][0]\nprint(mat['imdb'].item()[4][0][idx][0])\nprint(mat['imdb'].item()[2][0][idx][0])\nImage(filename=path)",
"Jamie Lee Curtis\n30/nm0000130_rm1504221952_1958-11-22_2005.jpg\n"
],
[
"import numpy\nembeddings = numpy.load('./embeddings.npy')\nimage_list = numpy.load('./image_list.npy')",
"_____no_output_____"
],
[
"file2name = {}\nfor idx in range(0, len(mat['imdb'].item()[2][0])):\n file2name[mat['imdb'].item()[2][0][idx][0]] = mat['imdb'].item()[4][0][idx][0]",
"_____no_output_____"
],
[
"cleaned_imagelist = list(map(lambda x: x.split('/')[-2]+'/' + x.split('/')[-1], image_list))",
"_____no_output_____"
],
[
"idx = 803\nprint(file2name[cleaned_imagelist[idx]])\npath ='./imdb_crop/' + cleaned_imagelist[idx]\nImage(filename=path)",
"Peter Riegert\n"
],
[
"name2file = {}\nfor file_name, name in file2name.items():\n if name in name2file:\n name2file[name].append(file_name)\n else:\n name2file[name] = [file_name]",
"_____no_output_____"
],
[
"filtered_name2file = {k: v for k, v in name2file.items() if len(v) >= 100}\nlen(list(filtered_name2file.items()))",
"_____no_output_____"
],
[
"example = list(filtered_name2file.keys())[1]\nprint(example)\nvecs = [embeddings[cleaned_imagelist.index(x)] for x in name2file[example]]\nprint(len(vecs))\nfrom scipy.spatial.distance import pdist\nfrom scipy.spatial.distance import squareform\ndists = squareform(pdist(vecs,'cosine'))\n\nfrom sklearn.cluster import DBSCAN\nfrom sklearn import metrics\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom sklearn.preprocessing import StandardScaler\nimport numpy as np\nfrom sklearn.metrics.pairwise import cosine_distances as cosine\n\nlabels = DBSCAN(eps=0.1, min_samples=20, metric='precomputed').fit_predict(dists)\nfrom collections import Counter\ncounts = Counter(labels)\ncounts.pop(-1, None)\nprint(counts)\nbiggest_cluster = counts.most_common(1)\nprint(np.where(labels == biggest_cluster[0][0]))",
"Raquel Welch\n113\nCounter({0: 22})\n(array([ 24, 27, 29, 36, 47, 54, 63, 71, 74, 77, 81, 87, 92,\n 94, 97, 100, 102, 105, 106, 107, 108, 109]),)\n"
],
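Since the point of the notebook is cleanup, here is a hedged sketch of keeping only the images that fall into the biggest DBSCAN cluster for the celebrity processed above; it reuses `labels`, `biggest_cluster`, `name2file` and `example` from the previous cell and is not part of the original code:

```python
import numpy as np

# Positions (within this celebrity's file list) assigned to the biggest cluster.
keep_idx = np.where(labels == biggest_cluster[0][0])[0]

# Map positions back to image paths; everything outside the cluster is treated as noise.
clean_files = [name2file[example][i] for i in keep_idx]
print(len(clean_files), "images kept for", example)
```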
[
"print(np.where(labels == -1))",
"(array([ 8, 9, 10, 11, 12, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,\n 31, 35, 36, 37, 39, 40, 44, 46, 48, 49, 51, 54, 55, 56, 57, 58, 59,\n 60, 61, 62, 63, 65, 66, 68, 69, 70, 72, 74, 77, 78, 79, 80, 85, 92]),)\n"
],
[
"path ='./imdb_crop/' + filtered_name2file[example][74]\nfrom IPython.core.display import Image as DImage\nDImage(filename=path)",
"_____no_output_____"
],
[
"from annoy import AnnoyIndex\nimport random\n\nf = 40\nt = AnnoyIndex(f) # Length of item vector that will be indexed\nfor i in xrange(1000):\n v = [random.gauss(0, 1) for z in xrange(f)]\n t.add_item(i, v)\n\nt.build(10) # 10 trees\nt.save('test.ann')\n\n\nu = AnnoyIndex(f)\nu.load('test.ann') # super fast, will just mmap the file\nprint(u.get_nns_by_item(0, 1000)) ",
"_____no_output_____"
],
[
"from PIL import Image\ndef is_grey_scale(img_path=\"lena.jpg\"):\n im = Image.open(img_path).convert('RGB')\n w,h = im.size\n print(w,h)\n for i in range(w):\n for j in range(h):\n r,g,b = im.getpixel((i,j))\n if r != g != b: return False\n return True\nprint(is_grey_scale(path))",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a2637dd66a2124df375eca45583812c0a854d96
| 28,683 |
ipynb
|
Jupyter Notebook
|
08_Text_Analysis/03 - Text Classification - SMS Ham vs. Spam - Word Embeddings + CNN.ipynb
|
lubas569/tf-estimator-tutorials
|
ed1d56c08606478f012c67ef9a1fd78d90938512
|
[
"Apache-2.0"
] | 685 |
2018-01-18T03:25:08.000Z
|
2022-03-19T06:54:02.000Z
|
08_Text_Analysis/03 - Text Classification - SMS Ham vs. Spam - Word Embeddings + CNN.ipynb
|
lubas569/tf-estimator-tutorials
|
ed1d56c08606478f012c67ef9a1fd78d90938512
|
[
"Apache-2.0"
] | 9 |
2020-09-26T01:26:59.000Z
|
2022-02-10T02:09:25.000Z
|
08_Text_Analysis/03 - Text Classification - SMS Ham vs. Spam - Word Embeddings + CNN.ipynb
|
lubas569/tf-estimator-tutorials
|
ed1d56c08606478f012c67ef9a1fd78d90938512
|
[
"Apache-2.0"
] | 259 |
2018-01-22T12:00:44.000Z
|
2022-03-12T22:57:10.000Z
| 36.445997 | 570 | 0.528606 |
[
[
[
"## UCI SMS Spam Collection Dataset\n\n* **Input**: sms textual content. **Target**: ham or spam\n* **data representation**: each sms is repesented with a **fixed-length vector of word indexes**. A word index lookup is generated from the vocabulary list.\n* **words embedding**: A word embedding (dense vector) is learnt for each word. That is, each sms is presented as a matrix of (document-word-count, word-embedding-size)\n* **convolution layer**: Apply filter(s) to the word-embedding matrix, before input to the fully-connected NN\n* **train-data.tsv, valid-datat.tsv**, and **vocab_list.tsv** are prepared and saved in 'data/sms-spam'",
"_____no_output_____"
]
],
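A small, framework-free sketch of the fixed-length word-index representation described above; the toy vocabulary and the padding id are assumptions, while the real lookup is built from `vocab_list.tsv` inside the model function later on:

```python
import numpy as np

MAX_DOCUMENT_LENGTH = 100
PAD_ID = 0  # assumption: the id used for the '#=KS=#' padding token

# Toy vocabulary only; the real word -> id mapping comes from vocab_list.tsv.
vocab = {'free': 1, 'win': 2, 'cash': 3, 'call': 4, 'ok': 5}

def sms_to_word_ids(sms):
    """Map each word to an id (unknown words fall back to PAD_ID) and pad to a fixed length."""
    ids = [vocab.get(word, PAD_ID) for word in sms.lower().split()][:MAX_DOCUMENT_LENGTH]
    return np.array(ids + [PAD_ID] * (MAX_DOCUMENT_LENGTH - len(ids)))

print(sms_to_word_ids('win free cash now')[:6])
```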
[
[
"import tensorflow as tf\nfrom tensorflow import data\nfrom datetime import datetime\nimport multiprocessing\nimport shutil\n\nprint(tf.__version__)",
"/Users/khalidsalama/anaconda/lib/python3.6/importlib/_bootstrap.py:205: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6\n return f(*args, **kwds)\n"
],
[
"MODEL_NAME = 'sms-class-model-01'\n\nTRAIN_DATA_FILES_PATTERN = 'data/sms-spam/train-*.tsv'\nVALID_DATA_FILES_PATTERN = 'data/sms-spam/valid-*.tsv'\n\nVOCAB_LIST_FILE = 'data/sms-spam/vocab_list.tsv'\nN_WORDS_FILE = 'data/sms-spam/n_words.tsv'\n\nRESUME_TRAINING = False\nMULTI_THREADING = True",
"_____no_output_____"
]
],
[
[
"## 1. Define Dataset Metadata",
"_____no_output_____"
]
],
[
[
"MAX_DOCUMENT_LENGTH = 100\n\nPAD_WORD = '#=KS=#'\n\nHEADER = ['class', 'sms']\nHEADER_DEFAULTS = [['NA'], ['NA']]\n\nTEXT_FEATURE_NAME = 'sms'\n\nTARGET_NAME = 'class'\n\nWEIGHT_COLUNM_NAME = 'weight'\n\nTARGET_LABELS = ['spam', 'ham']\n\nwith open(N_WORDS_FILE) as file:\n N_WORDS = int(file.read())+2\nprint(N_WORDS)",
"11332\n"
]
],
[
[
"## 2. Define Data Input Function\n",
"_____no_output_____"
],
[
"### a. TSV parsing logic",
"_____no_output_____"
]
],
[
[
"def parse_tsv_row(tsv_row):\n \n columns = tf.decode_csv(tsv_row, record_defaults=HEADER_DEFAULTS, field_delim='\\t')\n features = dict(zip(HEADER, columns))\n \n target = features.pop(TARGET_NAME)\n \n # giving more weight to \"spam\" records are the are only 13% of the training set\n features[WEIGHT_COLUNM_NAME] = tf.cond( tf.equal(target,'spam'), lambda: 6.6, lambda: 1.0 ) \n\n return features, target",
"_____no_output_____"
]
],
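The 6.6 weight above offsets class imbalance. A quick hedged check of where such a number could come from, using the 13% spam share quoted in the comment of the previous cell:

```python
# Roughly 13% of the training SMS are spam, the remaining 87% are ham.
spam_fraction = 0.13
ham_fraction = 1.0 - spam_fraction

# Weighting spam by ham/spam lets both classes contribute comparably to the loss.
spam_weight = ham_fraction / spam_fraction
print(round(spam_weight, 1))  # about 6.7, close to the 6.6 used above
```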
[
[
"### b. Data pipeline input function",
"_____no_output_____"
]
],
[
[
"def parse_label_column(label_string_tensor):\n table = tf.contrib.lookup.index_table_from_tensor(tf.constant(TARGET_LABELS))\n return table.lookup(label_string_tensor)\n\ndef input_fn(files_name_pattern, mode=tf.estimator.ModeKeys.EVAL, \n skip_header_lines=0, \n num_epochs=1, \n batch_size=200):\n \n shuffle = True if mode == tf.estimator.ModeKeys.TRAIN else False\n \n num_threads = multiprocessing.cpu_count() if MULTI_THREADING else 1\n \n buffer_size = 2 * batch_size + 1\n \n print(\"\")\n print(\"* data input_fn:\")\n print(\"================\")\n print(\"Input file(s): {}\".format(files_name_pattern))\n print(\"Batch size: {}\".format(batch_size))\n print(\"Epoch Count: {}\".format(num_epochs))\n print(\"Mode: {}\".format(mode))\n print(\"Thread Count: {}\".format(num_threads))\n print(\"Shuffle: {}\".format(shuffle))\n print(\"================\")\n print(\"\")\n\n file_names = tf.matching_files(files_name_pattern)\n dataset = data.TextLineDataset(filenames=file_names)\n \n dataset = dataset.skip(skip_header_lines)\n \n if shuffle:\n dataset = dataset.shuffle(buffer_size)\n \n dataset = dataset.map(lambda tsv_row: parse_tsv_row(tsv_row), \n num_parallel_calls=num_threads)\n \n dataset = dataset.batch(batch_size)\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.prefetch(buffer_size)\n \n iterator = dataset.make_one_shot_iterator()\n \n features, target = iterator.get_next()\n return features, parse_label_column(target)",
"_____no_output_____"
]
],
[
[
"## 3. Define Model Function",
"_____no_output_____"
]
],
[
[
"def process_text(text_feature):\n \n # Load vocabolary lookup table to map word => word_id\n vocab_table = tf.contrib.lookup.index_table_from_file(vocabulary_file=VOCAB_LIST_FILE, \n num_oov_buckets=1, default_value=-1)\n # Get text feature\n smss = text_feature\n # Split text to words -> this will produce sparse tensor with variable-lengthes (word count) entries\n words = tf.string_split(smss)\n # Convert sparse tensor to dense tensor by padding each entry to match the longest in the batch\n dense_words = tf.sparse_tensor_to_dense(words, default_value=PAD_WORD)\n # Convert word to word_ids via the vocab lookup table\n word_ids = vocab_table.lookup(dense_words)\n # Create a word_ids padding\n padding = tf.constant([[0,0],[0,MAX_DOCUMENT_LENGTH]])\n # Pad all the word_ids entries to the maximum document length\n word_ids_padded = tf.pad(word_ids, padding)\n word_id_vector = tf.slice(word_ids_padded, [0,0], [-1, MAX_DOCUMENT_LENGTH])\n \n # Return the final word_id_vector\n return word_id_vector\n\n\ndef model_fn(features, labels, mode, params):\n \n hidden_units = params.hidden_units\n output_layer_size = len(TARGET_LABELS)\n embedding_size = params.embedding_size\n window_size = params.window_size\n stride = int(window_size/2)\n filters = params.filters\n \n # word_id_vector\n word_id_vector = process_text(features[TEXT_FEATURE_NAME]) \n # print(\"word_id_vector: {}\".format(word_id_vector)) # (?, MAX_DOCUMENT_LENGTH)\n \n # layer to take each word_id and convert it into vector (embeddings) \n word_embeddings = tf.contrib.layers.embed_sequence(word_id_vector, vocab_size=N_WORDS, \n embed_dim=embedding_size) \n #print(\"word_embeddings: {}\".format(word_embeddings)) # (?, MAX_DOCUMENT_LENGTH, embbeding_size)\n \n # convolution\n words_conv = tf.layers.conv1d(word_embeddings, filters=filters, kernel_size=window_size, \n strides=stride, padding='SAME', activation=tf.nn.relu)\n \n #print(\"words_conv: {}\".format(words_conv)) # (?, MAX_DOCUMENT_LENGTH/stride, filters)\n \n words_conv_shape = words_conv.get_shape()\n dim = words_conv_shape[1] * words_conv_shape[2]\n input_layer = tf.reshape(words_conv,[-1, dim])\n #print(\"input_layer: {}\".format(input_layer)) # (?, (MAX_DOCUMENT_LENGTH/stride)*filters)\n \n if hidden_units is not None:\n \n # Create a fully-connected layer-stack based on the hidden_units in the params\n hidden_layers = tf.contrib.layers.stack(inputs=input_layer,\n layer=tf.contrib.layers.fully_connected,\n stack_args= hidden_units,\n activation_fn=tf.nn.relu)\n # print(\"hidden_layers: {}\".format(hidden_layers)) # (?, last-hidden-layer-size)\n \n else:\n hidden_layers = input_layer\n\n # Connect the output layer (logits) to the hidden layer (no activation fn)\n logits = tf.layers.dense(inputs=hidden_layers, \n units=output_layer_size, \n activation=None)\n \n # print(\"logits: {}\".format(logits)) # (?, output_layer_size)\n\n # Provide an estimator spec for `ModeKeys.PREDICT`.\n if mode == tf.estimator.ModeKeys.PREDICT:\n probabilities = tf.nn.softmax(logits)\n predicted_indices = tf.argmax(probabilities, 1)\n\n # Convert predicted_indices back into strings\n predictions = {\n 'class': tf.gather(TARGET_LABELS, predicted_indices),\n 'probabilities': probabilities\n }\n export_outputs = {\n 'prediction': tf.estimator.export.PredictOutput(predictions)\n }\n \n # Provide an estimator spec for `ModeKeys.PREDICT` modes.\n return tf.estimator.EstimatorSpec(mode,\n predictions=predictions,\n export_outputs=export_outputs)\n \n # weights\n weights = features[WEIGHT_COLUNM_NAME]\n\n 
# Calculate loss using softmax cross entropy\n loss = tf.losses.sparse_softmax_cross_entropy(\n logits=logits, labels=labels, \n weights=weights\n )\n \n tf.summary.scalar('loss', loss)\n \n if mode == tf.estimator.ModeKeys.TRAIN:\n # Create Optimiser\n optimizer = tf.train.AdamOptimizer(params.learning_rate)\n\n # Create training operation\n train_op = optimizer.minimize(\n loss=loss, global_step=tf.train.get_global_step())\n\n # Provide an estimator spec for `ModeKeys.TRAIN` modes.\n return tf.estimator.EstimatorSpec(mode=mode,\n loss=loss, \n train_op=train_op)\n \n\n if mode == tf.estimator.ModeKeys.EVAL:\n probabilities = tf.nn.softmax(logits)\n predicted_indices = tf.argmax(probabilities, 1)\n\n # Return accuracy and area under ROC curve metrics\n labels_one_hot = tf.one_hot(\n labels,\n depth=len(TARGET_LABELS),\n on_value=True,\n off_value=False,\n dtype=tf.bool\n )\n \n eval_metric_ops = {\n 'accuracy': tf.metrics.accuracy(labels, predicted_indices, weights=weights),\n 'auroc': tf.metrics.auc(labels_one_hot, probabilities, weights=weights)\n }\n \n # Provide an estimator spec for `ModeKeys.EVAL` modes.\n return tf.estimator.EstimatorSpec(mode, \n loss=loss, \n eval_metric_ops=eval_metric_ops)\n\ndef create_estimator(run_config, hparams):\n estimator = tf.estimator.Estimator(model_fn=model_fn, \n params=hparams, \n config=run_config)\n \n print(\"\")\n print(\"Estimator Type: {}\".format(type(estimator)))\n print(\"\")\n\n return estimator",
"_____no_output_____"
]
],
[
[
"## 4. Run Experiment",
"_____no_output_____"
],
[
"### a. Set HParam and RunConfig",
"_____no_output_____"
]
],
[
[
"TRAIN_SIZE = 4179\nNUM_EPOCHS = 10\nBATCH_SIZE = 250\nEVAL_AFTER_SEC = 60\nTOTAL_STEPS = int((TRAIN_SIZE/BATCH_SIZE)*NUM_EPOCHS)\n\nhparams = tf.contrib.training.HParams(\n num_epochs = NUM_EPOCHS,\n batch_size = BATCH_SIZE,\n embedding_size = 3,\n window_size = 3,\n filters = 2,\n hidden_units=None, #[8],\n max_steps = TOTAL_STEPS,\n learning_rate = 0.01\n)\n\nmodel_dir = 'trained_models/{}'.format(MODEL_NAME)\n\nrun_config = tf.estimator.RunConfig(\n log_step_count_steps=5000,\n tf_random_seed=19830610,\n model_dir=model_dir\n)\n\nprint(hparams)\nprint(\"Model Directory:\", run_config.model_dir)\nprint(\"\")\nprint(\"Dataset Size:\", TRAIN_SIZE)\nprint(\"Batch Size:\", BATCH_SIZE)\nprint(\"Steps per Epoch:\",TRAIN_SIZE/BATCH_SIZE)\nprint(\"Total Steps:\", TOTAL_STEPS)\nprint(\"That is 1 evaluation step after each\",EVAL_AFTER_SEC,\"training seconds\")",
"[('batch_size', 250), ('embedding_size', 3), ('filters', 2), ('hidden_units', None), ('learning_rate', 0.01), ('max_steps', 167), ('num_epochs', 10), ('window_size', 3)]\nModel Directory: trained_models/sms-class-model-01\n\nDataset Size: 4179\nBatch Size: 250\nSteps per Epoch: 16.716\nTotal Steps: 167\nThat is 1 evaluation step after each 60 training seconds\n"
]
],
[
[
"### b. Define serving function",
"_____no_output_____"
]
],
[
[
"def serving_input_fn():\n \n receiver_tensor = {\n 'sms': tf.placeholder(tf.string, [None]),\n }\n \n features = {\n key: tensor\n for key, tensor in receiver_tensor.items()\n }\n \n return tf.estimator.export.ServingInputReceiver(\n features, receiver_tensor)",
"_____no_output_____"
]
],
[
[
"### c. Define TrainSpec and EvaluSpec",
"_____no_output_____"
]
],
[
[
"train_spec = tf.estimator.TrainSpec(\n input_fn = lambda: input_fn(\n TRAIN_DATA_FILES_PATTERN,\n mode = tf.estimator.ModeKeys.TRAIN,\n num_epochs=hparams.num_epochs,\n batch_size=hparams.batch_size\n ),\n max_steps=hparams.max_steps,\n hooks=None\n)\n\neval_spec = tf.estimator.EvalSpec(\n input_fn = lambda: input_fn(\n VALID_DATA_FILES_PATTERN,\n mode=tf.estimator.ModeKeys.EVAL,\n batch_size=hparams.batch_size\n ),\n exporters=[tf.estimator.LatestExporter(\n name=\"predict\", # the name of the folder in which the model will be exported to under export\n serving_input_receiver_fn=serving_input_fn,\n exports_to_keep=1,\n as_text=True)],\n steps=None,\n throttle_secs = EVAL_AFTER_SEC\n)",
"_____no_output_____"
]
],
[
[
"### d. Run Experiment via train_and_evaluate",
"_____no_output_____"
]
],
[
[
"if not RESUME_TRAINING:\n print(\"Removing previous artifacts...\")\n shutil.rmtree(model_dir, ignore_errors=True)\nelse:\n print(\"Resuming training...\") \n\n \ntf.logging.set_verbosity(tf.logging.INFO)\n\ntime_start = datetime.utcnow() \nprint(\"Experiment started at {}\".format(time_start.strftime(\"%H:%M:%S\")))\nprint(\".......................................\") \n\nestimator = create_estimator(run_config, hparams)\n\ntf.estimator.train_and_evaluate(\n estimator=estimator,\n train_spec=train_spec, \n eval_spec=eval_spec\n)\n\ntime_end = datetime.utcnow() \nprint(\".......................................\")\nprint(\"Experiment finished at {}\".format(time_end.strftime(\"%H:%M:%S\")))\nprint(\"\")\ntime_elapsed = time_end - time_start\nprint(\"Experiment elapsed time: {} seconds\".format(time_elapsed.total_seconds()))\n ",
"Removing previous artifacts...\nExperiment started at 16:22:44\n.......................................\nINFO:tensorflow:Using config: {'_model_dir': 'trained_models/sms-class-model-01', '_tf_random_seed': 19830610, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': None, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 5000, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x11401f908>, '_task_type': 'worker', '_task_id': 0, '_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\n\nEstimator Type: <class 'tensorflow.python.estimator.estimator.Estimator'>\n\nINFO:tensorflow:Running training and evaluation locally (non-distributed).\nINFO:tensorflow:Start train and evaluate loop. The evaluate will happen after 60 secs (eval_spec.throttle_secs) or training is finished.\n\n* data input_fn:\n================\nInput file(s): data/sms-spam/train-*.tsv\nBatch size: 250\nEpoch Count: 10\nMode: train\nThread Count: 4\nShuffle: True\n================\n\nINFO:tensorflow:Create CheckpointSaverHook.\nINFO:tensorflow:Saving checkpoints for 1 into trained_models/sms-class-model-01/model.ckpt.\nINFO:tensorflow:loss = 1.11302, step = 1\nINFO:tensorflow:loss = 0.00906904, step = 101 (0.864 sec)\nINFO:tensorflow:Saving checkpoints for 167 into trained_models/sms-class-model-01/model.ckpt.\nINFO:tensorflow:Loss for final step: 0.00403102.\n\n* data input_fn:\n================\nInput file(s): data/sms-spam/valid-*.tsv\nBatch size: 250\nEpoch Count: 1\nMode: eval\nThread Count: 4\nShuffle: False\n================\n\nINFO:tensorflow:Starting evaluation at 2017-12-26-16:22:50\nINFO:tensorflow:Restoring parameters from trained_models/sms-class-model-01/model.ckpt-167\nINFO:tensorflow:Finished evaluation at 2017-12-26-16:22:51\nINFO:tensorflow:Saving dict for global step 167: accuracy = 0.958675, auroc = 0.990997, global_step = 167, loss = 0.189101\nINFO:tensorflow:Restoring parameters from trained_models/sms-class-model-01/model.ckpt-167\nINFO:tensorflow:Assets added to graph.\nINFO:tensorflow:Assets written to: b\"trained_models/sms-class-model-01/export/predict/temp-b'1514305372'/assets\"\nINFO:tensorflow:SavedModel written to: b\"trained_models/sms-class-model-01/export/predict/temp-b'1514305372'/saved_model.pbtxt\"\n.......................................\nExperiment finished at 16:22:52\n\nExperiment elapsed time: 7.655679 seconds\n"
]
],
[
[
"## 5. Evaluate the Model",
"_____no_output_____"
]
],
[
[
"TRAIN_SIZE = 4179\nTEST_SIZE = 1393\n\ntrain_input_fn = lambda: input_fn(files_name_pattern= TRAIN_DATA_FILES_PATTERN, \n mode= tf.estimator.ModeKeys.EVAL,\n batch_size= TRAIN_SIZE)\n\ntest_input_fn = lambda: input_fn(files_name_pattern= VALID_DATA_FILES_PATTERN, \n mode= tf.estimator.ModeKeys.EVAL,\n batch_size= TEST_SIZE)\n\nestimator = create_estimator(run_config, hparams)\n\ntrain_results = estimator.evaluate(input_fn=train_input_fn, steps=1)\nprint()\nprint(\"######################################################################################\")\nprint(\"# Train Measures: {}\".format(train_results))\nprint(\"######################################################################################\")\n\ntest_results = estimator.evaluate(input_fn=test_input_fn, steps=1)\nprint()\nprint(\"######################################################################################\")\nprint(\"# Test Measures: {}\".format(test_results))\nprint(\"######################################################################################\")",
"INFO:tensorflow:Using config: {'_model_dir': 'trained_models/sms-class-model-01', '_tf_random_seed': 19830610, '_save_summary_steps': 100, '_save_checkpoints_steps': None, '_save_checkpoints_secs': 600, '_session_config': None, '_keep_checkpoint_max': 5, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 5000, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x11401f908>, '_task_type': 'worker', '_task_id': 0, '_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}\n\nEstimator Type: <class 'tensorflow.python.estimator.estimator.Estimator'>\n\n\n* data input_fn:\n================\nInput file(s): data/sms-spam/train-*.tsv\nBatch size: 4179\nEpoch Count: 1\nMode: eval\nThread Count: 4\nShuffle: False\n================\n\nINFO:tensorflow:Starting evaluation at 2017-12-26-16:22:53\nINFO:tensorflow:Restoring parameters from trained_models/sms-class-model-01/model.ckpt-167\nINFO:tensorflow:Evaluation [1/1]\nINFO:tensorflow:Finished evaluation at 2017-12-26-16:22:53\nINFO:tensorflow:Saving dict for global step 167: accuracy = 1.0, auroc = 1.0, global_step = 167, loss = 0.00494254\n\n######################################################################################\n# Train Measures: {'accuracy': 1.0, 'auroc': 1.0000001, 'loss': 0.0049425424, 'global_step': 167}\n######################################################################################\n\n* data input_fn:\n================\nInput file(s): data/sms-spam/valid-*.tsv\nBatch size: 1393\nEpoch Count: 1\nMode: eval\nThread Count: 4\nShuffle: False\n================\n\nINFO:tensorflow:Starting evaluation at 2017-12-26-16:22:54\nINFO:tensorflow:Restoring parameters from trained_models/sms-class-model-01/model.ckpt-167\nINFO:tensorflow:Evaluation [1/1]\nINFO:tensorflow:Finished evaluation at 2017-12-26-16:22:55\nINFO:tensorflow:Saving dict for global step 167: accuracy = 0.958675, auroc = 0.990997, global_step = 167, loss = 0.199238\n\n######################################################################################\n# Test Measures: {'accuracy': 0.95867485, 'auroc': 0.99099678, 'loss': 0.19923805, 'global_step': 167}\n######################################################################################\n"
]
],
[
[
"## 6. Predict Using Serving Function",
"_____no_output_____"
]
],
[
[
"import os\n\nexport_dir = model_dir +\"/export/predict/\"\n\nsaved_model_dir = export_dir + \"/\" + os.listdir(path=export_dir)[-1] \n\nprint(saved_model_dir)\nprint(\"\")\n\npredictor_fn = tf.contrib.predictor.from_saved_model(\n export_dir = saved_model_dir,\n signature_def_key=\"prediction\"\n)\n\noutput = predictor_fn(\n {\n 'sms':[\n 'ok, I will be with you in 5 min. see you then',\n 'win 1000 cash free of charge promo hot deal sexy',\n 'hot girls sexy tonight call girls waiting call chat'\n ]\n \n }\n)\nprint(output)",
"trained_models/sms-class-model-01/export/predict//1514305372\n\nINFO:tensorflow:Restoring parameters from b'trained_models/sms-class-model-01/export/predict//1514305372/variables/variables'\n{'class': array([b'ham', b'spam', b'spam'], dtype=object), 'probabilities': array([[ 7.56433234e-04, 9.99243617e-01],\n [ 8.26096475e-01, 1.73903525e-01],\n [ 9.40912008e-01, 5.90880252e-02]], dtype=float32)}\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a26408a46b868baeef8de67ce80640aac069e8e
| 35,586 |
ipynb
|
Jupyter Notebook
|
AAS2015/notebooks/02_Unit_Conversion.ipynb
|
astropy/astropy-workshops
|
2c35a2775b5926e1bcbffadd5934591d0acb989f
|
[
"BSD-3-Clause"
] | 1 |
2019-12-10T19:45:03.000Z
|
2019-12-10T19:45:03.000Z
|
AAS2015/notebooks/02_Unit_Conversion.ipynb
|
astropy/astropy-workshops
|
2c35a2775b5926e1bcbffadd5934591d0acb989f
|
[
"BSD-3-Clause"
] | null | null | null |
AAS2015/notebooks/02_Unit_Conversion.ipynb
|
astropy/astropy-workshops
|
2c35a2775b5926e1bcbffadd5934591d0acb989f
|
[
"BSD-3-Clause"
] | 2 |
2019-09-30T01:37:34.000Z
|
2019-10-31T18:19:54.000Z
| 27.437163 | 1,351 | 0.495532 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |
4a2642c2a1ca8d7f8a85da40a28e94665dea6414
| 2,928 |
ipynb
|
Jupyter Notebook
|
Number of alphabets and alphanumeric characters in a string.ipynb
|
Parv-Joshi/School-Python-Codes
|
d8dce0f5dbdbc9af3bed8f6a4a9d2fb3035274ec
|
[
"Unlicense"
] | null | null | null |
Number of alphabets and alphanumeric characters in a string.ipynb
|
Parv-Joshi/School-Python-Codes
|
d8dce0f5dbdbc9af3bed8f6a4a9d2fb3035274ec
|
[
"Unlicense"
] | null | null | null |
Number of alphabets and alphanumeric characters in a string.ipynb
|
Parv-Joshi/School-Python-Codes
|
d8dce0f5dbdbc9af3bed8f6a4a9d2fb3035274ec
|
[
"Unlicense"
] | null | null | null | 23.612903 | 129 | 0.568306 |
[
[
[
"###### Excercise: Create a program that counts the number of alphabets, and alphanumeric characters in a user-input string.",
"_____no_output_____"
]
],
[
[
"string = input(\"Enter a string: \")\nprint(\"The total length of the string is:\", len(string))\ncount_alpha, count_alphanumeric = 0, 0\nfor character in string:\n if character.isalpha():\n count_alpha += 1\n if character.isalnum():\n count_alphanumeric += 1\nprint(\"The number of alphabets are\", count_alpha)\nprint(\"The number of alphanumeric characters are\", count_alphanumeric)",
"Enter a string: My name is NOT x0392394\nThe total length of the string is: 23\nThe number of alphabets are 12\nThe number of alphanumeric characters are 19\n"
]
]
] |
[
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
]
] |
4a264a10aa1e73e2742877f562410492c2d28b81
| 748,315 |
ipynb
|
Jupyter Notebook
|
hw1/practical_1/.ipynb_checkpoints/riaan_python_assignment_1-checkpoint.ipynb
|
ixlan/Deep-learning-
|
246e5285b6fb6508814762fddfd00d54515ccf79
|
[
"MIT"
] | null | null | null |
hw1/practical_1/.ipynb_checkpoints/riaan_python_assignment_1-checkpoint.ipynb
|
ixlan/Deep-learning-
|
246e5285b6fb6508814762fddfd00d54515ccf79
|
[
"MIT"
] | null | null | null |
hw1/practical_1/.ipynb_checkpoints/riaan_python_assignment_1-checkpoint.ipynb
|
ixlan/Deep-learning-
|
246e5285b6fb6508814762fddfd00d54515ccf79
|
[
"MIT"
] | null | null | null | 83.517299 | 704 | 0.805559 |
[
[
[
"# Assignment 1: Neural Networks\n\nImplement your code and answer all the questions. Once you complete the assignment and answer the questions inline, you can download the report in pdf (File->Download as->PDF) and send it to us, together with the code. \n\n**Don't submit additional cells in the notebook, we will not check them. Don't change parameters of the learning inside the cells.**\n\nAssignment 1 consists of 4 sections:\n* **Section 1**: Data Preparation\n* **Section 2**: Multinomial Logistic Regression\n* **Section 3**: Backpropagation\n* **Section 4**: Neural Networks\n",
"_____no_output_____"
]
],
[
[
"# Import necessary standard python packages \nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Setting configuration for matplotlib\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (10.0, 8.0)\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\nplt.rcParams['xtick.labelsize'] = 15\nplt.rcParams['ytick.labelsize'] = 15\nplt.rcParams['axes.labelsize'] = 20\n",
"_____no_output_____"
],
[
"# Import python modules for this assignment\n\nfrom uva_code.cifar10_utils import get_cifar10_raw_data, preprocess_cifar10_data\nfrom uva_code.solver import Solver\nfrom uva_code.losses import SoftMaxLoss, CrossEntropyLoss, HingeLoss\nfrom uva_code.layers import LinearLayer, ReLULayer, SigmoidLayer, TanhLayer, SoftMaxLayer, ELULayer\nfrom uva_code.models import Network\nfrom uva_code.optimizers import SGD\n\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
]
],
[
[
"## Section 1: Data Preparation\n\nIn this section you will download [CIFAR10](https://www.cs.toronto.edu/~kriz/cifar.html \"CIFAR10\") data which you will use in this assignment. \n\n**Make sure that everything has been downloaded correctly and all images are visible.**",
"_____no_output_____"
]
],
[
[
"# Get raw CIFAR10 data. For Unix users the script to download CIFAR10 dataset (get_cifar10.sh) is provided and \n# it is used inside get_cifar10_raw_data() function. If it doesn't work then manually download the data from \n# http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz and extract it to cifar-10-batches-py folder inside \n# cifar10 folder.\n\n# Downloading the data can take several minutes. \nX_train_raw, Y_train_raw, X_test_raw, Y_test_raw = get_cifar10_raw_data()\n\n#Checking shapes, should be (50000, 32, 32, 3), (50000, ), (10000, 32, 32, 3), (10000, )\nprint 'Train data shape: ', X_train_raw.shape\nprint 'Train labels shape: ', Y_train_raw.shape\nprint 'Test data shape: ', X_test_raw.shape\nprint 'Test labels shape: ', Y_test_raw.shape",
"Train data shape: (50000, 32, 32, 3)\nTrain labels shape: (50000,)\nTest data shape: (10000, 32, 32, 3)\nTest labels shape: (10000,)\n"
],
[
"# Visualize CIFAR10 data\nsamples_per_class = 10\nclasses = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\nnum_classes = len(classes)\ncan = np.zeros((320, 320, 3),dtype='uint8')\nfor i, cls in enumerate(classes):\n idxs = np.flatnonzero(Y_train_raw == i) \n idxs = np.random.choice(idxs, samples_per_class, replace = False)\n for j in range(samples_per_class):\n can[32 * i:32 * (i + 1), 32 * j:32 * (j + 1),:] = X_train_raw[idxs[j]]\nplt.xticks([], [])\nplt.yticks(range(16, 320, 32), classes)\nplt.title('CIFAR10', fontsize = 20)\nplt.imshow(can)\nplt.show()",
"_____no_output_____"
],
[
"# Normalize CIFAR10 data by subtracting the mean image. With these data you will work in the rest of assignment.\n# The validation subset will be used for tuning the hyperparameters.\nX_train, Y_train, X_val, Y_val, X_test, Y_test = preprocess_cifar10_data(X_train_raw, Y_train_raw, \n X_test_raw, Y_test_raw, num_val = 1000)\n\n#Checking shapes, should be (49000, 3072), (49000, ), (1000, 3072), (1000, ), (10000, 3072), (10000, ) \nprint 'Train data shape: ', X_train.shape\nprint 'Train labels shape: ', Y_train.shape\nprint 'Val data shape: ', X_val.shape\nprint 'Val labels shape: ', Y_val.shape\nprint 'Test data shape: ', X_test.shape\nprint 'Test labels shape: ', Y_test.shape",
"Train data shape: (49000, 3072)\nTrain labels shape: (49000,)\nVal data shape: (1000, 3072)\nVal labels shape: (1000,)\nTest data shape: (10000, 3072)\nTest labels shape: (10000,)\n"
]
],
[
[
"### Data Preparation: Question 1 [4 points]\n\nNeural networks and deep learning methods prefer the input variables to contain as raw data as possible. \nBut in the vast majority of cases data need to be preprocessed. Suppose, you have two types of non-linear activation functions ([Sigmoid](https://en.wikipedia.org/wiki/Sigmoid_function), [ReLU](https://en.wikipedia.org/wiki/Rectifier_(neural_networks)) and two types of normalization ([Per-example mean substraction](http://ufldl.stanford.edu/wiki/index.php/Data_Preprocessing#Per-example_mean_subtraction), [Standardization](http://ufldl.stanford.edu/wiki/index.php/Data_Preprocessing#Feature_Standardization)). Which one should you use for each case and why? For example, in the previous cell we used per-example mean substraction.\n\n**Your Answer**: You should use standardization for the sigmoid activation function. This is because if you have large outliers in the data, you can will end up having vanishing gradients and having a lot of dead neurons. Standardization will also make sure the neurons converge faster.\n\nYou can use both standardization and mean subtraction for ReLu's. This is because the function doesn't have problems with vanishing and exploding gradients. You can however still get dead neurons if the value is stuck at 0. This causes the gradient to be 0 forever.\n\n# Unchecked...",
"_____no_output_____"
],
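The answer above can be made concrete with a small numpy sketch (illustrative only, not part of the assignment code; the toy array X and its shape are made up) contrasting the two normalizations:

```python
import numpy as np

X = np.random.rand(5, 3072) * 255.0  # toy image batch, one row per flattened example

# Per-example mean subtraction: remove each example's own mean intensity.
X_per_example = X - X.mean(axis=1, keepdims=True)

# Feature standardization: zero mean and unit variance per feature (pixel),
# estimated on the training data and reused for validation/test data.
mu = X.mean(axis=0)
sigma = X.std(axis=0) + 1e-8  # small epsilon guards against zero variance
X_standardized = (X - mu) / sigma
```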
[
"## Section 2: Multinomial Logistic Regression [5 points]\n\nIn this section you will get started by implementing a linear classification model called [Multinomial Logistic Regression](http://ufldl.stanford.edu/tutorial/supervised/SoftmaxRegression/). Later on you will extend this model to a neural network. You will train it by using the [mini-batch Stochastic Gradient Descent algorithm](http://sebastianruder.com/optimizing-gradient-descent/index.html#minibatchgradientdescent). You should implement how to sample batches, how to compute the loss, how to compute the gradient of the loss with respect to the parameters of the model and how to update the parameters of the model. \n\nYou should get around 0.35 accuracy on the validation and test sets with the provided parameters.\n",
"_____no_output_____"
]
],
[
[
"# DONT CHANGE THE SEED AND THE DEFAULT PARAMETERS. OTHERWISE WE WILL NOT BE ABLE TO CORRECT YOUR ASSIGNMENT!\n# Seed\nnp.random.seed(42)\n\n# Default parameters. \nnum_iterations = 1500\nval_iteration = 100\nbatch_size = 200\nlearning_rate = 1e-7\nweight_decay = 3e+4\nweight_scale = 0.0001\n\n########################################################################################\n# TODO: #\n# Initialize the weights W using a normal distribution with mean = 0 and std = #\n# weight_scale. Initialize the biases b with 0. #\n######################################################################################## \nW = np.random.normal(scale = weight_scale, size = (X_train.shape[1], num_classes))\nb = np.zeros(shape = num_classes)\n########################################################################################\n# END OF YOUR CODE #\n########################################################################################\n\ntrain_loss_history = []\ntrain_acc_history = []\n\nval_loss_history = []\nval_acc_history = []\n\ndef softmax_loss(weights, batch, targets):\n x = np.exp((np.dot(batch, weights) + b))\n results = x / np.sum(x, axis = 1)[:, None]\n cross_entropy = 0 \n \n for key,value in enumerate(results[:]):\n value = value[targets[key]]\n cross_entropy +=np.log(value)\n \n return -(1./batch_size)*cross_entropy + (weight_decay/2)*np.sum(weights**2)\n \n \ndef accuracy(weights, batch, target):\n \n x = np.exp((np.dot(batch, weights) + b))\n results = x / np.sum(x, axis = 1)[:, None]\n predictions = np.argmax(results, axis = 1)\n correct = sum([int(i == predictions[key]) for key, i in enumerate(target) ])/ float(len(target))\n \n return correct\n\ndef get_gradient(weights, batch, target):\n x = np.exp((np.dot(batch, weights) + b))\n p_y = x / np.sum(x, axis = 1)[:, None]\n \n oneHot = np.zeros((batch_size, 10))\n oneHot[np.arange(batch_size), target] = 1\n \n diff = (oneHot - p_y)\n update = np.zeros((weights.shape[0], 10))\n \n for x in range(batch_size):\n update += np.outer(batch[x],diff[x])\n \n return ((-update/batch_size) + weight_decay*weights), (-np.sum(diff, axis = 0)/batch_size)\n\n\nfor iteration in range(num_iterations):\n ########################################################################################\n # TODO: #\n # Sample a random mini-batch with the size of batch_size from the train set. Put the #\n # images to X_train_batch and labels to Y_train_batch variables. #\n ########################################################################################\n \n sample = np.random.choice(X_train.shape[0], batch_size, replace = False)\n X_train_batch = X_train[sample]\n Y_train_batch = Y_train[sample]\n \n \n ########################################################################################\n # END OF YOUR CODE #\n ########################################################################################\n \n ########################################################################################\n # TODO: #\n # Compute the loss and the accuracy of the multinomial logistic regression classifier #\n # on X_train_batch, Y_train_batch. 
#\n ########################################################################################\n train_loss = softmax_loss(W, X_train_batch, Y_train_batch)\n train_acc = accuracy(W, X_train_batch, Y_train_batch)\n \n ########################################################################################\n # END OF YOUR CODE #\n ########################################################################################\n \n ########################################################################################\n # TODO: #\n # Compute the gradients of the loss with the respect to the weights and biases. Put #\n # them in dW and db variables. #\n ########################################################################################\n \n # NOTE: implemented this with regularization\n dW, db = get_gradient(W, X_train_batch, Y_train_batch)\n ########################################################################################\n # END OF YOUR CODE #\n ########################################################################################\n \n ########################################################################################\n # TODO: #\n # Update the weights W and biases b using the Stochastic Gradient Descent update rule. #\n ########################################################################################\n W -= learning_rate* dW\n b -= learning_rate* db\n\n ########################################################################################\n # END OF YOUR CODE #\n ########################################################################################\n \n if iteration % val_iteration == 0 or iteration == num_iterations - 1:\n ########################################################################################\n # TODO: #\n # Compute the loss and the accuracy on the validation set. #\n ########################################################################################\n val_loss = softmax_loss(W, X_val, Y_val)\n val_acc = accuracy(W, X_val, Y_val)\n ########################################################################################\n # END OF YOUR CODE #\n ########################################################################################\n train_loss_history.append(train_loss)\n train_acc_history.append(train_acc)\n val_loss_history.append(val_loss)\n val_acc_history.append(val_acc)\n \n # Output loss and accuracy during training\n print(\"Iteration {0:d}/{1:d}. Train Loss = {2:.3f}, Train Accuracy = {3:.3f}\".\n format(iteration, num_iterations, train_loss, train_acc))\n print(\"Iteration {0:d}/{1:d}. Validation Loss = {2:.3f}, Validation Accuracy = {3:.3f}\".\n format(iteration, num_iterations, val_loss, val_acc))\n\n########################################################################################\n# TODO: #\n# Compute the accuracy on the test set. #\n########################################################################################\ntest_acc = accuracy(W, X_test, Y_test)\n########################################################################################\n# END OF YOUR CODE #\n########################################################################################\nprint(\"Test Accuracy = {0:.3f}\".format(test_acc))",
"Iteration 0/1500. Train Loss = 6.907, Train Accuracy = 0.125\nIteration 0/1500. Validation Loss = 16.130, Validation Accuracy = 0.125\nIteration 100/1500. Train Loss = 4.587, Train Accuracy = 0.275\nIteration 100/1500. Validation Loss = 12.869, Validation Accuracy = 0.285\nIteration 200/1500. Train Loss = 3.475, Train Accuracy = 0.285\nIteration 200/1500. Validation Loss = 11.482, Validation Accuracy = 0.328\nIteration 300/1500. Train Loss = 2.818, Train Accuracy = 0.340\nIteration 300/1500. Validation Loss = 10.755, Validation Accuracy = 0.345\nIteration 400/1500. Train Loss = 2.433, Train Accuracy = 0.310\nIteration 400/1500. Validation Loss = 10.360, Validation Accuracy = 0.346\nIteration 500/1500. Train Loss = 2.309, Train Accuracy = 0.350\nIteration 500/1500. Validation Loss = 10.142, Validation Accuracy = 0.358\nIteration 600/1500. Train Loss = 2.160, Train Accuracy = 0.325\nIteration 600/1500. Validation Loss = 10.020, Validation Accuracy = 0.350\nIteration 700/1500. Train Loss = 2.101, Train Accuracy = 0.355\nIteration 700/1500. Validation Loss = 9.950, Validation Accuracy = 0.349\nIteration 800/1500. Train Loss = 2.068, Train Accuracy = 0.335\nIteration 800/1500. Validation Loss = 9.917, Validation Accuracy = 0.360\nIteration 900/1500. Train Loss = 2.075, Train Accuracy = 0.340\nIteration 900/1500. Validation Loss = 9.887, Validation Accuracy = 0.360\nIteration 1000/1500. Train Loss = 2.017, Train Accuracy = 0.330\nIteration 1000/1500. Validation Loss = 9.883, Validation Accuracy = 0.355\nIteration 1100/1500. Train Loss = 2.027, Train Accuracy = 0.380\nIteration 1100/1500. Validation Loss = 9.872, Validation Accuracy = 0.356\nIteration 1200/1500. Train Loss = 2.035, Train Accuracy = 0.350\nIteration 1200/1500. Validation Loss = 9.882, Validation Accuracy = 0.368\nIteration 1300/1500. Train Loss = 2.112, Train Accuracy = 0.295\nIteration 1300/1500. Validation Loss = 9.870, Validation Accuracy = 0.362\nIteration 1400/1500. Train Loss = 1.985, Train Accuracy = 0.355\nIteration 1400/1500. Validation Loss = 9.878, Validation Accuracy = 0.361\nIteration 1499/1500. Train Loss = 2.015, Train Accuracy = 0.330\nIteration 1499/1500. Validation Loss = 9.872, Validation Accuracy = 0.360\nTest Accuracy = 0.344\n"
],
[
"# Visualize a learning curve of multinomial logistic regression classifier\nplt.subplot(2, 1, 1)\nplt.plot(range(0, num_iterations + 1, val_iteration), train_loss_history, '-o', label = 'train')\nplt.plot(range(0, num_iterations + 1, val_iteration), val_loss_history, '-o', label = 'validation')\nplt.xlabel('Iteration')\nplt.ylabel('Loss')\nplt.legend(loc='upper right')\n\nplt.subplot(2, 1, 2)\nplt.plot(range(0, num_iterations + 1, val_iteration), train_acc_history, '-o', label='train')\nplt.plot(range(0, num_iterations + 1, val_iteration), val_acc_history, '-o', label='validation')\nplt.xlabel('Epoch')\nplt.ylabel('Accuracy')\nplt.legend(loc='lower right')\nplt.gcf().set_size_inches(15, 12)\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Multinomial Logistic Regression: Question 1 [4 points]\n\nWhat is the value of the loss and the accuracy you expect to obtain at iteration = 0 and why? Consider weight_decay = 0.\n\n**Your Answer**: Given that all values of the weights are around 0. You can assume that the largest output of the softmax classifier will be determined at random. Therefore it would simply be the average of the proportions of the classes.",
"_____no_output_____"
],
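As a quick sanity check of that expectation, a few lines of arithmetic (illustrative only, assuming weight_decay = 0 and 10 balanced classes):

```python
import numpy as np

num_classes = 10
p_uniform = 1.0 / num_classes        # every class gets ~0.1 probability at init
expected_loss = -np.log(p_uniform)   # cross entropy of a uniform prediction
expected_accuracy = p_uniform        # random guessing over 10 classes

print(expected_loss)      # ~2.303
print(expected_accuracy)  # ~0.1
```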
[
"### Multinomial Logistic Regression: Question 2 [4 points]\n\nName at least three factors that determine the size of batches in practice and briefly motivate your answers. The factors might be related to computational or performance aspects.\n\n**Your Answer**: (Source: Deep learning chapter 8)\n- Larger batches give a more accurate estimate of the true gradient, however there are diminishing marginal returns to increasing mini batch size. An optimal batch size would balance the trade off in increased computation time and a more accurate gradient.\n\n- Having a small batch size can have a regularizing effect on the gradient due to the increased random noise. However too small a batch size can increase runtime because of having to calculate more steps.\n\n- specific hardware architecture runs better with a specific batch size. For example GPU's tend to run better with batch sizes of power 2.\n",
"_____no_output_____"
],
[
"### Mulinomial Logistic Regression: Question 3 [4 points]\n\nDoes the learning rate depend on the batch size? Explain how you should change the learning rate with respect to changes of the batch size.\n\nName two extreme choices of a batch size and explain their advantages and disadvantages.\n\n**Your Answer**: The learning rate doesn't directly depend on the batch size. However given that the amount of error that is included in your updates when you have a smaller batch size you may want to set a lower learning rate. \n\nExtreme examples include, having a batch size of 1 (stochastic gradient descent) or having a batch size of all of your traning data. The advantage of having a batch size of 1 is that it has a regularizing effect, this is due to the error that is intrinsic in such a small sample size, which gives a slightly inaccurate estimate of the gradient. The disadvantage is that it will take very long to converge. The advantage of the large batch size is that it will give the most accurate estimate of the gradient: Disadvantages include overfitting, because it will be perfectly fitted to your training data and memory issues. This is due to the fact that you have to load the entire dataset into memory.",
"_____no_output_____"
],
[
"### Multinomial Logistic Regression: Question 4 [4 points]\n\nHow can you describe the rows of weight matrix W? What are they representing? Why? \n\n**Your Answer**: The rows of the weights W are the weights for each of the pixels, that is used to predict the softmax class. In total there are 10 such rows. These weight should have the right color values that appear in that part of the pictures of that class. \n\n**Hint**: Before answering the question visualize rows of weight matrix W in the cell below.",
"_____no_output_____"
]
],
[
[
"W= W.T\n",
"_____no_output_____"
],
[
"# sanity check\nvals = W.copy()\nplt.hist(list(vals.reshape(-1)), bins = 100 )\nplt.show()\n",
"_____no_output_____"
],
[
"########################################################################################\n# TODO: #\n# Visualize the learned weights for each class. #\n########################################################################################\nclasses = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\ncan = np.zeros((32,320 , 3) , dtype = 'uint8')\nfor i, cls in enumerate(classes):\n image = np.reshape(W[i].copy(), (32,32,3))\n # normalize per channel between 0 and 1 \n \n # could normalize them per channel, if necessary\n #image[:,:,0] -= image[:,:,0].min()\n #image[:,:,1] -= image[:,:,1].min()\n #image[:,:,2] -= image[:,:,2].min()\n image -= image.min()\n image *= (255.0/image.max())\n can[0:32 , 32 * i :32 * (i + 1) ,:] = np.array(image, dtype = 'uint8')\n\nplt.figure(figsize = (15,4))\nplt.xticks(range(16, 320, 32), classes)\nplt.yticks([], [])\nplt.title('Weights Visualization', fontsize = 20)\n\nplt.imshow(can)\nplt.show()\n\n\n\n########################################################################################\n# END OF YOUR CODE #\n########################################################################################",
"_____no_output_____"
]
],
[
[
"## Section 3: Backpropagation\n\nFollow the instructions and solve the tasks in paper_assignment_1.pdf. Write your solutions in a separate pdf file. You don't need to put anything here.\n ",
"_____no_output_____"
],
[
"## Section 4: Neural Networks [10 points]\n\nA modular implementation of neural networks allows to define deeper and more flexible architectures. In this section you will implement the multinomial logistic regression classifier from the Section 2 as a one-layer neural network that consists of two parts: a linear transformation layer (module 1) and a softmax loss layer (module 2).\n\nYou will implement the multinomial logistic regression classifier as a modular network by following next steps:\n\n1. Implement the forward and backward passes for the linear layer in **layers.py** file. Write your code inside the ***forward*** and ***backward*** methods of ***LinearLayer*** class. Compute the regularization loss of the weights inside the ***layer_loss*** method of ***LinearLayer*** class. \n2. Implement the softmax loss computation in **losses.py** file. Write your code inside the ***SoftMaxLoss*** function. \n3. Implement the ***forward***, ***backward*** and ***loss*** methods for the ***Network*** class inside the **models.py** file.\n4. Implement the SGD update rule inside ***SGD*** class in **optimizers.py** file.\n5. Implement the ***train_on_batch***, ***test_on_batch***, ***fit***, ***predcit***, ***score***, ***accuracy*** methods of ***Solver*** class in ***solver.py*** file.\n\nYou should get the same results for the next cell as in Section 2. **Don't change the parameters**.\n",
"_____no_output_____"
]
],
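Since layers.py, models.py and the other uva_code files are not shown in this notebook, the following is only a hedged sketch of how a modular forward/backward chain typically looks; the Dense and SimpleNetwork names and their method signatures are hypothetical, not the assignment's actual API:

```python
import numpy as np

class Dense:
    """Hypothetical linear layer: y = xW + b, caching what backward needs."""
    def __init__(self, n_in, n_out, scale=1e-4):
        self.W = scale * np.random.randn(n_in, n_out)
        self.b = np.zeros(n_out)
    def forward(self, x):
        self.x = x                     # cache input for the backward pass
        return x.dot(self.W) + self.b
    def backward(self, dout):
        self.dW = self.x.T.dot(dout)   # gradient w.r.t. weights
        self.db = dout.sum(axis=0)     # gradient w.r.t. biases
        return dout.dot(self.W.T)      # gradient w.r.t. the layer input

class SimpleNetwork:
    """Chains layers: forward left-to-right, backward right-to-left."""
    def __init__(self, layers):
        self.layers = layers
    def forward(self, x):
        for layer in self.layers:
            x = layer.forward(x)
        return x
    def backward(self, dout):
        for layer in reversed(self.layers):
            dout = layer.backward(dout)
        return dout
```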
[
[
"# DONT CHANGE THE SEED AND THE DEFAULT PARAMETERS. OTHERWISE WE WILL NOT BE ABLE TO CORRECT YOUR ASSIGNMENT!\n# Seed\nnp.random.seed(42)\n\n# Default parameters. \nnum_iterations = 1500\nval_iteration = 100\nbatch_size = 200\nlearning_rate = 1e-7\nweight_decay = 3e+4\nweight_scale = 0.0001\n\n########################################################################################\n# TODO: #\n# Build the multinomial logistic regression classifier using the Network model. You #\n# will need to use add_layer and add_loss methods. Train this model using Solver class #\n# with SGD optimizer. In configuration of the optimizer you need to specify only #\n# learning rate. Use the fit method to train classifier. Don't forget to include #\n# X_val and Y_val in arguments to output the validation loss and accuracy during #\n# training. Set the verbose to True to compare with the multinomial logistic #\n# regression classifier from the Section 2. #\n########################################################################################\nlayer_params = {'input_size': X_train.shape[1], 'output_size':10, 'weight_decay': weight_decay }\nmodel = Network()\nmodel.add_layer(LinearLayer(layer_params))\nmodel.add_loss(SoftMaxLoss)\noptimizer = SGD()\noptimizer_config = {'learning_rate': learning_rate}\nsolver = Solver(model)\nsolver.fit(X_train, Y_train, optimizer,\n x_val = X_val, y_val = Y_val,\n optimizer_config = optimizer_config,\n verbose = True, num_iterations = num_iterations)\n\n########################################################################################\n# END OF YOUR CODE #\n########################################################################################\n\n########################################################################################\n# TODO: #\n# Compute the accuracy on the test set. #\n########################################################################################\ntest_acc = solver.score(X_test,Y_test)\n########################################################################################\n# END OF YOUR CODE #\n########################################################################################\nprint(\"Test Accuracy = {0:.3f}\".format(test_acc))",
"Iteration 0/1500: Train Loss = 4.872, Train Accuracy = 0.125\nIteration 0/1500. Validation Loss = 4.971, Validation Accuracy = 0.120\nIteration 100/1500: Train Loss = 3.154, Train Accuracy = 0.175\nIteration 100/1500. Validation Loss = 3.175, Validation Accuracy = 0.144\nIteration 200/1500: Train Loss = 2.501, Train Accuracy = 0.245\nIteration 200/1500. Validation Loss = 2.538, Validation Accuracy = 0.193\nIteration 300/1500: Train Loss = 2.314, Train Accuracy = 0.240\nIteration 300/1500. Validation Loss = 2.241, Validation Accuracy = 0.216\nIteration 400/1500: Train Loss = 2.081, Train Accuracy = 0.245\nIteration 400/1500. Validation Loss = 2.100, Validation Accuracy = 0.254\nIteration 500/1500: Train Loss = 2.079, Train Accuracy = 0.310\nIteration 500/1500. Validation Loss = 2.029, Validation Accuracy = 0.279\nIteration 600/1500: Train Loss = 1.980, Train Accuracy = 0.305\nIteration 600/1500. Validation Loss = 1.993, Validation Accuracy = 0.304\nIteration 700/1500: Train Loss = 1.940, Train Accuracy = 0.330\nIteration 700/1500. Validation Loss = 1.974, Validation Accuracy = 0.319\nIteration 800/1500: Train Loss = 1.962, Train Accuracy = 0.335\nIteration 800/1500. Validation Loss = 1.966, Validation Accuracy = 0.342\nIteration 900/1500: Train Loss = 1.967, Train Accuracy = 0.340\nIteration 900/1500. Validation Loss = 1.959, Validation Accuracy = 0.354\nIteration 1000/1500: Train Loss = 1.916, Train Accuracy = 0.295\nIteration 1000/1500. Validation Loss = 1.958, Validation Accuracy = 0.352\nIteration 1100/1500: Train Loss = 1.939, Train Accuracy = 0.370\nIteration 1100/1500. Validation Loss = 1.956, Validation Accuracy = 0.360\nIteration 1200/1500: Train Loss = 1.941, Train Accuracy = 0.355\nIteration 1200/1500. Validation Loss = 1.958, Validation Accuracy = 0.362\nIteration 1300/1500: Train Loss = 2.019, Train Accuracy = 0.280\nIteration 1300/1500. Validation Loss = 1.956, Validation Accuracy = 0.360\nIteration 1400/1500: Train Loss = 1.897, Train Accuracy = 0.345\nIteration 1400/1500. Validation Loss = 1.957, Validation Accuracy = 0.355\nIteration 1499/1500: Train Loss = 1.924, Train Accuracy = 0.330\nIteration 1499/1500. Validation Loss = 1.956, Validation Accuracy = 0.356\nTest Accuracy = 0.344\n"
]
],
[
[
"### Neural Networks: Task 1 [5 points]\n\nTuning hyperparameters is very important even for multinomial logistic regression. \n\nWhat are the best learning rate and weight decay? What is test accuracy of the model trained with the best hyperparameters values?\n\n**Your Answer**: Learning rate = 1.000000e-06, weight decay = 3.000000e+03: Validation Accuracy = 0.409\n\nThe test accuracy is 0.385\n\n***Hint:*** You should be able to get the test accuracy around 0.4.\n\nImplement the tuning of hyperparameters (learning rate and weight decay) in the next cell. ",
"_____no_output_____"
]
],
[
[
"# DONT CHANGE THE SEED AND THE DEFAULT PARAMETERS. OTHERWISE WE WILL NOT BE ABLE TO CORRECT YOUR ASSIGNMENT!\n# Seed\nnp.random.seed(42)\n\n# Default parameters. \nnum_iterations = 1500\nval_iteration = 100\nbatch_size = 200\nweight_scale = 0.0001\n\n# You should try diffierent range of hyperparameters. \n# added some values here! - Riaan\nlearning_rates = [0.9e-6,1e-6, 1.1e-6 ]\nweight_decays = [ 2.8e+03, 3e+03, 3.2e+3 ]\n\n#,3e+04, 3e+05\n\nbest_val_acc = -1\nbest_solver = None\nfor learning_rate in learning_rates:\n for weight_decay in weight_decays:\n ########################################################################################\n # TODO: #\n # Implement the tuning of hyperparameters for the multinomial logistic regression. Save#\n # maximum of the validation accuracy in best_val_acc and corresponding solver to #\n # best_solver variables. Store the maximum of the validation score for the current #\n # setting of the hyperparameters in cur_val_acc variable. #\n ########################################################################################\n layer_params = {'input_size': X_train.shape[1], 'output_size':10, 'weight_decay': weight_decay }\n \n model = Network()\n model.add_layer(LinearLayer(layer_params))\n model.add_loss(SoftMaxLoss)\n \n optimizer = SGD()\n optimizer_config = {'learning_rate': learning_rate}\n \n solver = Solver(model)\n solver.fit(X_train, Y_train, optimizer,\n x_val = X_val, y_val = Y_val,\n optimizer_config = optimizer_config,\n verbose = False, num_iterations = num_iterations)\n \n cur_val_acc = solver.score(X_val, Y_val)\n \n if cur_val_acc > best_val_acc:\n best_solver = solver\n best_val_acc = cur_val_acc\n ########################################################################################\n # END OF YOUR CODE #\n ########################################################################################\n print(\"Learning rate = {0:e}, weight decay = {1:e}: Validation Accuracy = {2:.3f}\".format(\n learning_rate, weight_decay, cur_val_acc)) \n\n########################################################################################\n# TODO: #\n# Compute the accuracy on the test set for the best solver. #\n########################################################################################\ntest_acc = best_solver.score(X_test, Y_test)\n########################################################################################\n# END OF YOUR CODE #\n########################################################################################\nprint(\"Best Test Accuracy = {0:.3f}\".format(test_acc))",
"Learning rate = 9.000000e-07, weight decay = 2.800000e+03: Validation Accuracy = 0.386\nLearning rate = 9.000000e-07, weight decay = 3.000000e+03: Validation Accuracy = 0.400\nLearning rate = 9.000000e-07, weight decay = 3.200000e+03: Validation Accuracy = 0.390\nLearning rate = 1.000000e-06, weight decay = 2.800000e+03: Validation Accuracy = 0.408\nLearning rate = 1.000000e-06, weight decay = 3.000000e+03: Validation Accuracy = 0.409\nLearning rate = 1.000000e-06, weight decay = 3.200000e+03: Validation Accuracy = 0.389\nLearning rate = 1.100000e-06, weight decay = 2.800000e+03: Validation Accuracy = 0.397\nLearning rate = 1.100000e-06, weight decay = 3.000000e+03: Validation Accuracy = 0.402\nLearning rate = 1.100000e-06, weight decay = 3.200000e+03: Validation Accuracy = 0.403\nBest Test Accuracy = 0.385\n"
]
],
[
[
"### Neural Networks: Task 2 [5 points]\n\nImplement a two-layer neural network with a ReLU activation function. Write your code for the ***forward*** and ***backward*** methods of ***ReLULayer*** class in **layers.py** file.\n\nTrain the network with the following structure: linear_layer-relu-linear_layer-softmax_loss. You should get the accuracy on the test set around 0.44. ",
"_____no_output_____"
]
],
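A minimal sketch of what a ReLU forward/backward pair can look like (the actual ReLULayer in layers.py is not shown here, so this is an assumption about its shape, not its real implementation):

```python
import numpy as np

class ReLUSketch:
    def forward(self, x):
        self.mask = x > 0                     # remember which units were active
        return np.where(self.mask, x, 0.0)
    def backward(self, dout):
        return dout * self.mask               # gradient passes through only where x > 0
```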
[
[
"# DONT CHANGE THE SEED AND THE DEFAULT PARAMETERS. OTHERWISE WE WILL NOT BE ABLE TO CORRECT YOUR ASSIGNMENT!\n# Seed\nnp.random.seed(42)\n\n# Number of hidden units in a hidden layer.\nnum_hidden_units = 100\n\n# Default parameters. \nnum_iterations = 1500\nval_iteration = 100\nbatch_size = 200\nlearning_rate = 2e-3\nweight_decay = 0\nweight_scale = 0.0001\n\n########################################################################################\n# TODO: #\n# Build the model with the structure: linear_layer-relu-linear_layer-softmax_loss. #\n# Train this model using Solver class with SGD optimizer. In configuration of the #\n# optimizer you need to specify only the learning rate. Use the fit method to train. # \n########################################################################################\nlayer_params = {'input_size': X_train.shape[1], 'output_size':num_hidden_units, 'weight_decay': weight_decay }\nmodel = Network()\nmodel.add_layer(LinearLayer(layer_params))\nmodel.add_layer(ReLULayer(layer_params))\nlayer_params2 = {'input_size': num_hidden_units, 'output_size':10, 'weight_decay': weight_decay }\nmodel.add_layer(LinearLayer(layer_params2))\nmodel.add_loss(SoftMaxLoss)\noptimizer = SGD()\noptimizer_config = {'learning_rate': learning_rate}\nsolver = Solver(model)\nsolver.fit(X_train, Y_train, optimizer,\n x_val = X_val, y_val = Y_val,\n optimizer_config = optimizer_config,\n verbose = True, num_iterations = num_iterations)\n########################################################################################\n# END OF YOUR CODE #\n########################################################################################\n \n########################################################################################\n# TODO: #\n# Compute the accuracy on the test set. #\n########################################################################################\ntest_acc = solver.score(X_test, Y_test)\n########################################################################################\n# END OF YOUR CODE #\n########################################################################################\nprint(\"Test Accuracy = {0:.3f}\".format(test_acc))",
"Iteration 0/1500: Train Loss = 2.301, Train Accuracy = 0.320\nIteration 0/1500. Validation Loss = 2.291, Validation Accuracy = 0.193\nIteration 100/1500: Train Loss = 1.711, Train Accuracy = 0.450\nIteration 100/1500. Validation Loss = 1.735, Validation Accuracy = 0.393\nIteration 200/1500: Train Loss = 1.690, Train Accuracy = 0.510\nIteration 200/1500. Validation Loss = 1.644, Validation Accuracy = 0.426\nIteration 300/1500: Train Loss = 1.492, Train Accuracy = 0.610\nIteration 300/1500. Validation Loss = 1.572, Validation Accuracy = 0.446\nIteration 400/1500: Train Loss = 1.599, Train Accuracy = 0.540\nIteration 400/1500. Validation Loss = 1.609, Validation Accuracy = 0.425\nIteration 500/1500: Train Loss = 1.426, Train Accuracy = 0.635\nIteration 500/1500. Validation Loss = 1.538, Validation Accuracy = 0.456\nIteration 600/1500: Train Loss = 1.579, Train Accuracy = 0.545\nIteration 600/1500. Validation Loss = 1.605, Validation Accuracy = 0.434\nIteration 700/1500: Train Loss = 1.432, Train Accuracy = 0.670\nIteration 700/1500. Validation Loss = 1.489, Validation Accuracy = 0.463\nIteration 800/1500: Train Loss = 1.621, Train Accuracy = 0.630\nIteration 800/1500. Validation Loss = 1.586, Validation Accuracy = 0.448\nIteration 900/1500: Train Loss = 1.560, Train Accuracy = 0.645\nIteration 900/1500. Validation Loss = 1.495, Validation Accuracy = 0.467\nIteration 1000/1500: Train Loss = 1.358, Train Accuracy = 0.630\nIteration 1000/1500. Validation Loss = 1.572, Validation Accuracy = 0.476\nIteration 1100/1500: Train Loss = 1.510, Train Accuracy = 0.590\nIteration 1100/1500. Validation Loss = 1.615, Validation Accuracy = 0.446\nIteration 1200/1500: Train Loss = 1.550, Train Accuracy = 0.630\nIteration 1200/1500. Validation Loss = 1.452, Validation Accuracy = 0.485\nIteration 1300/1500: Train Loss = 1.516, Train Accuracy = 0.625\nIteration 1300/1500. Validation Loss = 1.528, Validation Accuracy = 0.458\nIteration 1400/1500: Train Loss = 1.291, Train Accuracy = 0.715\nIteration 1400/1500. Validation Loss = 1.452, Validation Accuracy = 0.493\nIteration 1499/1500: Train Loss = 1.722, Train Accuracy = 0.605\nIteration 1499/1500. Validation Loss = 1.681, Validation Accuracy = 0.452\nTest Accuracy = 0.444\n"
]
],
[
[
"### Neural Networks: Task 3 [5 points]\n\nWhy the ReLU layer is important? What will happen if we exclude this layer? What will be the accuracy on the test set?\n\n**Your Answer**: Relu takes care of the vanishing gradient problem, which allows us to make our networks bigger. As compared to other activation functions such as the sigmoid activation function, the relu gets stuck less. You end up with less dead neurons. If we exclude the relu layer, we will end up with a network that has more dead neurons, and is somewhat harder to train. The accuracy on the test set will be lower.\n \nImplement other activation functions: [Sigmoid](https://en.wikipedia.org/wiki/Sigmoid_function), [Tanh](https://en.wikipedia.org/wiki/Hyperbolic_function#Hyperbolic_tangent) and [ELU](https://arxiv.org/pdf/1511.07289v3.pdf) functions. \nWrite your code for the ***forward*** and ***backward*** methods of ***SigmoidLayer***, ***TanhLayer*** and ***ELULayer*** classes in **layers.py** file.\n",
"_____no_output_____"
]
],
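For reference, hedged sketches of the three activations' forward passes and the derivatives their backward passes would use (function names and signatures are illustrative, not the layers.py API):

```python
import numpy as np

def sigmoid_forward(x):
    return 1.0 / (1.0 + np.exp(-x))

def sigmoid_backward(dout, out):
    return dout * out * (1.0 - out)                    # d/dx sigmoid(x) = s(x)(1 - s(x))

def tanh_forward(x):
    return np.tanh(x)

def tanh_backward(dout, out):
    return dout * (1.0 - out ** 2)                     # d/dx tanh(x) = 1 - tanh(x)^2

def elu_forward(x, alpha=1.0):
    return np.where(x > 0, x, alpha * (np.exp(x) - 1.0))

def elu_backward(dout, x, out, alpha=1.0):
    # For x <= 0 the derivative is alpha * exp(x), which equals ELU(x) + alpha.
    return dout * np.where(x > 0, 1.0, out + alpha)
```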
[
[
"# DONT CHANGE THE SEED AND THE DEFAULT PARAMETERS. OTHERWISE WE WILL NOT BE ABLE TO CORRECT YOUR ASSIGNMENT!\n# Seed\nnp.random.seed(42)\n\n# Number of hidden units in a hidden layer. \nnum_hidden_units = 100\n\n# Default parameters. \nnum_iterations = 1500\nval_iteration = 100\nbatch_size = 200\nlearning_rate = 2e-3\nweight_decay = 0\nweight_scale = 0.0001\n\n# Store results here\nresults = {}\nlayers_name = ['ReLU', 'Sigmoid', 'Tanh', 'ELU']\nlayers = [ReLULayer, SigmoidLayer, TanhLayer, ELULayer]\n\n\nfor layer_name, layer in zip(layers_name, layers):\n ########################################################################################\n # Build the model with the structure: linear_layer-activation-linear_layer-softmax_loss# \n # Train this model using Solver class with SGD optimizer. In configuration of the #\n # optimizer you need to specify only the learning rate. Use the fit method to train. #\n # Store validation history in results dictionary variable. # \n ########################################################################################\n layer_params = {'input_size': X_train.shape[1], 'output_size':num_hidden_units, 'weight_decay': weight_decay }\n model = Network()\n model.add_layer(LinearLayer(layer_params))\n model.add_layer(layer(layer_params))\n layer_params2 = {'input_size': num_hidden_units, 'output_size':10, 'weight_decay': weight_decay }\n model.add_layer(LinearLayer(layer_params2))\n model.add_loss(SoftMaxLoss)\n optimizer = SGD()\n optimizer_config = {'learning_rate': learning_rate}\n solver = Solver(model)\n _,_,_,val_acc_history= solver.fit(X_train, Y_train, optimizer,\n x_val = X_val, y_val = Y_val,\n optimizer_config = optimizer_config,\n verbose = True, num_iterations = num_iterations)\n\n ########################################################################################\n # END OF YOUR CODE #\n ########################################################################################\n results[layer_name] = val_acc_history\n",
"Iteration 0/1500: Train Loss = 2.301, Train Accuracy = 0.320\nIteration 0/1500. Validation Loss = 2.291, Validation Accuracy = 0.193\nIteration 100/1500: Train Loss = 1.711, Train Accuracy = 0.450\nIteration 100/1500. Validation Loss = 1.735, Validation Accuracy = 0.393\nIteration 200/1500: Train Loss = 1.690, Train Accuracy = 0.510\nIteration 200/1500. Validation Loss = 1.644, Validation Accuracy = 0.426\nIteration 300/1500: Train Loss = 1.492, Train Accuracy = 0.610\nIteration 300/1500. Validation Loss = 1.572, Validation Accuracy = 0.446\nIteration 400/1500: Train Loss = 1.599, Train Accuracy = 0.540\nIteration 400/1500. Validation Loss = 1.609, Validation Accuracy = 0.425\nIteration 500/1500: Train Loss = 1.426, Train Accuracy = 0.635\nIteration 500/1500. Validation Loss = 1.538, Validation Accuracy = 0.456\nIteration 600/1500: Train Loss = 1.579, Train Accuracy = 0.545\nIteration 600/1500. Validation Loss = 1.605, Validation Accuracy = 0.434\nIteration 700/1500: Train Loss = 1.432, Train Accuracy = 0.670\nIteration 700/1500. Validation Loss = 1.489, Validation Accuracy = 0.463\nIteration 800/1500: Train Loss = 1.621, Train Accuracy = 0.630\nIteration 800/1500. Validation Loss = 1.586, Validation Accuracy = 0.448\nIteration 900/1500: Train Loss = 1.560, Train Accuracy = 0.645\nIteration 900/1500. Validation Loss = 1.495, Validation Accuracy = 0.467\nIteration 1000/1500: Train Loss = 1.358, Train Accuracy = 0.630\nIteration 1000/1500. Validation Loss = 1.572, Validation Accuracy = 0.476\nIteration 1100/1500: Train Loss = 1.510, Train Accuracy = 0.590\nIteration 1100/1500. Validation Loss = 1.615, Validation Accuracy = 0.446\nIteration 1200/1500: Train Loss = 1.550, Train Accuracy = 0.630\nIteration 1200/1500. Validation Loss = 1.452, Validation Accuracy = 0.485\nIteration 1300/1500: Train Loss = 1.516, Train Accuracy = 0.625\nIteration 1300/1500. Validation Loss = 1.528, Validation Accuracy = 0.458\nIteration 1400/1500: Train Loss = 1.291, Train Accuracy = 0.715\nIteration 1400/1500. Validation Loss = 1.452, Validation Accuracy = 0.493\nIteration 1499/1500: Train Loss = 1.722, Train Accuracy = 0.605\nIteration 1499/1500. Validation Loss = 1.681, Validation Accuracy = 0.452\nIteration 0/1500: Train Loss = 2.303, Train Accuracy = 0.065\nIteration 0/1500. Validation Loss = 2.302, Validation Accuracy = 0.107\nIteration 100/1500: Train Loss = 2.235, Train Accuracy = 0.180\nIteration 100/1500. Validation Loss = 2.239, Validation Accuracy = 0.209\nIteration 200/1500: Train Loss = 2.177, Train Accuracy = 0.225\nIteration 200/1500. Validation Loss = 2.170, Validation Accuracy = 0.217\nIteration 300/1500: Train Loss = 2.069, Train Accuracy = 0.295\nIteration 300/1500. Validation Loss = 2.125, Validation Accuracy = 0.242\nIteration 400/1500: Train Loss = 2.123, Train Accuracy = 0.275\nIteration 400/1500. Validation Loss = 2.089, Validation Accuracy = 0.254\nIteration 500/1500: Train Loss = 2.068, Train Accuracy = 0.220\nIteration 500/1500. Validation Loss = 2.062, Validation Accuracy = 0.274\nIteration 600/1500: Train Loss = 2.014, Train Accuracy = 0.275\nIteration 600/1500. Validation Loss = 2.037, Validation Accuracy = 0.289\nIteration 700/1500: Train Loss = 2.017, Train Accuracy = 0.360\nIteration 700/1500. Validation Loss = 2.013, Validation Accuracy = 0.311\nIteration 800/1500: Train Loss = 1.999, Train Accuracy = 0.295\nIteration 800/1500. Validation Loss = 1.998, Validation Accuracy = 0.319\nIteration 900/1500: Train Loss = 1.957, Train Accuracy = 0.290\nIteration 900/1500. 
Validation Loss = 1.973, Validation Accuracy = 0.322\nIteration 1000/1500: Train Loss = 1.951, Train Accuracy = 0.335\nIteration 1000/1500. Validation Loss = 1.959, Validation Accuracy = 0.323\nIteration 1100/1500: Train Loss = 1.857, Train Accuracy = 0.440\nIteration 1100/1500. Validation Loss = 1.946, Validation Accuracy = 0.348\nIteration 1200/1500: Train Loss = 1.933, Train Accuracy = 0.350\nIteration 1200/1500. Validation Loss = 1.929, Validation Accuracy = 0.345\nIteration 1300/1500: Train Loss = 1.891, Train Accuracy = 0.345\nIteration 1300/1500. Validation Loss = 1.914, Validation Accuracy = 0.355\nIteration 1400/1500: Train Loss = 1.920, Train Accuracy = 0.370\nIteration 1400/1500. Validation Loss = 1.900, Validation Accuracy = 0.363\nIteration 1499/1500: Train Loss = 1.893, Train Accuracy = 0.350\nIteration 1499/1500. Validation Loss = 1.888, Validation Accuracy = 0.360\nIteration 0/1500: Train Loss = 2.302, Train Accuracy = 0.235\nIteration 0/1500. Validation Loss = 2.300, Validation Accuracy = 0.154\nIteration 100/1500: Train Loss = 2.107, Train Accuracy = 0.260\nIteration 100/1500. Validation Loss = 2.105, Validation Accuracy = 0.257\nIteration 200/1500: Train Loss = 1.963, Train Accuracy = 0.285\nIteration 200/1500. Validation Loss = 2.020, Validation Accuracy = 0.288\nIteration 300/1500: Train Loss = 1.892, Train Accuracy = 0.360\nIteration 300/1500. Validation Loss = 1.959, Validation Accuracy = 0.324\nIteration 400/1500: Train Loss = 1.931, Train Accuracy = 0.380\nIteration 400/1500. Validation Loss = 1.921, Validation Accuracy = 0.346\nIteration 500/1500: Train Loss = 1.874, Train Accuracy = 0.380\nIteration 500/1500. Validation Loss = 1.891, Validation Accuracy = 0.351\nIteration 600/1500: Train Loss = 1.834, Train Accuracy = 0.405\nIteration 600/1500. Validation Loss = 1.868, Validation Accuracy = 0.356\nIteration 700/1500: Train Loss = 1.835, Train Accuracy = 0.420\nIteration 700/1500. Validation Loss = 1.850, Validation Accuracy = 0.362\nIteration 800/1500: Train Loss = 1.727, Train Accuracy = 0.460\nIteration 800/1500. Validation Loss = 1.842, Validation Accuracy = 0.359\nIteration 900/1500: Train Loss = 1.753, Train Accuracy = 0.435\nIteration 900/1500. Validation Loss = 1.817, Validation Accuracy = 0.377\nIteration 1000/1500: Train Loss = 1.734, Train Accuracy = 0.430\nIteration 1000/1500. Validation Loss = 1.816, Validation Accuracy = 0.382\nIteration 1100/1500: Train Loss = 1.759, Train Accuracy = 0.420\nIteration 1100/1500. Validation Loss = 1.795, Validation Accuracy = 0.383\nIteration 1200/1500: Train Loss = 1.732, Train Accuracy = 0.450\nIteration 1200/1500. Validation Loss = 1.788, Validation Accuracy = 0.383\nIteration 1300/1500: Train Loss = 1.759, Train Accuracy = 0.435\nIteration 1300/1500. Validation Loss = 1.780, Validation Accuracy = 0.390\nIteration 1400/1500: Train Loss = 1.802, Train Accuracy = 0.455\nIteration 1400/1500. Validation Loss = 1.779, Validation Accuracy = 0.386\nIteration 1499/1500: Train Loss = 1.728, Train Accuracy = 0.445\nIteration 1499/1500. Validation Loss = 1.759, Validation Accuracy = 0.406\nIteration 0/1500: Train Loss = 2.303, Train Accuracy = 0.275\nIteration 0/1500. Validation Loss = 2.296, Validation Accuracy = 0.153\nIteration 100/1500: Train Loss = 1.730, Train Accuracy = 0.485\nIteration 100/1500. Validation Loss = 1.736, Validation Accuracy = 0.381\nIteration 200/1500: Train Loss = 1.641, Train Accuracy = 0.530\nIteration 200/1500. 
Validation Loss = 1.621, Validation Accuracy = 0.433\nIteration 300/1500: Train Loss = 1.481, Train Accuracy = 0.585\nIteration 300/1500. Validation Loss = 1.630, Validation Accuracy = 0.445\nIteration 400/1500: Train Loss = 1.516, Train Accuracy = 0.630\nIteration 400/1500. Validation Loss = 1.596, Validation Accuracy = 0.431\nIteration 500/1500: Train Loss = 1.373, Train Accuracy = 0.655\nIteration 500/1500. Validation Loss = 1.548, Validation Accuracy = 0.469\nIteration 600/1500: Train Loss = 1.495, Train Accuracy = 0.605\nIteration 600/1500. Validation Loss = 1.557, Validation Accuracy = 0.476\nIteration 700/1500: Train Loss = 1.500, Train Accuracy = 0.620\nIteration 700/1500. Validation Loss = 1.517, Validation Accuracy = 0.483\nIteration 800/1500: Train Loss = 1.611, Train Accuracy = 0.625\nIteration 800/1500. Validation Loss = 1.640, Validation Accuracy = 0.470\nIteration 900/1500: Train Loss = 1.316, Train Accuracy = 0.705\nIteration 900/1500. Validation Loss = 1.500, Validation Accuracy = 0.473\nIteration 1000/1500: Train Loss = 1.377, Train Accuracy = 0.645\nIteration 1000/1500. Validation Loss = 1.508, Validation Accuracy = 0.475\nIteration 1100/1500: Train Loss = 1.258, Train Accuracy = 0.695\nIteration 1100/1500. Validation Loss = 1.482, Validation Accuracy = 0.487\nIteration 1200/1500: Train Loss = 1.271, Train Accuracy = 0.675\nIteration 1200/1500. Validation Loss = 1.534, Validation Accuracy = 0.452\nIteration 1300/1500: Train Loss = 1.213, Train Accuracy = 0.725\nIteration 1300/1500. Validation Loss = 1.428, Validation Accuracy = 0.523\nIteration 1400/1500: Train Loss = 1.473, Train Accuracy = 0.685\nIteration 1400/1500. Validation Loss = 1.549, Validation Accuracy = 0.489\nIteration 1499/1500: Train Loss = 1.447, Train Accuracy = 0.720\nIteration 1499/1500. Validation Loss = 1.685, Validation Accuracy = 0.461\n"
],
[
"# Visualize a learning curve for different activation functions\nfor layer_name in layers_name:\n plt.plot(range(0, num_iterations + 1, val_iteration), results[layer_name], '-o', label = layer_name)\nplt.xlabel('Epoch')\nplt.ylabel('Accuracy')\nplt.legend(loc='lower right')\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Neural Networks: Task 4 [10 points]\n\nAlthough typically a [Softmax](https://en.wikipedia.org/wiki/Softmax_function) layer is coupled with a [Cross Entropy loss](https://en.wikipedia.org/wiki/Cross_entropy#Cross-entropy_error_function_and_logistic_regression), this is not necessary and you can use a different loss function. Next, implement the network with the Softmax layer paired with a [Hinge loss](https://en.wikipedia.org/wiki/Hinge_loss). Beware, with the Softmax layer all the output dimensions depend on all the input dimensions, hence, you need to compute the Jacobian of derivatives $\\frac{\\partial o_i}{dx_j}$. \n\nImplement the ***forward*** and ***backward*** methods for \n***SoftMaxLayer*** in **layers.py** file and ***CrossEntropyLoss*** and ***HingeLoss*** in **losses.py** file.\n\nResults of using SoftMaxLoss and SoftMaxLayer + CrossEntropyLoss should be the same.\n",
"_____no_output_____"
]
],
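The Jacobian referred to above is do_i/dx_j = o_i * (delta_ij - o_j). Below is a small per-example sketch of a softmax forward/backward pass built directly from that Jacobian (illustrative only; a vectorized implementation would avoid the explicit loop):

```python
import numpy as np

def softmax_forward(x):
    shifted = x - x.max(axis=1, keepdims=True)   # subtract the row max for numerical stability
    e = np.exp(shifted)
    return e / e.sum(axis=1, keepdims=True)

def softmax_backward(dout, out):
    # Per example: J = diag(o) - o o^T (symmetric), and dx = J dout.
    dx = np.empty_like(dout)
    for n in range(out.shape[0]):
        o = out[n]
        jacobian = np.diag(o) - np.outer(o, o)
        dx[n] = jacobian.dot(dout[n])
    return dx
```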
[
[
"# DONT CHANGE THE SEED AND THE DEFAULT PARAMETERS. OTHERWISE WE WILL NOT BE ABLE TO CORRECT YOUR ASSIGNMENT!\n# Seed\nnp.random.seed(42)\n\n# Default parameters. \nnum_iterations = 1500\nval_iteration = 100\nbatch_size = 200\nlearning_rate = 2e-3\nweight_decay = 0\nweight_scale = 0.0001\n\n########################################################################################\n# TODO: #\n# Build the model with the structure: #\n# linear_layer-relu-linear_layer-softmax_layer-hinge_loss. #\n# Train this model using Solver class with SGD optimizer. In configuration of the #\n# optimizer you need to specify only the learning rate. Use the fit method to train. # \n########################################################################################\n\nprint '######## Sanity Check for the Softmax layer, otherwise this isn\\'t tested #######'\nlayer_params = {'input_size': X_train.shape[1], 'output_size':num_hidden_units, 'weight_decay': weight_decay }\nmodel = Network()\nmodel.add_layer(LinearLayer(layer_params))\nmodel.add_layer(ReLULayer(layer_params))\nlayer_params2 = {'input_size': num_hidden_units, 'output_size':10, 'weight_decay': weight_decay }\nmodel.add_layer(LinearLayer(layer_params2))\nmodel.add_layer(SoftMaxLayer())\nmodel.add_loss(CrossEntropyLoss)\noptimizer = SGD()\noptimizer_config = {'learning_rate': learning_rate}\nsolver = Solver(model)\nsolver.fit(X_train, Y_train, optimizer,\n x_val = X_val, y_val = Y_val,\n optimizer_config = optimizer_config,\n verbose = True, num_iterations = num_iterations)\n\nprint 'As can be seen this has the exact same validations and training scores as the SoftmaxLoss layer.\\n'\n\nprint '######## Now on to the Hinge Loss #######'\n\nlayer_params = {'input_size': X_train.shape[1], 'output_size':num_hidden_units, 'weight_decay': weight_decay }\nmodel = Network()\nmodel.add_layer(LinearLayer(layer_params))\nmodel.add_layer(ReLULayer(layer_params))\nlayer_params2 = {'input_size': num_hidden_units, 'output_size':10, 'weight_decay': weight_decay }\nmodel.add_layer(LinearLayer(layer_params2))\nmodel.add_layer(SoftMaxLayer())\nmodel.add_loss(HingeLoss)\noptimizer = SGD()\noptimizer_config = {'learning_rate': learning_rate}\nsolver = Solver(model)\nsolver.fit(X_train, Y_train, optimizer,\n x_val = X_val, y_val = Y_val,\n optimizer_config = optimizer_config,\n verbose = True, num_iterations = num_iterations)\n########################################################################################\n# END OF YOUR CODE #\n########################################################################################\n\n########################################################################################\n# TODO: #\n# Compute the accuracy on the test set. #\n########################################################################################\ntest_acc = solver.score(X_test, Y_test)\n########################################################################################\n# END OF YOUR CODE #\n########################################################################################\nprint(\"Test Accuracy = {0:.3f}\".format(test_acc))",
"######## Sanity Check for the Softmax layer, otherwise this isn't tested #######\nIteration 0/1500: Train Loss = 2.301, Train Accuracy = 0.320\nIteration 0/1500. Validation Loss = 2.291, Validation Accuracy = 0.193\nIteration 100/1500: Train Loss = 1.711, Train Accuracy = 0.450\nIteration 100/1500. Validation Loss = 1.735, Validation Accuracy = 0.393\nIteration 200/1500: Train Loss = 1.690, Train Accuracy = 0.510\nIteration 200/1500. Validation Loss = 1.644, Validation Accuracy = 0.426\nIteration 300/1500: Train Loss = 1.492, Train Accuracy = 0.610\nIteration 300/1500. Validation Loss = 1.572, Validation Accuracy = 0.446\nIteration 400/1500: Train Loss = 1.599, Train Accuracy = 0.540\nIteration 400/1500. Validation Loss = 1.609, Validation Accuracy = 0.425\nIteration 500/1500: Train Loss = 1.426, Train Accuracy = 0.635\nIteration 500/1500. Validation Loss = 1.538, Validation Accuracy = 0.456\nIteration 600/1500: Train Loss = 1.579, Train Accuracy = 0.545\nIteration 600/1500. Validation Loss = 1.605, Validation Accuracy = 0.434\nIteration 700/1500: Train Loss = 1.432, Train Accuracy = 0.670\nIteration 700/1500. Validation Loss = 1.489, Validation Accuracy = 0.463\nIteration 800/1500: Train Loss = 1.621, Train Accuracy = 0.630\nIteration 800/1500. Validation Loss = 1.586, Validation Accuracy = 0.448\nIteration 900/1500: Train Loss = 1.560, Train Accuracy = 0.645\nIteration 900/1500. Validation Loss = 1.495, Validation Accuracy = 0.467\nIteration 1000/1500: Train Loss = 1.358, Train Accuracy = 0.630\nIteration 1000/1500. Validation Loss = 1.572, Validation Accuracy = 0.476\nIteration 1100/1500: Train Loss = 1.510, Train Accuracy = 0.590\nIteration 1100/1500. Validation Loss = 1.615, Validation Accuracy = 0.446\nIteration 1200/1500: Train Loss = 1.550, Train Accuracy = 0.630\nIteration 1200/1500. Validation Loss = 1.452, Validation Accuracy = 0.485\nIteration 1300/1500: Train Loss = 1.516, Train Accuracy = 0.625\nIteration 1300/1500. Validation Loss = 1.528, Validation Accuracy = 0.458\nIteration 1400/1500: Train Loss = 1.291, Train Accuracy = 0.715\nIteration 1400/1500. Validation Loss = 1.452, Validation Accuracy = 0.493\nIteration 1499/1500: Train Loss = 1.722, Train Accuracy = 0.605\nIteration 1499/1500. Validation Loss = 1.681, Validation Accuracy = 0.452\nAs can be seen this has the exact same validations and training scores as the SoftmaxLoss layer.\n\n######## Now on to the Hinge Loss #######\nIteration 0/1500: Train Loss = 0.900, Train Accuracy = 0.215\nIteration 0/1500. Validation Loss = 0.889, Validation Accuracy = 0.160\nIteration 100/1500: Train Loss = 0.465, Train Accuracy = 0.395\nIteration 100/1500. Validation Loss = 0.438, Validation Accuracy = 0.398\nIteration 200/1500: Train Loss = 0.409, Train Accuracy = 0.525\nIteration 200/1500. Validation Loss = 0.416, Validation Accuracy = 0.418\nIteration 300/1500: Train Loss = 0.330, Train Accuracy = 0.570\nIteration 300/1500. Validation Loss = 0.381, Validation Accuracy = 0.442\nIteration 400/1500: Train Loss = 0.381, Train Accuracy = 0.555\nIteration 400/1500. Validation Loss = 0.400, Validation Accuracy = 0.453\nIteration 500/1500: Train Loss = 0.379, Train Accuracy = 0.565\nIteration 500/1500. Validation Loss = 0.360, Validation Accuracy = 0.476\nIteration 600/1500: Train Loss = 0.346, Train Accuracy = 0.595\nIteration 600/1500. Validation Loss = 0.361, Validation Accuracy = 0.465\nIteration 700/1500: Train Loss = 0.328, Train Accuracy = 0.635\nIteration 700/1500. 
Validation Loss = 0.355, Validation Accuracy = 0.451\nIteration 800/1500: Train Loss = 0.316, Train Accuracy = 0.605\nIteration 800/1500. Validation Loss = 0.359, Validation Accuracy = 0.487\nIteration 900/1500: Train Loss = 0.307, Train Accuracy = 0.590\nIteration 900/1500. Validation Loss = 0.341, Validation Accuracy = 0.489\nIteration 1000/1500: Train Loss = 0.315, Train Accuracy = 0.625\nIteration 1000/1500. Validation Loss = 0.352, Validation Accuracy = 0.461\nIteration 1100/1500: Train Loss = 0.283, Train Accuracy = 0.655\nIteration 1100/1500. Validation Loss = 0.383, Validation Accuracy = 0.470\nIteration 1200/1500: Train Loss = 0.282, Train Accuracy = 0.580\nIteration 1200/1500. Validation Loss = 0.354, Validation Accuracy = 0.467\nIteration 1300/1500: Train Loss = 0.334, Train Accuracy = 0.660\nIteration 1300/1500. Validation Loss = 0.341, Validation Accuracy = 0.493\nIteration 1400/1500: Train Loss = 0.299, Train Accuracy = 0.655\nIteration 1400/1500. Validation Loss = 0.339, Validation Accuracy = 0.507\nIteration 1499/1500: Train Loss = 0.313, Train Accuracy = 0.655\nIteration 1499/1500. Validation Loss = 0.342, Validation Accuracy = 0.480\nTest Accuracy = 0.478\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
4a264fc90b5c0670203178651143e4c74056456a
| 87,767 |
ipynb
|
Jupyter Notebook
|
fundamentals_2018.9/linear/logistic.ipynb
|
topseer/APL_Great
|
7ae3bd06e4d520d023bc9b992c88b6e3c758d551
|
[
"MIT"
] | null | null | null |
fundamentals_2018.9/linear/logistic.ipynb
|
topseer/APL_Great
|
7ae3bd06e4d520d023bc9b992c88b6e3c758d551
|
[
"MIT"
] | null | null | null |
fundamentals_2018.9/linear/logistic.ipynb
|
topseer/APL_Great
|
7ae3bd06e4d520d023bc9b992c88b6e3c758d551
|
[
"MIT"
] | null | null | null | 64.110299 | 26,376 | 0.731277 |
[
[
[
"import warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
],
[
"%matplotlib inline",
"_____no_output_____"
],
[
"import numpy as np\nimport scipy.stats as stats\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport random\nimport patsy\n\nsns.set(style=\"whitegrid\")",
"_____no_output_____"
]
],
[
[
"# Logistic Regression\n\nIn the last section, we looked at how we can use a linear model to fit a numerical target variable like child IQ (or price or height or weight). Regardless of the method you use, this is called a *regression problem*. Linear regression is one way of solving the regression problem.\n\nWhen your target variable is a categorical variable, this is a *classification problem*. For now we'll work only with the case where there are two outcomes or labels.\n\nWith our numerical $y$, we started with a model:\n\n$\\hat{y} = N(\\beta_0, \\sigma)$\n\nwith the innovation that we could replace the mean in the normal distribution with a linear function of features, $f(X)$.\n\nWe can do the same thing for classification. If we have a binary categorical variable $y$, it has a Bernoulli distribution with probability $p$ as the parameter. We can estimate $y$ as:\n\n$\\hat{y} = p$\n\nor the fraction of \"successes\" in the data. But what if we did the same thing as before? What if $p$ was a function of additional features? We would have:\n\n$\\hat{y} = \\beta_0 + \\beta_1 x$\n\nand we would have a model that represented how the probability of $y$ changes as $x$ changes. Although this sounds good, there is a problem. $\\beta_0 + \\beta_1 x$ is not bounded to the range (0, 1) which we require for probabilities. But it does turn out that there is a solution: we can use a transformation to keep the value in the range (0, 1), the logistic function.",
"_____no_output_____"
],
[
"## The Logistic Function\n\nThe logistic function is:\n\n$logistic(z) = logit^{-1}(z) = \\frac{e^z}{1 + e^z} = \\frac{1}{1 + e^{-z}}$\n\nAnd it looks like the following:",
"_____no_output_____"
]
],
[
[
"def logistic( z):\n return 1.0 / (1.0 + np.exp( -z))\n\nfigure = plt.figure(figsize=(5,4))\n\naxes = figure.add_subplot(1, 1, 1)\n\nxs = np.linspace( -10, 10, 100)\nys = logistic( xs)\n\naxes.plot( xs, ys)\naxes.set_ylim((-0.1, 1.1))\naxes.set_title(\"Logistic Function\")\nplt.show()\nplt.close()",
"_____no_output_____"
]
],
[
[
"No matter what the value of $x$, the value of $y$ is always between 0 and 1 which is exactly what we need for a probability.\n\nThere are a few additional things to note at this point. First, there is not a single definition of logistic regression. Gelman defines logistic regression as:\n\n$P(y=1) = logit^{-1}(\\beta_0 + \\beta_1 x)$\n\nin terms of the inverse logit function. Such a function returns the probability that $y = 1$. There are other possibilites (for example, a general maximum entropy model).\n\nSecond, interpreting the coefficients becomes a bit of a problem. Let's assume that we have no features and only have:\n\n$P(y=1) = logit^{-1}(\\beta_0)$\n\nwhat, exactly, is $\\beta_0$? It's not a probability because the probability interpretation only takes place once we have transformed the result using the inverse logit function. We take note of the following truism:\n\n$logit^{-1}( logit( p)) = p$\n\nThis is simply what it means to be an inverse function of some other function. But the interesting thing is that this means that:\n\n$\\beta_0 = logit( p)$\n\nand we do know what $logit(p)$ is, it's the *log odds*. $logit$ is defined as:\n\n$logit(p) = log(\\frac{p}{1-p})$\n\nif $p$ is the probability of an event, then $\\frac{p}{1-p}$ is the log odds of the event (the ratio of the probability for an event and the probability against the event).\n\nThe third difficulty is that the logistic regression is non-linear. For linear regression, the slopes of the curve (a line) are constant (the $\\beta$s) and while logistic regression has a linear predictor, the result is non-linear in the probability space. For example, a 0.4 point increase in log odds from 0.0 to 0.4 increases probability from 50% to 60% but a 0.4 point increase in log odds from 2.2 to 2.6 only increases probability from 90% to 93%.\n\nFinally, we lose a lot of our ability to visualize what's going on when we moved from linear regression to logistic regression.",
"_____no_output_____"
],
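[
"To make the logit / inverse logit relationship concrete, here is a minimal sketch that round-trips a probability through the logit and back, assuming only that `numpy` is available as `np` (it is imported at the top of this notebook):\n\n```python\ndef logit(p):\n    # log odds: log(p / (1 - p))\n    return np.log(p / (1.0 - p))\n\ndef inv_logit(z):\n    # inverse logit (logistic): maps log odds back to a probability\n    return 1.0 / (1.0 + np.exp(-z))\n\np = 0.75\nz = logit(p)           # log odds, about 1.0986\nprint(inv_logit(z))    # recovers 0.75\n```\n\nThe round trip is exactly the identity $logit^{-1}(logit(p)) = p$ used above.",
"_____no_output_____"
],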
[
"## What Loss does Logistic Regression minimize?\n\nSeveral times now, we've turned the question of variance on its head and asked, \"what estimate minimizes my error?\". For a single prediction, $\\beta_0$, of a numerical variable, we know that we want some value that minimizes Mean Squared Error (MSE):\n\n$MSE = \\frac{1}{n}\\sum(y - \\beta_0)^2$\n\nand that this means our prediction of $\\beta_0$ should be the mean, $\\bar{y}$. This is also true for *linear* regression where we minimize MSE:\n\n$MSE = \\frac{1}{n}\\sum(y - \\hat{y})^2$\n\nAlso note that we can use MSE to *evaluate* linear regression (it, or some variant, is really the only way we have to evaluate linear regression's predictions).\n\nWe do not use MSE for logistic regression, however, mostly because we want something with a better first derivative. Instead of MSE, we often use *cross entropy* (also called *log loss*, we'll stick with cross entropy):\n\n$L(\\beta) = -\\frac{1}{n} \\sum y log(\\hat{y}) + (1-y) log(1-\\hat{y})$\n\nThis has several implications:\n\n1. Just because \"regression\" is in the name, we do not use Mean Squared Error to derive or evaluation logistic regression.\n2. Although we do use cross entropy to derive logistic regression, we do *not* use it to evaluate logistic regression. We tend to use error rate and other metrics to evaluate it (which we will discuss in a few chapters). For now, we will just use error rate.",
"_____no_output_____"
],
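[
"To make the cross entropy formula concrete, here is a minimal sketch of the calculation for a handful of observations, assuming only `numpy` as `np` (imported at the top of this notebook). Note how a confident wrong prediction is punished much harder than a hesitant one.\n\n```python\ndef cross_entropy(y, p_hat):\n    # mean of -[y*log(p_hat) + (1 - y)*log(1 - p_hat)]\n    y = np.asarray(y, dtype=float)\n    p_hat = np.asarray(p_hat, dtype=float)\n    return -np.mean(y * np.log(p_hat) + (1.0 - y) * np.log(1.0 - p_hat))\n\ny_true = [1, 0, 1, 1]\np_hat = [0.9, 0.2, 0.6, 0.55]\nprint(cross_entropy(y_true, p_hat))   # lower is better\nprint(cross_entropy([1], [0.05]))     # one confident, wrong prediction is very costly\n```",
"_____no_output_____"
],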
[
"## Logistic Regression with Continuous Feature (Synthetic Data)\n\nAs before, we're going to start with synthetic data to get our proverbial feet wet. Even here, generating synthetic data isn't as easy as it is for linear regression. We basically need to estimate the $p$ for each value of $x$ and then simulate it. The algorithm is something like this:\n\n```\n1. generate x using the standard normal distribution or binomial if categorical.\n2. for each data point:\n3. z = beta_0 + beta_1 * x\n4. pr = 1/(1_exp(-z))\n5. y = 1 if rand() < pr else 0\n```\n\nOf note, the logistic function does not output $\\hat{y}$ as it does with linear regression. It outputs the estimated probability of $y=1$. We can take that probability and compare it to a threshold and assign $y = 0$ or $y = 1$. The $y$ above is the *real* y for the synthetic data.",
"_____no_output_____"
]
],
[
[
"np.random.seed(83474722)",
"_____no_output_____"
],
[
"data = {}\ndata[\"x\"] = stats.norm.rvs(0, 1, 100)\ndata[\"z\"] = 0.5 + data[\"x\"] * 0.5\ndata[\"pr\"] = list(map(lambda z: logistic(z), data[\"z\"]))\ndata[\"y\"] = list(map(lambda pr: 1 if np.random.uniform() < pr else 0, data[\"pr\"]))\ndata = pd.DataFrame(data)",
"_____no_output_____"
]
],
[
[
"It's worth taking a bit more in-depth look at this data even though it's synthetic (or *because* it's synthetic). We generated $x$ from the standard Normal distribution: $x \\sim N(0, 1)$. $z$, an intermediate step, is the actual linear model: $z = \\beta_0 + \\beta_1 x$ or $z = 0.5 + 0.5 x$.\n\nAs the earlier discussion mentions, we pass $z$ through the logistic function to bound it to the interval (0, 1). The result, $pr$, represents a probability. This is a conditional probability: $P(y=1|x)$. In order to find out the \"true\" $y$ for each $x$, we simulate that probability. \n\nAs we can see in the table below, we have $pr=0.663$ and $y=1$ (obs 1) as well as $pr=0.857$ and $y=0$. This is logistic regression's form of \"error\", \"noise\", or the \"known unknowns and unknown unknowns\".",
"_____no_output_____"
]
],
[
[
"data.head()",
"_____no_output_____"
]
],
[
[
"We could use a constant model as we have before:",
"_____no_output_____"
]
],
[
[
"np.mean(data.y)",
"_____no_output_____"
]
],
[
[
"No matter what $x$ is, we say there's a 57% probability that the value of $y$ is 1. Since 57% is over 50%, we could just guess that for any $x$, $\\hat{y} = 1$. We would be right 57 of the time and wrong 43% of the time on average. 43% is the model's *error rate*.\n\nCan we do better?",
"_____no_output_____"
]
],
[
[
"import sys\nsys.path.append('../resources')\nimport fundamentals.models as models",
"_____no_output_____"
],
[
"result = models.logistic_regression(\"y ~ x\", data = data)",
"_____no_output_____"
],
[
"models.simple_describe_lgr(result)",
"_____no_output_____"
]
],
[
[
"We do a *little* better. The error rate here is 42% instead of (1-0.57) or 43% but that's not very encouraging. Logistic Regression doesn't actually *have* an $R^2$ metric. What we have shown here is Efron's Pseudo $R^2$. It basically measures the same thing as interpretation #1 of the \"real\" $R^2$: it's the percent of the variability in $y$ explained by the model. Not very much.\n\nAdditionally, our estimates of the coefficients, $\\beta_0$ and $\\beta_1$, are pretty bad compared to the ground truth in the synthetic data. In the linear regression case we were able to recover them fairly easily. Why is the synthetic data so bad?\n\nNote that our base probability is not really much different than a coin toss (57% versus 50%). Assume a given $x$ leads to a probability of 65%. We need a lot more examples of $x$ to calculate that 65%...if we only have a few, we may never actually observe the case where $y=1$.\n\nWhat happens with the current data generator if we just generate more data, n=10,000 instead of n=100?",
"_____no_output_____"
]
],
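[
[
"The `simple_describe_lgr` helper above comes from the notebook's own `models` module, so its internals aren't shown, but Efron's pseudo $R^2$ and the error rate are easy to compute by hand. A minimal sketch (the function names here are just for illustration), assuming you have the observed 0/1 outcomes and the model's predicted probabilities as arrays:\n\n```python\ndef efron_pseudo_r2(y, p_hat):\n    # 1 - (squared error of the predicted probabilities) / (squared error of the mean-only model)\n    y = np.asarray(y, dtype=float)\n    p_hat = np.asarray(p_hat, dtype=float)\n    return 1.0 - np.sum((y - p_hat) ** 2) / np.sum((y - np.mean(y)) ** 2)\n\ndef error_rate(y, p_hat, threshold=0.5):\n    # fraction of observations where the thresholded prediction disagrees with y\n    y_hat = (np.asarray(p_hat) > threshold).astype(int)\n    return np.mean(y_hat != np.asarray(y))\n```",
"_____no_output_____"
]
],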
[
[
"data = {}\ndata[\"x\"] = stats.norm.rvs(0, 1, 10000)\ndata[\"z\"] = 0.5 + data[\"x\"] * 0.5\ndata[\"pr\"] = list(map(lambda z: logistic(z), data[\"z\"]))\ndata[\"y\"] = list(map(lambda pr: 1 if np.random.uniform() < pr else 0, data[\"pr\"]))\ndata = pd.DataFrame(data)",
"_____no_output_____"
]
],
[
[
"We can re-run our logistic regression on this data:",
"_____no_output_____"
]
],
[
[
"result1 = models.logistic_regression(\"y ~ x\", data = data)\nmodels.simple_describe_lgr(result1)",
"_____no_output_____"
]
],
[
[
"Our coefficient estimates are almost exactly the same as the ground truth. Still our error rate is 36.3% instead of 43.0%. This is probably as good as we can get. What happens if bump up the base probability a bit?",
"_____no_output_____"
]
],
[
[
"data = {}\ndata[\"x\"] = stats.norm.rvs(0, 1, 10000)\ndata[\"z\"] = 0.75 + data[\"x\"] * 10\ndata[\"pr\"] = list(map(lambda z: logistic(z), data[\"z\"]))\ndata[\"y\"] = list(map(lambda pr: 1 if np.random.uniform() < pr else 0, data[\"pr\"]))\ndata = pd.DataFrame(data)",
"_____no_output_____"
]
],
[
[
"Let's look at this data. The probabilities of each observation are now either very near 0 or very near 1:",
"_____no_output_____"
]
],
[
[
"data.head()",
"_____no_output_____"
]
],
[
[
"The constant model shows a probability of 52.2% for $y=1$. This means it has an error rate of 47.8%!",
"_____no_output_____"
]
],
[
[
"np.mean(data.y)",
"_____no_output_____"
]
],
[
[
"What about our logistic regression model?",
"_____no_output_____"
]
],
[
[
"result2 = models.logistic_regression(\"y ~ x\", data = data)\nmodels.simple_describe_lgr(result2)",
"_____no_output_____"
]
],
[
[
"The coefficients are almost exact *and* the error rate is only 5.1%. The (pseudo) $R^2$ shows that our model explains 85% of the variation in $y$.\n\nThis set of experiments shows us a number of things. First, generating synthetic data is very useful for learning how your algorithms work. In fact, let's do one more experiment. Let's reduce the number of observations back to 100:",
"_____no_output_____"
]
],
[
[
"data = {}\ndata[\"x\"] = stats.norm.rvs(0, 1, 10000)\ndata[\"z\"] = 0.75 + data[\"x\"] * 10\ndata[\"pr\"] = list(map(lambda z: logistic(z), data[\"z\"]))\ndata[\"y\"] = list(map(lambda pr: 1 if np.random.uniform() < pr else 0, data[\"pr\"]))\ndata = pd.DataFrame(data)",
"_____no_output_____"
],
[
"result3 = models.logistic_regression(\"y ~ x\", data = data)\nmodels.simple_describe_lgr(result3)",
"_____no_output_____"
]
],
[
[
"Here our error rate is still quite a bit lower but the estimates of our coefficients aren't as good. We need both a lot of data and clear underlying pattern *and* this pattern isn't as obvious as it is with linear regression.",
"_____no_output_____"
],
[
"## Logistic Regression with Real Data\n\nWhen it comes to either numerical features or a binary categorical features, there is no difference between linear regression and logistic regression. We can have numerical features which will affect the slope and intercept of the line. We can have binary categorical features that will affect the intercept of the line. We can have interaction terms that will affect the slope of the line.\n\nThe main difference with linear regression is in the interpretation of the coefficients.\n\nFor logistic regression, the coefficients are log-odds and while some people are quite comfortable thinking in terms of log-odds, most are not. How do we convert them into something we can understand?\n\nLet's begin the discussion by looking at real data. This data is from a study of villager behavior in Bangladesh. Wells were examined for natural arsenic contamination and villagers using wells with higher arsenic readings were encouraged to use other wells or dig new ones. The variables are:\n\n* **switch** - yes (1) or no (0), did the respondent switch to a new well.\n* **dist** - distance to the nearest safe well in meters.\n* **arsenic** - arsenic level of the respondent’s well.\n* **assoc** - does the respondent or a family member belong to a community association.\n* **educ** - the educational attainment of the respondent in years.\n\nLet's start out with a logistic regression model for $\\hat{switch}$:\n\n$P(\\hat{switch}=1) = logistic^{-1}(\\beta_0 + \\beta_1 dist)$\n\nalthough we really have something like:\n\n$z = \\beta_0 + \\beta_1 dist$\n\n$\\hat{pr} = \\frac{1}{1+e^{-z}}$\n\n$\\hat{y}$ = 1 if $\\hat{pr}$ > 0.5 else 0\n\nwhich is a bit more complicated to write each time.",
"_____no_output_____"
]
],
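[
[
"The three-step version of the model maps directly onto code. A minimal sketch, reusing the `logistic` function defined earlier in this notebook and plugging in made-up placeholder coefficients (the real ones come from the fitted model below):\n\n```python\nbeta_0, beta_1 = 0.6, -0.006    # placeholder values, not the fitted coefficients\ndist = 50.0                     # meters to the nearest safe well\n\nz = beta_0 + beta_1 * dist      # the linear predictor (log odds)\npr = logistic(z)                # estimated P(switch = 1)\ny_hat = 1 if pr > 0.5 else 0    # thresholded prediction\nprint(z, pr, y_hat)\n```",
"_____no_output_____"
]
],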
[
[
"wells = pd.read_csv( \"../resources/data/arsenic.wells.tsv\", sep=\" \")",
"_____no_output_____"
]
],
[
[
"Let's check the representations:",
"_____no_output_____"
]
],
[
[
"wells.info()",
"<class 'pandas.core.frame.DataFrame'>\nInt64Index: 3020 entries, 1 to 3020\nData columns (total 5 columns):\nswitch 3020 non-null int64\narsenic 3020 non-null float64\ndist 3020 non-null float64\nassoc 3020 non-null int64\neduc 3020 non-null int64\ndtypes: float64(2), int64(3)\nmemory usage: 141.6 KB\n"
]
],
[
[
"There is nothing particularly startling here. Let's see a few values:",
"_____no_output_____"
]
],
[
[
"wells.head()",
"_____no_output_____"
]
],
[
[
"The base model (and error rate) are:",
"_____no_output_____"
]
],
[
[
"mean = np.mean(wells.switch)\nprint(\"P(switch=1) = {0:.2f} ({1:.2f})\".format(mean, 1-mean))",
"P(switch=1) = 0.58 (0.42)\n"
]
],
[
[
"The base model (often called the \"null\" model) is that $P(switch=1) = 0.58$ which leads to an error rate of 42%. Let's see what logistic regression can get us:",
"_____no_output_____"
]
],
[
[
"result = models.logistic_regression( \"switch ~ dist\", data = wells)\nmodels.simple_describe_lgr(result)",
"_____no_output_____"
]
],
[
[
"So again, this is the real world and real data and sometimes you only get improvements such as these. Despite the showing on this data, logistic regression is a very powerful modeling technique.\n\nWe can see that the error rate and (pseudo) $R^2$ of this model aren't great but we're much more interested in interpreting the model coefficients. What do they mean?",
"_____no_output_____"
],
[
"### Intercept\n\nThe intercept in this case has a legitimate $dist = 0$ interpretation. If the alternative well is 0 meters away, what is the probability of switching?\n\nWe can use our previous identity and use the inverse logit (logistic) function:",
"_____no_output_____"
]
],
[
[
"logistic( 0.6038)",
"_____no_output_____"
]
],
[
[
"so the probability of switching is 64.7% if the safe well is zero meters away (that doesn't really bode well). If we were to run the logistic regression without any regressors, we could think of $\\beta_0$ as the *prior* log odds. In essence, logistic regression is a function that calculates conditional probabilities based on the features instead of using a table. \n\nOnce you add features, $\\beta_0$ is no longer a pure prior because it has been optimized in the presence of the other features and therefore is still a conditional probability...just with all the features at 0.",
"_____no_output_____"
],
[
"### Coefficients\n\nNext, we can look at each coefficient (or in this case, the only coefficient). The model basically says there is a 0.0062 decrease in *log odds* (remember that the coefficients are not in probability space until transformed) for every meter to the nearest safe well. Now we have a problem. While $\\beta_i$ as *log odds* is linear in $x_i$ (that's *all* the term \"linear model\" means), arbitrary transformations of $\\beta_i$, $t(\\beta_i)$ is not necessarily linear in $x_i$ and that is the case here. How do we get around this problem? \n\n\nThere are several options:\n\n**No. 1 - Evaluate at the mean of the variable with a unit change.**\n\nThe mean value of dist(ance) is 48.33. If we evaluate our model using that value, we get:\n\n$P(switch = 1)$ = $logit^{-1}(0.6038 - 0.0062 \\times 48.33)$ = $logit^{-1}(0.304154)$ = 0.5755\n\nAnd if we do the same thing again after adding 1 meter to the average distance, we get:\n\n$P(switch = 1)$ = $logit^{-1}(0.6038 - 0.0062 \\times 49.33)$ = $logit^{-1}(0.297954)$ = 0.5739\n\nSo...that's a decrease of about 0.0016 percentage points (or 0.27%) which isn't huge but then we only increased the difference by a little over 3 feet!\n\nBut, you need to be careful with mean scaled data (which we'll talk about in the next chapter). A unit change is equal to one entire standard deviation which may be an extremely large value...or an extremely small one.",
"_____no_output_____"
]
],
[
[
"a = logistic( 0.6038 - 0.0062 * 48.33)\nb = logistic( 0.6038 - 0.0062 * 49.33)\nprint(a, b)\nprint(a - b)",
"0.5754576812550608 0.5739422790895422\n0.0015154021655185979\n"
]
],
[
[
"If you have more than one $x_i$, you should set all of them to their mean values and then do a unit change for each variable separately.\n\nNote that this is a good reason to mean *center* data in a logistic regression but not to mean *scale* it. The reason for not mean scaling is that:\n\n1. The coefficients do have clear interpretations and relative magnitudes (changes in probability after transformation).\n2. Mean *scaling* makes 1 unit equal to one standard deviation of the standard normal distribution. This might be a very, very large value in the variables actual domain which messes up the approximation.\n\n**No. 2 - Calculate the derivative of the logistic and evaluate it at the mean of the variable.**\n\n$\\frac{\\partial}{\\partial x_i}logit^{-1}(\\beta X) = \\frac{\\beta_i e^{\\beta X}}{(1 + e^{\\beta X})^2}$\n\nbut $\\beta X$ (z) is just the log odds at the mean values of X (if X is indeed $[1.0, \\bar{x}_1, \\bar{x}_2, ... ,\\bar{x}_n]$) so if we plug our value for the model evaluated at the mean into the derivative, we get:\n\n$\\frac{0.0062 e^{0.0062 \\times 0.3042}}{(1 + e^{0.0062 \\times 0.3042})^2} = 0.0016$",
"_____no_output_____"
]
],
[
[
"def logistic_slope_at( beta, z):\n return (beta * np.exp( beta * z)) / (1.0 + np.exp( beta * z))**2\n\nprint(logistic_slope_at( 0.0062, 0.3042))",
"0.0015499986216064004\n"
]
],
[
[
"**No. 3 - Divide by 4 Rule**\n\n$\\beta_1 / 4 = 0.0062 / 4 = 0.00155$\n\nIt's just a rule of thumb but easy. It has the same general interpretation though...the change in probability from a unit change at the mean of the regressor. Again, this approach can get very funky with mean scaled data because a unit change is a full standard deviation which can actually be enormous or infinitesimal. Why does that work?\n\nThe slope of the logistic curve is maximized where the first derivative is zero or $\\beta_0 + \\beta_1 + x = 0$. We can solve for this as:\n\n\n$\\frac{beta_1e^0}{(1+e^0)^2}$\n\n$\\frac{\\beta_1 \\times 1}{(1+1)^2}$\n\n$\\frac{\\beta_1}{4}$\n\nThis interpretation holds best in the context of mean values for the corresponding feature, $x$.\n\n**No. 4 - Average Predictive Difference**\n\nWe can also average the probabilities over all of our data points for a specific change in each of our predictors. For a model with only one predictor, this amounts to the same thing as No. 1 so we will save this for later.",
"_____no_output_____"
],
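[
"A quick sanity check of the \"Divide by 4\" rule, reusing the `logistic_slope_at` helper defined above: when the linear predictor is 0 (that is, $p = 0.5$) the exact slope and $\\beta_1 / 4$ agree, which is why the rule works as an upper bound on the change in probability.\n\n```python\nbeta_1 = 0.0062\nprint(beta_1 / 4)                       # divide-by-4 approximation: 0.00155\nprint(logistic_slope_at(beta_1, 0.0))   # exact slope when the linear predictor is 0\n```",
"_____no_output_____"
],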
[
"## Useful Transformations\n\nWe'll talk more about transformations in the next chapter. The main point of this chapter is to establish what linear and logistic regression are and how to interpret them. However, there is an especially useful transformation for logistic regression and that involves transforming the units.\n\nThis doesn't affect the quality of the model at all. It does, however, change how you interpret it. For example, the current units of $dist$ are meters. The probability of change per *meter* is pretty small. But what about the probability of changing per *ten meters*? That's nearly 33 feet.",
"_____no_output_____"
]
],
[
[
"wells[\"dist10\"] = wells[\"dist\"]/10",
"_____no_output_____"
],
[
"result = models.logistic_regression( \"switch ~ dist10\", data = wells)\nmodels.simple_describe_lgr(result)",
"_____no_output_____"
]
],
[
[
"We can now reinterpret the model. The probability of switching decreases by 1.5% (-00.619/4 = -0.015475 or \"Divide by 4\" rule) at the average when the distance to the safe well increases by 10 meters.\n\nIt's interesting to note that Gelman used 100 to scale his data. The problem with this, in an interpetability sense, is that if you look at distance, the median distance to a safe well is 36.7 meters. The 3rd quartile is 64 meters. A 100 meter difference just doesn't figure prominently into the data even though the maximum distance was 339.5 meters. 10 meters seems like a reasonable unit in this case.\n\nAgain, this doesn't change how good the model is. But it does it easier to talk about than \"15 100ths of a percent per meter\".",
"_____no_output_____"
],
[
"## Plotting Logistic Regression\n\nIt's not quite as easy to plot a logistic regression as it is linear regression. If we just plot the data, we have:",
"_____no_output_____"
]
],
[
[
"figure = plt.figure(figsize=(10,6))\n\naxes = figure.add_subplot(1, 1, 1)\n\nxs = wells[ \"dist10\"]\nys = wells[ \"switch\"]\naxes.scatter( xs, ys, color=\"dimgray\", alpha=0.5)\nbetas = result[ \"coefficients\"]\n\nzs = np.linspace( xs.min(), xs.max(), 100)\nps = [logistic( betas[ 0] + betas[ 1] * x) for x in zs]\n\naxes.plot(zs, ps, '-', color=\"firebrick\", alpha=0.75)\naxes.set_title( result[ \"formula\"])\naxes.set_xlabel(\"dist (10s of meters)\")\naxes.set_ylabel(\"switch\")\n\nplt.show()\nplt.close()",
"_____no_output_____"
]
],
[
[
"It's just not very interesting or informative on its own. Additionally, you have the problem that logistic regression is nonlinear. We'll get into plotting multivariate regression (linear and logistic) in the next chapter. The solution for logistic regression is usually to plot the *decision boundary* in feature space and not to plot the target at all.",
"_____no_output_____"
],
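[
"For a model with a single feature the decision boundary is just the value of the feature at which the predicted probability crosses 0.5, which happens where $\\beta_0 + \\beta_1 x = 0$. A minimal sketch, reusing the `result` object from the plot above:\n\n```python\n# solve beta_0 + beta_1 * dist10 = 0 for the 50/50 point\nbetas = result[\"coefficients\"]\nboundary_dist10 = -betas[0] / betas[1]\nprint(boundary_dist10 * 10, 'meters')   # convert back from 10s of meters to meters\n```",
"_____no_output_____"
],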
[
"## Bootstrap Inference\n\nAs with linear regression, we can also apply bootstrap inference to logistic regression and with the same results. Here we only show the function in operation. We also make the innovation that we include \"divide by 4\" interpretations of our coefficients:",
"_____no_output_____"
]
],
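[
[
"The `bootstrap_logistic_regression` helper below comes from the notebook's own `models` module, so its internals aren't shown here. Conceptually, it resamples the rows of the data with replacement, refits the logistic regression on each resample, and summarizes the spread of the coefficients. A rough sketch of that idea, using `statsmodels` purely as a stand-in (it is not imported elsewhere in this notebook):\n\n```python\nimport statsmodels.formula.api as smf\n\nboot_coefs = []\nfor _ in range(200):                                  # a modest number of replicates for speed\n    resample = wells.sample(frac=1.0, replace=True)   # draw rows with replacement\n    fit = smf.logit('switch ~ dist10', data=resample).fit(disp=0)\n    boot_coefs.append(fit.params)\n\nboot_coefs = pd.DataFrame(boot_coefs)\nprint(boot_coefs.describe())                          # spread of the intercept and dist10 coefficient\n```",
"_____no_output_____"
]
],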
[
[
"result = models.bootstrap_logistic_regression(\"switch ~ dist10\", wells)\nmodels.describe_bootstrap_lgr(result, 3)",
"_____no_output_____"
]
],
[
[
"The development so far has been pedagogical. You should always do bootstrap inference for both linear and logistic regression and going forward, we will.",
"_____no_output_____"
],
[
"## More than Two Outcomes\n\nBinary classification shows up quite a bit: does it fail or not, does he have a heart attack or not, does she purchase it or not, does he click on it or not. But in many instances, the event is not actually binary. Death is inevitable. It's not \"does he have a heart attack or not\" but \"does he die from a heart attack, cancer, flu, traffic accident, ...\". It's not \"does she purchase it or not\" but \"does she buy the shoes, the shop-vac, the weedwacker, ...\". It's not \"does he click on it or not\" but \"does he click on this, does he click on that, does he go back, ...\".\n\nThis gives us some clue as to how to deal with multiclass classification problems.\n\nFirst, there are classification algorithms that *can* deal with multiclass problems \"directly\". Decision trees are a good example.\n\nSecond, every algorithm that can handle only binary classification can also be made to handle multiclass classification. If the response variable has $n$ possible outcomes then you can train $n$ binary models where each is trained on \"the class\" and \"not the class\". For example, if there were three classes: buy, sell, hold. Then you first convert your data (temporarily) into \"buy/not buy\" and train a model, then convert to \"sell/not sell\" and train a model, then convert to \"hold/not hold\" and train a model:\n\n$z_{buy} = \\beta^{buy}_0 + \\beta^{buy}_1 x_1$\n\nfrom data where the classes are now \"buy/don't buy\".\n\n$z_{sell} = \\beta^{sell}_0 + \\beta^{sell}_1 x_1$\n\nfrom data where the classes are now \"sell/don't sell\"\n\n$z_{hold} = \\beta^{hold}_0 + \\beta^{hold}_1 x_1$\n\nfrom data where the classes are now \"hold/don't hold\"\n\nThis works as long as each model has the same $x_1$ (and this generalizes to more than one feature: $x_1$, $x_2$,...$x_n$). Actually, multinomial logistic regression does this under the covers for you...but things like Support Vector Machines do not and you do have to do it manually.\n\nYou now have a metamodel for multiclass classification. When you need to make a prediction, you use all three models and pick the class that has the highest probability. Strangely, this is essentially what neural networks must do as well.\n\nNote that there's a different although related problem of *multilabel* classification. In this case, each observation might be assigned more than one outcome For example, a story might be fiction *and* sports while another one might non-fiction and sports. A discussion of this problem is beyond the scope of these notes.",
"_____no_output_____"
]
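,
[
"Here is a minimal sketch of that one-versus-rest scheme on a tiny made-up dataset. It uses scikit-learn's `LogisticRegression` purely as a convenient stand-in for any binary classifier (scikit-learn is not used elsewhere in these notes, and the data are random, so the probabilities themselves are uninteresting; the point is the structure of the metamodel).\n\n```python\nimport numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression\n\n# a tiny made-up dataset with one feature and three classes\nnp.random.seed(42)\nX = np.random.normal(size=(300, 1))\ny = np.random.choice(['buy', 'sell', 'hold'], size=300)\n\n# one binary model per class: the class versus not the class\nbinary_models = {}\nfor label in ['buy', 'sell', 'hold']:\n    target = (y == label).astype(int)\n    binary_models[label] = LogisticRegression().fit(X, target)\n\n# predict by picking the class whose binary model gives the highest probability\nprobs = pd.DataFrame({label: m.predict_proba(X)[:, 1] for label, m in binary_models.items()})\nprint(probs.idxmax(axis=1).head())\n```",
"_____no_output_____"
]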
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a264fedc2dae416bccb9e21652c81344d86761f
| 1,979 |
ipynb
|
Jupyter Notebook
|
AtCoder/20210522/D.ipynb
|
asha-ndf/asha-public-code
|
f53929513f9827a13e9ba9efafc85d4ada9e767a
|
[
"MIT"
] | null | null | null |
AtCoder/20210522/D.ipynb
|
asha-ndf/asha-public-code
|
f53929513f9827a13e9ba9efafc85d4ada9e767a
|
[
"MIT"
] | null | null | null |
AtCoder/20210522/D.ipynb
|
asha-ndf/asha-public-code
|
f53929513f9827a13e9ba9efafc85d4ada9e767a
|
[
"MIT"
] | null | null | null | 1,979 | 1,979 | 0.557352 |
[
[
[
"def cmb(n, r):\n if n - r < r: r = n - r\n if r == 0: return 1\n if r == 1: return n\n\n numerator = [n - r + k + 1 for k in range(r)]\n denominator = [k + 1 for k in range(r)]\n\n for p in range(2,r+1):\n pivot = denominator[p - 1]\n if pivot > 1:\n offset = (n - r) % p\n for k in range(p-1,r,p):\n numerator[k - offset] /= pivot\n denominator[k] /= pivot\n\n result = 1\n for k in range(r):\n if numerator[k] > 1:\n result *= int(numerator[k])\n\n return result\nA,B,K = list(map(int, input().split(' ')))\nif A*B=0:\n print('a'*A+'b'*B)\n exit()\nans = ''\nfor i in range(A+B):\n tmp = cmb(A+B,A) *A/(A+B)\n if tmp >=K:\n A = A-1\n ans = ans + 'A'\n else:\n B = B-1\n K = K - tmp\n ans = ans + 'A'\nprint(ans)",
"_____no_output_____"
],
[
"\n\n\n\n\n\n",
"_____no_output_____"
],
[
"print(10**8 > 2**30)",
"False\n"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code"
]
] |
4a265695944653341a807fd55e7f6d8b1911e39b
| 96,166 |
ipynb
|
Jupyter Notebook
|
day-two-pandas/More Pandas.ipynb
|
RCEatPitt/data-basics-summer-2018
|
b6ca423550df7a911f45ca04a057ae0e6d02bf30
|
[
"MIT"
] | null | null | null |
day-two-pandas/More Pandas.ipynb
|
RCEatPitt/data-basics-summer-2018
|
b6ca423550df7a911f45ca04a057ae0e6d02bf30
|
[
"MIT"
] | null | null | null |
day-two-pandas/More Pandas.ipynb
|
RCEatPitt/data-basics-summer-2018
|
b6ca423550df7a911f45ca04a057ae0e6d02bf30
|
[
"MIT"
] | null | null | null | 23.375304 | 663 | 0.548011 |
[
[
[
"# More Pandas",
"_____no_output_____"
]
],
[
[
"# Load the necessary libraries\nimport pandas as pd\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Vectorized String Operations\n\n* There is a Pandas way of doing this that is much more terse and compact\n* Pandas has a set of String operations that do much painful work for you\n* Especially handling bad data!",
"_____no_output_____"
]
],
[
[
"data = ['peter', 'Paul', 'MARY', 'gUIDO']\n\nfor s in data:\n print(s.capitalize())",
"_____no_output_____"
]
],
[
[
"* But like above, this breaks very easily with missing values",
"_____no_output_____"
]
],
[
[
"data = ['peter', 'Paul', None, 'MARY', 'gUIDO']\n\nfor s in data:\n print(s.capitalize())",
"_____no_output_____"
]
],
[
[
"* The Pandas library has *vectorized string operations* that handle missing data",
"_____no_output_____"
]
],
[
[
"names = pd.Series(data)\nnames",
"_____no_output_____"
],
[
"names.str.capitalize()\n",
"_____no_output_____"
]
],
[
[
"* Look ma! No errors!\n* Pandas includes a a bunch of methods for doing things to strings.\n\n| | | | |\n|-------------|------------------|------------------|------------------|\n|``len()`` | ``lower()`` | ``translate()`` | ``islower()`` | \n|``ljust()`` | ``upper()`` | ``startswith()`` | ``isupper()`` | \n|``rjust()`` | ``find()`` | ``endswith()`` | ``isnumeric()`` | \n|``center()`` | ``rfind()`` | ``isalnum()`` | ``isdecimal()`` | \n|``zfill()`` | ``index()`` | ``isalpha()`` | ``split()`` | \n|``strip()`` | ``rindex()`` | ``isdigit()`` | ``rsplit()`` | \n|``rstrip()`` | ``capitalize()`` | ``isspace()`` | ``partition()`` | \n|``lstrip()`` | ``swapcase()`` | ``istitle()`` | ``rpartition()`` |\n\n#### Exercise\n\n* In the cells below, try three of the string operations listed above on the Pandas Series `monte`\n* Remember, you can hit tab to autocomplete and shift-tab to see documentation",
"_____no_output_____"
]
],
[
[
"monte = pd.Series(['Graham Chapman', 'John Cleese', 'Terry Gilliam',\n 'Eric Idle', 'Terry Jones', 'Michael Palin'])\nmonte",
"_____no_output_____"
],
[
"# First\n",
"_____no_output_____"
],
[
"# Second\n",
"_____no_output_____"
],
[
"# Third\n",
"_____no_output_____"
]
],
[
[
"## Example: Recipe Database\n\n* Let's walk through the recipe database example from the Python Data Science Handbook\n* There are a few concepts and commands I haven't yet covered, but I'll explain them as I go along\n* Download the recipe file from [this link](https://s3.amazonaws.com/openrecipes/20170107-061401-recipeitems.json.gz) or run the cell below if you are on JupyterHub",
"_____no_output_____"
]
],
[
[
"recipes = pd.read_json(\"https://s3.amazonaws.com/openrecipes/20170107-061401-recipeitems.json.gz\", \n compression='gzip',\n lines=True)",
"_____no_output_____"
]
],
[
[
"We have downloaded the data and loaded it into a dataframe directly from the web.",
"_____no_output_____"
]
],
[
[
"recipes.head()",
"_____no_output_____"
],
[
"recipes.shape",
"_____no_output_____"
]
],
[
[
"We see there are nearly 200,000 recipes, and 17 columns.\nLet's take a look at one row to see what we have:",
"_____no_output_____"
]
],
[
[
"# display the first item in the DataFrame\nrecipes.iloc[0]",
"_____no_output_____"
],
[
"# Show the first five items in the DataFrame\nrecipes.head()",
"_____no_output_____"
]
],
[
[
"There is a lot of information there, but much of it is in a very messy form, as is typical of data scraped from the Web.\nIn particular, the ingredient list is in string format; we're going to have to carefully extract the information we're interested in.\nLet's start by taking a closer look at the ingredients:",
"_____no_output_____"
]
],
[
[
"# Summarize the length of the ingredients string\nrecipes['ingredients'].str.len().describe()",
"_____no_output_____"
],
[
"# which row has the longest ingredients string\nrecipes['ingredients'].str.len().idxmax()",
"_____no_output_____"
],
[
"# use iloc to fetch that specific row from the dataframe\nrecipes.iloc[135598]",
"_____no_output_____"
],
[
"# look at the ingredients string\nrecipes.iloc[135598]['ingredients']",
"_____no_output_____"
]
],
[
[
"* WOW! That is a lot of ingredients! That might need to be cleaned by hand instead of a machine\n* What other questions can we ask of the recipe data?",
"_____no_output_____"
]
],
[
[
"# How many breakfasts?\nrecipes.description.str.contains('[Bb]reakfast').sum()",
"_____no_output_____"
],
[
"# How many have cinnamon as an ingredient?\nrecipes.ingredients.str.contains('[Cc]innamon').sum()",
"_____no_output_____"
],
[
"# How many misspell cinnamon as cinamon?\nrecipes.ingredients.str.contains('[Cc]inamon').sum()",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"## Merging Datasets\n\nOne of the tasks you will need to do for your final project, and in the wide world of data munging, is combining disparate datasets together into a single set. \n\n### Merging the same Data\n\nSometimes you have the same data, but it has been broken up over multiple files (over time or some other distinction). Ultimately what you want is a single dataframe that contains all the data from separate files (or dataframes). Let's load some data into three separate dataframes and then smoosh them together.",
"_____no_output_____"
]
],
[
[
"# Load the data for April, May, and June\napril_url = \"https://data.wprdc.org/datastore/dump/043af2a6-b58f-4a2e-ba5f-7ef868d3296b\"\nmay_url = \"https://data.wprdc.org/datastore/dump/487813ec-d7bc-4ff4-aa74-0334eb909142\"\njune_url = \"https://data.wprdc.org/datastore/dump/d7fd722c-9980-4f7a-a7b1-d1a55a365697\"\n\napril_acj_data = pd.read_csv(april_url)\nmay_acj_data = pd.read_csv(may_url)\njune_acj_data = pd.read_csv(june_url)",
"_____no_output_____"
],
[
"# inspect the dataframes\napril_acj_data.head()",
"_____no_output_____"
],
[
"# inspect the dataframes\nmay_acj_data.head()",
"_____no_output_____"
],
[
"# inspect the dataframes\njune_acj_data.head()",
"_____no_output_____"
]
],
[
[
"As you can see, we have three dataframes with the Allegheny County Jail census for three months. All of the columns are the same so the merge will be relatively straightforward, we just have to concatinate the three dataframes together. Following the Pandas documentation on [Merging, joining, and concatinating object](https://pandas.pydata.org/pandas-docs/stable/merging.html#concatenating-objects), I will use the [`concat()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.concat.html#pandas.concat) function to smoosh the three dataframes into a single dataframe.",
"_____no_output_____"
]
],
[
[
"# put the dataframes I want to smoosh together into a python list\nmonthly_dataframes = [april_acj_data, may_acj_data, june_acj_data]\n\n# use the concat fuction to put them together into a new dataframe\najc_data = pd.concat(monthly_dataframes)\n\n# sample 5 random rows from the dataframe so I can (hopefully) see entries\n# from each of the three months\najc_data.sample(5)",
"_____no_output_____"
]
],
[
[
"Use the `concat()` function to merge identical datasets together. But what if your data don't line up? What do you do then?",
"_____no_output_____"
],
[
"### Merging different data with overlapping columns\n\nThe [PGH 311 Data](https://data.wprdc.org/dataset/311-data) provides a good example for connecting datasets that don't line up, but are still connectable because they share columns. First, let's load up the 311 data.",
"_____no_output_____"
]
],
[
[
"file_path = \"",
"_____no_output_____"
],
[
"# Load the 311 data into a dataframe\nurl = \"https://data.wprdc.org/datastore/dump/76fda9d0-69be-4dd5-8108-0de7907fc5a4\"\npgh_311_data = pd.read_csv(url)\npgh_311_data.head()",
"_____no_output_____"
]
],
[
[
"Now one of the things I like to do with the 311 data is count requests by type. ",
"_____no_output_____"
]
],
[
[
"# count all the unique values in the column REQUEST_TYPE\npgh_311_data['REQUEST_TYPE'].value_counts()",
"_____no_output_____"
],
[
"# make a HUGE horizontal bar chart so we can see the distribution of 311 complaints\n# it took me a bunch of guesses to figure out the right figure size\npgh_311_data['REQUEST_TYPE'].value_counts(ascending=True).plot.barh()",
"_____no_output_____"
],
[
"# make a HUGE horizontal bar chart so we can see the distribution of 311 complaints\n# it took me a bunch of guesses to figure out the right figure size\npgh_311_data['REQUEST_TYPE'].value_counts(ascending=True).plot.barh(figsize=(10,50))",
"_____no_output_____"
]
],
[
[
"Sweet! But there are 284 different types of requests, this is not very useful. Fortunately the 311 data has a [code book](https://data.wprdc.org/dataset/311-data/resource/7794b313-33be-4a8b-bf80-41751a59b84a) that rolls the request types into a set of higher level categories. Note, the code book is a Microsoft Excel file so we got to use the `read_excel()` function instead of `read_csv()`.",
"_____no_output_____"
]
],
[
[
"# load the 311 data code book\nurl = \"https://data.wprdc.org/dataset/a8f7a1c2-7d4d-4daa-bc30-b866855f0419/resource/7794b313-33be-4a8b-bf80-41751a59b84a/download/311-codebook-request-types.xlsx\"\npgh_311_codes = pd.read_excel(url) # parse the excel sheet\npgh_311_codes.sample(10) # pull ten random rows",
"_____no_output_____"
]
],
[
[
"So we loaded the codebook into a separate dataframe and if we look at it we can see how the `REQUEST_TYPE` from the data corresponds to `Issues` in the code book. Additionally, we can see how there is a higher level `Catagory` associated with each issue in the code book.",
"_____no_output_____"
]
],
[
[
"# find the row for \"Potholes\"\nquery = pgh_311_codes['Issue'] == 'Potholes'\npgh_311_codes[query]",
"_____no_output_____"
],
[
"# find the row for \"Weeds/Debris\"\nquery = pgh_311_codes['Issue'] == 'Weeds/Debris'\npgh_311_codes[query]",
"_____no_output_____"
],
[
"# find the row for \"Building Maintenance\nquery = pgh_311_codes['Issue'] == 'Building Maintenance'\npgh_311_codes[query]",
"_____no_output_____"
]
],
[
[
"If you look at the data you will notice that both \"Weeds/Debris\" and \"Building Maintenance\" belong to the same category of \"Neighborhood Issues.\" Using this mapping we can hopefully make a bit more sense of the data.\n\nNow what we need to do is `merge()` the data. We can look to the [Pandas documentation](https://pandas.pydata.org/pandas-docs/stable/merging.html#database-style-dataframe-joining-merging) to provide some explaination about how use use this fuction to combine two datasets with overlapping columns with different names.\n\nIn our case what we want to do is *merge* the codebook into the 311 data and add a new column for the category.",
"_____no_output_____"
]
],
[
[
"# merge the two dataframes on the REQUEST_TYPE and ISSUE columns\npgh_311_data_merged = pgh_311_data.merge(pgh_311_codes, left_on=\"REQUEST_TYPE\", right_on=\"Issue\")\npgh_311_data_merged.sample(10)",
"_____no_output_____"
],
[
"# count the numbers of unique values in the Category column\npgh_311_data_merged['Category'].value_counts()",
"_____no_output_____"
]
],
[
[
"This is a much more managably set of categorical values!",
"_____no_output_____"
]
],
[
[
"# make a bar chart of the categories for the merged data\npgh_311_data_merged['Category'].value_counts(ascending=True).plot.barh(figsize=(10,10))",
"_____no_output_____"
]
],
[
[
"Now we can dive into specific categories and find out more.",
"_____no_output_____"
]
],
[
[
"# create a query mask for rows where the Category is equal to the value \"Road/Street Issues\"\nquery = pgh_311_data_merged['Category'] == \"Road/Street Issues\"\n\n# find the rows matching the query, select the Issue column and count the unique values \npgh_311_data_merged[query]['Issue'].value_counts()",
"_____no_output_____"
],
[
"# create a query mask for rows where the Category is equal to the value \"Road/Street Issues\"\nquery = pgh_311_data_merged['Category'] == \"Road/Street Issues\"\n\n# find the rows matching the query, select the Issue column and count the unique values and make a bar chart\npgh_311_data_merged[query]['Issue'].value_counts(ascending=True).plot.barh(figsize=(10,10))",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"An isolated groupby from the Counting categorical values example",
"_____no_output_____"
],
[
"* count the attendance per center",
"_____no_output_____"
]
],
[
[
"# Do the same thing with pands\ncenter_attendance_pandas.groupby('center_name')['attendance_count'].sum().sort_values(ascending=False)\n",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"## Pivoting Data\n\nLet's look at one of the most exciting datasets in the WPRDC, the [Daily Community Center Attendance records](https://data.wprdc.org/dataset/daily-community-center-attendance)! WOWOW!",
"_____no_output_____"
]
],
[
[
"data_url = \"https://data.wprdc.org/datastore/dump/b7cb30c8-b179-43ff-8655-f24880b0f578\"\n\n# load data and read in the date column as the row index\ndata = pd.read_csv(data_url, index_col=\"date\", parse_dates=True)\ndata = data.drop(columns=\"_id\") \ndata.head()",
"_____no_output_____"
],
[
"# What does the data look like?\ndata.plot()",
"_____no_output_____"
]
],
[
[
"We can pivot the data so the center names are columns and each row is the number of people attending that community center per day. This is basically rotating the data.",
"_____no_output_____"
]
],
[
[
"# Use the pivot function to make column values into columns\ndata.pivot(columns=\"center_name\", values=\"attendance_count\").head()",
"_____no_output_____"
],
[
"data.head(10)",
"_____no_output_____"
]
],
[
[
"That is a lot of NaN, and not the tasty garlicy kind either.\n\nWe might want to break this apart for each Community Center. We can start by inspecting the number rows per center.",
"_____no_output_____"
]
],
[
[
"# count the number of rows per center\ndata.groupby(\"center_name\").count()",
"_____no_output_____"
]
],
[
[
"There are a lot of community centers that don't have a lot of numbers because either 1) they are not very popular or 2) they don't report their daily attendance (more likely given how man NaNs we saw above).\n\nWhat we will do is create a custom filter function that we will apply to ever row in the dataframe using the [groupby filter function](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.core.groupby.DataFrameGroupBy.filter.html). This is some knarly stuff we are doing here. This isn't the plain old filter function, this is a special filter fuction (part of the groupby functionality) that requires you to create a special function to apply to each row. In our case we will make a little function that takes a value and tests to see if it is create than a threshold value (in our case 1000). ",
"_____no_output_____"
]
],
[
[
"# create a function we will use to perform a filtering \n# operation on the data\n# filter out centers that have less then 1000 total entries\ndef filter_less_than(x, threshold):\n #print(x)\n if len(x) > threshold:\n return True\n else:\n return False\n\n \n# def filter_less_than(x):\n# if len(x) > 1000:\n# return True\n# else:\n# return False\n\n# use the custom function to filter out rows\npopular_centers = data.groupby(\"center_name\").filter(filter_less_than, \n threshold=1000)\n# look at what centers are in the data now\npopular_centers.groupby(\"center_name\").count()",
"_____no_output_____"
],
[
"# plot the popular community centers\npopular_centers.plot()",
"_____no_output_____"
],
[
"# Use the pivot function to make rows into columns with only the popular community centers\npivoted_data = popular_centers.pivot(columns=\"center_name\", values=\"attendance_count\")\npivoted_data.head()",
"_____no_output_____"
]
],
[
[
"Still NaN-y, but not as bad. Now lets see what these data look like.",
"_____no_output_____"
]
],
[
[
"# plot the data\npivoted_data.plot(figsize=(10,10))",
"_____no_output_____"
]
],
[
[
"Look at the [cumulative sum](http://www.variation.com/cpa/help/hs108.htm) to see if the attendance is above or below average. ",
"_____no_output_____"
]
],
[
[
"# compute the cumulative sum for every column and make a chart\npivoted_data.cumsum().plot(figsize=(10,10))",
"_____no_output_____"
]
],
[
[
"Looks like Brookline is the winner here, but attendance has tapered off in the past couple years.",
"_____no_output_____"
]
],
[
[
"# Resample and compute the monthly totals for the popular community centers\npivoted_data.resample(\"M\").sum().plot(figsize=(10,10))",
"_____no_output_____"
]
],
[
[
"Looks like monthly is too messy, maybe by year?",
"_____no_output_____"
]
],
[
[
"# yearly resample to monthly, compute the totals, and plot\npivoted_data.resample(\"Y\").sum().plot(figsize=(10,10))",
"_____no_output_____"
],
[
"data.pivot(columns=\"center_name\", values=\"attendance_count\").resample(\"Y\").sum().plot(figsize=(20,10))",
"_____no_output_____"
]
],
[
[
"Looking at the total number of attendance per year per popular community center gives us a bit more information.",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
],
[
"## Split, Apply, Combine with numeric data\n\n* The 311 complaints are mainly categorical data, which doesn't let use do more mathematical aggregations\n* Lets grab a different dataset from the WPRDC, the [Allegheny County Jail Daily Census](https://data.wprdc.org/dataset/allegheny-county-jail-daily-census)",
"_____no_output_____"
]
],
[
[
"# Grab three months of data\njanuary17_jail_census = pd.read_csv(\"https://data.wprdc.org/datastore/dump/3b5d9c45-b5f4-4e05-9cf1-127642ad1d17\",\n parse_dates=True,\n index_col='Date')\nfeburary17_jail_census = pd.read_csv(\"https://data.wprdc.org/datastore/dump/cb8dc876-6285-43a8-9db3-90b84eedb46f\",\n parse_dates=True,\n index_col='Date')\nmarch17_jail_census = pd.read_csv(\"https://data.wprdc.org/datastore/dump/68645668-3f89-4831-b1de-de1e77e52dd3\",\n parse_dates=True,\n index_col='Date')\n",
"_____no_output_____"
],
[
"january17_jail_census.head()",
"_____no_output_____"
],
[
"# Use the concat function to combine all three into one dataframe\n# Remember I need to make a list of the all the dataframes for\n# the concat fuction\njail_census = pd.concat([january17_jail_census, \n feburary17_jail_census, \n march17_jail_census])\njail_census",
"_____no_output_____"
],
[
"# remove the \"_id\" column because it is not useful\njail_census.drop(\"_id\", axis=1, inplace=True)\njail_census",
"_____no_output_____"
],
[
"# get just the first day in Feburary 2017\njail_census.loc[\"2017-02-01\"]",
"_____no_output_____"
],
[
"# Compute the average age ate booking by gender for Febuary 1st, 2017\njail_census.loc['2017-02-01'].groupby('Gender')['Age at Booking'].mean()",
"_____no_output_____"
],
[
"# compute the average age at booking by race for Febuary 1st, 2017\njail_census.loc['2017-02-01'].groupby('Race')['Age at Booking'].mean()",
"_____no_output_____"
]
],
[
[
"If we look at the [data dictionary](https://data.wprdc.org/dataset/allegheny-county-jail-daily-census/resource/f0550174-16b0-4f6e-88dc-fa917e74b56c) we can see the following mapping for race categories\n```\nRace of Inmate\nA-ASIAN OR PACIFIC ISLANDER\nB-BLACK OR AFRICAN AMERICAN\nH-HISPANIC \nI-AMERICAN INDIAN OR ALASKAN NATIVE\nU-UNKNOWN\nW-WHITE\n```\nThe `x` category hasn't been described.",
"_____no_output_____"
]
],
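[
[
"Since the data dictionary above maps each single-letter code to a description, we could also attach readable labels directly to the dataframe. A minimal sketch (the new column name is made up, and it assumes the codes appear exactly as listed, plus the lowercase `x` noted above):\n\n```python\n# map the single-letter race codes to the descriptions from the data dictionary\nrace_labels = {\n    'A': 'Asian or Pacific Islander',\n    'B': 'Black or African American',\n    'H': 'Hispanic',\n    'I': 'American Indian or Alaskan Native',\n    'U': 'Unknown',\n    'W': 'White',\n    'x': 'Undocumented code',\n}\njail_census['Race label'] = jail_census['Race'].map(race_labels)\njail_census['Race label'].value_counts(dropna=False)\n```",
"_____no_output_____"
]
],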
[
[
"# how many total rows in the dataset have \"x\" for race\njail_census['Race'].value_counts()['x']",
"_____no_output_____"
],
[
"# Get the statistical summary of age at booking by gender for Febuary 1st, 2017\njail_census.loc['2017-02-01'].groupby('Gender')['Age at Booking'].describe()",
"_____no_output_____"
],
[
"# Compute the difference between Age at Booking and current age\nage_difference = jail_census.loc['2017-02-01']['Current Age'] - jail_census.loc['2017-02-01']['Age at Booking']\nage_difference.value_counts()",
"_____no_output_____"
],
[
"# Compute the average age for each day\njail_census.resample(\"D\").mean()",
"_____no_output_____"
],
[
"# What is with that NaNs?\njail_census.loc['2017-03-19']",
"_____no_output_____"
],
[
"# visualize the number of inmates\njail_census.resample(\"D\").size().plot()",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"## Parsing Time\n\nOften there is date/time data in one of the columns of your dataset. In this case `CREATED_ON` appears to be a date/time for when the 311 complaint was lodged. Unless you specify `parse_dates=True` when you call the read_csv, you will need to re-parse your date/time column into the correct datatype. \n\n\nFor example, if we look at the datatypes for all of the columns in our potholes in Bloomfield dataset we can see been parsed as dates.",
"_____no_output_____"
]
],
[
[
"# inspect the datatypes for each column in the data\nbloomfield_pothole_data.info()",
"_____no_output_____"
]
],
[
[
"Let's fix that! First we parse the `CREATED_ON` column using the `to_datetime()` function. What this does is loop over every value in the column and convert it to a datetime data type. \n\n\n**Important Note**: Even though we just want to look the potholes for Bloomfield, we need to do this operation on the full data, not the subselection of potholes in Bloomfield. Why? It has to do with the way Pandas manages the data behind the scenes, our `bloomfield_pothole_data` is actually a *view* into the larger dataframe, `pgh_311_data_merged`. This means we should change the original data because then we'll see it reflected in our bloomfield/potholes subset. Changing the originaly data as opposed to our subset is also good practice because we might want to look at the temporal distribution for other types of 311 request or other neighborhoods.",
"_____no_output_____"
]
],
[
[
"# replace the CREATED_ON column with parsed dates\npgh_311_data_merged['CREATED_ON'] = pd.to_datetime(pgh_311_data_merged['CREATED_ON'])\npgh_311_data_merged.info()",
"_____no_output_____"
]
],
[
[
"Sweet, now that Pandas is aware of dates we can start doing operations on that data.",
"_____no_output_____"
]
],
[
[
"# ReCreate a query mask for potholes\nquery_potholes = pgh_311_data_merged['REQUEST_TYPE'] == \"Potholes\"\n# Create a query mask for bloomfield\nquery_bloomfield = pgh_311_data_merged['NEIGHBORHOOD'] == \"Bloomfield\"\n\n# create a new dataframe that queries potholes AND bloomfield\nbloomfield_pothole_data = pgh_311_data_merged[query_potholes & query_bloomfield]\n# inspect the new dataframe\nprint(bloomfield_pothole_data.shape)\nbloomfield_pothole_data.head()",
"_____no_output_____"
],
[
"# notice the datatype has changed in our subset of the data\nbloomfield_pothole_data.info()",
"_____no_output_____"
],
[
"# make a temporal index by setting it equal to CREATED_ON\nbloomfield_pothole_data.index = bloomfield_pothole_data['CREATED_ON']\nbloomfield_pothole_data.info()",
"_____no_output_____"
],
[
"# Resample (grouping) by month (\"M\") and counting the number of complaints\nbloomfield_pothole_data['REQUEST_ID'].resample(\"M\").count().plot(figsize=(10,6))",
"_____no_output_____"
]
],
[
[
"It looks like Bloomfield had a MASSIVE spike in pothole complaints this past winter. You can see there is a pattern, it is lowest right before the new year and then it springs in the spring and then it falls again in the fall.",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
],
[
"### Merging Data\n\n* Bringing disparate datasets together is one of the more powerful features of Pandas\n* Like with Python lists, you can `append()` and `concat()` Pandas `Series` and `Dataframes`\n* These functions work best for simple cases",
"_____no_output_____"
]
],
[
[
"# concatinate two series together\nser1 = pd.Series(['A', 'B', 'C'], index=[1, 2, 3])\nser2 = pd.Series(['D', 'E', 'F'], index=[4, 5, 6])\npd.concat([ser1, ser2])",
"_____no_output_____"
],
[
"# concatinate two dataframes\ndf1 = pd.DataFrame({\"A\":[\"A1\", \"A2\"],\n \"B\":[\"B1\",\"B2\"]},index=[1,2])\ndf2 = pd.DataFrame({\"A\":[\"A3\", \"A4\"],\n \"B\":[\"B3\",\"B4\"]},index=[3,4])\npd.concat([df1,df2])",
"_____no_output_____"
],
[
"# concatinate dataframes horizontally\ndf1 = pd.DataFrame({\"A\":[\"A1\", \"A2\"],\n \"B\":[\"B1\",\"B2\"]},index=[1,2])\ndf2 = pd.DataFrame({\"C\":[\"C1\", \"C2\"],\n \"D\":[\"D1\",\"D2\"]},index=[1,2])\npd.concat([df1,df2], axis=1)",
"_____no_output_____"
],
[
"# What happens when indexes don't line up\ndf1 = pd.DataFrame({\"A\":[\"A1\", \"A2\"],\n \"B\":[\"B1\",\"B2\"]},index=[1,2])\ndf2 = pd.DataFrame({\"A\":[\"A3\", \"A4\"],\n \"B\":[\"B3\",\"B4\"]},index=[3,4])\npd.concat([df1,df2], axis=1)",
"_____no_output_____"
],
[
"# create a hierarchical index\ndf1 = pd.DataFrame({\"A\":[\"A1\", \"A2\"],\n \"B\":[\"B1\",\"B2\"]},index=[1,2])\ndf2 = pd.DataFrame({\"A\":[\"A3\", \"A4\"],\n \"B\":[\"B3\",\"B4\"]},index=[3,4])\npd.concat([df1,df2], keys=[\"df1\", 'df2'])",
"_____no_output_____"
]
],
[
[
"### Merging and Joining\n\n* While `concat()` is useful it lacks the power to do complex data merging\n* For example, I have two tables of different data but one overlapping column\n* This is where the `merge()` function becomes useful because it lets you *join* datasets\n* The concept of \"join\" has lots of theory and is a richly developed method for *joining* data",
"_____no_output_____"
],
[
"#### One-to-one joins",
"_____no_output_____"
]
],
[
[
"# create two dataframes with one shared column\ndf1 = pd.DataFrame({'employee': ['Bob', 'Jake', 'Lisa', 'Sue'],\n 'group': ['Accounting', 'Engineering', 'Engineering', 'HR']})\ndf2 = pd.DataFrame({'employee': ['Lisa', 'Bob', 'Jake', 'Sue'],\n 'hire_date': [2004, 2008, 2012, 2014]})",
"_____no_output_____"
],
[
"# display df1\ndf1",
"_____no_output_____"
],
[
"# display df2\ndf2",
"_____no_output_____"
],
[
"# merge df1 and df2 intzo a new dataframe df3\ndf3 = pd.merge(df1, df2)\ndf3",
"_____no_output_____"
]
],
[
[
"* The new dataframe `df3` now has all of the data from df1 and df2\n* The `merge` function automatically connected the two tables on the \"employees\" column\n* But what happens when your data don't line up?",
"_____no_output_____"
],
[
"#### Many-to-one joins\n\n* Sometimes there isn't a one to one relationshp between rows in the two datasets\n* A *many-to-one* join lets you combine these datasets",
"_____no_output_____"
]
],
[
[
"df3",
"_____no_output_____"
],
[
"# make another dataframe about the supervisor for each group\ndf4 = pd.DataFrame({'group': ['Accounting', 'Engineering', 'HR'],\n 'supervisor': ['Carly', 'Guido', 'Steve']})\ndf4",
"_____no_output_____"
],
[
"# Merge df3 from above with the supervisor info in df4\npd.merge(df3,df4)",
"_____no_output_____"
]
],
[
[
"* Notice how the information about Guido, the manager for Engineering, is repeated.\n* While this might seem like duplicated data, it makes it easier to quickly look up Jake and Lisa's supervisor without consulting multiple tables",
"_____no_output_____"
],
[
"#### Many-to-many joins\n\n* Let's combine the employee information with skills information\n* Notice there isn't a one to one or even a one to many relationship between these tables\n* Each group can have multiple skills, so **what do you think will happen?**",
"_____no_output_____"
]
],
[
[
"# Use the employee table specified above\ndf1",
"_____no_output_____"
],
[
"# create a new dataframe with skills information\ndf5 = pd.DataFrame({'group': ['Accounting', 'Accounting',\n 'Engineering', 'Engineering', 'HR', 'HR', 'Librarian'],\n 'skills': ['math', 'spreadsheets', 'coding', 'linux',\n 'spreadsheets', 'organization', 'nunchucks']})\ndf5",
"_____no_output_____"
],
[
"pd.merge(df1, df5)",
"_____no_output_____"
]
],
[
[
"* Amazing, Pandas merge capabilities are very useful\n* But what do you do if the names of your columns don't match?\n* You could change column names...\n* But that is crazy! Just use the `left_on` and `right_on` parameters to the `merge()` function",
"_____no_output_____"
]
],
[
[
"# Use the employee table specified above\ndf1",
"_____no_output_____"
],
[
"# Create a new salary table, but use \"name\" instead of \"employee\" for the column index\ndf3 = df3 = pd.DataFrame({'name': ['Bob', 'Jake', 'Lisa', 'Sue'],\n 'salary': [70000, 80000, 120000, 90000]})\ndf3",
"_____no_output_____"
],
[
"# lets try and merge them without specifying what to merge on\npd.merge(df1, df3)",
"_____no_output_____"
]
],
[
[
"* What are the column names I should specify?",
"_____no_output_____"
]
],
[
[
"# Now lets specify the column name \npd.merge(df1, df3, left_on=\"employee\", right_on=\"name\" )",
"_____no_output_____"
]
],
[
[
"* Notice we now have a redundant employee/name column, this is a by-product of merging different columns\n* If you want to get rid of it you can use the `drop` method",
"_____no_output_____"
]
],
[
[
"# drop the name column, axis=1 means axis='col', which is confusing\npd.merge(df1, df3, left_on=\"employee\", right_on=\"name\" ).drop(\"name\", axis=1)",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"## Plotting with Pandas\n\n* You can plot directly from `pandas` data structures\n* Pandas [has its own interface](https://pandas.pydata.org/pandas-docs/stable/visualization.html#) to matplotlib tied directly to the `Series` and `Dataframe` data structures",
"_____no_output_____"
]
],
[
[
"# We need to import numpy for generating random data\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"* **Important!** You need the following code to render plots inside of Jupyter",
"_____no_output_____"
]
],
[
[
"# Tell matplotlib to render visualizations in the notebook\n%matplotlib inline",
"_____no_output_____"
],
[
"# create some random data\nx = np.linspace(0, 10, 100)\n\n# put that data into a dataframe\ndf = pd.DataFrame({\"y\":np.sin(x), \"z\":np.cos(x)}, index=x)\ndf.head()",
"_____no_output_____"
],
[
"# Plot the data using the plot method\ndf.plot();",
"_____no_output_____"
]
],
[
[
"* Basically, you can add a `.plot()` to the end of any Pandas datastructure and it will make a best guess as to the best way to visualize it.",
"_____no_output_____"
]
],
[
[
"# Plot data in a Series with the plot method\npd.Series(np.random.randint(0,10,10)).plot();",
"_____no_output_____"
]
],
[
[
"* However, be careful calling `.plot()` all willy nilly since it doesn't always produce sensible results",
"_____no_output_____"
]
],
[
[
"# create some random time series data and create a default plot\nrandom_series = pd.Series(np.random.randn(1000), \n index=pd.date_range('1/1/2000', periods=1000))\nrandom_series.plot();",
"_____no_output_____"
]
],
[
[
"* What is cool is you can often use the `.plot()` method after performing some computation on the data\n* For example, we can calculate the [cumulative sum](http://www.variation.com/cpa/help/hs108.htm) (the cumulative sum of differences between the values and the average)\n * Sloping up means above average, sloping down means below average",
"_____no_output_____"
]
],
[
[
"# Plot the cumulative sum of a Series\nrandom_series.cumsum().plot()",
"_____no_output_____"
]
],
[
[
"* The `.plot()` trick also works with Dataframes",
"_____no_output_____"
]
],
[
[
"# create dataframe with four columns and create a default plot\ndf = pd.DataFrame(np.random.randn(1000, 4), index=random_series.index, columns=list('ABCD'))\ndf.head()",
"_____no_output_____"
],
[
"# just plot the dataframe and see what happens\ndf.plot();",
"_____no_output_____"
]
],
[
[
"* Messy! Let's try the cumulative sum trick and see if that looks any better",
"_____no_output_____"
]
],
[
[
"# Plot the cumulative sum of each column\ndf.cumsum().plot();",
"_____no_output_____"
]
],
[
[
"* With pandas you can specify the kind of visualization with the `kind` parameter to `plot()`\n* The default isn't always what you want",
"_____no_output_____"
]
],
[
[
"# plot the sum of the columns\ndf.sum().plot()\n",
"_____no_output_____"
]
],
[
[
"* This is a *bad* visualization, the line imply an ordered relationship between the four categories\n* Let's use a bar chart instead",
"_____no_output_____"
]
],
[
[
"# plot the sum of the columns as bars\ndf.sum().plot(kind='bar')",
"_____no_output_____"
]
],
[
[
"* Almost got it, but the labels on the x axis are a bit wonky. \n* Let's look at the documentation and see if we can find a fix\n\n#### Quick Exercise\n\n* Find the documentation for the `plot()` method of a Pandas `Series`\n * *HINT*: Try Googling\n* What parameter will fix the x labels so they are easier to read?",
"_____no_output_____"
]
],
[
[
"animals = pd.Series([1,5,2,5], index=[\"cats\", \"dogs\", \"chickens\", \"spiders\"])\n\nanimals.plot(kind=\"bar\");",
"_____no_output_____"
],
[
"# answer to the exercise\nanimals = pd.Series([1,5,2,5], index=[\"cats\", \"dogs\", \"chickens\", \"spiders\"])\n\nanimals.plot(kind=\"bar\", rot=0);",
"_____no_output_____"
]
],
[
[
"### Pandas Plot types\n\n* Pandas provides a quick and easy interface to a bunch of different plot types\n* You don't even have to load `matplotlib` (although you do need `%matplotlib inline`)\n* The secret to plotting is Googling, looking at other people's code, and trying things until it works\n * At least, that is how I do it\n\n\n* What is nice about pandas/matplotlib integration is pandas will handle a lot of the boilerplate code for you\n* Then you pass parameters to the `plot()` method to determine how the graph should look",
"_____no_output_____"
]
],
[
[
"# create some random categorical data\ndf2 = pd.DataFrame(np.random.randint(1,100,size=(7,4)), \n columns=['Carbs', 'Fats', 'Proteins', 'Other'], \n index=[\"M\",\"Tu\",\"W\",\"Th\",\"F\",\"Sa\",\"Su\",])\n\n# Plot a bar chart\ndf2.plot(kind=\"bar\")",
"_____no_output_____"
]
],
[
[
"* Bar charts can also be called directly using the `bar()` function ",
"_____no_output_____"
]
],
[
[
"df2.plot.bar()",
"_____no_output_____"
]
],
[
[
"* There are a bunch of parameters for these methdos that let you tweak the vizualization\n* For example, the `stacked` parameter stacks the categorical values so you can easily compare within and across categories",
"_____no_output_____"
]
],
[
[
"df2.plot.bar(stacked=True, rot=0)",
"_____no_output_____"
]
],
[
[
"#### Exercise\n\n* Try experimenting with the other plot types\n* Do they make sense for these data?\n\n",
"_____no_output_____"
]
],
[
[
"# move the cursor to the right of the period and hit tab\ndf2.plot.",
"_____no_output_____"
],
[
"# try another plot type\n# move the cursor to the right of the period and hit tab\ndf2.plot.",
"_____no_output_____"
]
],
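[
[
"# One possible answer to the exercise above (added illustration, not part of the original\n# notebook): a box plot summarizes the distribution of each column across the week\ndf2.plot.box();",
"_____no_output_____"
]
],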
[
[
"---",
"_____no_output_____"
],
[
"### Working with Time\n\n* One of the most powerful features of Pandas is its time series functionality\n* Dates and time are a Python and Pandas data type (like integers and strings)\n* By using the `datetime` data types you can do advanced, time-centric analysis\n* One thing to remember about computers is they are *very* specific\n * *Time stamps* - a specific moment in time (July 4th, 2017 at 7:52am and 34 seconds)\n * *Time intervals* - a length of time with start and end points (The year 2017)\n * *Time duration* - a specific length of time (a year, a month, a day)",
"_____no_output_____"
]
],
[
[
"# Datetime in pure Python\nimport datetime\n\ndate = datetime.datetime(year=2017, month=6, day=13)\ndate",
"_____no_output_____"
],
[
"type(date)",
"_____no_output_____"
],
[
"# what is that date's month?\ndate.month",
"_____no_output_____"
],
[
"# what is that date's day?\ndate.day",
"_____no_output_____"
],
[
"# use the parser function in the datautil library to parse human dates\nfrom dateutil import parser\ndate = parser.parse(\"4th of July, 2017\")\ndate",
"_____no_output_____"
],
[
"# get the month\ndate.month",
"_____no_output_____"
]
],
[
[
"#### Exercise\n\nTry some different date strings, see how smart Python can be.",
"_____no_output_____"
]
],
[
[
"my_date = parser.parse(\"<your date string here\")\nmy_date",
"_____no_output_____"
]
],
[
[
"* You can use [*string format codes*](https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior) for printing dates and time in different formats (especially useful for making human readable dates)\n* Pass a format string to the `strftime()` method to print out a pretty date",
"_____no_output_____"
]
],
[
[
"# Get the weekday \ndate.strftime(\"%A\")",
"_____no_output_____"
],
[
"date.strftime(\"%B\")",
"_____no_output_____"
],
[
"## Try some of the different string format codes and see what happens\ndate.",
"_____no_output_____"
],
[
"## Try combining a few of them together with punctuation too\ndate.",
"_____no_output_____"
]
],
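[
[
"# One possible answer to the two cells above (added example): several format codes\n# combined with punctuation in a single strftime() call\ndate.strftime(\"%A, %B %d, %Y\")",
"_____no_output_____"
]
],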
[
[
"### Working with time in Pandas\n\n* Just like how Pandas has its own datatypes for numbers, Pandas has its own dates and times (to support more granularity)\n* If you have a lot of dates, it is often useful to use the Pandas functions over the native Python functions\n* Pandas is most powerful when you index by time using the `DatetimeIndex`",
"_____no_output_____"
]
],
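[
[
"# Added aside (not part of the original notebook): pandas has a type for each of the\n# three time concepts introduced earlier -- pd.Timestamp for a specific moment, pd.Period\n# for a time interval, and pd.Timedelta for a duration. The example values are arbitrary.\nprint(pd.Timestamp('2017-07-04 07:52:34'))\nprint(pd.Period('2017'))\nprint(pd.Timedelta(days=1))",
"_____no_output_____"
]
],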
[
[
"# Create a Series with a DateTime index\nindex = pd.DatetimeIndex(['2014-03-04', '2014-08-04',\n '2015-04-04', '2015-09-04',\n '2016-01-01', '2016-02-16'])\ndata = pd.Series([0, 1, 2, 3, 4, 5], index=index)\ndata",
"_____no_output_____"
]
],
[
[
"* Now that the index is made of DateTimes we can index using date strings\n* Note, this only works on strings",
"_____no_output_____"
]
],
[
[
"# grab the value for a specific day\ndata[\"2015-04-04\"]",
"_____no_output_____"
],
[
"# grab a slice between two dates\ndata['2014-08-01':'2016-01']",
"_____no_output_____"
],
[
"# give me everything from 2015\ndata['2015']",
"_____no_output_____"
]
],
[
[
"* Pandas has some functions to make parsing dates easy too",
"_____no_output_____"
]
],
[
[
"# use the to_datetime function instead of the parser function\ndate = pd.to_datetime(\"4th of July, 2017\")\ndate",
"_____no_output_____"
],
[
"# use string format codes to get the weekday\ndate.strftime(\"%A\")",
"_____no_output_____"
],
[
"# give me today's date\ntoday = pd.to_datetime(\"today\")\ntoday",
"_____no_output_____"
]
],
[
[
"* That is the day, but also the exact time... \n* Timestamps must always be a specific moment",
"_____no_output_____"
],
[
"#### Exercise\n* Use the [*string format codes*](https://docs.python.org/3/library/datetime.html#strftime-strptime-behavior) to print today's date in the \"YYYY-MM-DD\" format. HINT: You will have to combine multiple codes and dashes",
"_____no_output_____"
]
],
[
[
"# Replace the ??? with the write string format code\nprint(today.strftime(\"???\"))",
"_____no_output_____"
]
],
[
[
"### Playing with time on real data\n\n* Let's look at the [311 data for the city of Pittsburgh](https://data.wprdc.org/dataset/311-data) from the WPRDC\n* Did you know, you can give the URL directly to Pandas!",
"_____no_output_____"
]
],
[
[
"# load the 311 data directly from the WPRDC\npgh_311_data = pd.read_csv(\"311_data.csv\")\npgh_311_data.head()",
"_____no_output_____"
]
],
[
[
"* Ok, now we have the data, but we need it to be indexed by date\n* **What column has the date information?**\n* **What format do you think that column is currently in?**\n* **What function might we use to convert that column into dates?**",
"_____no_output_____"
]
],
[
[
"pgh_311_data['CREATED_ON'].head()",
"_____no_output_____"
],
[
"# convert the \"CREATED_ON\" column to dates\npd.to_datetime(pgh_311_data['CREATED_ON']).head()",
"_____no_output_____"
]
],
[
[
"* We can convert the \"CREATED_ON\" column to Pandas `datetime` objects\n* Now we have to set that to the dataframe's index",
"_____no_output_____"
]
],
[
[
"# set the index of pgh_311_data to be the parsed dates in the \"CREATED_ON\" column\npgh_311_data.index = pd.to_datetime(pgh_311_data['CREATED_ON'])\npgh_311_data.head()",
"_____no_output_____"
]
],
[
[
"* Do'h, now we have CREATED_ON twice, that isn't very tidy\n* We can also skip this extra conversion step entirely by specifying the index column and date parsing in `read_csv()` function call.",
"_____no_output_____"
]
],
[
[
"# load the 311 data directly from the WPRDC and parse dates directly\npgh_311_data = pd.read_csv(\"311_data.csv\",\n index_col=\"CREATED_ON\", \n parse_dates=True)\npgh_311_data.head()",
"_____no_output_____"
],
[
"pgh_311_data.info()",
"_____no_output_____"
]
],
[
[
"* Now that the dataframe has been indexed by time we can select 311 complains by time",
"_____no_output_____"
]
],
[
[
"# Select 311 complaints on January 1st, 2016\npgh_311_data['2016-01-01']",
"_____no_output_____"
],
[
"# Select the times just around the new years celebration\npgh_311_data[\"2015-12-31 20:00:00\":\"2016-01-01 02:00:00\"]",
"_____no_output_____"
]
],
[
[
"* Someone clearly had a very roudy new years ",
"_____no_output_____"
],
[
"#### Exercise\n\n* Using the timeseries index selection, select the complaints made today \n* Bonus, try and write your code so it will work on any day you execute it\n * *hint*: try the `pd.datetime('today')` \n * *Another hint*: Remember the DateTime gives you the exact time \n * *Yet another hint*: Datetime indexing only works with string representations ",
"_____no_output_____"
]
],
[
[
"# Write your code here\npgh_311_data[]",
"_____no_output_____"
],
[
"\n\n# create a Pandas datetime for today\ntoday = pd.to_datetime(\"today\")\nformatted_today_string = today.strftime(\"%Y-%m-%d\")\nprint(today)\nprint(formatted_today_string)\n\n# use Pandas date string indexing to retrieve all rows for this today's date\ntodays_311s = pgh_311_data[formatted_today_string]\ntodays_311s",
"_____no_output_____"
]
],
[
[
"### Grouping time with the resample method\n\n* Instead of using the `groupby()` method, you use the `resample()` method to *split* time into groups\n* Then you can *apply* the regular aggregation functions ",
"_____no_output_____"
]
],
[
[
"# compute the mean of complaints per quarter...note this doesn't make sense, but works anyway\npgh_311_data.resample(\"Q\").mean()",
"_____no_output_____"
],
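[
"# Added aside (not in the original notebook): resample() accepts other offset aliases too;\n# for example \"W\" groups the complaints into weekly bins, and size() counts the rows in each bin\npgh_311_data.resample(\"W\").size().head()",
"_____no_output_____"
],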
[
"# count the number of complaints per month\npgh_311_data.resample(\"M\").count()",
"_____no_output_____"
]
],
[
[
"* Ok, these data are *begging* to be visualized, so I'm going to give you a teaser of next week ",
"_____no_output_____"
]
],
[
[
"# load up the data visualization libraries\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport seaborn; seaborn.set()",
"_____no_output_____"
],
[
"# Create a graph of the monthly complaint counts\npgh_311_data['REQUEST_ID'].resample(\"M\").count().plot()",
"_____no_output_____"
]
],
[
[
"Try the code above, but re-sampling based upon different date periods. The strings for specifying an offset are located [here](http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases)\n",
"_____no_output_____"
]
],
[
[
"# Try a different resampling here\n\n",
"_____no_output_____"
],
[
"# Try yet another resampling here\n\n",
"_____no_output_____"
]
],
[
[
"* OK, we've done some \"fun\" stuff with Time, but maybe we want to start doing deeper analysis\n* To do that, we need to know what all these columns mean?\n* Fortunately, this dataset has a [data dictionary](https://data.wprdc.org/dataset/311-data/resource/d3e98904-4a86-45fb-9041-0826ab8d56d0), which provides a bit more information.",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
],
[
"## Querying Data\n\n* It is sometimes helpful to think of a Pandas Dataframe as a little database. \n* There is data and information stored in the Pandas Dataframe (or Series) and you want to *retrieve* it.\n* Pandas has multiple mechanisms for getting specific bits of data and information from its data structures. The most common is to use *masking* to select just the rows you want. \n* Masking is a two stage process, first you create a sequence of boolean values based upon a conditional expression--which you can think of as a \"query\"--and then you index your dataframe using that boolean sequence. ",
"_____no_output_____"
]
],
[
[
"center_attendance_pandas.head(10)",
"_____no_output_____"
]
],
[
[
"* What if we only wanted to see attendance for Brookline Community Center",
"_____no_output_____"
]
],
[
[
"query = center_attendance_pandas[\"center_name\"] == \"Brookline Community Center\"\n\nbrookline_center_attendance = center_attendance_pandas[query]\n\nbrookline_center_attendance.head(10)",
"_____no_output_____"
],
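[
"# Added aside: the mask and the indexing step are often written as a single expression;\n# this line is equivalent to the two-step version above\ncenter_attendance_pandas[center_attendance_pandas[\"center_name\"] == \"Brookline Community Center\"].head(10)",
"_____no_output_____"
],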
[
"# create queries for brookline and greenfield\nbrookline_query = center_attendance_pandas[\"center_name\"] == \"Brookline Community Center\"\ngreenfield_query = center_attendance_pandas[\"center_name\"] == \"Magee Community Center\"\n\n# use the boolean OR operator to select both community centers\ncenter_attendance_pandas[brookline_query | greenfield_query]",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"### Exploring the 311 Data\n\n* Now we can use what we have learned to do some exploratory data analysis on the 311 data\n* First, lets use the `sample()` method to grab 10 random rows so we can get a feel for the data\n",
"_____no_output_____"
]
],
[
[
"# Sample 10 random rows from the dataframe\npgh_311_data.sample(10)",
"_____no_output_____"
]
],
[
[
"#### Exercise\n\n\n* What are the possible *origins* of complaints?\n* How many complaints are coming from each source?\n\n*HINT*: Scroll back up to the top of to look at the Dataframes refresher.",
"_____no_output_____"
]
],
[
[
"pgh_311_data['REQUEST_ORIGIN'].value_counts()",
"_____no_output_____"
]
],
[
[
"#### Exercise\n\n* *Group* the complaints *by* neighborhood and get the *size* of each group",
"_____no_output_____"
]
],
[
[
"pgh_311_data.groupby('NEIGHBORHOOD').size()",
"_____no_output_____"
],
[
"# Note, for just counting the groupby and value_counts are equivalent\n# There is more than one way to skin the cat (or panda)\npgh_311_data['NEIGHBORHOOD'].value_counts()",
"_____no_output_____"
]
],
[
[
"#### Exercise\n\n* *Group* the complaints *by* type and get the *count* for each group",
"_____no_output_____"
]
],
[
[
"pgh_311_data.groupby(\"REQUEST_TYPE\")['REQUEST_TYPE'].count()",
"_____no_output_____"
]
],
[
[
"This categorical data is far too granular. \nFortunately, if we look at the [311 Data](https://data.wprdc.org/dataset/311-data) we can see there is a [311 Issue and Category Codebook](https://data.wprdc.org/dataset/311-data/resource/40ddfbed-f225-4320-b4d2-7f1e09da72a4). Click on that link and check out the Google Sheets preview of that data.\n\nhttps://data.wprdc.org/dataset/311-data/resource/40ddfbed-f225-4320-b4d2-7f1e09da72a4\n\nWhat we need to do is download the CSV from Google Sheets directly into a Pandas dataframe, but this is actually a bit tricky because Google won't easily give us a link to the CSV file.",
"_____no_output_____"
]
],
[
[
"# I googled \"pandas dataframe from google sheets\"\n# and found a solution on Stackoverflow\n# https://stackoverflow.com/a/35246041\nissue_category_mapping = pd.read_csv('https://docs.google.com/spreadsheets/d/' + \n '1DTDBhwXj1xQG1GCBKPqivlzHQaLh2HLd0SjN1XBPUw0' +\n '/export?gid=0&format=csv')\nissue_category_mapping.head(5) # Same result as @TomAugspurger",
"_____no_output_____"
]
],
[
[
"#### Exercise\n\n* Merge the `pgh_311_data` with the `issue_category_mapping` so we can count the number of complaints per category\n* *HINT*: You will need to specify the `left_on` and `right_on` parameters",
"_____no_output_____"
]
],
[
[
"# create a new merged dataframe\nmerged_311_data = pd.merge(pgh_311_data, \n issue_category_mapping,\n left_on=\"REQUEST_TYPE\",\n right_on=\"Issue\")\n\nmerged_311_data.head()",
"_____no_output_____"
],
[
"# get rid of redundant columns\nmerged_311_data.drop(['Definition','Department', 'Issue'], \n axis=1, \n inplace=True)\nmerged_311_data.head()",
"_____no_output_____"
]
],
[
[
"#### Exercise\n\n* Now that we have category data, count the number of complaints by category",
"_____no_output_____"
]
],
[
[
"merged_311_data.groupby(\"Category\")['Category'].count().sort_values(ascending=False)",
"_____no_output_____"
],
[
"merged_311_data.groupby(\"Category\").size().sort_values(ascending=False)",
"_____no_output_____"
]
],
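[
[
"# Added illustration: because these counts come back as a pandas Series, the plotting\n# interface from earlier in the notebook applies directly -- a horizontal bar chart keeps\n# the long category names readable\nmerged_311_data.groupby(\"Category\").size().sort_values().plot(kind=\"barh\", figsize=(8, 10));",
"_____no_output_____"
]
],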
[
[
"* Selecting data in a Dateframe\n",
"_____no_output_____"
]
],
[
[
"# Select only rows where NEIGHBORHOOD equals \"Greenfield\" and then count how many complaints came from each source\nmerged_311_data[merged_311_data['NEIGHBORHOOD'] == 'Greenfield'].groupby('REQUEST_ORIGIN').size()",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"Challenge\n\nQuerying and subsetting - Masking. Variables\n\nEXERCISE - time indexing and subsetting data and visuzlaing that subset\n\nCreate a sub dataset of just the potholes in a neighborhood\n\nCreate a plot of number of pothole complaints per month of that data subset",
"_____no_output_____"
]
],
[
[
"#Create a sub dataset of just the potholes in Highland Park \npotholes = data_311['REQUEST_TYPE'] == \"Potholes\"\n\nhighland_park= data_311['NEIGHBORHOOD'] == \"Highland Park\"\n\n\n",
"_____no_output_____"
],
[
"\nhighlandpark_potholes = data_311[potholes & highland_park]\n\nprint(highlandpark_potholes.shape)\nhighlandpark_potholes.head()",
"_____no_output_____"
],
[
"#Plot the number of pothole compaints per month of that data set\n\n\n#Change created on to a date-time object \n#WHERE SHOULD WE PUT THIS????\ndata_311['CREATED_ON'] = pd.to_datetime(data_311['CREATED_ON'])\n\n#Change the index to a date time object \nhighlandpark_potholes.index = highlandpark_potholes['CREATED_ON']\n\n\n#Plot the figure \nhighlandpark_potholes['REQUEST_ID'].resample(\"M\").count().plot(title=\"Highland Park Potholes\", figsize=(10,6))",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a265e60cea89662671276723354274a48f80605
| 1,486 |
ipynb
|
Jupyter Notebook
|
Deck.ipynb
|
sharmaronak79/python
|
7d000a613087e522005083453630272bb48bb2ed
|
[
"MIT"
] | null | null | null |
Deck.ipynb
|
sharmaronak79/python
|
7d000a613087e522005083453630272bb48bb2ed
|
[
"MIT"
] | null | null | null |
Deck.ipynb
|
sharmaronak79/python
|
7d000a613087e522005083453630272bb48bb2ed
|
[
"MIT"
] | null | null | null | 24.360656 | 105 | 0.487214 |
[
[
[
"class Deck:\n \n def __init__(self):\n self.deck = [] # start with an empty list\n for suit in suits:\n for rank in ranks:\n self.deck.append(Card(suit,rank)) # build Card objects and add them to the list\n \n def __str__(self):\n deck_comp = '' # start with an empty string\n for card in self.deck:\n deck_comp += '\\n '+card.__str__() # add each Card object's print string\n return 'The deck has:' + deck_comp\n\n def shuffle(self):\n random.shuffle(self.deck)\n \n def deal(self):\n single_card = self.deck.pop()\n return single_card",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code"
]
] |
4a2661b06fe30173f735f65ac84177354edeb146
| 10,657 |
ipynb
|
Jupyter Notebook
|
VideoColorizerColab.ipynb
|
lmiroslaw/DeOldify
|
f8bda0b2d85ef6878dcfe656752b32cf57f37e37
|
[
"MIT"
] | null | null | null |
VideoColorizerColab.ipynb
|
lmiroslaw/DeOldify
|
f8bda0b2d85ef6878dcfe656752b32cf57f37e37
|
[
"MIT"
] | null | null | null |
VideoColorizerColab.ipynb
|
lmiroslaw/DeOldify
|
f8bda0b2d85ef6878dcfe656752b32cf57f37e37
|
[
"MIT"
] | null | null | null | 31.160819 | 592 | 0.610022 |
[
[
[
"<a href=\"https://colab.research.google.com/github/lmiroslaw/DeOldify/blob/master/VideoColorizerColab.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"### **<font color='blue'> Video Colorizer </font>**",
"_____no_output_____"
],
[
"#◢ DeOldify - Colorize your own videos!\n\n\n_FYI: This notebook is intended as a tool to colorize gifs and short videos, if you are trying to convert longer video you may hit the limit on processing space. Running the Jupyter notebook on your own machine is recommended (and faster) for larger video sizes._\n\n####**Credits:**\n\nBig special thanks to:\n\nRobert Bell for all his work on the video Colab notebook, and paving the way to video in DeOldify!\n\nDana Kelley for doing things, breaking stuff & having an opinion on everything.",
"_____no_output_____"
],
[
"\n\n---\n\n\n#◢ Verify Correct Runtime Settings\n\n**<font color='#FF000'> IMPORTANT </font>**\n\nIn the \"Runtime\" menu for the notebook window, select \"Change runtime type.\" Ensure that the following are selected:\n* Runtime Type = Python 3\n* Hardware Accelerator = GPU \n",
"_____no_output_____"
],
[
"#◢ Git clone and install DeOldify",
"_____no_output_____"
]
],
[
[
"!git clone https://github.com/lmiroslaw/DeOldify.git DeOldify",
"_____no_output_____"
],
[
"cd DeOldify",
"_____no_output_____"
]
],
[
[
"#◢ Setup",
"_____no_output_____"
]
],
[
[
"#NOTE: This must be the first call in order to work properly!\nfrom deoldify import device\nfrom deoldify.device_id import DeviceId\n#choices: CPU, GPU0...GPU7\ndevice.set(device=DeviceId.GPU0)\n\nimport torch\n\nif not torch.cuda.is_available():\n print('GPU not available.')\n\nfrom os import path",
"_____no_output_____"
],
[
"!pip install -r colab_requirements.txt",
"_____no_output_____"
],
[
"import fastai\nfrom deoldify.visualize import *\nfrom pathlib import Path\ntorch.backends.cudnn.benchmark=True\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=UserWarning, message=\".*?Your .*? set is empty.*?\")",
"_____no_output_____"
],
[
"!mkdir 'models'\n!wget https://data.deepai.org/deoldify/ColorizeVideo_gen.pth -O ./models/ColorizeVideo_gen.pth",
"_____no_output_____"
],
[
"!wget https://github.com/lmiroslaw/DeOldify/blob/4b5da3d12ee3d02eeda1155ebcc85025f1d7fb4b/resource_images/chckmk.png -O ./resource_images/watermark.png",
"_____no_output_____"
],
[
"colorizer = get_video_colorizer()",
"_____no_output_____"
]
],
[
[
"#◢ Instructions",
"_____no_output_____"
],
[
"### source_url\nType in a url hosting a video from YouTube, Imgur, Twitter, Reddit, Vimeo, etc. Many sources work! GIFs also work. Full list here: https://ytdl-org.github.io/youtube-dl/supportedsites.html NOTE: If you want to use your own video, upload it first to a site like YouTube. \n\n### render_factor\nThe default value of 21 has been carefully chosen and should work -ok- for most scenarios (but probably won't be the -best-). This determines resolution at which the color portion of the video is rendered. Lower resolution will render faster, and colors also tend to look more vibrant. Older and lower quality film in particular will generally benefit by lowering the render factor. Higher render factors are often better for higher quality videos and inconsistencies (flashy render) will generally be reduced, but the colors may get slightly washed out.\n\n### watermarked\nSelected by default, this places a watermark icon of a palette at the bottom left corner of the image. This is intended to be a standard way to convey to others viewing the image that it is colorized by AI. We want to help promote this as a standard, especially as the technology continues to improve and the distinction between real and fake becomes harder to discern. This palette watermark practice was initiated and lead by the company MyHeritage in the MyHeritage In Color feature (which uses a newer version of DeOldify than what you're using here).\n\n### How to Download a Copy\nSimply right click on the displayed video and click \"Save video as...\"!\n\n## Pro Tips\n1. If a video takes a long time to render and you're wondering how well the frames will actually be colorized, you can preview how well the frames will be rendered at each render_factor by using the code at the bottom. Just stop the video rendering by hitting the stop button on the cell, then run that bottom cell under \"See how well render_factor values perform on a frame here\". It's not perfect and you may still need to experiment a bit especially when it comes to figuring out how to reduce frame inconsistency. But it'll go a long way in narrowing down what actually works.\n2. If videos are taking way too much time for your liking, running the Jupyter notebook VideoColorizer.ipynb on your own machine (with DeOldify installed) will generally be much faster (as long as you have the hardware for it). \n3. Longer videos (running multiple minutes) are going to have a rough time on Colabs. You'll be much better off using a local install of DeOldify instead in this case.\n\n## Troubleshooting\nThe video player may wind up not showing up, in which case- make sure to wait for the Jupyter cell to complete processing first (the play button will stop spinning). Then follow these alternative download instructions\n\n1. In the menu to the left, click Files\n2. If you don't see the 'DeOldify' folder, click \"Refresh\"\n3. By default, rendered video will be in /DeOldify/video/result/\n\nIf a video you downloaded doesn't play, it's probably because the cell didn't complete processing and the video is in a half-finished state.",
"_____no_output_____"
],
[
"#◢ Colorize!!",
"_____no_output_____"
]
],
[
[
"source_url = '' #@param {type:\"string\"}\nrender_factor = 21 #@param {type: \"slider\", min: 5, max: 40}\nwatermarked = True #@param {type:\"boolean\"}\n\nif source_url is not None and source_url !='':\n video_path = colorizer.colorize_from_url(source_url, 'video.mp4', render_factor, watermarked=watermarked)\n show_video_in_notebook(video_path)\nelse:\n print('Provide a video url and try again.')",
"_____no_output_____"
]
],
[
[
"#◢ Download",
"_____no_output_____"
]
],
[
[
"import google.colab.files.download('video/result/video1.mp4')",
"_____no_output_____"
]
],
[
[
"## See how well render_factor values perform on a frame here",
"_____no_output_____"
]
],
[
[
"for i in range(10,40,2):\n colorizer.vis.plot_transformed_image('video/bwframes/video/00001.jpg', render_factor=i, display_render_factor=True, figsize=(8,8))",
"_____no_output_____"
]
],
[
[
"---\n#⚙ Recommended video and gif sources \n* [/r/Download Result/](./DeOldify/video/result/video.mp4)\n* [/r/Nickelodeons/](https://www.reddit.com/r/Nickelodeons/)\n* [r/silentmoviegifs](https://www.reddit.com/r/silentmoviegifs/)\n* https://twitter.com/silentmoviegifs ",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a267087688c4250f9599b549443614a656857ab
| 4,073 |
ipynb
|
Jupyter Notebook
|
Part 2 - Regression/Section 5 - Multiple Linear Regression/my.ipynb
|
sandeshpatel/ml-learning
|
9fde30b519735b1d9f32be96e36dcf5b49e74ea1
|
[
"MIT"
] | null | null | null |
Part 2 - Regression/Section 5 - Multiple Linear Regression/my.ipynb
|
sandeshpatel/ml-learning
|
9fde30b519735b1d9f32be96e36dcf5b49e74ea1
|
[
"MIT"
] | null | null | null |
Part 2 - Regression/Section 5 - Multiple Linear Regression/my.ipynb
|
sandeshpatel/ml-learning
|
9fde30b519735b1d9f32be96e36dcf5b49e74ea1
|
[
"MIT"
] | null | null | null | 26.97351 | 124 | 0.481954 |
[
[
[
"class linear_model:\n \n def __init__(self, X, y):\n self.X = X\n self.theta = np.zeros((self.X.shape[1],1))\n self.y = np.atleast_2d(y)\n print(y.shape)\n \n \n\n def fit(self):\n \n print(self.theta)\n self.grad_desc()\n \n \n def grad_desc(self):\n for i in range(7000):\n self.theta = self.theta - 0.1/40*(np.transpose(np.transpose((self.X @ self.theta) - self.y ) @ self.X))\n print(self.theta)\n \n \n def predict(self, X):\n return X @ self.theta\n \n \n \n# Data Preprocessing Template\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('50_Startups.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1:].values\n\n#encoding catagorical data state \nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nlabelencoder_X = LabelEncoder()\nX[:, -1] = labelencoder_X.fit_transform(X[:, -1])\n\nonehotencoder = OneHotEncoder(categorical_features = [3])\nX = onehotencoder.fit_transform(X).toarray()\n\n#Avoind the dummy variable trap\nX = X[:, 1:]\n\nX = np.append(arr = np.ones((X.shape[0],1)).astype(int), values = X, axis = 1)\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.cross_validation import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n \nfrom sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\nsc_y = StandardScaler()\n#y_train = sc_y.fit_transform(y_train)\n#y_test = sc_y.fit_transform(y_test)\n\n \nlin_mod= linear_model(X_train, y_train)\nlin_mod.fit()\nprint(lin_mod.predict(X_test))\nprint(y_test)",
"(40, 1)\n[[ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]\n [ 0.]]\n[[ 0. ]\n [ -415.38222603]\n [ 333.57777773]\n [ 35726.28774249]\n [ 851.30163448]\n [ 4519.88277698]]\n[[ -6431.24565204]\n [ 23135.83035816]\n [ 23001.29120174]\n [-37470.34873742]\n [ 69091.03496055]\n [ 6714.79505165]\n [-41594.75515324]\n [-10654.71350313]\n [ 4522.98808012]\n [ 58474.6184455 ]]\n[[ 103282.38]\n [ 144259.4 ]\n [ 146121.95]\n [ 77798.83]\n [ 191050.39]\n [ 105008.31]\n [ 81229.06]\n [ 97483.56]\n [ 110352.25]\n [ 166187.94]]\n"
]
]
] |
[
"code"
] |
[
[
"code"
]
] |
4a2674c403ed36218477402dfe0b45218ceb4c44
| 677,432 |
ipynb
|
Jupyter Notebook
|
CMA-ES/lambda/lambda.ipynb
|
luzhijun/-
|
c9a7f39fd033a3ba3c57acbbd309c05ac17e6bef
|
[
"Apache-2.0"
] | 17 |
2016-11-18T03:15:14.000Z
|
2022-01-09T07:50:56.000Z
|
CMA-ES/lambda/lambda.ipynb
|
luzhijun/-
|
c9a7f39fd033a3ba3c57acbbd309c05ac17e6bef
|
[
"Apache-2.0"
] | null | null | null |
CMA-ES/lambda/lambda.ipynb
|
luzhijun/-
|
c9a7f39fd033a3ba3c57acbbd309c05ac17e6bef
|
[
"Apache-2.0"
] | 7 |
2016-11-20T10:20:57.000Z
|
2021-04-20T05:29:57.000Z
| 1,648.253041 | 100,618 | 0.946207 |
[
[
[
"# $\\lambda$对CMA性能影响研究",
"_____no_output_____"
],
[
"<link rel=\"stylesheet\" href=\"http://yandex.st/highlightjs/6.2/styles/googlecode.min.css\">\n \n<script src=\"http://code.jquery.com/jquery-1.7.2.min.js\"></script>\n<script src=\"http://yandex.st/highlightjs/6.2/highlight.min.js\"></script>\n \n<script>hljs.initHighlightingOnLoad();</script>\n<script type=\"text/javascript\">\n $(document).ready(function(){\n $(\"h2,h3,h4,h5,h6\").each(function(i,item){\n var tag = $(item).get(0).localName;\n $(item).attr(\"id\",\"wow\"+i);\n $(\"#category\").append('<a class=\"new'+tag+'\" href=\"#wow'+i+'\">'+$(this).text()+'</a></br>');\n $(\".newh2\").css(\"margin-left\",0);\n $(\".newh3\").css(\"margin-left\",20);\n $(\".newh4\").css(\"margin-left\",40);\n $(\".newh5\").css(\"margin-left\",60);\n $(\".newh6\").css(\"margin-left\",80);\n });\n });\n</script>\n<div id=\"category\"></div>",
"_____no_output_____"
],
[
"**摘要**: $\\lambda$大小影响单次计算时间,根据文档合理的$\\lambda$在[5,2n+10]之间,Hansen给出的推荐值为$4+3\\times \\lfloor ln(N) \\rfloor$,本文固定mu=0.5,sigma=0.3,根据不同的$\\lambda$对不同函数绘图分析.\n### 第一阶段测试\n* 函数:[rosen,bukin,griewank]\n* 最小值:[0,6.82,0]\n* 维度:[130]\n* $\\lambda$:[5,18,20,50,80,110,140]",
"_____no_output_____"
]
],
[
[
"%pylab inline\nimport pandas as pd\nfrom pandas import Series, DataFrame\nimport pickle\nplt.rc('figure', figsize=(12, 8))",
"Populating the interactive namespace from numpy and matplotlib\n"
],
[
"with open(\"data.tl\",'r') as f:\n result_list=pickle.load(f)",
"_____no_output_____"
],
[
"def convertdic(result_list):\n res=[{}]\n for row in result_list:\n for i,d in enumerate(res):\n if row[-1] not in d.keys():\n d[row[-1]]=row[:-1]\n break\n if i==len(res)-1:\n res.append({row[-1]:row[:-1]})\n break\n return res\n\ndef draw(title,tail):\n bs=[row[:tail] for row in result_list if row[tail]==title]\n bs=np.array(bs)\n lmax=max(bs[:,-1])\n bs=bs/bs.max(0)\n bs=bs*[1,1,1,1,lmax]\n bs=convertdic(bs)\n df=DataFrame(bs[0],index=['countiter','countevals','result','time(s)'])\n df=df.stack().unstack(0)\n df.columns.name='values'\n df.index.name='lambda'\n df.plot(kind='bar',stacked=False,colormap='jet',alpha=0.9,title=title,figsize=(12,8));\n df.plot(kind='area',stacked=False,colormap='jet',alpha=0.5,title=title,figsize=(12,8),xticks=np.arange(5,lmax,10));\n \ndef drawSigmaLines(t,xl):\n sigmas=[[row[-3],row[-1]] for row in result_list if row[-2]==t]\n ss=map(list,zip(*sigmas))[1]\n M=max(map(len,ss))\n for s in sigmas:\n for i in range(M-len(s[1])):\n s[1].append(None)\n df1=DataFrame({s[0]:s[1] for s in sigmas})\n df1.columns.name='sigma'\n df1.index.name='lambda'\n df1.plot(title=t,fontsize=10,linewidth=2,alpha=0.8,colormap='rainbow',xlim=(0,xl))",
"_____no_output_____"
],
[
"#bukin函数\ndraw('bukin',-1)",
"_____no_output_____"
],
[
"#rosen函数 \ndraw('rosen',-1)",
"_____no_output_____"
],
[
"#griwank函数\ndraw('griewank',-1)",
"_____no_output_____"
]
],
[
[
"### 第二阶段测试\n* 函数:[sphere,cigar,elli]\n* 最小值:[0,0,0]\n* 维度:[208]\n* $\\lambda$:[5,10,14,18,20,22,26,60,100,140,180,220]",
"_____no_output_____"
]
],
[
[
"with open(\"data1.tl\",'r') as f:\n result_list=pickle.load(f)",
"_____no_output_____"
],
[
"#sphere函数\ndraw('sphere',-2)\ndrawSigmaLines('sphere',300)",
"_____no_output_____"
],
[
"#cigar函数\ndraw('cigar',-2)\ndrawSigmaLines('cigar',300)",
"_____no_output_____"
],
[
"#elli函数\ndraw('elli',-2)\ndrawSigmaLines('elli',300)",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
4a26760c01f759b822f842cb058d239bbd228bbc
| 61,801 |
ipynb
|
Jupyter Notebook
|
tutorials/W3D1_BayesianDecisions/ED_W3D1_Tutorial3.ipynb
|
eduardojdiniz/CompNeuro
|
20269e66540dc4e802273735c97323020ee37406
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null |
tutorials/W3D1_BayesianDecisions/ED_W3D1_Tutorial3.ipynb
|
eduardojdiniz/CompNeuro
|
20269e66540dc4e802273735c97323020ee37406
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null |
tutorials/W3D1_BayesianDecisions/ED_W3D1_Tutorial3.ipynb
|
eduardojdiniz/CompNeuro
|
20269e66540dc4e802273735c97323020ee37406
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | null | null | null | 40.026554 | 719 | 0.578275 |
[
[
[
"<a href=\"https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W3D1_BayesianDecisions/W3D1_Tutorial3.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"# Bonus Tutorial : Fitting to data\n**Week 3, Day 1: Bayesian Decisions**\n\n**By Neuromatch Academy**\n\n__Content creators:__ Vincent Valton, Konrad Kording\n\n__Content reviewers:__ Matt Krause, Jesse Livezey, Karolina Stosio, Saeed Salehi, Michael Waskom\n\n##**Note: This is bonus material, included from NMA 2020. It has not been substantially revised for 2021.**\nThis means that the notation and standards are slightly different and some of the references to other days in NMA are outdated. We include it here because it covers fitting Bayesian models to data, which may be of interest to many students.\n",
"_____no_output_____"
],
[
"**Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**\n\n<p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p>",
"_____no_output_____"
],
[
"---\n# Tutorial objectives\n \nIn the first two tutorials, we learned about Bayesian models and decisions more intuitively, using demos. In this notebook, we will dive into using math and code to fit Bayesian models to data. \n\nWe'll have a look at computing all the necessary steps to perform model inversion (estimate the model parameters such as $p_{common}$ that generated data similar to that of a participant). We will describe all the steps of the generative model first, and in the last exercise we will use all these steps to estimate the parameter $p_{common}$ of a single participant using simulated data. \n\nThe generative model will be a Bayesian model we saw in Tutorial 2: a mixture of Gaussian prior and a Gaussian likelihood.\nSteps:\n\n* First, we'll create the prior, likelihood, posterior, etc in a form that will make it easier for us to visualise what is being computed and estimated at each step of the generative model: \n 1. Creating a mixture of Gaussian prior for multiple possible stimulus inputs\n 2. Generating the likelihood for multiple possible stimulus inputs\n 3. Estimating our posterior as a function of the stimulus input\n 4. Estimating a participant response given the posterior\n \n* Next, we'll perform the model inversion/fitting:\n 5. Create an distribution for the input as a function of possible inputs\n 6. Marginalization\n 7. Generate some data using the generative model provided\n 8. Perform model inversion (model fitting) using the generated data and see if you recover the orignal parameters.\n",
"_____no_output_____"
],
[
"---\n# Setup\n\nPlease execute the cell below to initialize the notebook environment",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom scipy.optimize import minimize",
"_____no_output_____"
],
[
"#@title Figure Settings\nimport ipywidgets as widgets\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\nplt.style.use(\"https://raw.githubusercontent.com/NeuromatchAcademy/course-content/NMA2020/nma.mplstyle\")",
"_____no_output_____"
],
[
"# @title Helper Functions\n\ndef my_gaussian(x_points, mu, sigma):\n \"\"\"\n Returns a Gaussian estimated at points `x_points`, with parameters: `mu` and `sigma`\n\n Args :\n x_points (numpy arrays of floats)- points at which the gaussian is evaluated\n mu (scalar) - mean of the Gaussian\n sigma (scalar) - std of the gaussian\n\n Returns:\n Gaussian evaluated at `x`\n \"\"\"\n p = np.exp(-(x_points-mu)**2/(2*sigma**2))\n return p / sum(p)\n\ndef moments_myfunc(x_points, function):\n \"\"\"\n DO NOT EDIT THIS FUNCTION !!!\n\n Returns the mean, median and mode of an arbitrary function\n\n Args :\n x_points (numpy array of floats) - x-axis values\n function (numpy array of floats) - y-axis values of the function evaluated at `x_points`\n\n Returns:\n (tuple of 3 scalars): mean, median, mode\n \"\"\"\n\n # Calc mode of arbitrary function\n mode = x_points[np.argmax(function)]\n\n # Calc mean of arbitrary function\n mean = np.sum(x_points * function)\n\n # Calc median of arbitrary function\n cdf_function = np.zeros_like(x_points)\n accumulator = 0\n for i in np.arange(x_points.shape[0]):\n accumulator = accumulator + function[i]\n cdf_function[i] = accumulator\n idx = np.argmin(np.abs(cdf_function - 0.5))\n median = x_points[idx]\n\n return mean, median, mode\n\ndef plot_myarray(array, xlabel, ylabel, title):\n \"\"\" Plot an array with labels.\n\n Args :\n array (numpy array of floats)\n xlabel (string) - label of x-axis\n ylabel (string) - label of y-axis\n title (string) - title of plot\n\n Returns:\n None\n \"\"\"\n fig = plt.figure()\n ax = fig.add_subplot(111)\n colormap = ax.imshow(array, extent=[-10, 10, 8, -8])\n cbar = plt.colorbar(colormap, ax=ax)\n cbar.set_label('probability')\n ax.invert_yaxis()\n ax.set_xlabel(xlabel)\n ax.set_title(title)\n ax.set_ylabel(ylabel)\n ax.set_aspect('auto')\n return None\n\ndef plot_my_bayes_model(model) -> None:\n \"\"\"Pretty-print a simple Bayes Model (ex 7), defined as a function:\n\n Args:\n - model: function that takes a single parameter value and returns\n the negative log-likelihood of the model, given that parameter\n Returns:\n None, draws plot\n \"\"\"\n x = np.arange(-10,10,0.07)\n\n # Plot neg-LogLikelihood for different values of alpha\n alpha_tries = np.arange(0.01, 0.3, 0.01)\n nll = np.zeros_like(alpha_tries)\n for i_try in np.arange(alpha_tries.shape[0]):\n nll[i_try] = model(np.array([alpha_tries[i_try]]))\n\n plt.figure()\n plt.plot(alpha_tries, nll)\n plt.xlabel('p_independent value')\n plt.ylabel('negative log-likelihood')\n\n # Mark minima\n ix = np.argmin(nll)\n plt.scatter(alpha_tries[ix], nll[ix], c='r', s=144)\n\n #plt.axvline(alpha_tries[np.argmin(nll)])\n plt.title('Sample Output')\n plt.show()\n\n return None\n\n\ndef plot_simulated_behavior(true_stim, behaviour):\n fig = plt.figure(figsize=(7, 7))\n ax = fig.add_subplot(1,1,1)\n ax.set_facecolor('xkcd:light grey')\n plt.plot(true_stim, true_stim - behaviour, '-k', linewidth=2, label='data')\n plt.axvline(0, ls='dashed', color='grey')\n plt.axhline(0, ls='dashed', color='grey')\n plt.legend()\n plt.xlabel('Position of true visual stimulus (cm)')\n plt.ylabel('Participant deviation from true stimulus (cm)')\n plt.title('Participant behavior')\n plt.show()\n\n return None",
"_____no_output_____"
]
],
[
[
"---\n# Introduction\n",
"_____no_output_____"
]
],
[
[
"# @title Video 1: Intro\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=\"BV13g4y1i7je\", width=854, height=480, fs=1)\n print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=\"YSKDhnbjKmA\", width=854, height=480, fs=1, rel=0)\n print('Video available at https://youtube.com/watch?v=' + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\ndisplay(out)",
"_____no_output_____"
]
],
[
[
"\n\n\nHere is a graphical representation of the generative model:\n\n 1. We present a stimulus $x$ to participants. \n 2. The brain encodes this true stimulus $x$ noisily (this is the brain's representation of the true visual stimulus: $p(\\tilde x|x)$.\n 3. The brain then combine this brain encoded stimulus (likelihood: $p(\\tilde x|x)$) with prior information (the prior: $p(x)$) to make up the brain's estimated position of the true visual stimulus, the posterior: $p(x|\\tilde x)$. \n 3. This brain's estimated stimulus position: $p(x|\\tilde x)$, is then used to make a response: $\\hat x$, which is the participant's noisy estimate of the stimulus position (the participant's percept). \n \nTypically the response $\\hat x$ also includes some motor noise (noise due to the hand/arm move being not 100% accurate), but we'll ignore it in this tutorial and assume there is no motor noise.\n\n\n\nWe will use the same experimental setup as in [tutorial 2](https://colab.research.google.com/drive/15pbgrfGjSKbUQoX51RdcNe3UXb4R5RRx#scrollTo=tF5caxVGYURh) but with slightly different probabilities. This time, participants are told that they need to estimate the sound location of a puppet that is hidden behind a curtain. The participants are told to use auditory information and are also informed that the sound could come from 2 possible causes: a common cause (95% of the time it comes from the puppet hidden behind the curtain at position 0), or an independent cause (5% of the time the sound comes from loud-speakers at more distant locations).",
"_____no_output_____"
],
[
"---\n# Section 1: Likelihood array\n \nFirst, we want to create a likelihood, but for the sake of visualization (and to consider all possible brain encodings) we will create multiple likelihoods $f(x)=p(\\tilde x|x)$ (one for each potential encoded stimulus: $\\tilde x$). We will then be able to visualize the likelihood as a function of hypothesized true stimulus positions: $x$ on the x-axis and encoded position $\\tilde x$ on the y-axis.\n\n\n Using the equation for the `my_gaussian` and the values in `hypothetical_stim`:\n* Create a Gaussian likelihood with mean varying from `hypothetical_stim`, keeping $\\sigma_{likelihood}$ constant at 1.\n* Each likelihood will have a different mean and thus a different row-likelihood of your 2D array, such that you end up with a likelihood array made up of 1,000 row-Gaussians with different means. (_Hint_: `np.tile` won't work here. You may need a for-loop).\n* Plot the array using the function `plot_myarray()` already pre-written and commented-out in your script",
"_____no_output_____"
],
[
"### Exercise 1. Implement the auditory likelihood as a function of true stimulus position",
"_____no_output_____"
]
],
[
[
"x = np.arange(-10, 10, 0.1)\nhypothetical_stim = np.linspace(-8, 8, 1000)\n\ndef compute_likelihood_array(x_points, stim_array, sigma=1.):\n\n # initializing likelihood_array\n likelihood_array = np.zeros((len(stim_array), len(x_points)))\n\n # looping over stimulus array\n for i in range(len(stim_array)):\n ########################################################################\n ## Insert your code here to:\n ## - Generate a likelihood array using `my_gaussian` function,\n ## with std=1, and varying the mean using `stim_array` values.\n ## remove the raise below to test your function\n raise NotImplementedError(\"You need to complete the function!\")\n ########################################################################\n likelihood_array[i, :] = ...\n\n return likelihood_array\n\n# Uncomment following lines to test your code\n# likelihood_array = compute_likelihood_array(x, hypothetical_stim)\n# plot_myarray(likelihood_array,\n# '$x$ : Potential true stimulus $x$',\n# 'Possible brain encoding $\\~x$',\n# 'Likelihood as a function of $\\~x$ : $p(\\~x | x)$')",
"_____no_output_____"
],
[
"# to_remove solution\nx = np.arange(-10, 10, 0.1)\nhypothetical_stim = np.linspace(-8, 8, 1000)\n\ndef compute_likelihood_array(x_points, stim_array, sigma=1.):\n\n # initializing likelihood_array\n likelihood_array = np.zeros((len(stim_array), len(x_points)))\n\n # looping over stimulus array\n for i in range(len(stim_array)):\n likelihood_array[i, :] = my_gaussian(x_points, stim_array[i], sigma)\n\n return likelihood_array\n\nlikelihood_array = compute_likelihood_array(x, hypothetical_stim)\n\nwith plt.xkcd():\n plot_myarray(likelihood_array,\n '$x$ : Potential true stimulus $x$',\n 'Possible brain encoding $\\~x$',\n 'Likelihood as a function of $\\~x$ : $p(\\~x | x)$')",
"_____no_output_____"
]
],
[
[
"---\n# Section 2: Causal mixture of Gaussian prior\n",
"_____no_output_____"
]
],
[
[
"# @title Video 2: Prior array\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=\"BV1WA411e7gM\", width=854, height=480, fs=1)\n print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=\"F0IYpUicXu4\", width=854, height=480, fs=1, rel=0)\n print('Video available at https://youtube.com/watch?v=' + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\ndisplay(out)",
"_____no_output_____"
]
],
[
[
"\nAs in Tutorial 2, we want to create a prior that will describe the participants' prior knowledge that, 95% of the time sounds come from a common position around the puppet, while during the remaining 5% of the time, they arise from another independent position. We will embody this information into a prior using a mixture of Gaussians. For visualization reasons, we will create a prior that has the same shape (form) as the likelihood array we created in the previous exercise. That is, we want to create a mixture of Gaussian prior as a function the the brain encoded stimulus $\\tilde x$. Since the prior does not change as a function of $\\tilde x$ it will be identical for each row of the prior 2D array. \n\nUsing the equation for the Gaussian `my_gaussian`:\n* Generate a Gaussian $Common$ with mean 0 and standard deviation 0.5\n* Generate another Gaussian $Independent$ with mean 0 and standard deviation 10\n* Combine the two Gaussians (Common + Independent) to make a new prior by mixing the two Gaussians with mixing parameter $p_{independent}$ = 0.05. Make it such that the peakier Gaussian has 95% of the weight (don't forget to normalize afterwards)\n* This will be the first row of your prior 2D array\n* Now repeat this for varying brain encodings $\\tilde x$. Since the prior does not depend on $\\tilde x$ you can just repeat the prior for each $\\tilde x$ (hint: use np.tile) that row prior to make an array of 1,000 (i.e. `hypothetical_stim.shape[0]`) row-priors.\n* Plot the matrix using the function `plot_myarray()` already pre-written and commented-out in your script",
"_____no_output_____"
],
[
"### Exercise 2: Implement the prior array",
"_____no_output_____"
]
],
[
[
"x = np.arange(-10, 10, 0.1)\n\ndef calculate_prior_array(x_points, stim_array, p_indep,\n prior_mean_common=.0, prior_sigma_common=.5,\n prior_mean_indep=.0, prior_sigma_indep=10):\n \"\"\"\n 'common' stands for common\n 'indep' stands for independent\n \"\"\"\n\n prior_common = my_gaussian(x_points, prior_mean_common, prior_sigma_common)\n prior_indep = my_gaussian(x_points, prior_mean_indep, prior_sigma_indep)\n\n ############################################################################\n ## Insert your code here to:\n ## - Create a mixture of gaussian priors from 'prior_common'\n ## and 'prior_indep' with mixing parameter 'p_indep'\n ## - normalize\n ## - repeat the prior array and reshape it to make a 2D array\n ## of 1000 rows of priors (Hint: use np.tile() and np.reshape())\n ## remove the raise below to test your function\n raise NotImplementedError(\"You need to complete the function!\")\n ############################################################################\n\n prior_mixed = ...\n prior_mixed /= ... # normalize\n\n prior_array = np.tile(...).reshape(...)\n return prior_array\n\np_independent=.05\n# Uncomment following lines, once the task is complete.\n# prior_array = calculate_prior_array(x, hypothetical_stim, p_independent)\n# plot_myarray(prior_array,\n# 'Hypothesized position $x$', 'Brain encoded position $\\~x$',\n# 'Prior as a fcn of $\\~x$ : $p(x|\\~x)$')",
"_____no_output_____"
],
[
"# to_remove solution\nx = np.arange(-10, 10, 0.1)\n\ndef calculate_prior_array(x_points, stim_array, p_indep,\n prior_mean_common=.0, prior_sigma_common=.5,\n prior_mean_indep=.0, prior_sigma_indep=10):\n \"\"\"\n 'common' stands for common\n 'indep' stands for independent\n \"\"\"\n\n prior_common = my_gaussian(x_points, prior_mean_common, prior_sigma_common)\n prior_indep = my_gaussian(x_points, prior_mean_indep, prior_sigma_indep)\n\n prior_mixed = (1 - p_indep) * prior_common + (p_indep * prior_indep)\n prior_mixed /= np.sum(prior_mixed) # normalize\n\n prior_array = np.tile(prior_mixed, len(stim_array)).reshape(len(stim_array), -1)\n return prior_array\n\np_independent=.05\nprior_array = calculate_prior_array(x, hypothetical_stim, p_independent)\n\nwith plt.xkcd():\n plot_myarray(prior_array,\n 'Hypothesized position $x$', 'Brain encoded position $\\~x$',\n 'Prior as a fcn of $\\~x$ : $p(x|\\~x)$')",
"_____no_output_____"
]
],
[
[
"---\n# Section 3: Bayes rule and Posterior array",
"_____no_output_____"
]
],
[
[
"# @title Video 3: Posterior array\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=\"BV18K411H7Tc\", width=854, height=480, fs=1)\n print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=\"HpOzXZUKFJc\", width=854, height=480, fs=1, rel=0)\n print('Video available at https://youtube.com/watch?v=' + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\ndisplay(out)",
"_____no_output_____"
]
],
[
[
"We now want to calcualte the posterior using *Bayes Rule*. Since we have already created a likelihood and a prior for each brain encoded position $\\tilde x$, all we need to do is to multiply them row-wise. That is, each row of the posterior array will be the posterior resulting from the multiplication of the prior and likelihood of the same equivalent row.\n\nMathematically:\n\n\\begin{eqnarray}\n Posterior\\left[i, :\\right] \\propto Likelihood\\left[i, :\\right] \\odot Prior\\left[i, :\\right]\n\\end{eqnarray}\n\nwhere $\\odot$ represents the [Hadamard Product](https://en.wikipedia.org/wiki/Hadamard_product_(matrices)) (i.e., elementwise multiplication) of the corresponding prior and likelihood row vectors `i` from each matrix.\n\nFollow these steps to build the posterior as a function of the brain encoded stimulus $\\tilde x$:\n* For each row of the prior and likelihood (i.e. each possible brain encoding $\\tilde x$), fill in the posterior matrix so that every row of the posterior array represents the posterior density for a different brain encode $\\tilde x$.\n* Plot the array using the function `plot_myarray()` already pre-written and commented-out in your script\n\nOptional:\n* Do you need to operate on one element--or even one row--at a time? NumPy operations can often process an entire matrix in a single \"vectorized\" operation. This approach is often much faster and much easier to read than an element-by-element calculation. Try to write a vectorized version that calculates the posterior without using any for-loops. _Hint_: look at `np.sum` and its keyword arguments.",
"_____no_output_____"
],
[
"### Exercise 3: Calculate the posterior as a function of the hypothetical stimulus x",
"_____no_output_____"
]
],
[
[
"def calculate_posterior_array(prior_array, likelihood_array):\n\n ############################################################################\n ## Insert your code here to:\n ## - calculate the 'posterior_array' from the given\n ## 'prior_array', 'likelihood_array'\n ## - normalize\n ## remove the raise below to test your function\n raise NotImplementedError(\"You need to complete the function!\")\n ############################################################################\n posterior_array = ...\n posterior_array /= ... # normalize each row separately\n\n return posterior_array\n\n# Uncomment following lines, once the task is complete.\n# posterior_array = calculate_posterior_array(prior_array, likelihood_array)\n# plot_myarray(posterior_array,\n# 'Hypothesized Position $x$',\n# 'Brain encoded Stimulus $\\~x$',\n# 'Posterior as a fcn of $\\~x$ : $p(x | \\~x)$')",
"_____no_output_____"
],
[
"# to_remove solution\ndef calculate_posterior_array(prior_array, likelihood_array):\n\n posterior_array = prior_array * likelihood_array\n posterior_array /= posterior_array.sum(axis=1, keepdims=True) # normalize each row separately\n\n return posterior_array\n\nposterior_array = calculate_posterior_array(prior_array, likelihood_array)\nwith plt.xkcd():\n plot_myarray(posterior_array,\n 'Hypothesized Position $x$',\n 'Brain encoded Stimulus $\\~x$',\n 'Posterior as a fcn of $\\~x$ : $p(x | \\~x)$')",
"_____no_output_____"
]
],
[
[
"---\n# Section 4: Estimating the position $\\hat x$",
"_____no_output_____"
]
],
[
[
"# @title Video 4: Binary decision matrix\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=\"BV1sZ4y1u74e\", width=854, height=480, fs=1)\n print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=\"gy3GmlssHgQ\", width=854, height=480, fs=1, rel=0)\n print('Video available at https://youtube.com/watch?v=' + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\ndisplay(out)",
"_____no_output_____"
]
],
[
[
"Now that we have a posterior distribution (for each possible brain encoding $\\tilde x$)that represents the brain's estimated stimulus position: $p(x|\\tilde x)$, we want to make an estimate (response) of the sound location $\\hat x$ using the posterior distribution. This would represent the subject's estimate if their (for us as experimentalist unobservable) brain encoding took on each possible value. \n\nThis effectively encodes the *decision* that a participant would make for a given brain encoding $\\tilde x$. In this exercise, we make the assumptions that participants take the mean of the posterior (decision rule) as a response estimate for the sound location (use the function `moments_myfunc()` provided to calculate the mean of the posterior).\n\nUsing this knowledge, we will now represent $\\hat x$ as a function of the encoded stimulus $\\tilde x$. This will result in a 2D binary decision array. To do so, we will scan the posterior matrix (i.e. row-wise), and set the array cell value to 1 at the mean of the row-wise posterior.\n\n**Suggestions**\n* For each brain encoding $\\tilde x$ (row of the posterior array), calculate the mean of the posterior, and set the corresponding cell of the binary decision array to 1. (e.g., if the mean of the posterior is at position 0, then set the cell with x_column == 0 to 1).\n* Plot the matrix using the function `plot_myarray()` already pre-written and commented-out in your script",
"_____no_output_____"
],
[
"### Exercise 4: Calculate the estimated response as a function of the hypothetical stimulus x",
"_____no_output_____"
]
],
[
[
"def calculate_binary_decision_array(x_points, posterior_array):\n\n binary_decision_array = np.zeros_like(posterior_array)\n\n for i in range(len(posterior_array)):\n\n ########################################################################\n ## Insert your code here to:\n ## - For each hypothetical stimulus x (row of posterior),\n ## calculate the mean of the posterior using the povided function\n ## `moments_myfunc()`, and set the corresponding cell of the\n ## Binary Decision array to 1.\n ## Hint: you can run 'help(moments_myfunc)' to see the docstring\n ## remove the raise below to test your function\n raise NotImplementedError(\"You need to complete the function!\")\n ########################################################################\n # calculate mean of posterior using 'moments_myfunc'\n mean, _, _ = ...\n # find the postion of mean in x_points (closest position)\n idx = ...\n binary_decision_array[i, idx] = 1\n\n return binary_decision_array\n\n# Uncomment following lines, once the task is complete.\n# binary_decision_array = calculate_binary_decision_array(x, posterior_array)\n# plot_myarray(binary_decision_array,\n# 'Chosen position $\\hat x$', 'Brain-encoded Stimulus $\\~ x$',\n# 'Sample Binary Decision Array')",
"_____no_output_____"
],
[
"# to_remove solution\ndef calculate_binary_decision_array(x_points, posterior_array):\n\n binary_decision_array = np.zeros_like(posterior_array)\n\n for i in range(len(posterior_array)):\n # calculate mean of posterior using 'moments_myfunc'\n mean, _, _ = moments_myfunc(x_points, posterior_array[i])\n # find the postion of mean in x_points (closest position)\n idx = np.argmin(np.abs(x_points - mean))\n binary_decision_array[i, idx] = 1\n\n return binary_decision_array\n\nbinary_decision_array = calculate_binary_decision_array(x, posterior_array)\nwith plt.xkcd():\n plot_myarray(binary_decision_array,\n 'Chosen position $\\hat x$', 'Brain-encoded Stimulus $\\~ x$',\n 'Sample Binary Decision Array')",
"_____no_output_____"
]
],
[
[
"---\n# Section 5: Probabilities of encoded stimuli",
"_____no_output_____"
]
],
[
[
"# @title Video 5: Input array\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=\"BV1pT4y1E7wv\", width=854, height=480, fs=1)\n print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=\"C1d1n_Si83o\", width=854, height=480, fs=1, rel=0)\n print('Video available at https://youtube.com/watch?v=' + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\ndisplay(out)",
"_____no_output_____"
]
],
[
[
"Because we as experimentalists can not know the encoding $\\tilde x$ of the stimulus $x$ that we do know, we had to compute the binary decision array for each possible encoding. \n\nFirst however, we need to calculate how likely each possible encoding is given the true stimulus. That is, we will now create a Gaussian centered around the true presented stimulus, with $\\sigma = 1$, and repeat that gaussian distribution across as a function of potentially encoded values $\\tilde x$. That is, we want to make a *column* gaussian centered around the true presented stimulus, and repeat this *column* Gaussian across all hypothetical stimulus values $x$.\n\nThis, effectively encodes the distribution of the brain encoded stimulus (one single simulus, which we as experimentalists know) and enable us to link the true stimulus $x$, to potential encodings $\\tilde x$.\n\n**Suggestions**\n\nFor this exercise, we will assume the true stimulus is presented at direction 2.5\n* Create a Gaussian likelihood with $\\mu = 2.5$ and $\\sigma = 1.0$\n* Make this the first column of your array and repeat that *column* to fill in the true presented stimulus input as a function of hypothetical stimulus locations.\n* Plot the array using the function `plot_myarray()` already pre-written and commented-out in your script",
"_____no_output_____"
],
[
"### Exercise 5: Generate an input as a function of hypothetical stimulus x",
"_____no_output_____"
]
],
[
[
"def generate_input_array(x_points, stim_array, posterior_array,\n mean=2.5, sigma=1.):\n\n input_array = np.zeros_like(posterior_array)\n\n ########################################################################\n ## Insert your code here to:\n ## - Generate a gaussian centered on the true stimulus 2.5\n ## and sigma = 1. for each column\n ## remove the raise below to test your function\n raise NotImplementedError(\"You need to complete the function!\")\n ########################################################################\n for i in range(len(x_points)):\n input_array[:, i] = ...\n\n return input_array\n\n# Uncomment following lines, once the task is complete.\n# input_array = generate_input_array(x, hypothetical_stim, posterior_array)\n# plot_myarray(input_array,\n# 'Hypothetical Stimulus $x$', '$\\~x$',\n# 'Sample Distribution over Encodings:\\n $p(\\~x | x = 2.5)$')",
"_____no_output_____"
],
[
"# to_remove solution\ndef generate_input_array(x_points, stim_array, posterior_array,\n mean=2.5, sigma=1.):\n\n input_array = np.zeros_like(posterior_array)\n\n for i in range(len(x_points)):\n input_array[:, i] = my_gaussian(stim_array, mean, sigma)\n\n return input_array\n\ninput_array = generate_input_array(x, hypothetical_stim, posterior_array)\nwith plt.xkcd():\n plot_myarray(input_array,\n 'Hypothetical Stimulus $x$', '$\\~x$',\n 'Sample Distribution over Encodings:\\n $p(\\~x | x = 2.5)$')",
"_____no_output_____"
]
],
[
[
"---\n# Section 6: Normalization and expected estimate distribution",
"_____no_output_____"
]
],
[
[
"# @title Video 6: Marginalization\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=\"BV1qz4y1D71K\", width=854, height=480, fs=1)\n print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=\"5alwtNS4CGw\", width=854, height=480, fs=1, rel=0)\n print('Video available at https://youtube.com/watch?v=' + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\ndisplay(out)",
"_____no_output_____"
]
],
[
[
"Now that we have a true stimulus $x$ and a way to link it to potential encodings, we will be able to calculate the distribution of encodings and ultimately estimates. To integrate over all possible hypothetical values of $\\tilde x$ we marginalize, that is, we first compute the dot-product from the true presented stimulus and our binary decision array and then sum over x. \n\nMathematically, this means that we want to compute:\n\n\\begin{eqnarray}\n Marginalization Array = Input Array \\odot Binary Decision Array\n\\end{eqnarray}\n\n\\begin{eqnarray}\n Marginal = \\int_{\\tilde x} Marginalization Array\n\\end{eqnarray}\n\nSince we are performing integration over discrete values using arrays for visualization purposes, the integration reduces to a simple sum over $\\tilde x$.\n\n**Suggestions**\n\n* For each row of the input and binary arrays, calculate product of the two and fill in the 2D marginal array.\n* Plot the result using the function `plot_myarray()` already pre-written and commented-out in your script\n* Calculate and plot the marginal over `x` using the code snippet commented out in your script\n - Note how the limitations of numerical integration create artifacts on your marginal ",
"_____no_output_____"
],
[
"### Exercise 6: Implement the marginalization matrix",
"_____no_output_____"
]
],
[
[
"def my_marginalization(input_array, binary_decision_array):\n\n ############################################################################\n ## Insert your code here to:\n ## - Compute 'marginalization_array' by multiplying pointwise the Binary\n ## decision array over hypothetical stimuli and the Input array\n ## - Compute 'marginal' from the 'marginalization_array' by summing over x\n ## (hint: use np.sum() and only marginalize along the columns)\n ## remove the raise below to test your function\n raise NotImplementedError(\"You need to complete the function!\")\n ############################################################################\n\n marginalization_array = ...\n marginal = ... # note axis\n marginal /= ... # normalize\n\n return marginalization_array, marginal\n\n# Uncomment following lines, once the task is complete.\n# marginalization_array, marginal = my_marginalization(input_array, binary_decision_array)\n# plot_myarray(marginalization_array, 'estimated $\\hat x$', '$\\~x$', 'Marginalization array: $p(\\^x | \\~x)$')\n# plt.figure()\n# plt.plot(x, marginal)\n# plt.xlabel('$\\^x$')\n# plt.ylabel('probability')\n# plt.show()",
"_____no_output_____"
],
[
"# to_remove solution\ndef my_marginalization(input_array, binary_decision_array):\n\n marginalization_array = input_array * binary_decision_array\n marginal = np.sum(marginalization_array, axis=0) # note axis\n marginal /= marginal.sum() # normalize\n\n return marginalization_array, marginal\n\nmarginalization_array, marginal = my_marginalization(input_array, binary_decision_array)\nwith plt.xkcd():\n plot_myarray(marginalization_array, 'estimated $\\hat x$', '$\\~x$', 'Marginalization array: $p(\\^x | \\~x)$')\n plt.figure()\n plt.plot(x, marginal)\n plt.xlabel('$\\^x$')\n plt.ylabel('probability')\n plt.show()",
"_____no_output_____"
]
],
[
[
"---\n# Generate some data\n\nWe have seen how to calculate the posterior and marginalize to remove $\\tilde x$ and get $p(\\hat{x} \\mid x)$. Next, we will generate some artificial data for a single participant using the `generate_data()` function provided, and mixing parameter $p_{independent} = 0.1$. \n\nOur goal in the next exercise will be to recover that parameter. These parameter recovery experiments are a powerful method for planning and debugging Bayesian analyses--if you cannot recover the given parameters, something has gone wrong! Note that this value for $p_{independent}$ is not quite the same as our prior, which used $p_{independent} = 0.05.$ This lets us test out the complete model. \n\nPlease run the code below to generate some synthetic data. You do not need to edit anything, but check that the plot below matches what you would expect from the video. ",
"_____no_output_____"
]
],
[
[
"#@title\n#@markdown #### Run the 'generate_data' function (this cell)\ndef generate_data(x_stim, p_independent):\n \"\"\"\n DO NOT EDIT THIS FUNCTION !!!\n\n Returns generated data using the mixture of Gaussian prior with mixture\n parameter `p_independent`\n\n Args :\n x_stim (numpy array of floats) - x values at which stimuli are presented\n p_independent (scalar) - mixture component for the Mixture of Gaussian prior\n\n Returns:\n (numpy array of floats): x_hat response of participant for each stimulus\n \"\"\"\n x = np.arange(-10,10,0.1)\n x_hat = np.zeros_like(x_stim)\n\n prior_mean = 0\n prior_sigma1 = .5\n prior_sigma2 = 3\n prior1 = my_gaussian(x, prior_mean, prior_sigma1)\n prior2 = my_gaussian(x, prior_mean, prior_sigma2)\n\n prior_combined = (1-p_independent) * prior1 + (p_independent * prior2)\n prior_combined = prior_combined / np.sum(prior_combined)\n\n for i_stim in np.arange(x_stim.shape[0]):\n likelihood_mean = x_stim[i_stim]\n likelihood_sigma = 1\n likelihood = my_gaussian(x, likelihood_mean, likelihood_sigma)\n likelihood = likelihood / np.sum(likelihood)\n\n posterior = np.multiply(prior_combined, likelihood)\n posterior = posterior / np.sum(posterior)\n\n # Assumes participant takes posterior mean as 'action'\n x_hat[i_stim] = np.sum(x * posterior)\n return x_hat\n\n# Generate data for a single participant\ntrue_stim = np.array([-8, -4, -3, -2.5, -2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2,\n 2.5, 3, 4, 8])\nbehaviour = generate_data(true_stim, 0.10)\n\nplot_simulated_behavior(true_stim, behaviour)",
"_____no_output_____"
]
],
[
[
"---\n# Section 7: Model fitting",
"_____no_output_____"
]
],
[
[
"# @title Video 7: Log likelihood\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=\"BV1Yf4y1R7ST\", width=854, height=480, fs=1)\n print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=\"jbYauFpyZhs\", width=854, height=480, fs=1, rel=0)\n print('Video available at https://youtube.com/watch?v=' + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\ndisplay(out)",
"_____no_output_____"
]
],
[
[
"Now that we have generated some data, we will attempt to recover the parameter $p_{independent}$ that was used to generate it.\n\nWe have provided you with an incomplete function called `my_Bayes_model_mse()` that needs to be completed to perform the same computations you have performed in the previous exercises but over all the participant's trial, as opposed to a single trial.\n\nThe likelihood has already been constructed; since it depends only on the hypothetical stimuli, it will not change. However, we will have to implement the prior matrix, since it depends on $p_{independent}$. We will therefore have to recompute the posterior, input and the marginal in order to get $p(\\hat{x} \\mid x)$. \n\nUsing $p(\\hat{x} \\mid x)$, we will then compute the negative log-likelihood for each trial and find the value of $p_{independent}$ that minimizes the negative log-likelihood (i.e. maximises the log-likelihood. See the model fitting tutorial from W1D3 for a refresher).\n\nIn this experiment, we assume that trials are independent from one another. This is a common assumption--and it's often even true! It allows us to define negative log-likelihood as:\n\n\\begin{eqnarray}\n -LL = - \\sum_i \\log p(\\hat{x}_i \\mid x_i)\n\\end{eqnarray}\n\nwhere $\\hat{x}_i$ is the participant's response for trial $i$, with presented stimulus $x_i$ \n\n* Complete the function `my_Bayes_model_mse`, we've already pre-completed the function to give you the prior, posterior, and input arrays on each trial\n* Compute the marginalization array as well as the marginal on each trial\n* Compute the negative log likelihood using the marginal and the participant's response\n* Using the code snippet commented out in your script to loop over possible values of $p_{independent}$\n",
"_____no_output_____"
],
[
"### Exercise 7: Fitting a model to generated data\n\n\n",
"_____no_output_____"
]
],
[
[
"def my_Bayes_model_mse(params):\n \"\"\"\n Function fits the Bayesian model from Tutorial 4\n\n Args :\n params (list of positive floats): parameters used by the model\n (params[0] = posterior scaling)\n\n Returns :\n (scalar) negative log-likelihood :sum of log probabilities\n \"\"\"\n\n # Create the prior array\n p_independent=params[0]\n prior_array = calculate_prior_array(x,\n hypothetical_stim,\n p_independent,\n prior_sigma_indep= 3.)\n\n # Create posterior array\n posterior_array = calculate_posterior_array(prior_array, likelihood_array)\n\n # Create Binary decision array\n binary_decision_array = calculate_binary_decision_array(x, posterior_array)\n\n # we will use trial_ll (trial log likelihood) to register each trial\n trial_ll = np.zeros_like(true_stim)\n\n # Loop over stimuli\n for i_stim in range(len(true_stim)):\n\n # create the input array with true_stim as mean\n input_array = np.zeros_like(posterior_array)\n for i in range(len(x)):\n input_array[:, i] = my_gaussian(hypothetical_stim, true_stim[i_stim], 1)\n input_array[:, i] = input_array[:, i] / np.sum(input_array[:, i])\n\n # calculate the marginalizations\n marginalization_array, marginal = my_marginalization(input_array,\n binary_decision_array)\n\n action = behaviour[i_stim]\n idx = np.argmin(np.abs(x - action))\n\n ########################################################################\n ## Insert your code here to:\n ## - Compute the log likelihood of the participant\n ## remove the raise below to test your function\n raise NotImplementedError(\"You need to complete the function!\")\n ########################################################################\n\n # Get the marginal likelihood corresponding to the action\n marginal_nonzero = ... + np.finfo(float).eps # avoid log(0)\n trial_ll[i_stim] = np.log(marginal_nonzero)\n\n neg_ll = - trial_ll.sum()\n\n return neg_ll\n\n# Uncomment following lines, once the task is complete.\n# plot_my_bayes_model(my_Bayes_model_mse)",
"_____no_output_____"
],
[
"# to_remove solution\ndef my_Bayes_model_mse(params):\n \"\"\"\n Function fits the Bayesian model from Tutorial 4\n\n Args :\n params (list of positive floats): parameters used by the model\n (params[0] = posterior scaling)\n\n Returns :\n (scalar) negative log-likelihood :sum of log probabilities\n \"\"\"\n\n # Create the prior array\n p_independent=params[0]\n prior_array = calculate_prior_array(x,\n hypothetical_stim,\n p_independent,\n prior_sigma_indep= 3.)\n\n # Create posterior array\n posterior_array = calculate_posterior_array(prior_array, likelihood_array)\n\n # Create Binary decision array\n binary_decision_array = calculate_binary_decision_array(x, posterior_array)\n\n # we will use trial_ll (trial log likelihood) to register each trial\n trial_ll = np.zeros_like(true_stim)\n\n # Loop over stimuli\n for i_stim in range(len(true_stim)):\n\n # create the input array with true_stim as mean\n input_array = np.zeros_like(posterior_array)\n for i in range(len(x)):\n input_array[:, i] = my_gaussian(hypothetical_stim, true_stim[i_stim], 1)\n input_array[:, i] = input_array[:, i] / np.sum(input_array[:, i])\n\n # calculate the marginalizations\n marginalization_array, marginal = my_marginalization(input_array,\n binary_decision_array)\n\n action = behaviour[i_stim]\n idx = np.argmin(np.abs(x - action))\n\n # Get the marginal likelihood corresponding to the action\n marginal_nonzero = marginal[idx] + np.finfo(float).eps # avoid log(0)\n trial_ll[i_stim] = np.log(marginal_nonzero)\n\n neg_ll = - trial_ll.sum()\n\n return neg_ll\n\nwith plt.xkcd():\n plot_my_bayes_model(my_Bayes_model_mse)",
"_____no_output_____"
]
],
[
[
"# Section 8: Summary",
"_____no_output_____"
]
],
[
[
"# @title Video 8: Outro\nfrom ipywidgets import widgets\n\nout2 = widgets.Output()\nwith out2:\n from IPython.display import IFrame\n class BiliVideo(IFrame):\n def __init__(self, id, page=1, width=400, height=300, **kwargs):\n self.id=id\n src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page)\n super(BiliVideo, self).__init__(src, width, height, **kwargs)\n\n video = BiliVideo(id=\"BV1Hz411v7hJ\", width=854, height=480, fs=1)\n print('Video available at https://www.bilibili.com/video/{0}'.format(video.id))\n display(video)\n\nout1 = widgets.Output()\nwith out1:\n from IPython.display import YouTubeVideo\n video = YouTubeVideo(id=\"F5JfqJonz20\", width=854, height=480, fs=1, rel=0)\n print('Video available at https://youtube.com/watch?v=' + video.id)\n display(video)\n\nout = widgets.Tab([out1, out2])\nout.set_title(0, 'Youtube')\nout.set_title(1, 'Bilibili')\n\ndisplay(out)",
"_____no_output_____"
]
],
[
[
"Congratuations! You found $p_{independent}$, the parameter that describes how much weight subjects assign to the same-cause vs. independent-cause origins of a sound. In the preceeding notebooks, we went through the entire Bayesian analysis pipeline:\n\n* developing a model\n* simulating data, and\n* using Bayes' Rule and marginalization to recover a hidden parameter from the data\n\nThis example was simple, but the same princples can be used to analyze datasets with many hidden variables and complex priors and likelihoods. Bayes' Rule will also play a cruical role in many of the other techniques you will see later this week. \n\n---\n\nIf you're still intrigued as to why we decided to use the mean of the posterior as a decision rule for a response $\\hat{x}$, we have an extra Bonus Tutorial 4 which goes through the most common decision rules and how these rules correspond to minimizing different cost functions.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a268d631711b8b623665d616748dbb0ce972b95
| 12,525 |
ipynb
|
Jupyter Notebook
|
Review_and_more (1).ipynb
|
anachkheidze/ComputationalThinking_Gov_1
|
a540a4ced9e0964bdc7c029a357b2f13e96b0491
|
[
"MIT"
] | null | null | null |
Review_and_more (1).ipynb
|
anachkheidze/ComputationalThinking_Gov_1
|
a540a4ced9e0964bdc7c029a357b2f13e96b0491
|
[
"MIT"
] | null | null | null |
Review_and_more (1).ipynb
|
anachkheidze/ComputationalThinking_Gov_1
|
a540a4ced9e0964bdc7c029a357b2f13e96b0491
|
[
"MIT"
] | null | null | null | 20.072115 | 124 | 0.507944 |
[
[
[
"This material should help you get the ideas clearer from the first meeting:",
"_____no_output_____"
]
],
[
[
"names=[\"Tomás\", \"Pauline\", \"Pablo\", \"Bjork\",\"Alan\",\"Juana\"]\nwoman=[False,True,False,False,False,True]\nages=[32,33,28,30,32,27]\ncountry=[\"Chile\", \"Senegal\", \"Spain\", \"Norway\",\"Peru\",\"Peru\"]\neducation=[\"Bach\", \"Bach\", \"Master\", \"PhD\",\"Bach\",\"Master\"]\n\n# now in a dict:\ndata={'name':names, 'age':ages, 'girl':woman,'born In':country, 'degree':education}\n\n#now into a DF\nimport pandas as pd\n\nfriends=pd.DataFrame.from_dict(data)\n# seeing it:\nfriends",
"_____no_output_____"
]
],
[
[
"The result is what you expected, but you need to be sure of what data structure you have:",
"_____no_output_____"
]
],
[
[
"#what is it?\ntype(friends)",
"_____no_output_____"
],
[
"#this is good\nfriends.age",
"_____no_output_____"
],
[
"#what is it?\ntype(friends.age)",
"_____no_output_____"
],
[
"#this is good\nfriends['age']",
"_____no_output_____"
],
[
"#what is it?\ntype(friends['age'])",
"_____no_output_____"
],
[
"#this is bad\nfriends.iloc[['age']]",
"_____no_output_____"
],
[
"#this is bad\nfriends.loc[['age']]",
"_____no_output_____"
],
[
"#this is bad\nfriends['age','born In']",
"_____no_output_____"
],
[
"#this is good\nfriends[['age','born In']]",
"_____no_output_____"
],
[
"# what is it?\ntype(friends[['age','born In']])",
"_____no_output_____"
],
[
"#this is bad\nfriends.'born In'",
"_____no_output_____"
],
[
"#this is good\nfriends.loc[:,['age','born In']]",
"_____no_output_____"
],
[
"type(friends.loc[:,['age','born In']])",
"_____no_output_____"
],
[
"#this is bad\nfriends.loc[:,['age':'born In']]",
"_____no_output_____"
],
[
"#this is bad\nfriends.iloc[:,['age','born In']]",
"_____no_output_____"
],
[
"# this is good (but different)\nfriends.iloc[:,1:4]",
"_____no_output_____"
],
[
"# what is it?\ntype(friends.iloc[:,1:4])",
"_____no_output_____"
],
[
"# this is good\nfriends.iloc[:,[1,3]]",
"_____no_output_____"
],
[
"#what is it?\ntype(friends.iloc[:,[1,3]])",
"_____no_output_____"
]
],
[
[
"Most of our operations are done on Data frames, because they have several columns and we used that for the subsetting:",
"_____no_output_____"
]
],
[
[
"friends[friends.age>30]",
"_____no_output_____"
]
],
[
[
"Some people like coding with the filter language:",
"_____no_output_____"
]
],
[
[
"# \nfilter1=friends.age>30\nfriends[filter1]",
"_____no_output_____"
],
[
"friends.where(filter1)",
"_____no_output_____"
],
[
"filter1a='age>30'\nfriends.query(filter1a)",
"_____no_output_____"
],
[
"isinstance(friends[filter1], pd.DataFrame), \\\nisinstance(friends.where(filter1), pd.DataFrame), \\\nisinstance(friends.query(filter1a), pd.DataFrame)",
"_____no_output_____"
]
],
[
[
"When you have Boolean values (True/False) you can simplify:",
"_____no_output_____"
]
],
[
[
"#from:\nfriends[friends.girl==False]",
"_____no_output_____"
],
[
"# to...\nfriends[~friends.girl]",
"_____no_output_____"
]
],
[
[
"You can have two filters:",
"_____no_output_____"
]
],
[
[
"# this will not work\nfriends[~friends.girl & friends.degree=='Bach']",
"_____no_output_____"
],
[
"# this will (with parentheses)\nfriends[(~friends.girl) & (friends.degree=='Bach')]",
"_____no_output_____"
]
],
[
[
"Other times you want a values once a filter was applied:",
"_____no_output_____"
]
],
[
[
"# youngest male:\nfriends[(~friends.girl) & (friends.age.min())] # this is wrong!",
"_____no_output_____"
],
[
"friends[(~friends.girl) & (friends.age==friends.age.min())] # this is wrong too!",
"_____no_output_____"
],
[
"friends.age.min()",
"_____no_output_____"
]
],
[
[
"You got empty answer because there is no man aged 27.",
"_____no_output_____"
]
],
[
[
"# this is correct\nfriends[~friends.girl].age.min()",
"_____no_output_____"
]
],
[
[
"Once you know the right age, you have to put it in the right place:",
"_____no_output_____"
]
],
[
[
"friends[friends.age==friends[~friends.girl].age.min()]",
"_____no_output_____"
],
[
"# or\nfriends.where(friends.age==friends[~friends.girl].age.min())",
"_____no_output_____"
],
[
"# or\nfriends.where(friends.age==friends[~friends.girl].age.min()).dropna()",
"_____no_output_____"
]
],
[
[
"The problem is that 'friends' are not subset and the age keeps being that of the youngest woman:",
"_____no_output_____"
]
],
[
[
"# bad:\nfriends.where(~friends.girl).where(friends.age==friends.age.min())",
"_____no_output_____"
]
],
[
[
"That's the advantage of **query**:",
"_____no_output_____"
]
],
[
[
"friends.query('~girl').query('age==age.min()')",
"_____no_output_____"
],
[
"#but\n\nstudents=friends.copy()\n\nstudents.where(~students.girl,inplace=True) #real subset\nstudents.where(students.age==students.age.min())",
"_____no_output_____"
]
],
[
[
"Let's vary the data a little:",
"_____no_output_____"
]
],
[
[
"names=[\"Tomás\", \"Pauline\", \"Pablo\", \"Bjork\",\"Alan\",\"Juana\"]\nwoman=[False,True,False,False,False,True]\nages=[32,28,28,30,32,27]\ncountry=[\"Chile\", \"Senegal\", \"Spain\", \"Norway\",\"Peru\",\"Peru\"]\neducation=[\"Bach\", \"Bach\", \"Master\", \"PhD\",\"Bach\",\"Master\"]\n\n# now in a dict:\ndata={'name':names, 'age':ages, 'girl':woman,'born In':country, 'degree':education}\n\n#now into a DF\nimport pandas as pd\n\nfriends2=pd.DataFrame.from_dict(data)\n# seeing it:\nfriends2",
"_____no_output_____"
]
],
[
[
"There is a girl with the same age as the youngest boy, then:",
"_____no_output_____"
]
],
[
[
"friends2.where(friends2.age==friends2[~friends2.girl].age.min()).dropna()",
"_____no_output_____"
]
],
[
[
"We need a previous strategy:",
"_____no_output_____"
]
],
[
[
"# bad implementation:\nfriends2.where(friends2.age==friends2[~friends2.girl].age.min() & friends2.girl==False).dropna()",
"_____no_output_____"
],
[
"# bad implementation:\nfriends2.where(friends2.age==friends2[~friends2.girl].age.min() & ~friends2.girl).dropna()",
"_____no_output_____"
],
[
"# just parentheses to make it work!\nfriends2.where((friends2.age==friends2[~friends2.girl].age.min()) & (~friends2.girl)).dropna()",
"_____no_output_____"
]
],
[
[
"This one still works!",
"_____no_output_____"
]
],
[
[
"friends2.query('~girl').query('age==age.min()')",
"_____no_output_____"
],
[
"students2=friends2.copy()\n\nstudents2.where(~students2.girl,inplace=True) #real subset\nstudents2.where(students2.age==students2.age.min()).dropna()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"raw",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"raw"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
4a269befc6be0d2c5fd8cc1cf1cbe8be9f8c4be5
| 10,720 |
ipynb
|
Jupyter Notebook
|
examples/topics/network_packets.ipynb
|
philippjfr/datashader
|
eb9218cb810297aea2ae1030349cef6a6f3ab3cb
|
[
"BSD-3-Clause"
] | 1 |
2018-07-17T19:33:43.000Z
|
2018-07-17T19:33:43.000Z
|
examples/topics/network_packets.ipynb
|
philippjfr/datashader
|
eb9218cb810297aea2ae1030349cef6a6f3ab3cb
|
[
"BSD-3-Clause"
] | null | null | null |
examples/topics/network_packets.ipynb
|
philippjfr/datashader
|
eb9218cb810297aea2ae1030349cef6a6f3ab3cb
|
[
"BSD-3-Clause"
] | null | null | null | 32.192192 | 397 | 0.619963 |
[
[
[
"# Graphing network packets",
"_____no_output_____"
],
[
"This notebook currently relies on HoloViews 1.9 or above. Run `conda install -c ioam/label/dev holoviews` to install it.",
"_____no_output_____"
],
[
"## Preparing data",
"_____no_output_____"
],
[
"The data source comes from a publicly available network forensics repository: http://www.netresec.com/?page=PcapFiles. The selected file is https://download.netresec.com/pcap/maccdc-2012/maccdc2012_00000.pcap.gz.\n\n```\ntcpdump -qns 0 -r maccdc2012_00000.pcap | grep tcp > maccdc2012_00000.txt\n```",
"_____no_output_____"
],
[
"For example, here is a snapshot of the resulting output:",
"_____no_output_____"
],
[
"```\n09:30:07.780000 IP 192.168.202.68.8080 > 192.168.24.100.1038: tcp 1380\n09:30:07.780000 IP 192.168.24.100.1038 > 192.168.202.68.8080: tcp 0\n09:30:07.780000 IP 192.168.202.68.8080 > 192.168.24.100.1038: tcp 1380\n09:30:07.780000 IP 192.168.202.68.8080 > 192.168.24.100.1038: tcp 1380\n09:30:07.780000 IP 192.168.27.100.37877 > 192.168.204.45.41936: tcp 0\n09:30:07.780000 IP 192.168.24.100.1038 > 192.168.202.68.8080: tcp 0\n09:30:07.780000 IP 192.168.202.68.8080 > 192.168.24.100.1038: tcp 1380\n09:30:07.780000 IP 192.168.202.68.8080 > 192.168.24.100.1038: tcp 1380\n09:30:07.780000 IP 192.168.202.68.8080 > 192.168.24.100.1038: tcp 1380\n09:30:07.780000 IP 192.168.202.68.8080 > 192.168.24.100.1038: tcp 1380\n```",
"_____no_output_____"
],
[
"Given the directional nature of network traffic and the numerous ports per node, we will simplify the graph by treating traffic between nodes as undirected and ignorning the distinction between ports. The graph edges will have weights represented by the total number of bytes across both nodes in either direction.",
"_____no_output_____"
],
[
"```\npython pcap_to_parquet.py maccdc2012_00000.txt\n```",
"_____no_output_____"
],
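[
"The helper script itself is not shown here, but the aggregation it performs can be sketched roughly as follows. This is only a simplified illustration: it assumes the tcpdump text has already been parsed into a dataframe with `source`, `target` and `bytes` columns, and those column names (and the function name) are illustrative rather than the script's actual API.\n\n```python\nimport pandas as pd\n\ndef aggregate_edges(df):\n    # treat traffic as undirected: put each node pair in a consistent order\n    pairs = df[['source', 'target']].apply(sorted, axis=1, result_type='expand')\n    df = df.assign(source=pairs[0], target=pairs[1])\n    # sum bytes across both directions to get one weight per node pair\n    grouped = df.groupby(['source', 'target'], as_index=False)['bytes'].sum()\n    return grouped.rename(columns={'bytes': 'weight'})\n```",
"_____no_output_____"
],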
[
"The resulting output will be two Parquet dataframes, `maccdc2012_nodes.parq` and `maccdc2012_edges.parq`.",
"_____no_output_____"
],
[
"## Loading data",
"_____no_output_____"
]
],
[
[
"import holoviews as hv\nimport networkx as nx\nimport dask.dataframe as dd\n\nfrom holoviews.operation.datashader import (\n datashade, dynspread, directly_connect_edges, bundle_graph, stack\n)\nfrom holoviews.element.graphs import layout_nodes\nfrom datashader.layout import random_layout\nfrom colorcet import fire\n\nhv.extension('bokeh')\n\n%opts RGB Graph Nodes [bgcolor='black' width=800 height=800 xaxis=None yaxis=None]",
"_____no_output_____"
],
[
"edges_df = dd.read_parquet('../data/maccdc2012_full_edges.parq').compute()\nedges_df = edges_df.reset_index(drop=True)\ngraph = hv.Graph(edges_df)\nlen(edges_df)",
"_____no_output_____"
]
],
[
[
"## Edge bundling & layouts",
"_____no_output_____"
],
[
"Datashader and HoloViews provide support for a number of different graph layouts including circular, force atlas and random layouts. Since large graphs with thousands of edges can become quite messy when plotted datashader also provides functionality to bundle the edges.",
"_____no_output_____"
],
[
"#### Circular layout",
"_____no_output_____"
],
[
"By default the HoloViews Graph object lays out nodes using a circular layout. Once we have declared the ``Graph`` object we can simply apply the ``bundle_graph`` operation. We also overlay the datashaded graph with the nodes, letting us identify each node by hovering.",
"_____no_output_____"
]
],
[
[
"%%opts Nodes (size=5)\ncircular = bundle_graph(graph)\npad = dict(x=(-1.2, 1.2), y=(-1.2, 1.2))\ndatashade(circular, width=800, height=800) * circular.nodes.redim.range(**pad)",
"_____no_output_____"
]
],
[
[
"#### Force Atlas 2 layout",
"_____no_output_____"
],
[
"For other graph layouts you can use the ``layout_nodes`` operation supplying the datashader or NetworkX layout function. Here we will use the ``nx.spring_layout`` function based on the [Fruchterman-Reingold](https://en.wikipedia.org/wiki/Force-directed_graph_drawing) algorithm. Instead of bundling the edges we may also use the directly_connect_edges function:",
"_____no_output_____"
]
],
[
[
"%%opts Nodes (size=5)\nforceatlas = directly_connect_edges(layout_nodes(graph, layout=nx.spring_layout))\npad = dict(x=(-.5, 1.3), y=(-.5, 1.3))\ndatashade(forceatlas, width=800, height=800) * forceatlas.nodes.redim.range(**pad)",
"_____no_output_____"
]
],
[
[
"#### Random layout",
"_____no_output_____"
],
[
"Datashader also provides a number of layout functions in case you don't want to depend on NetworkX:",
"_____no_output_____"
]
],
[
[
"%%opts Nodes (size=5)\nrandom = bundle_graph(layout_nodes(graph, layout=random_layout))\npad = dict(x=(-.05, 1.05), y=(-0.05, 1.05))\ndatashade(random, width=800, height=800) * random.nodes.redim.range(**pad)",
"_____no_output_____"
]
],
[
[
"## Showing nodes with active traffic",
"_____no_output_____"
],
[
"To select just nodes with active traffic we will split the dataframe of bundled paths and then apply ``select`` on the new Graph to select just those edges with a weight of more than 10,000. By overlaying the sub-graph of high traffic edges we can take advantage of the interactive hover and tap features that bokeh provides while still revealing the full datashaded graph in the background.",
"_____no_output_____"
]
],
[
[
"%%opts Graph (edge_line_color='white' edge_hover_line_color='blue')\npad = dict(x=(-1.2, 1.2), y=(-1.2, 1.2))\ndatashade(circular, width=800, height=800) * circular.select(weight=(10000, None)).redim.range(**pad)",
"_____no_output_____"
]
],
[
[
"## Highlight TCP and UDP traffic",
"_____no_output_____"
],
[
"Using the same selection features we can highlight TCP and UDP connections separately again by overlaying it on top of the full datashaded graph. The edges can be revealed over the highlighted nodes and by setting an alpha level we can also reveal connections with both TCP (blue) and UDP (red) connections in purple.",
"_____no_output_____"
]
],
[
[
"%%opts Graph (edge_alpha=0 edge_hover_alpha=0.5 edge_nonselection_alpha=0 node_size=8 node_alpha=0.5) [color_index='weight' inspection_policy='edges']\nudp_style = dict(edge_hover_line_color='red', node_hover_size=20, node_fill_color='red', edge_selection_line_color='red')\ntcp_style = dict(edge_hover_line_color='blue', node_fill_color='blue', edge_selection_line_color='blue')\nudp = forceatlas.select(protocol='udp', weight=(10000, None)).opts(style=udp_style)\ntcp = forceatlas.select(protocol='icmp', weight=(10000, None)).opts(style=tcp_style)\ndatashade(forceatlas, width=800, height=800, normalization='log', cmap=['black', 'white']) * tcp * udp",
"_____no_output_____"
]
],
[
[
"## Coloring by protocol",
"_____no_output_____"
],
[
"As we have already seen we can easily apply selection to the ``Graph`` objects. We can use this functionality to select by protocol, datashade the subgraph for each protocol and assign each a different color and finally stack the resulting datashaded layers:",
"_____no_output_____"
]
],
[
[
"from bokeh.palettes import Blues9, Reds9, Greens9\nranges = dict(x_range=(-.5, 1.6), y_range=(-.5, 1.6), width=800, height=800)\nprotocols = [('tcp', Blues9), ('udp', Reds9), ('icmp', Greens9)]\nshaded = hv.Overlay([datashade(forceatlas.select(protocol=p), cmap=cmap, **ranges)\n for p, cmap in protocols]).collate()\nstack(shaded * dynspread(datashade(forceatlas.nodes, cmap=['white'], **ranges)), link_inputs=True)",
"_____no_output_____"
]
],
[
[
"## Selecting the highest targets",
"_____no_output_____"
],
[
"With a bit of help from pandas we can also extract the twenty most targetted nodes and overlay them on top of the datashaded plot:",
"_____no_output_____"
]
],
[
[
"%%opts RGB [width=800 height=800] Nodes (size=8)\ntarget_counts = list(edges_df.groupby('target').count().sort_values('weight').iloc[-20:].index.values)\n(datashade(forceatlas, cmap=fire[128:]) * datashade(forceatlas.nodes, cmap=['cyan']) *\n forceatlas.nodes.select(index=target_counts))",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
4a26a74b3126527db1af66663f85f7dcd3c1bfb1
| 60,743 |
ipynb
|
Jupyter Notebook
|
Ridge.ipynb
|
GalileoSama/MachineLearning
|
2b0a88884a4b0426f9396fe5275beb9778411378
|
[
"MIT"
] | null | null | null |
Ridge.ipynb
|
GalileoSama/MachineLearning
|
2b0a88884a4b0426f9396fe5275beb9778411378
|
[
"MIT"
] | null | null | null |
Ridge.ipynb
|
GalileoSama/MachineLearning
|
2b0a88884a4b0426f9396fe5275beb9778411378
|
[
"MIT"
] | null | null | null | 242.003984 | 21,836 | 0.919727 |
[
[
[
"from sklearn import datasets, metrics\nfrom sklearn.model_selection import cross_val_score, learning_curve, validation_curve # 交叉验证、学习曲线、验证曲线\nfrom sklearn.linear_model import Ridge # 岭回归模型\nfrom sklearn.externals import joblib\nimport matplotlib.pylab as plt # 可视化\nimport numpy as np\n\n# 获得数据\nX, Y = datasets.make_regression(n_samples=100, n_features=2, noise=10)\nmodel = Ridge()\nprint(model)",
"Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,\n normalize=False, random_state=None, solver='auto', tol=0.001)\n"
],
[
"# 选择学习率\nalpha = 0.1\nalphas = []\nR_loss = []\nwhile alpha < 20:\n alphas.append(alpha)\n model.alpha = alpha\n # 交叉验证 分成五组训练集、测试集 \n loss = -cross_val_score(estimator=model, X=X, y=Y, cv=10, scoring='neg_mean_squared_error') \n print(model)\n R_loss.append(loss.mean())\n alpha *= 2\n# 还原model的alpha为最优值\nmodel.alpha = 2\nprint(model)",
"Ridge(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=None,\n normalize=False, random_state=None, solver='auto', tol=0.001)\nRidge(alpha=0.2, copy_X=True, fit_intercept=True, max_iter=None,\n normalize=False, random_state=None, solver='auto', tol=0.001)\nRidge(alpha=0.4, copy_X=True, fit_intercept=True, max_iter=None,\n normalize=False, random_state=None, solver='auto', tol=0.001)\nRidge(alpha=0.8, copy_X=True, fit_intercept=True, max_iter=None,\n normalize=False, random_state=None, solver='auto', tol=0.001)\nRidge(alpha=1.6, copy_X=True, fit_intercept=True, max_iter=None,\n normalize=False, random_state=None, solver='auto', tol=0.001)\nRidge(alpha=3.2, copy_X=True, fit_intercept=True, max_iter=None,\n normalize=False, random_state=None, solver='auto', tol=0.001)\nRidge(alpha=6.4, copy_X=True, fit_intercept=True, max_iter=None,\n normalize=False, random_state=None, solver='auto', tol=0.001)\nRidge(alpha=12.8, copy_X=True, fit_intercept=True, max_iter=None,\n normalize=False, random_state=None, solver='auto', tol=0.001)\nRidge(alpha=2, copy_X=True, fit_intercept=True, max_iter=None,\n normalize=False, random_state=None, solver='auto', tol=0.001)\n"
],
[
"# 图形化学习率与误差的曲线\nplt.plot(alphas, R_loss)\nplt.xlabel('alphas')\nplt.ylabel('loss')\nplt.show()",
"_____no_output_____"
],
[
"# 学习曲线 随训练集样本数变化(用于检测过拟合)\n# 1、高方差(过拟合问题) 随着训练集样本增加,训练误差(红色)减小,验证误差(绿色)先减后增\n# 解决:增加样本数、使用更少的特征、增加惩罚项系数值 \n# 2、高偏差(欠拟合问题) 随着训练集样本增加,训练误差(红色)缓慢增加,验证误差(绿色)缓慢减少\n# 解决:增加特征、增加高维度特征(x1,x2,x1平方,x2平方)、减少惩罚项系数值 \ntrain_sizes, train_loss, test_loss = \\\n learning_curve(model, X, Y, cv=10, scoring=\"neg_mean_squared_error\",train_sizes=[0.1, 0.25, 0.5, 0.75, 1])\ntrain_loss_mean = -np.mean(train_loss, axis=1)\ntest_loss_mean = -np.mean(test_loss, axis=1)",
"_____no_output_____"
],
[
"# 图形化学习曲线 \nplt.plot(train_sizes, train_loss_mean, 'o-', color=\"r\", label=\"Training\")\nplt.plot(train_sizes, test_loss_mean, 'o-', color=\"g\", label=\"Cross-validation\")\nprint(model)\nplt.xlabel(\"training examples\")\nplt.ylabel(\"Loss\")\nplt.legend(loc=\"best\")\nplt.show()",
"Ridge(alpha=2, copy_X=True, fit_intercept=True, max_iter=None,\n normalize=False, random_state=None, solver='auto', tol=0.001)\n"
],
[
"# 验证曲线(学习率alpha曲线)\nparam_range = np.linspace(0.1, 20, num=10)\ntrain_loss, test_loss \\\n = validation_curve(model, X, Y, param_name='alpha', \n param_range=param_range, cv=10, scoring=\"neg_mean_squared_error\")\ntrain_loss_mean = -np.mean(train_loss, axis=1)\ntest_loss_mean = -np.mean(test_loss, axis=1)\n\n# 图形化验证曲线 \nplt.plot(param_range, train_loss_mean, 'o-', color=\"r\", label=\"Training\")\nplt.plot(param_range, test_loss_mean, 'o-', color=\"g\", label=\"Cross-validation\")\n\nplt.xlabel(\"alpha\")\nplt.ylabel(\"loss\")\nplt.legend(loc=\"best\")\nplt.show()",
"_____no_output_____"
],
[
"# 保存模型到save/lr.pkl\njoblib.dump(model, 'save/lr.pkl')\n# 读save/lr.pkl模型文件\nmodel1 = joblib.load('save/lr.pkl')\nprint(model1)",
"Ridge(alpha=2, copy_X=True, fit_intercept=True, max_iter=None,\n normalize=False, random_state=None, solver='auto', tol=0.001)\n"
],
[
"# 误差分析(人工)\n# 分析交叉验证集中 错误预测的数据:1、类型 2、特征线索\n",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a26ac33fd85c8f7f1588ce146fd1170a47a006e
| 300 |
ipynb
|
Jupyter Notebook
|
Regression/Decision Tree/DecisionTreeRegressor_Normalize.ipynb
|
surya2365/ds-seed
|
74ef58479333fed95522f7b691f1209f7d70fc95
|
[
"Apache-2.0"
] | 2 |
2021-07-28T15:26:40.000Z
|
2021-07-29T04:14:35.000Z
|
Regression/Decision Tree/DecisionTreeRegressor_Normalize.ipynb
|
surya2365/ds-seed
|
74ef58479333fed95522f7b691f1209f7d70fc95
|
[
"Apache-2.0"
] | 1 |
2021-07-30T06:00:30.000Z
|
2021-07-30T06:00:30.000Z
|
Regression/Decision Tree/DecisionTreeRegressor_Normalize.ipynb
|
surya2365/ds-seed
|
74ef58479333fed95522f7b691f1209f7d70fc95
|
[
"Apache-2.0"
] | null | null | null | 14.285714 | 49 | 0.51 |
[
[
[
"# DecisionTreeRegressor with Normalize\r\n\r\n## Coming Soon",
"_____no_output_____"
]
]
] |
[
"markdown"
] |
[
[
"markdown"
]
] |
4a26ac368946606e96c755a9d5c70fb61f209a41
| 486,824 |
ipynb
|
Jupyter Notebook
|
Tutorial1_Part2_solutions.ipynb
|
fusterma/JUAS2022_Solutions
|
7071e7a4aa82e5a746139d68c08afbd0360c5287
|
[
"MIT"
] | null | null | null |
Tutorial1_Part2_solutions.ipynb
|
fusterma/JUAS2022_Solutions
|
7071e7a4aa82e5a746139d68c08afbd0360c5287
|
[
"MIT"
] | null | null | null |
Tutorial1_Part2_solutions.ipynb
|
fusterma/JUAS2022_Solutions
|
7071e7a4aa82e5a746139d68c08afbd0360c5287
|
[
"MIT"
] | null | null | null | 88.561761 | 55,779 | 0.730266 |
[
[
[
"# Tutorial 1: Part 2\n\nObjectives:\n- Learn how to define a simple lattice and compute the TWISS functions using MAD-X.\n- Thick vs thin lens approximation TWISS comparison for a lattice with only quadrupoles.\n- Tune and $\\beta$-function dependence on K1.\n\n**My first accelerator: a FODO cell**\n\n1. Make a simple lattice FODO cell with:\n - $L_{cell}$= 100 m.\n - Focusing and defocusing quadrupoles of 5 m long ($L_{quad}$). \n - Put the start of the first qudrupole at the start of the sequence. \n - Each quadrupole has a focal length f = 200 m (HINT: k1 x $L_{quad}$ = 1/f).\n\n\n2. Define a proton beam at $E_{tot}$= 2 GeV. Activate the sequence and try to find the periodic solution and plot the $\\beta$-functions. If you found $\\beta_{max}$= 460 m you succeded.\n\n\n3. Using the plot you obtained can you estimate the phase advance of the cell. Compare with the tunes obtained with the TWISS.\n\n**Matching the FODO cell using a parametric plot**\n\n4. Try to twiss it powering the quadrupoles to obtain a $\\Delta \\mu \\approx 90^\\circ$ in the cell using the thin lens approximation (HINT: using the figures from Tutorial 1: Part 1). What is the actual phase advance computed by MAD-X?\n\n**BONUS:**\n\n5. What is the $\\beta_{max}$? Compare with the thin lens approximation (using the figures from Tutorial 1: Part 1).\n\n\n6. Halve the focusing strength of the quadrupoles, what is the effect of it on the $\\beta_{max}$ and $\\beta_{min}$ and on the $\\Delta \\mu$? Compare with the parametric plots.\n\n\n7. Compute the maximum beam size $\\sigma$ assuming a normalized emittance of 3 mrad mm and $E_{tot}= 7 TeV$.\n\n\n8. Try with $E_{tot}$ = 0.7 GeV: what is the MAD-X error message?\n\n\n9. Try with f= 20 m: what is the MAD-x error message?",
"_____no_output_____"
]
],
[
[
"#Import the needed libraries\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom cpymad.madx import Madx ",
"_____no_output_____"
]
],
[
[
"# Launching MAD-X",
"_____no_output_____"
]
],
[
[
"myMad = Madx(stdout=True)",
"_____no_output_____"
]
],
[
[
"1. Make a simple lattice FODO cell with:\n - $L_{cell}$= 100 m.\n - Focusing and defocusing qudrupoles of 5 m long ($L_{quad}$). \n - Put the start of the first qudrupole at the start of the sequence. \n - Each quadrupole has a focal length f = 200 m (HINT: k1 x $L_{quad}$ = 1/f).\n\n\n2. Define a proton beam at $E_{tot}$= 2 GeV. Activate the sequence and try to find the periodic solution and plot the $\\beta$-functions. If you found $\\beta_{max}$= 460 m you succeded.\n\n<div>\n<img src=\"attachment:Imagen%201-3.png\" width=\"400\"/>\n</div>\n",
"_____no_output_____"
]
],
[
[
"#myMad = Madx(stdout=True)\nmyMad = Madx()\nmyString='''\n! *********************************************************************\n! Definition of parameters\n! *********************************************************************\n\nl_cell=100;\nquadrupoleLenght=5;\nf=200;\nmyK:=1/f/quadrupoleLenght;// m^-2\n\n! *********************************************************************\n! Definition of magnets\n! ********************************************************************* \nQF: quadrupole, L=quadrupoleLenght, K1:=myK;\nQD: quadrupole, L=quadrupoleLenght, K1:=-myK;\n\n! *********************************************************************\n! Definition of sequence\n! *********************************************************************\nmyCell:sequence, refer=entry, L=L_CELL;\nquadrupole1: QF, at=0;\nmarker1: marker, at=25;\nquadrupole2: QD, at=50;\nmarker2: marker, at=75;\nendsequence;\n\n! *********************************************************************\n! Definition of beam\n! *********************************************************************\nbeam, particle=proton, energy=2;\n\n! *********************************************************************\n! Use of the sequence\n! *********************************************************************\nuse, sequence=myCell;\n\n! *********************************************************************\n! TWISS\n! *********************************************************************\ntitle, 'My first twiss';\ntwiss, file=MyfirstFODO.madx;\nplot, haxis=s, vaxis=betx,bety,dx,colour=100, title=\"test\",file=MyfirstFODO;\n\nvalue myK;\n'''\nmyMad.input(myString);",
"\n ++++++++++++++++++++++++++++++++++++++++++++\n + MAD-X 5.07.00 (64 bit, Darwin) +\n + Support: [email protected], http://cern.ch/mad +\n + Release date: 2021.05.03 +\n + Execution date: 2022.01.13 17:18:14 +\n ++++++++++++++++++++++++++++++++++++++++++++\nenter Twiss module\n \niteration: 1 error: 0.000000E+00 deltap: 0.000000E+00\norbit: 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00 0.000000E+00\n\n++++++ table: summ\n\n length orbit5 alfa gammatr \n 100 -0 0 0 \n\n q1 dq1 betxmax dxmax \n 0.03853349451 -0.04384718845 463.6232883 0 \n\n dxrms xcomax xcorms q2 \n 0 0 0 0.03853349451 \n\n dq2 betymax dymax dyrms \n -0.04384718845 463.6232883 0 0 \n\n ycomax ycorms deltap synch_1 \n 0 0 0 0 \n\n synch_2 synch_3 synch_4 synch_5 \n 0 0 0 0 \n\n synch_6 synch_8 nflips \n 0 0 0 \nPlot - default table plotted: twiss\n\n GXPLOT-X11 1.50 initialized\n\n plot number = 1\nmyk = 0.001 ;\n"
],
[
"########################\n# Using MAD-X commands #\n########################\nmyString='''\nvalue, table(SUMM,Q1);\nvalue, table(SUMM,betymax);\n'''\nmyMad.input(myString);",
"table( summ q1 ) = 0.03853349451 ;\ntable( summ betymax ) = 463.6232883 ;\n"
],
[
"#########################\n# Using python commands #\n#########################\n# SUMM table\nmyDF=myMad.table.summ.dframe()\nmyDF\nmyDF[\"q1\"]",
"_____no_output_____"
],
[
"#########################\n# Using python commands #\n#########################\n# TWISS table\nmyDF=myMad.table.twiss.dframe()\nmyDF[['name','s','betx','bety','alfx','alfy','mux','muy']]",
"_____no_output_____"
],
[
"%matplotlib notebook\nplt.rcParams['savefig.dpi'] = 80\nplt.rcParams['figure.dpi'] = 80\n# Plot\nplt.plot(myDF['s'],myDF['betx'],'.-b',label='$\\\\beta_x$')\nplt.plot(myDF['s'],myDF['bety'],'.-r',label='$\\\\beta_y$')\nplt.legend()\nplt.grid()\nplt.xlabel('s [m]')\nplt.ylabel('[m]')\nplt.title('My first FODO cell')",
"_____no_output_____"
]
],
[
[
"**If you found $\\beta_{max}$= 463.6 m you succeded!**",
"_____no_output_____"
],
[
"# Tune from the plot\n3. Using the plot you obtained can you estimate the phase advance of the cell. Compare with the tunes obtained with the TWISS.",
"_____no_output_____"
],
[
"For the phase advance one can consider the definition\n\n\\begin{equation}\n\\mu=\\int\\frac{1}{\\beta(s)}ds.\n\\end{equation}\n\nRemember that the unit of phase in MAD-X is [2$\\pi$].",
"_____no_output_____"
]
],
[
[
"# A very basic approximation considering a constant beta\n# The mean beta value is \n1/417.*100/2/np.pi",
"_____no_output_____"
],
[
"# Computing the integral\nnp.trapz(1/myDF['betx'],myDF['s'])/2/np.pi",
"_____no_output_____"
],
[
"# Correct values from MAD-X TWISS commands\nmyDF.iloc[-1]['mux']",
"_____no_output_____"
],
[
"# Phase Advance in units of degrees\nmyDF.iloc[-1]['mux']*2*180",
"_____no_output_____"
]
],
[
[
"# **Matching the FODO cell using a parametric plot**\n4. Try to twiss it powering the quadrupoles to obtain a $\\Delta \\mu \\approx 90^\\circ$ in the cell using the thin lens approximation (using the figures from Tutorial 1: Part 1). What is the actual phase advance computed by MAD-X?",
"_____no_output_____"
]
],
[
[
"myMad = Madx(stdout=True)",
"_____no_output_____"
]
],
[
[
"<div>\n<img src=\"attachment:test.png\" width=\"600\"/>\n</div>",
"_____no_output_____"
]
],
[
[
"# From the plot from Tutoria 1 - part1, for 90 degrees phase advance k*Lcell*lq=2.8\nquadrupoleLenght=5\ncellLength=100\nmyK=2.8/cellLength/quadrupoleLenght\nprint(myK)",
"0.005599999999999999\n"
]
],
[
[
"In comparison to the previous case as we want to increase the phase advance we need to increase the k if we keep constant the length of the cell and of the quadrupole. We move towards the right on the parmetric plot of Tutorial 1 - Part 1.",
"_____no_output_____"
]
],
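[
[
"A quick thin-lens cross-check (added note): for a symmetric FODO cell with thin quadrupoles,\n\n\\begin{equation}\n\\sin\\left(\\frac{\\Delta\\mu}{2}\\right)=\\frac{L_{cell}}{4f}=\\frac{k_1 L_q L_{cell}}{4},\n\\end{equation}\n\nso $\\Delta\\mu = 90^\\circ$ requires $k_1 L_q L_{cell} = 4\\sin(45^\\circ)\\approx 2.83$, consistent with the value of about 2.8 read from the parametric plot.",
"_____no_output_____"
]
],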
[
[
"myString='''\n! *********************************************************************\n! Definition of parameters\n! *********************************************************************\n\nl_cell=100;\nquadrupoleLenght=5;\nmyK:=2.8/l_cell/quadrupoleLenght;// m^-2\n\n! *********************************************************************\n! Definition of magnets\n! ********************************************************************* \nQF: quadrupole, L=quadrupoleLenght, K1:=myK;\nQD: quadrupole, L=quadrupoleLenght, K1:=-myK;\n\n! *********************************************************************\n! Definition of sequence\n! *********************************************************************\nmyCell:sequence, refer=entry, L=L_CELL;\nquadrupole1: QF, at=0;\nmarker1: marker, at=25;\nquadrupole2: QD, at=50;\nmarker2: marker, at=75;\nendsequence;\n\n! *********************************************************************\n! Definition of beam\n! *********************************************************************\nbeam, particle=proton, energy=2;\n\n! *********************************************************************\n! Use of the sequence\n! *********************************************************************\nuse, sequence=myCell;\n\n! *********************************************************************\n! TWISS\n! *********************************************************************\ntitle, 'My first twiss';\ntwiss, file=MyfirstFODO.madx;\nplot, haxis=s, vaxis=betx,bety,dx,colour=100, title=\"test\",file=MyfirstFODO;\n\nvalue myK;\n'''\nmyMad.input(myString);",
"_____no_output_____"
],
[
"myDFTable=myMad.table.twiss.dframe()\nmyDFTable[[\"name\", \"keyword\",\"betx\",\"bety\",\"alfx\",\"alfy\", \"mux\", \"muy\" ]]",
"_____no_output_____"
],
[
"myString='''\nvalue, table(SUMM,Q1);\nvalue, table(SUMM,Q2);\n'''\nmyMad.input(myString);",
"_____no_output_____"
],
[
"#Phase advance computed by MADX in rad\n0.236*2*np.pi",
"_____no_output_____"
],
[
"#Phase advance computed by MADX in degrees\n1.4828317324943823*180/np.pi",
"_____no_output_____"
]
],
[
[
"**BONUS:**",
"_____no_output_____"
],
[
"5. What is the $\\beta_{max}$? Compare with the thin lens approximation (using the figures from Tutorial 1: Part 1).",
"_____no_output_____"
]
],
[
[
"# From the MAD-X calculation\nmyDFTable['betx'].max()",
"_____no_output_____"
],
[
"myDFTable['bety'].max()",
"_____no_output_____"
],
[
"#From the parametric plot Figure 1 \n#K1*Lcell*Lq=\n0.0056*100*5",
"_____no_output_____"
]
],
[
[
"\n<div>\n<img src=\"attachment:test2.png\" width=\"600\"/>\n</div>",
"_____no_output_____"
]
],
[
[
"#From the parametric plot Figure 2 to be compared with 160.6036545763343 m (with MAD-X)\n1.697*100",
"_____no_output_____"
]
],
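[
[
"# Cross-check (added, not in the original notebook): the standard thin-lens FODO formula\n# beta_max = L_cell*(1 + sin(mu/2))/sin(mu), with sin(mu/2) = k1*Lq*Lcell/4 = 2.8/4 = 0.7,\n# gives roughly 170 m, close to the ~169.7 m read from the parametric plot and to be\n# compared with the 160.6 m obtained from the thick-lens TWISS above.\nsin_half_mu = 2.8/4\nmu = 2*np.arcsin(sin_half_mu)\nbeta_max_thin = 100*(1 + sin_half_mu)/np.sin(mu)\nbeta_max_thin",
"_____no_output_____"
]
],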
[
[
"6. Halve the focusing strength of the quadrupoles, what is the effect of it on the $\\beta_{max}$ and $\\beta_{min}$ and on the $\\Delta \\mu$? Compare with the parametric plots.",
"_____no_output_____"
]
],
[
[
"myString='''\n\ncellLength=100;\nquadrupoleLenght=5;\nmyK=1.4/cellLength/quadrupoleLenght;// m^-2\ntwiss, file=firstTwiss.txt;\n'''\n\nmyMad.input(myString);",
"_____no_output_____"
],
[
"myString='''\nvalue, table(SUMM,Q1);\nvalue, table(SUMM,Q2);\nvalue, table(SUMM,betxmax);\nvalue, table(SUMM,betymax);\n'''\nmyMad.input(myString);",
"_____no_output_____"
],
[
"myDFTable=myMad.table.twiss.dframe()\nmyDFTable",
"_____no_output_____"
]
],
[
[
"If we reduce the k the bmax increases and therfore the beam size.",
"_____no_output_____"
]
],
[
[
"# If compared with the thin lens approximtion\n# From the plot\n# K1*Lcell*Lq\n0.0028*100*5",
"_____no_output_____"
],
[
"#Max. betax\n2.042*100",
"_____no_output_____"
],
[
"# Value from MADX\nbmax=np.max(myDFTable[\"betx\"])\nbmax",
"_____no_output_____"
]
],
[
[
"Better agreement is observed as we move to the left on the parametric plot (smaller K or smaller Lq for a fixed value of the cell length) as the thin lens approximation condition is better satisfied.",
"_____no_output_____"
],
[
"7. Compute the maximum beam size $\\sigma$ assuming a normalized emittance of 3 mrad mm and $E_{tot}= 7 TeV$.\n\nOne has to remember\n\\begin{equation} \\sigma=\\sqrt{\\frac{\\beta \\epsilon_n}{ \\gamma_r}} \\end{equation} ",
"_____no_output_____"
]
],
[
[
"import numpy as np\nemittance_n=3e-6 #m*rad\nbeta_gamma=7000/.938 # this is an approximation\nnp.sqrt(myDFTable['betx'].max()*emittance_n/beta_gamma)",
"_____no_output_____"
]
],
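[
[
"# Added cross-check (assumption: proton rest energy 0.938 GeV): at E_tot = 7 TeV the\n# relativistic beta is essentially 1, so beta_r*gamma_r ~ gamma_r ~ E/m and the\n# approximation used in the previous cell is excellent.\ngamma_r = 7000/0.938\nbeta_r = np.sqrt(1 - 1/gamma_r**2)\ngamma_r, beta_r*gamma_r",
"_____no_output_____"
]
],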
[
[
"# Varying the energy\n8. Try with $E_{tot}$ = 0.7 GeV: what is the MAD-X error message?",
"_____no_output_____"
]
],
[
[
"# with this simple wrapper in case of error the code enter in an infinite loop you have to stop manually.\nmyString='''\nbeam, particle=proton, energy=0.7;\ntitle, 'My third twiss';\ntwiss;\n'''\nmyMad.input(myString);",
"_____no_output_____"
]
],
[
[
"There is an error due to the fact that the total energy is lower than the rest proton mass.\n",
"_____no_output_____"
],
[
"# With f=20 m\n9. Try with f= 20 m: what is the MAD-x error message?",
"_____no_output_____"
]
],
[
[
"myMad = Madx(stdout=True)",
"_____no_output_____"
],
[
"myString='''\n! *********************************************************************\n! Definition of parameters\n! *********************************************************************\n\nl_cell=100;\nquadrupoleLenght=5;\nf=20;\nmyK:=1/f/quadrupoleLenght;// m^-2\n\nvalue myK;\n\n! *********************************************************************\n! Definition of magnet\n! ********************************************************************* \nQF: quadrupole, L=quadrupoleLenght, K1:=myK;\nQD: quadrupole, L=quadrupoleLenght, K1:=-myK;\n\n\n! *********************************************************************\n! Definition of sequence\n! *********************************************************************\nmyCell:sequence, refer=entry, L=L_CELL;\nquadrupole1: QF, at=0;\nmarker1: marker, at=25;\nquadrupole2: QD, at=50;\nendsequence;\n\n! *********************************************************************\n! Definition of beam\n! *********************************************************************\nbeam, particle=proton, energy=2;\n\n! *********************************************************************\n! Use of the sequence\n! *********************************************************************\nuse, sequence=myCell;\n\n! *********************************************************************\n! TWISS\n! *********************************************************************\nselect, flag=twiss, clear;\nselect, flag=twiss, column=name, keyword, s, betx, alfx, mux, bety, alfy, muy, x, px, y, py, dx, dy, dx, dpx, dy, dpy;\ntwiss, file=f20.txt;\n'''\nmyMad.input(myString);",
"_____no_output_____"
]
],
[
[
"**INTERPRETATION**: The cell is unstable due to the fact that the focal length is too short. Please note the value of the cosmux and cosmuy. **REMEMBER** |Trace(M)|< 2; -1 <= cos $\\mu$ <= 1",
"_____no_output_____"
],
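[
"A thin-lens cross-check (added note): stability requires $\\sin(\\mu/2)=L_{cell}/(4f)\\leq 1$. With f = 20 m we get $L_{cell}/(4f)=100/80=1.25>1$, i.e. $\\cos\\mu=1-2\\sin^2(\\mu/2)=-2.125$, outside the stable range, which is why MAD-X cannot find a periodic (stable) solution.",
"_____no_output_____"
],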
[
"# EXTRA\n\n# Adding markers\n\nThis is an example to add markers in the sequence using a macros.\n",
"_____no_output_____"
]
],
[
[
"myMad=Madx(stdout=True)\nmyString='''\n\n! *********************************************************************\n! Definition of parameters\n! *********************************************************************\n\noption, echo=false, info=false, warn=false;\nl_cell=100;\nquadrupoleLenght=5;\nf=200;\nmyK:=1/f/quadrupoleLenght;// m^-2\n\n! *********************************************************************\n! Definition of magnet\n! ********************************************************************* \nQF: quadrupole, L=quadrupoleLenght, K1:=myK;\nQD: quadrupole, L=quadrupoleLenght, K1:=-myK;\n\n\ninstallMarkers(nn): macro ={\nmarkernn: marker, at=nn;\n!value,f;\n};\nN=6;\n\n\n! *********************************************************************\n! Definition of sequence\n! *********************************************************************\nmyCell:sequence, REFER=centre, L=L_CELL;\nquadrupole1: QF, at=2.5;\nwhile (N<50) {\nexec, installMarkers($N);\nN=N+1;\n}\nquadrupole2: QD, at=52.5;\nN=56;\nwhile (N<100) {\nexec, installMarkers($N);\nN=N+1;\n}\nendsequence;\n\n! *********************************************************************\n! Definition of beam\n! *********************************************************************\nbeam, particle=proton, energy=2;\n\n! *********************************************************************\n! Use of the sequence\n! *********************************************************************\nuse, sequence=myCell;\n\n! *********************************************************************\n! TWISS\n! *********************************************************************\nselect, flag=twiss, clear;\nselect, flag=twiss, column=name, keyword, L, s, betx, alfx, mux, bety, alfy, muy, x, px, y, py, dx, dy, dx, dpx, dy, dpy;\n\ntitle, 'My fourth twiss';\ntwiss, file=WithMarkers.txt;\n'''\nmyMad.input(myString);",
"_____no_output_____"
],
[
"myDF=myMad.table.twiss.dframe()\nmyDF.head()",
"_____no_output_____"
],
[
"%matplotlib notebook\nplt.plot(myDF['s'],myDF['betx'],'.-b',label='$\\\\beta_x$')\nplt.plot(myDF['s'],myDF['bety'],'.-r',label='$\\\\beta_y$')\nplt.xlabel('s [m]')\nplt.ylabel('[m]')\nplt.legend(loc='best')\nplt.grid()",
"_____no_output_____"
],
[
"np.trapz(1/myDF['betx'],myDF['s'])/2/np.pi",
"_____no_output_____"
],
[
"# Value from MADX\nbmax=np.max(myDF[\"betx\"])\nbmax",
"_____no_output_____"
]
],
[
[
"# Thin lens approximation",
"_____no_output_____"
]
],
[
[
"myString='''\nselect, flag=makethin, class=quadrupole, slice=5; \nmakethin,sequence=myCell;\nuse,sequence=myCell; \ntwiss,sequence=myCell,file=thin.txt;\n'''\nmyMad.input(myString);",
"_____no_output_____"
],
[
"myDF_thin=myMad.table.twiss.dframe()\nmyDF_thin",
"_____no_output_____"
],
[
"%matplotlib notebook\nplt.plot(myDF_thin['s'],myDF_thin['betx'],'.-b',label='$\\\\beta_x$')\nplt.plot(myDF_thin['s'],myDF_thin['bety'],'.-r',label='$\\\\beta_y$')\nplt.xlabel('s [m]')\nplt.ylabel('[m]')\nplt.legend(loc='best')\nplt.grid()",
"_____no_output_____"
],
[
"np.trapz(1/myDF_thin['betx'],myDF_thin['s'])/2/np.pi",
"_____no_output_____"
],
[
"mux=np.max(myDF_thin[\"mux\"])\nmux",
"_____no_output_____"
],
[
"# Value from MADX\nbmax=np.max(myDF_thin[\"betx\"])\nbmax",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a26bc5e6cf9d831b896a79a643aa929e9e78984
| 9,555 |
ipynb
|
Jupyter Notebook
|
Algorithms/SearchingAndSorting/QuickSort.ipynb
|
prashanta99/Python_Computer_Science
|
f971e03a4c948747eccf99658ba7b81c55749073
|
[
"MIT"
] | null | null | null |
Algorithms/SearchingAndSorting/QuickSort.ipynb
|
prashanta99/Python_Computer_Science
|
f971e03a4c948747eccf99658ba7b81c55749073
|
[
"MIT"
] | null | null | null |
Algorithms/SearchingAndSorting/QuickSort.ipynb
|
prashanta99/Python_Computer_Science
|
f971e03a4c948747eccf99658ba7b81c55749073
|
[
"MIT"
] | null | null | null | 32.5 | 167 | 0.506436 |
[
[
[
"# QuickSort\n\n- Based on Divide and Conquer Technique\n\n- The array is divided into **Partitions Recursively** \n\n- The technique to create the partitions is the **backbone** of this Algorithm",
"_____no_output_____"
],
[
"### QuickSort - What it is\n\nIn Short:\n- 1. Define a Pivot element ( can be first element, last element or any random element from the array)\n- 2. Find the correct position of the Pivot element in the array\n- 3. This correct position of Pivot will partition the array in \n Left partition (values less than Pivot) and \n Right partition (values greater than Pivot) (w.r.t pivot)\n- 4. Now take the Left Partition - Repeat steps 1-3\n- 5. Take the Right Partition - Repeat steps 1-3\n- 6. Repeat 4 and 5 until there is only one element left in the partition\n\n<span style='color:red'>**Note:**</span> Steps 4 and 5 can be handled **recursively**\n\n### Highlevel Steps\n\n1. To create this partition, we choose a random element as **Pivot element** <br>\n Ex, <span style='color:gray'>**| 10 | 15 | 1 | 2 | 9 | 16 | 11 |**</span> -- Choose **10** as Pivot Element<br>\n \n\n2. Move elements **Less than the Pivot Element** to **Left of the Pivot Element**. This makes up <span style='color:blue'>**Left Partition**</span> & <br>\n <span style='color:gray'>**| 1 | 2 | 9 |**</span><br>\n \n\n3. Move elements **Greater than the Pivot Element** to **Right of the Pivot Element**. This makes up <span style='color:blue'>**Right Partition**</span> <br>\n <span style='color:gray'>**| 15 | 16 | 11 |**</span><br>\n \n\n4. Equal Elements can go in either of the partitions <br> \n\n After one complete iteration we get:\n<span style='color:maroon'>**|Left Partition | Pivot | Right Partition|**</span> <br>\n<span style='color:gray'>**| 1 | 2 | 9 | <span style='color:blue'><u>10</u></span> | 15 | 16 | 11 |**</span> <br>\nWe see that, in this process, the Pivot element **10** gets into its **correct sorted position**\n\n\n5. Repeat steps-1 to 4 each for the Left and Right subpartitions, Continue until there is only one element left \n\n",
"_____no_output_____"
],
[
"### Steps to Create the Partitions\nPerformed for each subpartitions - At beginning whole array is one Partition\n\n1. Select the Pivot Element - Say 10\n\n \n2. Create a **Start Marker** that will start from left end and move right\n\n \n3. Create a **End Marker** that will start from right end and move left\n\n \n4. Move the **Start Marker** to right and compare that element with the Pivot element\n -- Say 15 compare with 10<br>\n Keep moving the **Start Marker** as long as the <span style='color:maroon'>element is **<=** Pivot Element</span> <br>\n\n\n5. Move the **End Marker** to left and compare that element with the Pivot element\n -- Say 11 compare with 10<br>\n Keep moving the **End Marker** to left as long as the <span style='color:maroon'>element is **>** Pivot Element</span> <br>\n \n6. When both the conditions of step-4 and step-5 are met --> **Swap** the elements at the start and end positions\n\n \n7. After swapping continue with steps 4 and 5 from where the start and end stopped\n \n \n8. When start marker and end marker crosses each other - **Swap** the Pivot element with the element at the End Marker\n \n9. The **End Marker** thus bring the Pivot element to its correct position and this divides the array into two partitions<br>\n At this time - we get <br>\n a. The Pivot element placed at its correct position<br>\n b. creation of left partition<br>\n c. creation of right partition<br>\n <b>[Left Partition] [Pivot] [Right Partition]</b>\n \n \n10. We then take the left partition - Repeat steps - 1 to 9\n \n \n11. Next we take the right partition - Repeat steps - 1 to 9\n \n\n\n \n10. Steps 10 and 11 are **Recursive**",
"_____no_output_____"
]
],
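[
[
"### Worked example of one partition pass (added for illustration)\n\nTake `| 10 | 15 | 1 | 2 | 9 | 16 | 11 |` with **10** as the Pivot:\n\n1. The Start Marker stops at **15** (> 10); the End Marker moves left past 11 and 16 and stops at **9** (<= 10) --> swap 15 and 9: `| 10 | 9 | 1 | 2 | 15 | 16 | 11 |`\n2. The Start Marker moves past 9, 1, 2 and stops at **15**; the End Marker moves left to **2** and the markers cross\n3. Markers crossed --> swap the Pivot (10) with the element at the End Marker (2): `| 2 | 9 | 1 | 10 | 15 | 16 | 11 |`\n\nThe Pivot 10 is now in its correct sorted position, with Left Partition `| 2 | 9 | 1 |` and Right Partition `| 15 | 16 | 11 |`.",
"_____no_output_____"
]
],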
[
[
"def create_partition(numlist, start, end):\n \"\"\"\n This \n input: numlist - numlistition to work on\n start - Left marker\n end - Right Marker\n \"\"\"\n \n pivot = numlist[start]\n pivot_ind = start\n N = end\n \n while(start < end):\n while(numlist[start] <= pivot):\n if start == N :\n break\n start += 1\n \n while(numlist[end] > pivot):\n if end == 0:\n break\n end -= 1\n \n if start < end:\n numlist[start], numlist[end] = numlist[end], numlist[start]\n \n numlist[end], numlist[pivot_ind] = numlist[pivot_ind], numlist[end]\n return end\n \n ",
"_____no_output_____"
],
[
"# Note: Though we are using Recursion, No explicit Base case is Needed as we are Sorting InPlace\n# and there is implicit basecase in the condition if start< end\n# Implicit base case for exit is when start = end (when there is only one element left)\n \ndef quicksort(numlist, start, end):\n \n if start < end:\n loc = create_partition(numlist, start, end) # partition location\n quicksort(numlist, start, loc - 1) # Left Partition\n quicksort(numlist, loc+1, end) # Right Partition\n",
"_____no_output_____"
],
[
"print()\nnumlist = [10, 15, 1, 2, 9, 16, 11]\nprint(\"Original :\",numlist)\nquicksort(numlist, 0, len(numlist) - 1)\nprint(\"Sorted. :\",numlist)\n\n\nprint()\nnumlist = [7 , 6, 10, 5, 9, 2, 1, 15, 7]\nprint(\"Original :\",numlist)\nquicksort(numlist, 0, len(numlist) - 1)\nprint(\"Sorted. :\",numlist) ",
"\nOriginal : [10, 15, 1, 2, 9, 16, 11]\nSorted. : [1, 2, 9, 10, 11, 15, 16]\n\nOriginal : [7, 6, 10, 5, 9, 2, 1, 15, 7]\nSorted. : [1, 2, 5, 6, 7, 7, 9, 10, 15]\n"
]
],
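[
[
"# Added sanity check (not part of the original notebook): verify quicksort against\n# Python's built-in sorted() on random inputs.\nimport random\n\nfor _ in range(100):\n    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]\n    expected = sorted(data)\n    quicksort(data, 0, len(data) - 1)\n    assert data == expected, data\nprint('quicksort matches sorted() on 100 random lists')",
"_____no_output_____"
]
],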
[
[
"# Option2: Using For Loop instead of While loops",
"_____no_output_____"
]
],
[
[
"def quicksort2(arr, start, end):\n if start <= end:\n loc = create_partitions2(arr, start, end)\n quicksort(arr, start, loc - 1)\n quicksort(arr, loc + 1, end)\n",
"_____no_output_____"
],
[
"def create_partitions2(arr, start, end):\n pivot = arr[start]\n N = len(arr)\n \n i = start - 1\n for j in range(start, end + 1):\n if arr[j] <= pivot:\n i += 1\n arr[i], arr[j] = arr[j], arr[i]\n arr[i], arr[start] = arr[start], arr[i]\n \n return i",
"_____no_output_____"
],
[
"print()\nnumlist = [7 , 6, 10, 5, 9, 2, 1, 15, 7]\nprint(\"Original :\",numlist)\nquicksort2(numlist, 0, len(numlist) - 1)\nprint(\"Sorted. :\",numlist) ",
"\nOriginal : [7, 6, 10, 5, 9, 2, 1, 15, 7]\nSorted. : [1, 2, 5, 6, 7, 7, 9, 10, 15]\n"
]
]
] |
[
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
4a26bfa3326442d39f66c7686ac9f211b5b630e8
| 169,177 |
ipynb
|
Jupyter Notebook
|
udacity lesson 8 time series/l08c07_forecasting_with_stateful_rnn.ipynb
|
seyfullah/stockprediction
|
aab0547cc1316a116ad032137722b73a36e67a51
|
[
"Apache-2.0"
] | null | null | null |
udacity lesson 8 time series/l08c07_forecasting_with_stateful_rnn.ipynb
|
seyfullah/stockprediction
|
aab0547cc1316a116ad032137722b73a36e67a51
|
[
"Apache-2.0"
] | null | null | null |
udacity lesson 8 time series/l08c07_forecasting_with_stateful_rnn.ipynb
|
seyfullah/stockprediction
|
aab0547cc1316a116ad032137722b73a36e67a51
|
[
"Apache-2.0"
] | null | null | null | 158.25725 | 69,568 | 0.84382 |
[
[
[
"##### Copyright 2018 The TensorFlow Authors.",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Forecasting with a stateful RNN",
"_____no_output_____"
],
[
"<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l08c07_forecasting_with_stateful_rnn.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l08c07_forecasting_with_stateful_rnn.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"## Setup",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\nkeras = tf.keras",
"_____no_output_____"
],
[
"def plot_series(time, series, format=\"-\", start=0, end=None, label=None):\n plt.plot(time[start:end], series[start:end], format, label=label)\n plt.xlabel(\"Time\")\n plt.ylabel(\"Value\")\n if label:\n plt.legend(fontsize=14)\n plt.grid(True)\n \ndef trend(time, slope=0):\n return slope * time\n \n \ndef seasonal_pattern(season_time):\n \"\"\"Just an arbitrary pattern, you can change it if you wish\"\"\"\n return np.where(season_time < 0.4,\n np.cos(season_time * 2 * np.pi),\n 1 / np.exp(3 * season_time))\n\n \ndef seasonality(time, period, amplitude=1, phase=0):\n \"\"\"Repeats the same pattern at each period\"\"\"\n season_time = ((time + phase) % period) / period\n return amplitude * seasonal_pattern(season_time)\n \n \ndef white_noise(time, noise_level=1, seed=None):\n rnd = np.random.RandomState(seed)\n return rnd.randn(len(time)) * noise_level",
"_____no_output_____"
],
[
"time = np.arange(4 * 365 + 1)\n\nslope = 0.05\nbaseline = 10\namplitude = 40\nseries = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)\n\nnoise_level = 5\nnoise = white_noise(time, noise_level, seed=42)\n\nseries += noise\n\nplt.figure(figsize=(10, 6))\nplot_series(time, series)\nplt.show()",
"_____no_output_____"
],
[
"split_time = 1000\ntime_train = time[:split_time]\nx_train = series[:split_time]\ntime_valid = time[split_time:]\nx_valid = series[split_time:]",
"_____no_output_____"
]
],
[
[
"## Stateful RNN Forecasting",
"_____no_output_____"
]
],
[
[
"def sequential_window_dataset(series, window_size):\n series = tf.expand_dims(series, axis=-1)\n ds = tf.data.Dataset.from_tensor_slices(series)\n ds = ds.window(window_size + 1, shift=window_size, drop_remainder=True)\n ds = ds.flat_map(lambda window: window.batch(window_size + 1))\n ds = ds.map(lambda window: (window[:-1], window[1:]))\n return ds.batch(1).prefetch(1)",
"_____no_output_____"
],
[
"for X_batch, y_batch in sequential_window_dataset(tf.range(10), 3):\n print(X_batch.numpy(), y_batch.numpy())",
"[[[0]\n [1]\n [2]]] [[[1]\n [2]\n [3]]]\n[[[3]\n [4]\n [5]]] [[[4]\n [5]\n [6]]]\n[[[6]\n [7]\n [8]]] [[[7]\n [8]\n [9]]]\n"
],
[
"class ResetStatesCallback(keras.callbacks.Callback):\n def on_epoch_begin(self, epoch, logs):\n self.model.reset_states()",
"_____no_output_____"
],
[
"keras.backend.clear_session()\ntf.random.set_seed(42)\nnp.random.seed(42)\n\nwindow_size = 30\ntrain_set = sequential_window_dataset(x_train, window_size)\n\nmodel = keras.models.Sequential([\n keras.layers.SimpleRNN(100, return_sequences=True, stateful=True,\n batch_input_shape=[1, None, 1]),\n keras.layers.SimpleRNN(100, return_sequences=True, stateful=True),\n keras.layers.Dense(1),\n keras.layers.Lambda(lambda x: x * 200.0)\n])\nlr_schedule = keras.callbacks.LearningRateScheduler(\n lambda epoch: 1e-8 * 10**(epoch / 30))\nreset_states = ResetStatesCallback()\noptimizer = keras.optimizers.SGD(lr=1e-8, momentum=0.9)\nmodel.compile(loss=keras.losses.Huber(),\n optimizer=optimizer,\n metrics=[\"mae\"])\nhistory = model.fit(train_set, epochs=100,\n callbacks=[lr_schedule, reset_states])",
"Epoch 1/100\n33/33 [==============================] - 2s 9ms/step - loss: 119.4313 - mae: 119.9313\nEpoch 2/100\n33/33 [==============================] - 0s 8ms/step - loss: 90.0519 - mae: 90.5519\nEpoch 3/100\n33/33 [==============================] - 0s 7ms/step - loss: 56.3640 - mae: 56.8630\nEpoch 4/100\n33/33 [==============================] - 0s 7ms/step - loss: 35.0965 - mae: 35.5923\nEpoch 5/100\n33/33 [==============================] - 0s 7ms/step - loss: 30.4513 - mae: 30.9471\nEpoch 6/100\n33/33 [==============================] - 0s 7ms/step - loss: 29.5937 - mae: 30.0834\nEpoch 7/100\n33/33 [==============================] - 0s 7ms/step - loss: 28.6775 - mae: 29.1709\nEpoch 8/100\n33/33 [==============================] - 0s 8ms/step - loss: 27.6872 - mae: 28.1803\nEpoch 9/100\n33/33 [==============================] - 0s 7ms/step - loss: 26.8164 - mae: 27.3123\nEpoch 10/100\n33/33 [==============================] - 0s 8ms/step - loss: 25.9618 - mae: 26.4572\nEpoch 11/100\n33/33 [==============================] - 0s 5ms/step - loss: 25.2754 - mae: 25.7685\nEpoch 12/100\n33/33 [==============================] - 0s 7ms/step - loss: 24.7837 - mae: 25.2743\nEpoch 13/100\n33/33 [==============================] - 0s 7ms/step - loss: 24.4394 - mae: 24.9314\nEpoch 14/100\n33/33 [==============================] - 0s 7ms/step - loss: 24.1342 - mae: 24.6261\nEpoch 15/100\n33/33 [==============================] - 0s 7ms/step - loss: 23.8476 - mae: 24.3397\nEpoch 16/100\n33/33 [==============================] - 0s 7ms/step - loss: 23.5582 - mae: 24.0525\nEpoch 17/100\n33/33 [==============================] - 0s 7ms/step - loss: 23.2495 - mae: 23.7450\nEpoch 18/100\n33/33 [==============================] - 0s 7ms/step - loss: 22.9201 - mae: 23.4147\nEpoch 19/100\n33/33 [==============================] - 0s 7ms/step - loss: 22.5683 - mae: 23.0612\nEpoch 20/100\n33/33 [==============================] - 0s 7ms/step - loss: 22.1891 - mae: 22.6803\nEpoch 21/100\n33/33 [==============================] - 0s 7ms/step - loss: 21.8070 - mae: 22.2975\nEpoch 22/100\n33/33 [==============================] - 0s 8ms/step - loss: 21.4132 - mae: 21.9034\nEpoch 23/100\n33/33 [==============================] - 0s 7ms/step - loss: 21.0061 - mae: 21.4978\nEpoch 24/100\n33/33 [==============================] - 0s 7ms/step - loss: 20.5836 - mae: 21.0754\nEpoch 25/100\n33/33 [==============================] - 0s 7ms/step - loss: 20.1647 - mae: 20.6565\nEpoch 26/100\n33/33 [==============================] - 0s 7ms/step - loss: 19.7060 - mae: 20.1995\nEpoch 27/100\n33/33 [==============================] - 0s 7ms/step - loss: 19.2027 - mae: 19.6949\nEpoch 28/100\n33/33 [==============================] - 0s 7ms/step - loss: 18.7009 - mae: 19.1932\nEpoch 29/100\n33/33 [==============================] - 0s 7ms/step - loss: 18.2159 - mae: 18.7067\nEpoch 30/100\n33/33 [==============================] - 0s 8ms/step - loss: 17.7691 - mae: 18.2563\nEpoch 31/100\n33/33 [==============================] - 0s 7ms/step - loss: 17.3457 - mae: 17.8295\nEpoch 32/100\n33/33 [==============================] - 0s 7ms/step - loss: 16.9574 - mae: 17.4414\nEpoch 33/100\n33/33 [==============================] - 0s 8ms/step - loss: 16.5762 - mae: 17.0622\nEpoch 34/100\n33/33 [==============================] - 0s 7ms/step - loss: 16.1832 - mae: 16.6715\nEpoch 35/100\n33/33 [==============================] - 0s 7ms/step - loss: 15.7808 - mae: 16.2682\nEpoch 36/100\n33/33 [==============================] - 0s 7ms/step - loss: 15.3669 - mae: 
15.8544\nEpoch 37/100\n33/33 [==============================] - 0s 7ms/step - loss: 14.9374 - mae: 15.4263\nEpoch 38/100\n33/33 [==============================] - 0s 7ms/step - loss: 14.5034 - mae: 14.9849\nEpoch 39/100\n33/33 [==============================] - 0s 7ms/step - loss: 14.2027 - mae: 14.6829\nEpoch 40/100\n33/33 [==============================] - 0s 6ms/step - loss: 13.9580 - mae: 14.4411\nEpoch 41/100\n33/33 [==============================] - 0s 7ms/step - loss: 13.6996 - mae: 14.1806\nEpoch 42/100\n33/33 [==============================] - 0s 7ms/step - loss: 13.4525 - mae: 13.9377\nEpoch 43/100\n33/33 [==============================] - 0s 8ms/step - loss: 13.1487 - mae: 13.6336\nEpoch 44/100\n33/33 [==============================] - 0s 8ms/step - loss: 13.1089 - mae: 13.5971\nEpoch 45/100\n33/33 [==============================] - 0s 8ms/step - loss: 12.8749 - mae: 13.3585\nEpoch 46/100\n33/33 [==============================] - 0s 8ms/step - loss: 12.6673 - mae: 13.1500\nEpoch 47/100\n33/33 [==============================] - 0s 7ms/step - loss: 12.2477 - mae: 12.7328\nEpoch 48/100\n33/33 [==============================] - 0s 7ms/step - loss: 11.9487 - mae: 12.4308\nEpoch 49/100\n33/33 [==============================] - 0s 8ms/step - loss: 12.4024 - mae: 12.8900\nEpoch 50/100\n33/33 [==============================] - 0s 7ms/step - loss: 12.0542 - mae: 12.5407\nEpoch 51/100\n33/33 [==============================] - 0s 8ms/step - loss: 11.6568 - mae: 12.1472\nEpoch 52/100\n33/33 [==============================] - 0s 7ms/step - loss: 10.6105 - mae: 11.0937\nEpoch 53/100\n33/33 [==============================] - 0s 8ms/step - loss: 12.3417 - mae: 12.8290\nEpoch 54/100\n33/33 [==============================] - 0s 7ms/step - loss: 11.0173 - mae: 11.4982\nEpoch 55/100\n33/33 [==============================] - 0s 7ms/step - loss: 12.3301 - mae: 12.8176\nEpoch 56/100\n33/33 [==============================] - 0s 8ms/step - loss: 10.4004 - mae: 10.8789\nEpoch 57/100\n33/33 [==============================] - 0s 8ms/step - loss: 9.3884 - mae: 9.8718\nEpoch 58/100\n33/33 [==============================] - 0s 6ms/step - loss: 10.6490 - mae: 11.1341\nEpoch 59/100\n33/33 [==============================] - 0s 7ms/step - loss: 10.1021 - mae: 10.5810\nEpoch 60/100\n33/33 [==============================] - 0s 7ms/step - loss: 11.3662 - mae: 11.8522\nEpoch 61/100\n33/33 [==============================] - 0s 8ms/step - loss: 8.9484 - mae: 9.4266\nEpoch 62/100\n33/33 [==============================] - 0s 7ms/step - loss: 14.5097 - mae: 14.9972\nEpoch 63/100\n33/33 [==============================] - 0s 8ms/step - loss: 8.2728 - mae: 8.7578\nEpoch 64/100\n33/33 [==============================] - 0s 8ms/step - loss: 13.1177 - mae: 13.6079\nEpoch 65/100\n33/33 [==============================] - 0s 7ms/step - loss: 8.7895 - mae: 9.2745\nEpoch 66/100\n33/33 [==============================] - 0s 7ms/step - loss: 11.9236 - mae: 12.4163\nEpoch 67/100\n33/33 [==============================] - 0s 7ms/step - loss: 9.9367 - mae: 10.4259\nEpoch 68/100\n33/33 [==============================] - 0s 8ms/step - loss: 16.1757 - mae: 16.6682\nEpoch 69/100\n33/33 [==============================] - 0s 7ms/step - loss: 10.7244 - mae: 11.2136\nEpoch 70/100\n33/33 [==============================] - 0s 7ms/step - loss: 9.4512 - mae: 9.9357\nEpoch 71/100\n33/33 [==============================] - 0s 7ms/step - loss: 9.9397 - mae: 10.4251\nEpoch 72/100\n33/33 [==============================] - 0s 8ms/step - loss: 11.9470 - mae: 
12.4392\nEpoch 73/100\n33/33 [==============================] - 0s 7ms/step - loss: 12.1749 - mae: 12.6687\nEpoch 74/100\n33/33 [==============================] - 0s 8ms/step - loss: 20.6613 - mae: 21.1578\nEpoch 75/100\n33/33 [==============================] - 0s 7ms/step - loss: 16.6655 - mae: 17.1586\nEpoch 76/100\n33/33 [==============================] - 0s 7ms/step - loss: 11.1370 - mae: 11.6304\nEpoch 77/100\n33/33 [==============================] - 0s 7ms/step - loss: 17.7730 - mae: 18.2651\nEpoch 78/100\n33/33 [==============================] - 0s 8ms/step - loss: 23.4866 - mae: 23.9845\nEpoch 79/100\n33/33 [==============================] - 0s 7ms/step - loss: 17.5961 - mae: 18.0917\nEpoch 80/100\n33/33 [==============================] - 0s 8ms/step - loss: 20.9069 - mae: 21.4020\nEpoch 81/100\n33/33 [==============================] - 0s 8ms/step - loss: 10.2051 - mae: 10.6958\nEpoch 82/100\n33/33 [==============================] - 0s 8ms/step - loss: 22.0845 - mae: 22.5819\nEpoch 83/100\n33/33 [==============================] - 0s 7ms/step - loss: 31.4105 - mae: 31.9075\nEpoch 84/100\n33/33 [==============================] - 0s 7ms/step - loss: 21.8840 - mae: 22.3786\nEpoch 85/100\n33/33 [==============================] - 0s 8ms/step - loss: 42.7594 - mae: 43.2557\nEpoch 86/100\n33/33 [==============================] - 0s 8ms/step - loss: 51.8808 - mae: 52.3790\nEpoch 87/100\n33/33 [==============================] - 0s 7ms/step - loss: 52.1129 - mae: 52.6118\nEpoch 88/100\n33/33 [==============================] - 0s 8ms/step - loss: 29.4783 - mae: 29.9770\nEpoch 89/100\n33/33 [==============================] - 0s 7ms/step - loss: 42.6453 - mae: 43.1440\nEpoch 90/100\n33/33 [==============================] - 0s 8ms/step - loss: 31.1295 - mae: 31.6230\nEpoch 91/100\n33/33 [==============================] - 0s 7ms/step - loss: 47.7657 - mae: 48.2645\nEpoch 92/100\n33/33 [==============================] - 0s 8ms/step - loss: 32.1051 - mae: 32.6013\nEpoch 93/100\n33/33 [==============================] - 0s 7ms/step - loss: 30.1079 - mae: 30.6053\nEpoch 94/100\n33/33 [==============================] - 0s 7ms/step - loss: 36.9649 - mae: 37.4600\nEpoch 95/100\n33/33 [==============================] - 0s 8ms/step - loss: 30.2277 - mae: 30.7257\nEpoch 96/100\n33/33 [==============================] - 0s 7ms/step - loss: 52.6630 - mae: 53.1618\nEpoch 97/100\n33/33 [==============================] - 0s 8ms/step - loss: 40.8218 - mae: 41.3216\nEpoch 98/100\n33/33 [==============================] - 0s 8ms/step - loss: 44.8692 - mae: 45.3634\nEpoch 99/100\n33/33 [==============================] - 0s 6ms/step - loss: 54.9029 - mae: 55.3962\nEpoch 100/100\n33/33 [==============================] - 0s 7ms/step - loss: 69.9059 - mae: 70.4046\n"
],
[
"plt.semilogx(history.history[\"lr\"], history.history[\"loss\"])\nplt.axis([1e-8, 1e-4, 0, 30])",
"_____no_output_____"
],
[
"keras.backend.clear_session()\ntf.random.set_seed(42)\nnp.random.seed(42)\n\nwindow_size = 30\ntrain_set = sequential_window_dataset(x_train, window_size)\nvalid_set = sequential_window_dataset(x_valid, window_size)\n\nmodel = keras.models.Sequential([\n keras.layers.SimpleRNN(100, return_sequences=True, stateful=True,\n batch_input_shape=[1, None, 1]),\n keras.layers.SimpleRNN(100, return_sequences=True, stateful=True),\n keras.layers.Dense(1),\n keras.layers.Lambda(lambda x: x * 200.0)\n])\noptimizer = keras.optimizers.SGD(lr=1e-7, momentum=0.9)\nmodel.compile(loss=keras.losses.Huber(),\n optimizer=optimizer,\n metrics=[\"mae\"])\nreset_states = ResetStatesCallback()\nmodel_checkpoint = keras.callbacks.ModelCheckpoint(\n \"my_checkpoint.h5\", save_best_only=True)\nearly_stopping = keras.callbacks.EarlyStopping(patience=50)\nmodel.fit(train_set, epochs=500,\n validation_data=valid_set,\n callbacks=[early_stopping, model_checkpoint, reset_states])",
"Epoch 1/500\n33/33 [==============================] - 2s 30ms/step - loss: 92.9155 - mae: 93.4144 - val_loss: 25.0215 - val_mae: 25.5176\nEpoch 2/500\n33/33 [==============================] - 0s 10ms/step - loss: 29.4368 - mae: 29.9290 - val_loss: 10.9142 - val_mae: 11.4074\nEpoch 3/500\n33/33 [==============================] - 0s 10ms/step - loss: 25.5872 - mae: 26.0791 - val_loss: 12.0569 - val_mae: 12.5479\nEpoch 4/500\n33/33 [==============================] - 0s 10ms/step - loss: 23.9572 - mae: 24.4492 - val_loss: 10.4488 - val_mae: 10.9391\nEpoch 5/500\n33/33 [==============================] - 0s 10ms/step - loss: 23.0645 - mae: 23.5578 - val_loss: 9.9581 - val_mae: 10.4499\nEpoch 6/500\n33/33 [==============================] - 0s 10ms/step - loss: 22.0755 - mae: 22.5678 - val_loss: 9.1539 - val_mae: 9.6415\nEpoch 7/500\n33/33 [==============================] - 0s 10ms/step - loss: 21.2401 - mae: 21.7314 - val_loss: 8.3393 - val_mae: 8.8249\nEpoch 8/500\n33/33 [==============================] - 0s 10ms/step - loss: 20.4172 - mae: 20.9068 - val_loss: 7.8259 - val_mae: 8.3123\nEpoch 9/500\n33/33 [==============================] - 0s 10ms/step - loss: 19.6565 - mae: 20.1471 - val_loss: 7.3023 - val_mae: 7.7879\nEpoch 10/500\n33/33 [==============================] - 0s 10ms/step - loss: 19.0049 - mae: 19.4990 - val_loss: 6.8635 - val_mae: 7.3454\nEpoch 11/500\n33/33 [==============================] - 0s 10ms/step - loss: 18.3971 - mae: 18.8908 - val_loss: 6.5418 - val_mae: 7.0255\nEpoch 12/500\n33/33 [==============================] - 0s 10ms/step - loss: 17.8525 - mae: 18.3409 - val_loss: 6.3124 - val_mae: 6.7950\nEpoch 13/500\n33/33 [==============================] - 0s 10ms/step - loss: 17.3973 - mae: 17.8864 - val_loss: 6.1875 - val_mae: 6.6708\nEpoch 14/500\n33/33 [==============================] - 0s 10ms/step - loss: 16.9643 - mae: 17.4542 - val_loss: 6.0323 - val_mae: 6.5165\nEpoch 15/500\n33/33 [==============================] - 0s 9ms/step - loss: 16.5861 - mae: 17.0715 - val_loss: 5.9862 - val_mae: 6.4694\nEpoch 16/500\n33/33 [==============================] - 0s 10ms/step - loss: 16.1996 - mae: 16.6833 - val_loss: 5.9122 - val_mae: 6.3946\nEpoch 17/500\n33/33 [==============================] - 0s 10ms/step - loss: 15.8567 - mae: 16.3415 - val_loss: 5.8466 - val_mae: 6.3273\nEpoch 18/500\n33/33 [==============================] - 0s 10ms/step - loss: 15.5420 - mae: 16.0290 - val_loss: 5.7965 - val_mae: 6.2760\nEpoch 19/500\n33/33 [==============================] - 0s 10ms/step - loss: 15.2402 - mae: 15.7266 - val_loss: 5.7590 - val_mae: 6.2378\nEpoch 20/500\n33/33 [==============================] - 0s 10ms/step - loss: 14.9521 - mae: 15.4378 - val_loss: 5.7191 - val_mae: 6.1963\nEpoch 21/500\n33/33 [==============================] - 0s 10ms/step - loss: 14.6896 - mae: 15.1723 - val_loss: 5.7028 - val_mae: 6.1786\nEpoch 22/500\n33/33 [==============================] - 0s 10ms/step - loss: 14.4497 - mae: 14.9270 - val_loss: 5.7120 - val_mae: 6.1868\nEpoch 23/500\n33/33 [==============================] - 0s 10ms/step - loss: 14.2400 - mae: 14.7165 - val_loss: 5.7473 - val_mae: 6.2220\nEpoch 24/500\n33/33 [==============================] - 0s 10ms/step - loss: 14.0576 - mae: 14.5349 - val_loss: 5.8241 - val_mae: 6.3028\nEpoch 25/500\n33/33 [==============================] - 0s 11ms/step - loss: 13.8906 - mae: 14.3707 - val_loss: 5.9584 - val_mae: 6.4395\nEpoch 26/500\n33/33 [==============================] - 0s 10ms/step - loss: 13.7340 - mae: 14.2164 - val_loss: 6.0587 - val_mae: 
6.5404\nEpoch 27/500\n33/33 [==============================] - 0s 10ms/step - loss: 13.5813 - mae: 14.0647 - val_loss: 6.1350 - val_mae: 6.6167\nEpoch 28/500\n33/33 [==============================] - 0s 11ms/step - loss: 13.4339 - mae: 13.9172 - val_loss: 6.2112 - val_mae: 6.6920\nEpoch 29/500\n33/33 [==============================] - 0s 10ms/step - loss: 13.2932 - mae: 13.7745 - val_loss: 6.3077 - val_mae: 6.7891\nEpoch 30/500\n33/33 [==============================] - 0s 10ms/step - loss: 13.1590 - mae: 13.6400 - val_loss: 6.4201 - val_mae: 6.9006\nEpoch 31/500\n33/33 [==============================] - 0s 10ms/step - loss: 13.0313 - mae: 13.5120 - val_loss: 6.5523 - val_mae: 7.0327\nEpoch 32/500\n33/33 [==============================] - 0s 11ms/step - loss: 12.9126 - mae: 13.3932 - val_loss: 6.6859 - val_mae: 7.1654\nEpoch 33/500\n33/33 [==============================] - 0s 10ms/step - loss: 12.7996 - mae: 13.2767 - val_loss: 6.8139 - val_mae: 7.2941\nEpoch 34/500\n33/33 [==============================] - 0s 11ms/step - loss: 12.6894 - mae: 13.1681 - val_loss: 6.8729 - val_mae: 7.3541\nEpoch 35/500\n33/33 [==============================] - 0s 10ms/step - loss: 12.5806 - mae: 13.0576 - val_loss: 6.8389 - val_mae: 7.3182\nEpoch 36/500\n33/33 [==============================] - 0s 10ms/step - loss: 12.4498 - mae: 12.9254 - val_loss: 6.7921 - val_mae: 7.2707\nEpoch 37/500\n33/33 [==============================] - 0s 10ms/step - loss: 12.3243 - mae: 12.8017 - val_loss: 6.7235 - val_mae: 7.2015\nEpoch 38/500\n33/33 [==============================] - 0s 10ms/step - loss: 12.2027 - mae: 12.6807 - val_loss: 6.6948 - val_mae: 7.1732\nEpoch 39/500\n33/33 [==============================] - 0s 10ms/step - loss: 12.0908 - mae: 12.5680 - val_loss: 6.6938 - val_mae: 7.1731\nEpoch 40/500\n33/33 [==============================] - 0s 10ms/step - loss: 11.9951 - mae: 12.4740 - val_loss: 6.7496 - val_mae: 7.2292\nEpoch 41/500\n33/33 [==============================] - 0s 14ms/step - loss: 11.9158 - mae: 12.3966 - val_loss: 6.8581 - val_mae: 7.3382\nEpoch 42/500\n33/33 [==============================] - 0s 10ms/step - loss: 11.8339 - mae: 12.3150 - val_loss: 6.9451 - val_mae: 7.4273\nEpoch 43/500\n33/33 [==============================] - 0s 10ms/step - loss: 11.7418 - mae: 12.2250 - val_loss: 6.9882 - val_mae: 7.4710\nEpoch 44/500\n33/33 [==============================] - 0s 10ms/step - loss: 11.6381 - mae: 12.1204 - val_loss: 6.9493 - val_mae: 7.4307\nEpoch 45/500\n33/33 [==============================] - 0s 10ms/step - loss: 11.5345 - mae: 12.0148 - val_loss: 6.8679 - val_mae: 7.3469\nEpoch 46/500\n33/33 [==============================] - 0s 10ms/step - loss: 11.4335 - mae: 11.9123 - val_loss: 6.7928 - val_mae: 7.2708\nEpoch 47/500\n33/33 [==============================] - 0s 10ms/step - loss: 11.3372 - mae: 11.8156 - val_loss: 6.7779 - val_mae: 7.2560\nEpoch 48/500\n33/33 [==============================] - 0s 12ms/step - loss: 11.2488 - mae: 11.7278 - val_loss: 6.8247 - val_mae: 7.3038\nEpoch 49/500\n33/33 [==============================] - 0s 10ms/step - loss: 11.1676 - mae: 11.6479 - val_loss: 6.9032 - val_mae: 7.3848\nEpoch 50/500\n33/33 [==============================] - 0s 10ms/step - loss: 11.0875 - mae: 11.5701 - val_loss: 6.9969 - val_mae: 7.4815\nEpoch 51/500\n33/33 [==============================] - 0s 11ms/step - loss: 11.0096 - mae: 11.4914 - val_loss: 7.0716 - val_mae: 7.5577\nEpoch 52/500\n33/33 [==============================] - 0s 11ms/step - loss: 10.9322 - mae: 11.4147 - val_loss: 7.0975 - 
val_mae: 7.5837\nEpoch 53/500\n33/33 [==============================] - 0s 10ms/step - loss: 10.8518 - mae: 11.3333 - val_loss: 7.0847 - val_mae: 7.5708\nEpoch 54/500\n33/33 [==============================] - 0s 10ms/step - loss: 10.7714 - mae: 11.2526 - val_loss: 7.0536 - val_mae: 7.5395\nEpoch 55/500\n33/33 [==============================] - 0s 10ms/step - loss: 10.6923 - mae: 11.1731 - val_loss: 7.0385 - val_mae: 7.5244\nEpoch 56/500\n33/33 [==============================] - 0s 10ms/step - loss: 10.6178 - mae: 11.0976 - val_loss: 7.0592 - val_mae: 7.5455\nEpoch 57/500\n33/33 [==============================] - 0s 10ms/step - loss: 10.5485 - mae: 11.0270 - val_loss: 7.1071 - val_mae: 7.5936\nEpoch 58/500\n33/33 [==============================] - 0s 10ms/step - loss: 10.4799 - mae: 10.9590 - val_loss: 7.1586 - val_mae: 7.6450\nEpoch 59/500\n33/33 [==============================] - 0s 9ms/step - loss: 10.4098 - mae: 10.8890 - val_loss: 7.2046 - val_mae: 7.6913\nEpoch 60/500\n33/33 [==============================] - 0s 10ms/step - loss: 10.3384 - mae: 10.8173 - val_loss: 7.2325 - val_mae: 7.7193\nEpoch 61/500\n33/33 [==============================] - 0s 10ms/step - loss: 10.2668 - mae: 10.7452 - val_loss: 7.2378 - val_mae: 7.7247\nEpoch 62/500\n33/33 [==============================] - 0s 11ms/step - loss: 10.1982 - mae: 10.6775 - val_loss: 7.2325 - val_mae: 7.7192\nEpoch 63/500\n33/33 [==============================] - 0s 11ms/step - loss: 10.1329 - mae: 10.6120 - val_loss: 7.2252 - val_mae: 7.7120\nEpoch 64/500\n33/33 [==============================] - 0s 11ms/step - loss: 10.0694 - mae: 10.5482 - val_loss: 7.2330 - val_mae: 7.7198\nEpoch 65/500\n33/33 [==============================] - 0s 10ms/step - loss: 10.0091 - mae: 10.4884 - val_loss: 7.2639 - val_mae: 7.7508\nEpoch 66/500\n33/33 [==============================] - 0s 10ms/step - loss: 9.9512 - mae: 10.4314 - val_loss: 7.3072 - val_mae: 7.7941\nEpoch 67/500\n33/33 [==============================] - 0s 10ms/step - loss: 9.8932 - mae: 10.3741 - val_loss: 7.3386 - val_mae: 7.8257\nEpoch 68/500\n33/33 [==============================] - 0s 11ms/step - loss: 9.8319 - mae: 10.3134 - val_loss: 7.3363 - val_mae: 7.8234\nEpoch 69/500\n33/33 [==============================] - 0s 11ms/step - loss: 9.7668 - mae: 10.2486 - val_loss: 7.3155 - val_mae: 7.8025\nEpoch 70/500\n33/33 [==============================] - 0s 11ms/step - loss: 9.7027 - mae: 10.1850 - val_loss: 7.2992 - val_mae: 7.7861\nEpoch 71/500\n33/33 [==============================] - 0s 10ms/step - loss: 9.6423 - mae: 10.1245 - val_loss: 7.3036 - val_mae: 7.7905\n"
],
[
"model = keras.models.load_model(\"my_checkpoint.h5\")",
"_____no_output_____"
],
[
"model.reset_states()\nrnn_forecast = model.predict(series[np.newaxis, :, np.newaxis])\nrnn_forecast = rnn_forecast[0, split_time - 1:-1, 0]",
"_____no_output_____"
],
[
"rnn_forecast.shape",
"_____no_output_____"
],
[
"plt.figure(figsize=(10, 6))\nplot_series(time_valid, x_valid)\nplot_series(time_valid, rnn_forecast)",
"_____no_output_____"
],
[
"keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy()",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a26ddf575dea91a0295972a04e0fb7541e22aab
| 5,652 |
ipynb
|
Jupyter Notebook
|
notebooks/Enrichment_analyses_human_phenotype_ontology.ipynb
|
tanghaibao/goatools
|
120448a0fc4dde024cb045a0061a537497c0780a
|
[
"BSD-2-Clause"
] | 477 |
2015-02-10T06:54:42.000Z
|
2022-03-15T12:36:11.000Z
|
notebooks/Enrichment_analyses_human_phenotype_ontology.ipynb
|
tanghaibao/goatools
|
120448a0fc4dde024cb045a0061a537497c0780a
|
[
"BSD-2-Clause"
] | 174 |
2015-02-05T18:11:14.000Z
|
2022-03-29T10:24:19.000Z
|
notebooks/Enrichment_analyses_human_phenotype_ontology.ipynb
|
tanghaibao/goatools
|
120448a0fc4dde024cb045a0061a537497c0780a
|
[
"BSD-2-Clause"
] | 202 |
2015-01-21T12:29:23.000Z
|
2022-03-01T13:26:05.000Z
| 24.362069 | 118 | 0.522293 |
[
[
[
"# Do enrichment analyses using the Human Phenotype Ontology (HPO)\nHPO files located in goatools/notebooks/data/hpo",
"_____no_output_____"
]
],
[
[
"fin_study = 'data/hpo/genes.list' # Study genes\nfin_pop = 'data/hpo/gobackground.list' # Population genes\nfin_obo = 'data/hpo/hp.obo' # DAG containing HPO terms\nfin_anno = 'data/hpo/hpo.annotation.tab' # Annotation of genes-to-HPO terms",
"_____no_output_____"
]
],
[
[
"## 1) Read list of study genes and population genes",
"_____no_output_____"
]
],
[
[
"from goatools.utils import read_geneset\n\nstudy_ids = read_geneset(fin_study)\npopulation_ids = read_geneset(fin_pop)",
" 2,252 READ: data/hpo/genes.list\n 14,446 READ: data/hpo/gobackground.list\n"
]
],
[
[
"## 2) Load the human phenotype ontology DAG",
"_____no_output_____"
]
],
[
[
"from goatools.obo_parser import GODag\n\ngodag = GODag(fin_obo)",
"data/hpo/hp.obo: fmt(1.2) rel(hp/2021-02-28) 19,498 Terms\n"
]
],
[
[
"## 3) Load the annotations of genes to sets of HPO terms",
"_____no_output_____"
]
],
[
[
"from goatools.anno.idtogos_reader import IdToGosReader\n\nannoobj = IdToGosReader(fin_anno, godag=godag)\n\nid2gos = annoobj.get_id2gos()",
"HMS:0:00:00.555350 187,934 annotations READ: data/hpo/hpo.annotation.tab \n4531 IDs in loaded association branch, human_phenotype\n"
]
],
[
[
"## 4) Run enrichment analysis on HPO terms",
"_____no_output_____"
]
],
[
[
"from goatools.go_enrichment import GOEnrichmentStudy\n\ngoeaobj = GOEnrichmentStudy(\n population_ids,\n annoobj.get_id2gos(),\n godag,\n methods=['bonferroni', 'fdr_bh'],\n pvalcalc='fisher_scipy_stats')",
"4531 IDs in loaded association branch, human_phenotype\n\nLoad Ontology Enrichment Analysis ...\nPropagating term counts up: is_a\n 26% 3,823 of 14,446 population items found in association\n"
]
],
[
[
"## 5) Run an enrichment analysis on HPO terms",
"_____no_output_____"
]
],
[
[
"results = goeaobj.run_study_nts(study_ids)",
"\nRuning Ontology Analysis: current study set of 2252 IDs.\n 29% 650 of 2,252 study items found in association\n100% 2,252 of 2,252 study items found in population(14446)\nCalculating 9,331 uncorrected p-values using fisher_scipy_stats\n"
]
],
[
[
"## 6) Print the results",
"_____no_output_____"
]
],
[
[
"print('namespace term_id e/p pval_uncorr Benjamimi/Hochberg Bonferroni study_ratio population_ratio')\nprint('--------------- -------- --- ----------- ------------------ ---------- ----------- ----------------')\npat = '{NS} {GO} {e} {PVAL:8.2e} {BH:8.2e} {BONF:8.2e} {RS:>12} {RP:>12}'\nfor ntd in sorted(results, key=lambda nt: [nt.p_uncorrected, nt.GO]):\n if ntd.p_fdr_bh < 0.05:\n print(pat.format(\n NS=ntd.NS,\n GO=ntd.GO,\n e=ntd.enrichment,\n RS='{}/{}'.format(*ntd.ratio_in_study),\n RP='{}/{}'.format(*ntd.ratio_in_pop),\n PVAL=ntd.p_uncorrected,\n BONF=ntd.p_bonferroni,\n BH=ntd.p_fdr_bh))\nprint('e: enriched')\nprint('p: purified')",
"_____no_output_____"
]
],
[
[
"Copyright (C) 2021-present, DV Klopfenstein and Haibao Tang. All rights reserved.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
4a26e4253b715d23f126b7ad8c055fa169130d40
| 1,030,087 |
ipynb
|
Jupyter Notebook
|
examples/get_concentration.ipynb
|
dmgav/PyXRF
|
225ef793e0e371557640e0d2a6de380bb9f4a557
|
[
"BSD-3-Clause"
] | 19 |
2016-05-25T21:40:41.000Z
|
2022-01-19T01:58:15.000Z
|
examples/get_concentration.ipynb
|
dmgav/PyXRF
|
225ef793e0e371557640e0d2a6de380bb9f4a557
|
[
"BSD-3-Clause"
] | 90 |
2016-01-11T17:22:05.000Z
|
2021-12-02T15:59:58.000Z
|
examples/get_concentration.ipynb
|
dmgav/PyXRF
|
225ef793e0e371557640e0d2a6de380bb9f4a557
|
[
"BSD-3-Clause"
] | 22 |
2016-10-16T17:19:19.000Z
|
2022-02-18T21:45:08.000Z
| 235.933807 | 198,533 | 0.880942 |
[
[
[
"%matplotlib notebook\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport h5py",
"_____no_output_____"
]
],
[
[
"# Get elemental map from txt file",
"_____no_output_____"
]
],
[
[
"# here we focus on element Ca",
"_____no_output_____"
],
[
"fpath_ca = 'output_txt_scan2D_48816/detsum_Ca_K_48816.txt'",
"_____no_output_____"
],
[
"d_ca = np.loadtxt(fpath_ca)",
"_____no_output_____"
],
[
"# path for ion chamber\nfpath_ic = 'output_txt_scan2D_48816/sclr1_ch4_48816.txt'",
"_____no_output_____"
],
[
"d_ic = np.loadtxt(fpath_ic)",
"_____no_output_____"
],
[
"# plot ic map\nfig, ax = plt.subplots()\nax.imshow(d_ic)",
"_____no_output_____"
],
[
"# normalized to ic\nd_ca_norm = d_ca / d_ic",
"_____no_output_____"
],
[
"# normalize to ic and plot\nfig, ax = plt.subplots()\nim = ax.imshow(d_ca_norm)\nfig.colorbar(im)",
"_____no_output_____"
]
],
[
[
"# Get elemental map from reference sample",
"_____no_output_____"
]
],
[
[
"# get elemental map from reference sample, also focus on Ca\nfpath_ref_ca = 'output_txt_scan2D_48706/detsum_Ca_K_48706.txt'\n\n# ion chamber value for reference\nfpath_ref_ic = 'output_txt_scan2D_48706/sclr1_ch4_48706.txt'",
"_____no_output_____"
],
[
"ca_ref = np.loadtxt(fpath_ref_ca)\nic_ref = np.loadtxt(fpath_ref_ic)",
"_____no_output_____"
],
[
"# normalize to ic\nca_ref_norm = ca_ref / ic_ref",
"_____no_output_____"
],
[
"# plot of reference sample\nfig, ax = plt.subplots()\nim = ax.imshow(ca_ref_norm)\nfig.colorbar(im)",
"_____no_output_____"
],
[
"# get mean\nca_ref_mean = np.mean(ca_ref_norm)",
"_____no_output_____"
],
[
"ca_ref_mean",
"_____no_output_____"
]
],
[
[
"# Calculate concentration",
"_____no_output_____"
]
],
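[
[
"The normalization used in the next cell, written out explicitly (added note): concentration_sample = (I_sample / IC_sample) / mean(I_ref / IC_ref) * concentration_ref, where I is the summed Ca K signal, IC is the ion-chamber reading used for normalization, and concentration_ref = 16.6 ug/cm2 is the known areal concentration of the reference sample.",
"_____no_output_____"
]
],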
[
[
"# normalize to reference sample to get concentration\nconcen_val = 16.6 # ug/cm2 for Ca\nca_concen = d_ca_norm / ca_ref_mean * concen_val",
"_____no_output_____"
],
[
"np.mean(ca_concen)",
"_____no_output_____"
],
[
"# plot of sample concentration\nfig, ax = plt.subplots()\nim = ax.imshow(ca_concen)\nfig.colorbar(im)",
"_____no_output_____"
]
],
[
[
"# We can also repeat the same process by using h5 file instead of txt file. This is probably much easier, as all the data can be saved to dictionary.",
"_____no_output_____"
],
[
"# Get elemental map from h5 file",
"_____no_output_____"
]
],
[
[
"fname = 'scan2D_48816.h5'\nwith h5py.File(fname, 'r') as f:\n d = f['xrfmap/detsum/counts'][:]\n fit_v = f['xrfmap/detsum/xrf_fit'][:]\n fit_name = f['xrfmap/detsum/xrf_fit_name'][:]\n scaler_v = f['xrfmap/scalers/val'][:]\n scaler_name = f['xrfmap/scalers/name'][:]",
"_____no_output_____"
],
[
"fit_name = [v.decode() for v in fit_name]\nscaler_name = [v.decode() for v in scaler_name]",
"_____no_output_____"
],
[
"d_fit = {}\nfor i, v in enumerate(fit_name):\n d_fit[str(v)] = fit_v[i, : ,:]\n \nd_scaler = {}\nfor i, v in enumerate(scaler_name):\n d_scaler[str(v)] = scaler_v[: ,:, i]",
"_____no_output_____"
],
[
"# dictionary\n\nd_fit.keys(), d_scaler.keys()",
"_____no_output_____"
],
[
"# plot normalized data\nfig, ax = plt.subplots()\nax.imshow(d_fit['Ca_K']/d_scaler['sclr1_ch4'])",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] |
[
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
4a26f47a2309fe5d2c62e4b44d6912094a4f9f70
| 800,566 |
ipynb
|
Jupyter Notebook
|
convolutionJupyter.ipynb
|
reverendX/repoEQ4
|
e06c110829848cd513c7eae54512e2dfc6081470
|
[
"MIT"
] | null | null | null |
convolutionJupyter.ipynb
|
reverendX/repoEQ4
|
e06c110829848cd513c7eae54512e2dfc6081470
|
[
"MIT"
] | null | null | null |
convolutionJupyter.ipynb
|
reverendX/repoEQ4
|
e06c110829848cd513c7eae54512e2dfc6081470
|
[
"MIT"
] | null | null | null | 383.412835 | 380,803 | 0.910851 |
[
[
[
"#Equipo 3\n#Lider/Supervisor: Emilio Gallegos A01066813\n#Participante: Josemaria Robledo Lara A01612376\n\n\n#Se importan las bibliotecas necesarias.\nimport os\nfrom scipy import signal, misc\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom skimage.color import rgb2gray\nfrom skimage import io ",
"_____no_output_____"
],
[
"#Se crea matriz de 4x4 y un kernel de 2x2\nMatriz = np.array([[1,1,1,1], [1,1,1,1], [0,0,0,0], [0,0,0,0]])\nKernel = np.array([[1,1], [-1,-1]])\nMatriz",
"_____no_output_____"
],
[
"#Se hace una operacion de convolucion entre la matriz de 4x4 y el kernel de 2x2\n\n#Se llama la función de convolución y se pasa la matriz de entrada.\n#Funcion de la libreria de scipy \ntemp = signal.convolve2d(Matriz, Kernel, mode='same') #Gracias al same se mantienen las dimensiones de la imagen de entrada \ntemp",
"_____no_output_____"
],
[
"#Función de la convolución para el filtrado de imágenes\ndef show_convolve2d(imagen, kernel):\n \n %matplotlib notebook\n plt.ion()\n \n imagen_list = []\n for d in range(3): #El 3 representa cada canal de la imagen.\n temp = signal.convolve2d(imagen[:,:,d] , kernel, boundary='symm',mode='same') #Imagen del mismo tamaño \n imagen_list.append(temp)\n\n imagen_filt = np.stack(imagen_list, axis=2)\n imagen_filt[imagen_filt > 255] = 255\n imagen_filt[imagen_filt < 0] = 0\n imagen_filt = imagen_filt.astype(\"uint8\")\n\n plt.subplot(1,2,1)\n io.imshow(imagen_filt) #Se muestra la imagen con el filtro ya aplicado.\n plt.axis('off')\n\n plt.subplot(1,2,2)\n io.imshow(imagen) #Se muestra la imagen original para futuras comparaciones.\n plt.axis('off')\n\n io.show()",
"_____no_output_____"
],
[
"#Se carga una imagen de prueba del directorio con dimensiones pequeñas\nfilename = os.path.join('repoEQ3/','prueba.jpg') \n#Se lee la carpeta que contiene la imagen prueba\nimagen = io.imread(filename)",
"_____no_output_____"
],
[
"#Filtro de Enfoque\nk=np.array([[0,-1,0],[-1,5,-1],[0,-1,0]])\nshow_convolve2d(imagen,k)\n#Se imprime",
"_____no_output_____"
],
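[
"#Added example (not in the original notebook): a Sobel-style edge-detection kernel,\n#shown only to illustrate that other kernels plug into the same show_convolve2d helper.\nk = np.array([[-1,0,1],[-2,0,2],[-1,0,1]])\nshow_convolve2d(imagen,k)",
"_____no_output_____"
],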
[
"#Filtro de Desenfoque o Filtro de Media\ntam = 5\nk = np.ones((tam,tam))/(tam**2)\nshow_convolve2d(imagen,k)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a271327310b1d8881ef2b3e855081a92bfff6c7
| 73,388 |
ipynb
|
Jupyter Notebook
|
Archieved FP/pkg_ta/scripts/Archieved/TEST_RAZOR/test_yaw.ipynb
|
fadamsyah/final-project
|
636aa6c5fbb84a1325b662c48b52b5065f4c16c5
|
[
"MIT"
] | 3 |
2020-07-24T16:06:12.000Z
|
2021-04-10T11:41:19.000Z
|
Archieved FP/pkg_ta/scripts/Archieved/TEST_RAZOR/test_yaw.ipynb
|
fadamsyah/final-project
|
636aa6c5fbb84a1325b662c48b52b5065f4c16c5
|
[
"MIT"
] | null | null | null |
Archieved FP/pkg_ta/scripts/Archieved/TEST_RAZOR/test_yaw.ipynb
|
fadamsyah/final-project
|
636aa6c5fbb84a1325b662c48b52b5065f4c16c5
|
[
"MIT"
] | 3 |
2021-04-10T11:41:21.000Z
|
2021-09-09T07:38:45.000Z
| 284.449612 | 23,284 | 0.925369 |
[
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nimport numba as nb\nimport rosbag\nimport pymap3d as pm",
"Failed to load Python extension for LZ4 support. LZ4 compression will not be available.\n"
],
[
"def wrap_angle(angle):\n return (angle + np.pi) % (2 * np.pi) - np.pi\n\[email protected]()\ndef to_euler(x, y, z, w):\n \"\"\"Dari Coursera: Return as xyz (roll pitch yaw) Euler angles.\"\"\"\n roll = np.arctan2(2 * (w * x + y * z), 1 - 2 * (x**2 + y**2))\n pitch = np.arcsin(2 * (w * y - z * x))\n yaw = np.arctan2(2 * (w * z + x * y), 1 - 2 * (y**2 + z**2))\n return np.array([roll, pitch, yaw])\n# Compile the to_euler\n_ = to_euler(1.5352300785980803e-15, -1.3393747145983517e-15, -0.7692164172827881, 0.638988343698562)\n\[email protected]\ndef to_euler_v2(x, y, z, w):\n yaw = np.arctan2(2.0*(y*z + w*x), w*w - x*x - y*y + z*z)\n pitch = np.arcsin(-2.0*(x*z - w*y));\n roll = np.arctan2(2.0*(x*y + w*z), w*w + x*x - y*y - z*z)\n return np.array([roll, pitch, yaw])\n# Compile the to_euler\n_ = to_euler_v2(1.5352300785980803e-15, -1.3393747145983517e-15, -0.7692164172827881, 0.638988343698562)\n\ndef calculate_s(wp_x, wp_y):\n s = np.zeros(wp_x.shape[0])\n for i in range(1, s.shape[0]):\n s[i] = s[i-1] + np.sqrt((wp_x[i] - wp_x[i-1])**2 + (wp_y[i] - wp_y[i-1])**2)\n return s",
"_____no_output_____"
],
[
"ls",
"TEST_RAZOR_2.bag TEST_RAZOR_DIROLL_DIPITCH_2.bag\nTEST_RAZOR_3.bag TEST_RAZOR_DIROLL_DIPITCH.bag\nTEST_RAZOR_4.bag TEST_RAZOR_LAST_TEST.bag\nTEST_RAZOR_5.bag test_yaw.ipynb\nTEST_RAZOR.bag\n"
],
[
"bag = rosbag.Bag('Data Razor/TEST_RAZOR_DIROLL_DIPITCH_2.bag')",
"_____no_output_____"
],
[
"imu_t = []\nimu_a = []\nimu_w = []\nimu_q = []\nimu_rpy = []\nfor topic, msg, _ in bag.read_messages(topics=['/imu']):\n imu_t.append(msg.header.stamp.to_sec())\n acc = msg.linear_acceleration\n imu_a.append([acc.x, acc.y, acc.z])\n acc = msg.angular_velocity\n imu_w.append([acc.x, acc.y, acc.z])\n q = msg.orientation\n imu_q.append([q.x, q.y, q.z, q.w])\n imu_rpy.append(to_euler(q.x, q.y, q.z, q.w))\n imu_rpy.append(to_euler(q.x, q.y, q.z, q.w))\nimu_t = np.array(imu_t)\nimu_a = np.array(imu_a)\nimu_w = np.array(imu_w)\nimu_q = np.array(imu_q)\nimu_rpy = np.array(imu_rpy)",
"_____no_output_____"
],
[
"yaw = []\nfor topic, msg, _ in bag.read_messages(topics=['/state_2d']):\n yaw.append(msg.yaw_imu)\nyaw = np.array(yaw)",
"_____no_output_____"
],
[
"plt.plot(imu_rpy[:,-1]*180/np.pi)\n# plt.ylim(-150,-100)",
"_____no_output_____"
],
[
"plt.plot(np.unwrap(imu_rpy[:,-1])*180/np.pi)\n# plt.ylim(-150,-100)",
"_____no_output_____"
],
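[
"# Added sketch (not part of the original log analysis): what np.unwrap does to a wrapped angle.\n# A synthetic yaw ramp is wrapped into [-pi, pi) with wrap_angle, producing 2*pi jumps,\n# and np.unwrap removes those jumps again.\nyaw_synth = np.linspace(0, 4 * np.pi, 200)\nyaw_wrapped = wrap_angle(yaw_synth)\nplt.figure()\nplt.plot(yaw_wrapped * 180 / np.pi, label='wrapped')\nplt.plot(np.unwrap(yaw_wrapped) * 180 / np.pi, label='unwrapped')\nplt.legend()",
"_____no_output_____"
],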
[
"plt.plot(imu_rpy[:,-1]*180/np.pi)\n# plt.ylim(-150,-100)",
"_____no_output_____"
]
]
] |
[
"code"
] |
[
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
4a2719fb8f2e8b5d4a3b4a9750fbc371b8689530
| 70,105 |
ipynb
|
Jupyter Notebook
|
11-Google-Cloud/02-Datalab-BigQuery.ipynb
|
rkothaka94/Business-Analytics
|
4a1580acb325937a2a57ae330d43503bf5aed82b
|
[
"MIT"
] | 34 |
2019-03-11T21:59:08.000Z
|
2022-03-31T09:46:02.000Z
|
11-Google-Cloud/02-Datalab-BigQuery.ipynb
|
rkothaka94/Business-Analytics
|
4a1580acb325937a2a57ae330d43503bf5aed82b
|
[
"MIT"
] | null | null | null |
11-Google-Cloud/02-Datalab-BigQuery.ipynb
|
rkothaka94/Business-Analytics
|
4a1580acb325937a2a57ae330d43503bf5aed82b
|
[
"MIT"
] | 37 |
2019-08-09T12:33:41.000Z
|
2022-03-22T00:07:14.000Z
| 297.055085 | 50,761 | 0.840339 |
[
[
[
"# Hello BigQuery\n\n[Google BigQuery](https://cloud.google.com/bigquery) is a fast, economical, and fully managed data warehouse for large-scale data analytics. You can use BigQuery and SQL to focus on transforming and gaining insights into your data. Datalab supports the [Standard SQL BigQuery](https://cloud.google.com/bigquery/docs/reference/standard-sql/).",
"_____no_output_____"
],
[
"This query works against the churn dataset within temp Dataset in is833-demo project. Run the next two cells to define and execute the query to see top 10 rows.",
"_____no_output_____"
]
],
[
[
"%%bq query -n customer_churn\nSELECT *\nFROM `is833-demo.temp.churn`\nLIMIT 10",
"_____no_output_____"
],
[
"%%bq query -n customer_churn\nSELECT PaymentMethod , COUNT(PaymentMethod) as count\nFROM `is833-demo.temp.churn`\nGROUP BY PaymentMethod\nORDER BY count DESC\nLIMIT 10",
"_____no_output_____"
],
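[
"%%bq query -n churn_by_payment\n-- Added example (assumes the table also has a Churn column with 'Yes'/'No' values)\nSELECT PaymentMethod, COUNTIF(Churn = 'Yes') AS churned, COUNT(*) AS total\nFROM `is833-demo.temp.churn`\nGROUP BY PaymentMethod\nORDER BY churned DESC",
"_____no_output_____"
],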
[
"%%chart pie --fields PaymentMethod,count --data customer_churn\ntitle: Payment method used by the customers\nheight: 400\nwidth: 800\npieStartAngle: 20\nslices:\n 0:\n offset: .2",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"If you're a BigQuery user, you've likely used the BigQuery console to issue queries and display results. Datalab also makes it simple to issue a SQL query and see the results.\n\nAs the these sample and tutorial notebooks illustrate, Datalab goes further, allowing you to issue multiple queries, visualize the data beyond tables, and ultimately build sophisticated pipelines to use your data productively, to the fullest extent.\n\nOne additional link - the [BigQuery SQL reference](https://cloud.google.com/bigquery/docs/reference/standard-sql/). This reference is also accessible at any time from the Help menu on the toolbar on the top of the page.",
"_____no_output_____"
]
]
] |
[
"markdown",
"code",
"markdown"
] |
[
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
4a272e9bece12a8ddac198d1101b948a7958968e
| 67,284 |
ipynb
|
Jupyter Notebook
|
pml1/figure_notebooks/chapter15_neural_networks_for_sequences_figures.ipynb
|
partev/pml-book
|
ead6c2b8786ed67f0f2baff0345f3324b7bb13cb
|
[
"MIT"
] | 1 |
2021-08-21T00:19:59.000Z
|
2021-08-21T00:19:59.000Z
|
pml1/figure_notebooks/chapter15_neural_networks_for_sequences_figures.ipynb
|
planktonfun/pml-book
|
8d4c4cd53a56482b0cae5637b82ae7abf256e8fc
|
[
"MIT"
] | null | null | null |
pml1/figure_notebooks/chapter15_neural_networks_for_sequences_figures.ipynb
|
planktonfun/pml-book
|
8d4c4cd53a56482b0cae5637b82ae7abf256e8fc
|
[
"MIT"
] | null | null | null | 33.457981 | 515 | 0.576244 |
[
[
[
"# Copyright 2021 Google LLC\n# Use of this source code is governed by an MIT-style\n# license that can be found in the LICENSE file or at\n# https://opensource.org/licenses/MIT.\n# Notebook authors: Kevin P. Murphy ([email protected])\n# and Mahmoud Soliman ([email protected])\n\n# This notebook reproduces figures for chapter 15 from the book\n# \"Probabilistic Machine Learning: An Introduction\"\n# by Kevin Murphy (MIT Press, 2021).\n# Book pdf is available from http://probml.ai",
"_____no_output_____"
]
],
[
[
"<a href=\"https://opensource.org/licenses/MIT\" target=\"_parent\"><img src=\"https://img.shields.io/github/license/probml/pyprobml\"/></a>",
"_____no_output_____"
],
[
"<a href=\"https://colab.research.google.com/github/probml/pml-book/blob/main/pml1/figure_notebooks/chapter15_neural_networks_for_sequences_figures.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
],
[
"## Figure 15.1:<a name='15.1'></a> <a name='rnn'></a> ",
"_____no_output_____"
],
[
"\n Recurrent neural network (RNN) for generating a variable length output sequence $\\mathbf y _ 1:T $ given an optional fixed length input vector $\\mathbf x $. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.1.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.2:<a name='15.2'></a> <a name='rnnTimeMachine'></a> ",
"_____no_output_____"
],
[
"\n Example output of length 500 generated from a character level RNN when given the prefix ``the''. We use greedy decoding, in which the most likely character at each step is computed, and then fed back into the model. The model is trained on the book \\em The Time Machine by H. G. Wells. ",
"_____no_output_____"
],
[
"To reproduce this figure, click the open in colab button: <a href=\"https://colab.research.google.com/github/probml/probml-notebooks/blob/master/notebooks-d2l/rnn_torch.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"## Figure 15.3:<a name='15.3'></a> <a name='imageCaptioning'></a> ",
"_____no_output_____"
],
[
"\n Illustration of a CNN-RNN model for image captioning. The pink boxes labeled ``LSTM'' refer to a specific kind of RNN that we discuss in \\cref sec:LSTM . The pink boxes labeled $W_ \\text emb $ refer to embedding matrices for the (sampled) one-hot tokens, so that the input to the model is a real-valued vector. From https://bit.ly/2FKnqHm . Used with kind permission of Yunjey Choi. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.3.pdf\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.4:<a name='15.4'></a> <a name='rnnBiPool'></a> ",
"_____no_output_____"
],
[
"\n (a) RNN for sequence classification. (b) Bi-directional RNN for sequence classification. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.4_A.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.4_B.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.5:<a name='15.5'></a> <a name='biRNN'></a> ",
"_____no_output_____"
],
[
"\n (a) RNN for transforming a sequence to another, aligned sequence. (b) Bi-directional RNN for the same task. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.5_A.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.5_B.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.6:<a name='15.6'></a> <a name='deepRNN'></a> ",
"_____no_output_____"
],
[
"\n Illustration of a deep RNN. Adapted from Figure 9.3.1 of <a href='#dive'>[Zha+20]</a> . ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.6.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.7:<a name='15.7'></a> <a name='seq2seq'></a> ",
"_____no_output_____"
],
[
"\n Encoder-decoder RNN architecture for mapping sequence $\\mathbf x _ 1:T $ to sequence $\\mathbf y _ 1:T' $. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.7.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.8:<a name='15.8'></a> <a name='NMT'></a> ",
"_____no_output_____"
],
[
"\n (a) Illustration of a seq2seq model for translating English to French. The - character represents the end of a sentence. From Figure 2.4 of <a href='#Luong2016thesis'>[Luo16]</a> . Used with kind permission of Minh-Thang Luong. (b) Illustration of greedy decoding. The most likely French word at each step is highlighted in green, and then fed in as input to the next step of the decoder. From Figure 2.5 of <a href='#Luong2016thesis'>[Luo16]</a> . Used with kind permission of Minh-Thang Luong. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.8_A.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.8_B.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.9:<a name='15.9'></a> <a name='BPTT'></a> ",
"_____no_output_____"
],
[
"\n An RNN unrolled (vertically) for 3 time steps, with the target output sequence and loss node shown explicitly. From Figure 8.7.2 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of Aston Zhang. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.9.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.10:<a name='15.10'></a> <a name='GRU'></a> ",
"_____no_output_____"
],
[
"\n Illustration of a GRU. Adapted from Figure 9.1.3 of <a href='#dive'>[Zha+20]</a> . ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.10.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.11:<a name='15.11'></a> <a name='LSTM'></a> ",
"_____no_output_____"
],
[
"\n Illustration of an LSTM. Adapted from Figure 9.2.4 of <a href='#dive'>[Zha+20]</a> . ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.11.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.12:<a name='15.12'></a> <a name='stsProb'></a> ",
"_____no_output_____"
],
[
"\n Conditional probabilities of generating each token at each step for two different sequences. From Figures 9.8.1--9.8.2 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of Aston Zhang. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.12_A.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.12_B.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.13:<a name='15.13'></a> <a name='beamSearch'></a> ",
"_____no_output_____"
],
[
"\n Illustration of beam search using a beam of size $K=2$. The vocabulary is $\\mathcal Y = \\ A,B,C,D,E\\ $, with size $V=5$. We assume the top 2 symbols at step 1 are A,C. At step 2, we evaluate $p(y_1=A,y_2=y)$ and $p(y_1=C,y_2=y)$ for each $y \\in \\mathcal Y $. This takes $O(K V)$ time. We then pick the top 2 partial paths, which are $(y_1=A,y_2=B)$ and $(y_1=C,y_2=E)$, and continue in the obvious way. Adapted from Figure 9.8.3 of <a href='#dive'>[Zha+20]</a> . ",
"_____no_output_____"
]
],
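[
[
"*Added illustration (not part of the book's figure code):* a tiny NumPy beam search with $K=2$ over a 5-token vocabulary, mirroring the caption. The per-step probability table is made up, and for simplicity it ignores the prefix dependence a real decoder would have.",
"_____no_output_____"
]
],
[
[
"# toy beam search; the probability table below is hypothetical\nimport numpy as np\n\nvocab = ['A', 'B', 'C', 'D', 'E']\np = np.array([[0.5, 0.1, 0.3, 0.05, 0.05],\n              [0.1, 0.4, 0.2, 0.2, 0.1],\n              [0.3, 0.3, 0.2, 0.1, 0.1]])  # p[t, i]: prob of token i at step t (prefix-independent simplification)\nK = 2\nbeams = [((), 0.0)]  # (partial sequence, log-probability)\nfor t in range(p.shape[0]):\n    candidates = [(seq + (v,), lp + np.log(p[t, i]))\n                  for seq, lp in beams for i, v in enumerate(vocab)]\n    beams = sorted(candidates, key=lambda c: c[1], reverse=True)[:K]  # keep the K best partial paths\nprint(beams)",
"_____no_output_____"
]
],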
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.13.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.14:<a name='15.14'></a> <a name='textCNN'></a> ",
"_____no_output_____"
],
[
"\n Illustration of the TextCNN model for binary sentiment classification. Adapted from Figure 15.3.5 of <a href='#dive'>[Zha+20]</a> . ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.14.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.15:<a name='15.15'></a> <a name='wavenet'></a> ",
"_____no_output_____"
],
[
"\n Illustration of the wavenet model using dilated (atrous) convolutions, with dilation factors of 1, 2, 4 and 8. From Figure 3 of <a href='#wavenet'>[Aar+16]</a> . Used with kind permission of Aaron van den Oord. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.15.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.16:<a name='15.16'></a> <a name='attention'></a> ",
"_____no_output_____"
],
[
"\n Attention computes a weighted average of a set of values, where the weights are derived by comparing the query vector to a set of keys. From Figure 10.3.1 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of Aston Zhang. ",
"_____no_output_____"
]
],
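[
[
"*Added sketch (not from the book's figure code):* a minimal NumPy version of the weighted averaging described in the caption; all shapes and values are made up.",
"_____no_output_____"
]
],
[
[
"# scaled dot-product attention for a single query (illustrative values only)\nimport numpy as np\n\nrng = np.random.default_rng(0)\nd = 4\nquery = rng.normal(size=d)\nkeys = rng.normal(size=(5, d))    # 5 key vectors\nvalues = rng.normal(size=(5, 3))  # a value vector for each key\nscores = keys @ query / np.sqrt(d)         # compare the query to every key\nweights = np.exp(scores - scores.max())\nweights = weights / weights.sum()          # softmax over the keys\noutput = weights @ values                  # weighted average of the values\nprint(weights.round(2), output.round(2))",
"_____no_output_____"
]
],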
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.16.pdf\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.17:<a name='15.17'></a> <a name='attenRegression'></a> ",
"_____no_output_____"
],
[
"\n Kernel regression in 1d. (a) Kernel weight matrix. (b) Resulting predictions on a dense grid of test points. ",
"_____no_output_____"
],
[
"To reproduce this figure, click the open in colab button: <a href=\"https://colab.research.google.com/github/probml/probml-notebooks/blob/master/notebooks/kernel_regression_attention.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
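[
[
"*Added sketch (not the notebook linked above):* a few lines of Nadaraya-Watson kernel regression in 1d; the data-generating function and noise level are made up for illustration.",
"_____no_output_____"
]
],
[
[
"# 1d kernel (attention-weighted) regression on synthetic data\nimport numpy as np\n\nrng = np.random.default_rng(0)\nx_train = np.sort(rng.uniform(0, 5, size=50))\ny_train = 2 * np.sin(x_train) + x_train ** 0.8 + 0.5 * rng.normal(size=50)\nx_test = np.linspace(0, 5, 100)\n\n# Gaussian kernel weights: each test point attends to nearby training points\nattn = np.exp(-0.5 * (x_test[:, None] - x_train[None, :]) ** 2)\nattn = attn / attn.sum(axis=1, keepdims=True)\ny_hat = attn @ y_train\nprint(y_hat.shape)",
"_____no_output_____"
]
],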
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.17_A.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.17_B.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.18:<a name='15.18'></a> <a name='seq2seqAttn'></a> ",
"_____no_output_____"
],
[
"\n Illustration of seq2seq with attention for English to French translation. Used with kind permission of Minh-Thang Luong. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.18.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.19:<a name='15.19'></a> <a name='translationHeatmap'></a> ",
"_____no_output_____"
],
[
"\n Illustration of the attention heatmaps generated while translating two sentences from Spanish to English. (a) Input is ``hace mucho frio aqui.'', output is ``it is very cold here.''. (b) Input is ``¿todavia estan en casa?'', output is ``are you still at home?''. Note that when generating the output token ``home'', the model should attend to the input token ``casa'', but in fact it seems to attend to the input token ``?''. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.19_A.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.19_B.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.20:<a name='15.20'></a> <a name='EHR'></a> ",
"_____no_output_____"
],
[
"\n Example of an electronic health record. In this example, 24h after admission to the hospital, the RNN classifier predicts the risk of death as 19.9\\%; the patient ultimately died 10 days after admission. The ``relevant'' keywords from the input clinical notes are shown in red, as identified by an attention mechanism. From Figure 3 of <a href='#Rajkomar2018'>[Alv+18]</a> . Used with kind permission of Alvin Rakomar. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.20.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.21:<a name='15.21'></a> <a name='SNLI'></a> ",
"_____no_output_____"
],
[
"\n Illustration of sentence pair entailment classification using an MLP with attention to align the premise (``I do need sleep'') with the hypothesis (``I am tired''). White squares denote active attention weights, blue squares are inactive. (We are assuming hard 0/1 attention for simplicity.) From Figure 15.5.2 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of Aston Zhang. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.21.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.22:<a name='15.22'></a> <a name='showAttendTell'></a> ",
"_____no_output_____"
],
[
"\n Image captioning using attention. (a) Soft attention. Generates ``a woman is throwing a frisbee in a park''. (b) Hard attention. Generates ``a man and a woman playing frisbee in a field''. From Figure 6 of <a href='#showAttendTell'>[Kel+15]</a> . Used with kind permission of Kelvin Xu. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.22_A.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.22_B.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.23:<a name='15.23'></a> <a name='transformerTranslation'></a> ",
"_____no_output_____"
],
[
"\n Illustration of how encoder self-attention for the word ``it'' differs depending on the input context. From https://ai.googleblog.com/2017/08/transformer-novel-neural-network.html . Used with kind permission of Jakob Uszkoreit. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.23.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.24:<a name='15.24'></a> <a name='multiHeadAttn'></a> ",
"_____no_output_____"
],
[
"\n Multi-head attention. Adapted from Figure 9.3.3 of <a href='#dive'>[Zha+20]</a> . ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.24.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.25:<a name='15.25'></a> <a name='positionalEncodingSinusoids'></a> ",
"_____no_output_____"
],
[
"\n (a) Positional encoding matrix for a sequence of length $n=60$ and an embedding dimension of size $d=32$. (b) Basis functions for columsn 6 to 9. ",
"_____no_output_____"
],
[
"To reproduce this figure, click the open in colab button: <a href=\"https://colab.research.google.com/github/probml/probml-notebooks/blob/master/notebooks-d2l/positional_encoding.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
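[
[
"*Added sketch (not the notebook linked above):* constructing the sinusoidal positional encoding matrix with the $n=60$, $d=32$ sizes mentioned in the caption.",
"_____no_output_____"
]
],
[
[
"# sinusoidal positional encodings: even columns use sin, odd columns use cos\nimport numpy as np\n\nn, d = 60, 32\npos = np.arange(n)[:, None]\ni = np.arange(0, d, 2)[None, :]\nangles = pos / np.power(10000, i / d)\nP = np.zeros((n, d))\nP[:, 0::2] = np.sin(angles)\nP[:, 1::2] = np.cos(angles)\nprint(P.shape)",
"_____no_output_____"
]
],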
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.25_A.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.25_B.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.26:<a name='15.26'></a> <a name='transformer'></a> ",
"_____no_output_____"
],
[
"\n The transformer. From <a href='#Weng2018attention'>[Lil18]</a> . Used with kind permission of Lilian Weng. Adapted from Figures 1--2 of <a href='#Vaswani2017'>[Ash+17]</a> . ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.26.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.27:<a name='15.27'></a> <a name='attentionBakeoff'></a> ",
"_____no_output_____"
],
[
"\n Comparison of (1d) CNNs, RNNs and self-attention models. From Figure 10.6.1 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of Aston Zhang. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.27.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.28:<a name='15.28'></a> <a name='VIT'></a> ",
"_____no_output_____"
],
[
"\n The Vision Transformer (ViT) model. This treats an image as a set of input patches. The input is prepended with the special CLASS embedding vector (denoted by *) in location 0. The class label for the image is derived by applying softmax to the final ouput encoding at location 0. From Figure 1 of <a href='#ViT'>[Ale+21]</a> . Used with kind permission of Alexey Dosovitskiy ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.28.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.29:<a name='15.29'></a> <a name='transformers_taxonomy'></a> ",
"_____no_output_____"
],
[
"\n Venn diagram presenting the taxonomy of different efficient transformer architectures. From <a href='#Tay2020transformers'>[Yi+20]</a> . Used with kind permission of Yi Tay. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.29.pdf\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.30:<a name='15.30'></a> <a name='rand_for_fast_atten'></a> ",
"_____no_output_____"
],
[
"\n Attention matrix $\\mathbf A $ rewritten as a product of two lower rank matrices $\\mathbf Q ^ \\prime $ and $(\\mathbf K ^ \\prime )^ \\mkern -1.5mu\\mathsf T $ with random feature maps $\\boldsymbol \\phi (\\mathbf q _i) \\in \\mathbb R ^M$ and $\\boldsymbol \\phi (\\mathbf v _k) \\in \\mathbb R ^M$ for the corresponding queries/keys stored in the rows/columns. Used with kind permission of Krzysztof Choromanski. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.30.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.31:<a name='15.31'></a> <a name='fatten'></a> ",
"_____no_output_____"
],
[
"\n Decomposition of the attention matrix $\\mathbf A $ can be leveraged to improve attention computations via matrix associativity property. To compute $\\mathbf AV $, we first calculate $\\mathbf G =(\\mathbf k ^ \\prime )^ \\mkern -1.5mu\\mathsf T \\mathbf V $ and then $\\mathbf q ^ \\prime \\mathbf G $, resulting in linear in $N$ space and time complexity. Used with kind permission of Krzysztof Choromanski. ",
"_____no_output_____"
]
],
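[
[
"*Added numerical check (not from the book's figure code):* the two orderings $(\mathbf{Q}'(\mathbf{K}')^{\mathsf{T}})\mathbf{V}$ and $\mathbf{Q}'((\mathbf{K}')^{\mathsf{T}}\mathbf{V})$ give the same result, but the second never materializes the $N \times N$ matrix; the sizes below are made up.",
"_____no_output_____"
]
],
[
[
"# associativity trick behind linear-time attention approximations\nimport numpy as np\n\nrng = np.random.default_rng(0)\nN, M, d_v = 6, 4, 3\nQp = np.abs(rng.normal(size=(N, M)))   # rows: nonnegative feature maps of the queries\nKp = np.abs(rng.normal(size=(N, M)))   # rows: nonnegative feature maps of the keys\nV = rng.normal(size=(N, d_v))\n\nout_quadratic = (Qp @ Kp.T) @ V        # builds the N x N matrix explicitly\nout_linear = Qp @ (Kp.T @ V)           # only an M x d_v intermediate\nprint(np.allclose(out_quadratic, out_linear))",
"_____no_output_____"
]
],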
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.31.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.32:<a name='15.32'></a> <a name='elmo'></a> ",
"_____no_output_____"
],
[
"\n Illustration of ELMo bidrectional language model. Here $y_t=x_ t+1 $ when acting as the target for the forwards LSTM, and $y_t = x_ t-1 $ for the backwards LSTM. (We add \\text \\em bos \\xspace and \\text \\em eos \\xspace sentinels to handle the edge cases.) From <a href='#Weng2019LM'>[Lil19]</a> . Used with kind permission of Lilian Weng. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.32.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.33:<a name='15.33'></a> <a name='GPT'></a> ",
"_____no_output_____"
],
[
"\n Illustration of (a) BERT and (b) GPT. $E_t$ is the embedding vector for the input token at location $t$, and $T_t$ is the output target to be predicted. From Figure 3 of <a href='#bert'>[Jac+19]</a> . Used with kind permission of Ming-Wei Chang. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.33_A.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.33_B.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.34:<a name='15.34'></a> <a name='bertEmbedding'></a> ",
"_____no_output_____"
],
[
"\n Illustration of how a pair of input sequences, denoted A and B, are encoded before feeding to BERT. From Figure 14.8.2 of <a href='#dive'>[Zha+20]</a> . Used with kind permission of Aston Zhang. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.34.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.35:<a name='15.35'></a> <a name='bert-tasks'></a> ",
"_____no_output_____"
],
[
"\n Illustration of how BERT can be used for different kinds of supervised NLP tasks. (a) Single sentence classification (e.g., sentiment analysis); (b) Sentence-pair classification (e.g., textual entailment); (d) Single sentence tagging (e.g., shallow parsing); (d) Question answering. From Figure 4 of <a href='#bert'>[Jac+19]</a> . Used with kind permission of Ming-Wei Chang. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.35_A.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.35_B.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.35_C.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.35_D.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## Figure 15.36:<a name='15.36'></a> <a name='T5'></a> ",
"_____no_output_____"
],
[
"\n Illustration of how the T5 model (``Text-to-text Transfer Transformer'') can be used to perform multiple NLP tasks, such as translating English to German; determining if a sentence is linguistic valid or not ( \\bf CoLA stands for ``Corpus of Linguistic Acceptability''); determining the degree of semantic similarity ( \\bf STSB stands for ``Semantic Textual Similarity Benchmark''); and abstractive summarization. From Figure 1 of <a href='#T5'>[Col+19]</a> . Used with kind permission of Colin Raffel. ",
"_____no_output_____"
]
],
[
[
"#@title Click me to run setup { display-mode: \"form\" }\ntry:\n if PYPROBML_SETUP_ALREADY_RUN:\n print('skipping setup')\nexcept:\n PYPROBML_SETUP_ALREADY_RUN = True\n print('running setup...')\n !git clone https://github.com/probml/pyprobml /pyprobml &> /dev/null \n %cd -q /pyprobml/scripts\n import pyprobml_utils as pml\n import colab_utils\n import os\n os.environ[\"PYPROBML\"] = \"..\" # one above current scripts directory\n import google.colab \n from google.colab.patches import cv2_imshow\n %reload_ext autoreload \n %autoreload 2\n def show_image(img_path,size=None,ratio=None):\n img = colab_utils.image_resize(img_path, size)\n cv2_imshow(img)\n print('finished!')",
"_____no_output_____"
]
],
[
[
"<img src=\"https://github.com/probml/pml-book/raw/main/pml1/figures/Figure_15.36.png\" width=\"256\"/>",
"_____no_output_____"
],
[
"## References:\n <a name='wavenet'>[Aar+16]</a> V. Aaron, D. Sander, Z. Heiga, S. Karen, V. Oriol, G. Alex, K. Nal, S. Andrew and K. Koray. \"WaveNet: A Generative Model for Raw Audio\". abs/1609.03499 (2016). arXiv: 1609.03499 \n\n<a name='ViT'>[Ale+21]</a> D. Alexey, B. Lucas, K. A. Dirk, Z. Xiaohua, U. T. Mostafa, M. Matthias, H. G. Sylvain, U. Jakob and H. Neil. \"An Image is Worth 16x16 Words: Transformers for ImageRecognition at Scale\". (2021). \n\n<a name='Rajkomar2018'>[Alv+18]</a> R. Alvin, O. Eyal, C. Kai, D. A. Nissan, H. Michaela, L. PeterJ, L. LiuXiaobing, M. Jake, S. Mimi, S. Patrik, Y. Hector, Z. Kun, Z. Yi, F. Gerardo, D. GavinE, I. Jamie, L. Quoc, L. K. Alexander, T. Justin, W. De, W. James, W. Jimbo, L. Dana, V. L, C. Katherine, P. Michael, M. MadabushiSrinivasan, S. NigamH, B. AtulJ, H. D, C. Claire, C. GregS and D. Jeffrey. \"Scalable and accurate deep learning with electronic healthrecords\". In: NPJ Digit Med (2018). \n\n<a name='Vaswani2017'>[Ash+17]</a> V. Ashish, S. Noam, P. Niki, U. Jakob, J. Llion, G. AidanN, K. KaiserLukasz and P. Illia. \"Attention Is All You Need\". (2017). \n\n<a name='T5'>[Col+19]</a> R. Colin, S. Noam, R. Adam, L. LeeKatherine, N. Sharan, M. Michael, Z. ZhouYanqi, L. Wei and L. PeterJ. \"Exploring the Limits of Transfer Learning with a UnifiedText-to-Text Transformer\". abs/1910.10683 (2019). arXiv: 1910.10683 \n\n<a name='bert'>[Jac+19]</a> D. Jacob, C. Ming-Wei, L. Kenton and T. ToutanovaKristina. \"BERT: Pre-training of Deep Bidirectional Transformers forLanguage Understanding\". (2019). \n\n<a name='showAttendTell'>[Kel+15]</a> X. Kelvin, B. JimmyLei, K. Ryan, C. K. Aaron, S. Ruslan, Z. S and B. Yoshua. \"Show, Attend and Tell: Neural Image Caption Generation withVisual Attention\". (2015). \n\n<a name='Weng2018attention'>[Lil18]</a> W. Lilian \"Attention? Attention!\". In: lilianweng.github.io/lil-log (2018). \n\n<a name='Weng2019LM'>[Lil19]</a> W. Lilian \"Generalized Language Models\". In: lilianweng.github.io/lil-log (2019). \n\n<a name='Luong2016thesis'>[Luo16]</a> M. Luong \"Neural machine translation\". (2016). \n\n<a name='Tay2020transformers'>[Yi+20]</a> T. Yi, D. Mostafa, B. Dara and M. MetzlerDonald. \"Efficient Transformers: A Survey\". abs/2009.06732 (2020). arXiv: 2009.06732 \n\n<a name='dive'>[Zha+20]</a> A. Zhang, Z. Lipton, M. Li and A. Smola. \"Dive into deep learning\". (2020). \n\n",
"_____no_output_____"
]
]
] |
[
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] |
[
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
4a27301a70c4f5e3b297c9b203fe360c0e2540f6
| 36,631 |
ipynb
|
Jupyter Notebook
|
Demos/module-6.ipynb
|
VasuGoel/transact-sql
|
c2f43006fbf63c8af94e80a9dbe4091d76ce9f15
|
[
"MIT"
] | null | null | null |
Demos/module-6.ipynb
|
VasuGoel/transact-sql
|
c2f43006fbf63c8af94e80a9dbe4091d76ce9f15
|
[
"MIT"
] | null | null | null |
Demos/module-6.ipynb
|
VasuGoel/transact-sql
|
c2f43006fbf63c8af94e80a9dbe4091d76ce9f15
|
[
"MIT"
] | null | null | null | 54.028024 | 2,397 | 0.262182 |
[
[
[
"empty"
]
]
] |
[
"empty"
] |
[
[
"empty"
]
] |