Each record below has the fields: markdown, code, output, license, path, repo_name.
When taking routes into consideration, the __"Western"__ Subdivision's route 00500 has issued the most street sweeping citations. Is route 00500 larger than other street sweeping routes?
top_3_routes = df_citations.groupby(['agency', 'route'])\
                           .size()\
                           .nlargest(3)\
                           .sort_index()\
                           .rename('num_citations')\
                           .reset_index()\
                           .sort_values(by='num_citations', ascending=False)
top_3_routes.agency = ["DOT-WESTERN", "DOT-SOUTHERN", "DOT-CENTRAL"]

data = top_3_routes.set_index(['agency', 'route'])
data.plot(kind='barh', stacked=True, figsize=(12, 6), legend=None)
plt.title("Agency-Route ID's with the most Street Sweeping Citations")
plt.ylabel('')
plt.xlabel('# of Citations (in thousands)')
plt.xticks(np.arange(0, 70_001, 10_000), [str(i) for i in np.arange(0, 71, 10)]);

# Strip the seconds and colons from issue_time and store it as an integer (e.g. "08:30:00" -> 830).
df_citations['issue_time_num'] = df_citations.issue_time.str.replace(":00", '')
df_citations['issue_time_num'] = df_citations.issue_time_num.str.replace(':', '').astype(int)
_____no_output_____
MIT
MVP.ipynb
Promeos/LADOT-Street-Sweeping-Transition-Pan
What is the weekly distribution of citation times?
sns.set_context('talk') plt.figure(figsize=(13, 12)) sns.boxplot(data=df_citations, x="day_of_week", y="issue_time_num", order=["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"], whis=3); plt.title("Distribution Citation Issue Times Throughout the Week") plt.xlabel('') plt.ylabel('Issue Time (24HR)') plt.yticks(np.arange(0, 2401, 200), [str(i) + ":00" for i in range(0, 25, 2)]);
_____no_output_____
MIT
MVP.ipynb
Promeos/LADOT-Street-Sweeping-Transition-Pan
New to Plotly? Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/). You can set up Plotly to work in [online](https://plot.ly/python/getting-started/initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/start-plotting-online). We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! Version Check: Plotly's Python package is updated frequently. Run `pip install plotly --upgrade` to use the latest version.
import plotly plotly.__version__
_____no_output_____
CC-BY-3.0
_posts/python/style/colorscales/colorscales.ipynb
ayulockin/documentation
Custom Discretized Heatmap Colorscale
import plotly.plotly as py py.iplot([{ 'z': [ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] ], 'type': 'heatmap', 'colorscale': [ # Let first 10% (0.1) of the values have color rgb(0, 0, 0) [0, 'rgb(0, 0, 0)'], [0.1, 'rgb(0, 0, 0)'], # Let values between 10-20% of the min and max of z # have color rgb(20, 20, 20) [0.1, 'rgb(20, 20, 20)'], [0.2, 'rgb(20, 20, 20)'], # Values between 20-30% of the min and max of z # have color rgb(40, 40, 40) [0.2, 'rgb(40, 40, 40)'], [0.3, 'rgb(40, 40, 40)'], [0.3, 'rgb(60, 60, 60)'], [0.4, 'rgb(60, 60, 60)'], [0.4, 'rgb(80, 80, 80)'], [0.5, 'rgb(80, 80, 80)'], [0.5, 'rgb(100, 100, 100)'], [0.6, 'rgb(100, 100, 100)'], [0.6, 'rgb(120, 120, 120)'], [0.7, 'rgb(120, 120, 120)'], [0.7, 'rgb(140, 140, 140)'], [0.8, 'rgb(140, 140, 140)'], [0.8, 'rgb(160, 160, 160)'], [0.9, 'rgb(160, 160, 160)'], [0.9, 'rgb(180, 180, 180)'], [1.0, 'rgb(180, 180, 180)'] ], 'colorbar': { 'tick0': 0, 'dtick': 1 } }], filename='heatmap-discrete-colorscale')
_____no_output_____
CC-BY-3.0
_posts/python/style/colorscales/colorscales.ipynb
ayulockin/documentation
Colorscale for Scatter Plots
import plotly.plotly as py import plotly.graph_objs as go data = [ go.Scatter( y=[5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5], marker=dict( size=16, cmax=39, cmin=0, color=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39], colorbar=dict( title='Colorbar' ), colorscale='Viridis' ), mode='markers') ] fig = go.Figure(data=data) py.iplot(fig)
_____no_output_____
CC-BY-3.0
_posts/python/style/colorscales/colorscales.ipynb
ayulockin/documentation
Colorscale for Contour Plot
import plotly.plotly as py import plotly.graph_objs as go data = [ go.Contour( z=[[10, 10.625, 12.5, 15.625, 20], [5.625, 6.25, 8.125, 11.25, 15.625], [2.5, 3.125, 5., 8.125, 12.5], [0.625, 1.25, 3.125, 6.25, 10.625], [0, 0.625, 2.5, 5.625, 10]], colorscale='Jet', ) ] py.iplot(data, filename='simple-colorscales-colorscale')
_____no_output_____
CC-BY-3.0
_posts/python/style/colorscales/colorscales.ipynb
ayulockin/documentation
Custom Heatmap Colorscale
import plotly.plotly as py import plotly.graph_objs as go import six.moves.urllib import json response = six.moves.urllib.request.urlopen('https://raw.githubusercontent.com/plotly/datasets/master/custom_heatmap_colorscale.json') dataset = json.load(response) data = [ go.Heatmap( z=dataset['z'], colorscale=[[0.0, 'rgb(165,0,38)'], [0.1111111111111111, 'rgb(215,48,39)'], [0.2222222222222222, 'rgb(244,109,67)'], [0.3333333333333333, 'rgb(253,174,97)'], [0.4444444444444444, 'rgb(254,224,144)'], [0.5555555555555556, 'rgb(224,243,248)'], [0.6666666666666666, 'rgb(171,217,233)'], [0.7777777777777778, 'rgb(116,173,209)'], [0.8888888888888888, 'rgb(69,117,180)'], [1.0, 'rgb(49,54,149)']] ) ] py.iplot(data, filename='custom-colorscale')
_____no_output_____
CC-BY-3.0
_posts/python/style/colorscales/colorscales.ipynb
ayulockin/documentation
Custom Contour Plot Colorscale
import plotly.plotly as py import plotly.graph_objs as go data = [ go.Contour( z=[[10, 10.625, 12.5, 15.625, 20], [5.625, 6.25, 8.125, 11.25, 15.625], [2.5, 3.125, 5., 8.125, 12.5], [0.625, 1.25, 3.125, 6.25, 10.625], [0, 0.625, 2.5, 5.625, 10]], colorscale=[[0, 'rgb(166,206,227)'], [0.25, 'rgb(31,120,180)'], [0.45, 'rgb(178,223,138)'], [0.65, 'rgb(51,160,44)'], [0.85, 'rgb(251,154,153)'], [1, 'rgb(227,26,28)']], ) ] py.iplot(data, filename='colorscales-custom-colorscale')
_____no_output_____
CC-BY-3.0
_posts/python/style/colorscales/colorscales.ipynb
ayulockin/documentation
Custom Colorbar
import plotly.plotly as py import plotly.graph_objs as go import six.moves.urllib import json response = six.moves.urllib.request.urlopen('https://raw.githubusercontent.com/plotly/datasets/master/custom_heatmap_colorscale.json') dataset = json.load(response) data = [ go.Heatmap( z=dataset['z'], colorscale=[[0.0, 'rgb(165,0,38)'], [0.1111111111111111, 'rgb(215,48,39)'], [0.2222222222222222, 'rgb(244,109,67)'], [0.3333333333333333, 'rgb(253,174,97)'], [0.4444444444444444, 'rgb(254,224,144)'], [0.5555555555555556, 'rgb(224,243,248)'], [0.6666666666666666, 'rgb(171,217,233)'],[0.7777777777777778, 'rgb(116,173,209)'], [0.8888888888888888, 'rgb(69,117,180)'], [1.0, 'rgb(49,54,149)']], colorbar = dict( title = 'Surface Heat', titleside = 'top', tickmode = 'array', tickvals = [2,50,100], ticktext = ['Hot','Mild','Cool'], ticks = 'outside' ) ) ] py.iplot(data, filename='custom-colorscale-colorbar')
_____no_output_____
CC-BY-3.0
_posts/python/style/colorscales/colorscales.ipynb
ayulockin/documentation
Dash Example
from IPython.display import IFrame IFrame(src= "https://dash-simple-apps.plotly.host/dash-colorscaleplot/" ,width="100%" ,height="650px", frameBorder="0")
_____no_output_____
CC-BY-3.0
_posts/python/style/colorscales/colorscales.ipynb
ayulockin/documentation
Find the Dash app source code [here](https://github.com/plotly/simple-example-chart-apps/tree/master/colorscale). Reference: see https://plot.ly/python/reference/ for more information and chart attribute options!
from IPython.display import display, HTML display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />')) display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">')) ! pip install git+https://github.com/plotly/publisher.git --upgrade import publisher publisher.publish( 'colorscales.ipynb', 'python/colorscales/', 'Colorscales', 'How to set colorscales and heatmap colorscales in Python and Plotly. Divergent, sequential, and qualitative colorscales.', title = 'Colorscales in Python | Plotly', has_thumbnail='true', thumbnail='thumbnail/heatmap_colorscale.jpg', language='python', page_type='example_index', display_as='style_opt', order=11, ipynb= '~notebook_demo/187')
_____no_output_____
CC-BY-3.0
_posts/python/style/colorscales/colorscales.ipynb
ayulockin/documentation
Clustering Chicago Public Libraries by Top 10 Nearby Venues

Author: Kunyu He, University of Chicago CAPP'20

Executive Summary: In this notebook, I clustered 80 public libraries in the city of Chicago into 7 clusters, based on the categories of their top ten nearby venues. It could be a nice guide for those who would like to spend their days in these libraries, exploring their surroundings, but have become tired of staying in only one or a few of them over time.

The rest of this notebook is organized as follows: the Data section briefly introduces the data sources; the Methodology section briefly introduces the unsupervised learning algorithms used; in the Imports and Format Parameters section, I install and import the Python libraries used and set the global constants for future use; the Getting and Cleaning Data section contains code for downloading and cleaning the public library and nearby-venue data from external sources; I perform dimension reduction, clustering and labelling mainly in the Data Analysis section. Finally, the resulting folium map is presented in the Results section, and the Discussions section covers caveats and potential improvements.

Data: Information on the public libraries is provided by the [Chicago Public Library](https://www.chipublib.org/). You can access the data [here](https://data.cityofchicago.org/Education/Libraries-Locations-Hours-and-Contact-Information/x8fc-8rcq). Information on the top venues near the public libraries (within a 1,000-meter radius, the value used in the code below) is acquired from the [FourSquare API](https://developer.foursquare.com/). You can explore the surroundings of any geographical coordinates of interest with a developer account.

Methodology: The clustering algorithms used include:
* [Principal Component Analysis](https://en.wikipedia.org/wiki/Principal_component_analysis) with [Truncated SVD](http://infolab.stanford.edu/pub/cstr/reports/na/m/86/36/NA-M-86-36.pdf);
* [KMeans Clustering](https://en.wikipedia.org/wiki/K-means_clustering);
* [Hierarchical Clustering](https://en.wikipedia.org/wiki/Hierarchical_clustering) with [Ward's Method](https://en.wikipedia.org/wiki/Ward%27s_method).

PCA with TSVD is used to reduce the dimension of the feature matrix, which is a [sparse matrix](https://en.wikipedia.org/wiki/Sparse_matrix). KMeans and hierarchical clustering are applied to cluster the libraries in terms of their top ten nearby venue categories, and the final labels are taken from hierarchical clustering with Ward distance.

Imports and Format Parameters
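Before moving on to the notebook's own imports, here is a compact, end-to-end illustration of the methodology just described. It is only a sketch on toy random data (not the notebook's actual feature matrix); the 0.85 variance threshold and the 0.65 distance cut mirror the values used later.

import numpy as np
from sklearn.decomposition import TruncatedSVD
from scipy.cluster.hierarchy import linkage, fcluster

# Toy "library x venue-category" frequency matrix, mostly zeros (sparse-like).
rng = np.random.default_rng(0)
X_toy = rng.binomial(1, 0.1, size=(20, 50)) / 10.0

# 1. Truncated SVD: keep just enough components to explain ~85% of the variance.
svd = TruncatedSVD(n_components=min(X_toy.shape) - 1, random_state=0).fit(X_toy)
n_keep = int(np.argmax(svd.explained_variance_ratio_.cumsum() > 0.85)) + 1
X_reduced = TruncatedSVD(n_components=n_keep, random_state=0).fit_transform(X_toy)

# 2. Hierarchical clustering with Ward linkage, cut at a distance threshold.
labels = fcluster(linkage(X_reduced, method='ward'), t=0.65, criterion='distance')
print(n_keep, np.unique(labels).size)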
import pandas as pd import numpy as np import re import requests import matplotlib.pyplot as plt from matplotlib.font_manager import FontProperties from pandas.io.json import json_normalize from sklearn.decomposition import TruncatedSVD from sklearn.cluster import KMeans from scipy.cluster.hierarchy import linkage, dendrogram, fcluster
_____no_output_____
MIT
Clustering Chicago Public Libraries.ipynb
KunyuHe/Clustering-Chicago-Public-Libraries
For visualization, install [folium](https://github.com/python-visualization/folium) and make an additional import.
!conda install --quiet -c conda-forge folium --yes import folium %matplotlib inline title = FontProperties() title.set_family('serif') title.set_size(16) title.set_weight('bold') axis = FontProperties() axis.set_family('serif') axis.set_size(12) plt.rcParams['figure.figsize'] = [12, 8]
_____no_output_____
MIT
Clustering Chicago Public Libraries.ipynb
KunyuHe/Clustering-Chicago-Public-Libraries
Hard-code the geographical coordinates of the City of Chicago based on [this](https://www.latlong.net/place/chicago-il-usa-1855.html) page. Also prepare formatting parameters for folium map markers.
LATITUDE, LOGITUDE = 41.881832, -87.623177

# Marker colors, one per cluster
ICON_COLORS = ['red', 'blue', 'green', 'purple', 'orange', 'beige', 'darkred']

# HTML template for the map marker popups
HTML = """
<center><h4><b>Library {}</b></h4></center>
<h5><b>Cluster:</b> {};</h5>
<h5><b>Hours of operation:</b><br>
{}</h5>
<h5><b>Top five venues:</b><br>
<center>{}<br>
{}<br>
{}<br>
{}<br>
{}</center></h5>
"""
_____no_output_____
MIT
Clustering Chicago Public Libraries.ipynb
KunyuHe/Clustering-Chicago-Public-Libraries
Getting and Cleaning Data: Public Library Data
!wget --quiet https://data.cityofchicago.org/api/views/x8fc-8rcq/rows.csv?accessType=DOWNLOAD -O libraries.csv lib = pd.read_csv('libraries.csv', usecols=['NAME ', 'HOURS OF OPERATION', 'LOCATION']) lib.columns = ['library', 'hours', 'location'] lib.info()
<class 'pandas.core.frame.DataFrame'> RangeIndex: 80 entries, 0 to 79 Data columns (total 3 columns): library 80 non-null object hours 80 non-null object location 80 non-null object dtypes: object(3) memory usage: 2.0+ KB
MIT
Clustering Chicago Public Libraries.ipynb
KunyuHe/Clustering-Chicago-Public-Libraries
Notice that locations are stored as strings of tuples. Applying the following function to `lib`, we can convert `location` into two separate columns of latitudes and longitudes of the libraries.
def sep_location(row):
    """
    Purpose: separate the location string in a given row and convert it into a
             tuple of floats representing the latitude and longitude of the library.

    Inputs:
        row (PandasSeries): a row from the `lib` dataframe

    Outputs:
        (tuple): floats representing the latitude and longitude of the library
    """
    return tuple(float(re.compile('[()]').sub("", coordinate))
                 for coordinate in row.location.split(', '))


lib[['latitude', 'longitude']] = lib.apply(sep_location, axis=1).apply(pd.Series)
lib.drop('location', axis=1, inplace=True)
lib.head()
_____no_output_____
MIT
Clustering Chicago Public Libraries.ipynb
KunyuHe/Clustering-Chicago-Public-Libraries
Now data on the public libraries is ready for analysis. Venue Data: use the sensitive code cell below to enter your FourSquare credentials.
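The hidden cell only needs to define the two credential constants that `get_venues` references later (`CLIENT_ID` and `CLIENT_SECRET`); a placeholder sketch, with dummy values you would replace by your own keys:

# Placeholder FourSquare developer credentials -- replace with your own.
CLIENT_ID = 'YOUR_FOURSQUARE_CLIENT_ID'
CLIENT_SECRET = 'YOUR_FOURSQUARE_CLIENT_SECRET'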
# The code was removed by Watson Studio for sharing.
_____no_output_____
MIT
Clustering Chicago Public Libraries.ipynb
KunyuHe/Clustering-Chicago-Public-Libraries
Get the top ten venues near the libraries and store the data in the `venues` dataframe, with the radius set to 1000 meters by default. You can update the `VERSION` parameter to get up-to-date venue information.
VERSION = '20181206' FEATURES = ['venue.name', 'venue.categories', 'venue.location.lat', 'venue.location.lng'] def get_venues(libraries, latitudes, longitudes, limit=10, radius=1000.0): """ Purpose: download nearby venues information through FourSquare API in a dataframe Inputs: libraries (PandasSeries): names of the public libraries latitudes (PandasSeries): latitudes of the public libraries longitudes (PandasSeries): longitudes of the public libraries limit (int): number of top venues to explore, default to 10 radius (float): range of the circle coverage to define 'nearby', default to 1000.0 Outputs: (DataFrame) """ venues_lst = [] for library, lat, lng in zip(libraries, latitudes, longitudes): url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format( \ CLIENT_ID, CLIENT_SECRET, VERSION, lat, lng, radius, limit) items = requests.get(url).json()["response"]['groups'][0]['items'] venues_lst.append([(library, lat, lng, \ item['venue']['name'], \ item['venue']['location']['lat'], item['venue']['location']['lng'], \ item['venue']['categories'][0]['name']) for item in items]) venues = pd.DataFrame([item for venues_lst in venues_lst for item in venues_lst]) venues.columns = ['Library', 'Library Latitude', 'Library Longitude', \ 'Venue', 'Venue Latitude', 'Venue Longitude', 'Venue Category'] return venues venues = get_venues(lib.library, lib.latitude, lib.longitude) venues.head()
_____no_output_____
MIT
Clustering Chicago Public Libraries.ipynb
KunyuHe/Clustering-Chicago-Public-Libraries
Count the unique libraries, venues, and venue categories in our `venues` dataframe.
print('There are {} unique libraries, {} unique venues and {} unique categories.'.format( \ len(venues.Library.unique()), \ len(venues.Venue.unique()), \ len(venues['Venue Category'].unique())))
There are 80 unique libraries, 653 unique venues and 173 unique categories.
MIT
Clustering Chicago Public Libraries.ipynb
KunyuHe/Clustering-Chicago-Public-Libraries
Now our `venues` data is also ready for further analysis. Data Analysis: Data Preprocessing. Apply one-hot encoding to get our feature matrix, group the venues by library, and calculate the frequency of each venue category around each library by taking the mean.
features = pd.get_dummies(venues['Venue Category'], prefix="", prefix_sep="") features.insert(0, 'Library Name', venues.Library) X = features.groupby(['Library Name']).mean().iloc[:, 1:] X.head()
_____no_output_____
MIT
Clustering Chicago Public Libraries.ipynb
KunyuHe/Clustering-Chicago-Public-Libraries
There are too many categories of venues in our features dataframe, so we perform PCA to reduce the dimension of our data. Notice that most of the entries in our feature matrix are zero, i.e. the matrix is sparse, so the dimension reduction is done with truncated SVD. First, find the smallest number of dimensions that keeps 85% of the variance and transform the feature matrix.
tsvd = TruncatedSVD(n_components=X.shape[1]-1, random_state=0).fit(X) least_n = np.argmax(tsvd.explained_variance_ratio_.cumsum() > 0.85) print("In order to keep 85% of total variance, we need to keep at least {} dimensions.".format(least_n)) X_t = pd.DataFrame(TruncatedSVD(n_components=least_n, random_state=0).fit_transform(X))
In order to keep 85% of total variance, we need to keep at least 36 dimensions.
MIT
Clustering Chicago Public Libraries.ipynb
KunyuHe/Clustering-Chicago-Public-Libraries
Use KMeans on the transformed data and find the best number of k below.
ks = np.arange(1, 51) inertias = [] for k in ks: model = KMeans(n_clusters=k, random_state=0).fit(X_t) inertias.append(model.inertia_) plt.plot(ks, inertias, linewidth=2) plt.title("Figure 1 KMeans: Finding Best k", fontproperties=title) plt.xlabel('Number of Clusters (k)', fontproperties=axis) plt.ylabel('Within-cluster Sum-of-squares', fontproperties=axis) plt.xticks(np.arange(1, 51, 2)) plt.show()
_____no_output_____
MIT
Clustering Chicago Public Libraries.ipynb
KunyuHe/Clustering-Chicago-Public-Libraries
It's really hard to decide based on the elbow plot, as the downward trend lasts all the way to 50. Alternatively, try the Ward hierarchical clustering method.
merging = linkage(X_t, 'ward') plt.figure(figsize=[20, 10]) dendrogram(merging, leaf_rotation=90, leaf_font_size=10, distance_sort='descending', show_leaf_counts=True) plt.axhline(y=0.65, dashes=[6, 2], c='r') plt.xlabel('Library Names', fontproperties=axis) plt.title("Figure 2 Hierachical Clustering with Ward Distance: Cutting at 0.65", fontproperties=title) plt.show()
_____no_output_____
MIT
Clustering Chicago Public Libraries.ipynb
KunyuHe/Clustering-Chicago-Public-Libraries
The result is much better than KMeans. We see six clusters when cutting at approximately 0.65. Label the clustered libraries below, then join the labelled library names with `lib` to bind the geographical coordinates and hours of operation of the public libraries.
labels = fcluster(merging, t=0.65, criterion='distance') df = pd.DataFrame(list(zip(X.index.values, labels))) df.columns = ['library', 'cluster'] merged = pd.merge(lib, df, how='inner', on='library') merged.head()
_____no_output_____
MIT
Clustering Chicago Public Libraries.ipynb
KunyuHe/Clustering-Chicago-Public-Libraries
Results: Create a `folium.Map` instance `chicago` with an initial zoom level of 11.
chicago = folium.Map(location=[LATITUDE, LOGITUDE], zoom_start=11)
_____no_output_____
MIT
Clustering Chicago Public Libraries.ipynb
KunyuHe/Clustering-Chicago-Public-Libraries
Check the clustered map! Click on the icons to see the name, hours of operation and top five nearby venues of each public library in the city of Chicago!
for index, row in merged.iterrows(): venues_name = venues[venues.Library == row.library].Venue.values label = folium.Popup(HTML.format(row.library, row.cluster, row.hours, venues_name[0], venues_name[1], venues_name[2], venues_name[3], venues_name[4]), parse_html=False) folium.Marker([row.latitude, row.longitude], popup=label, icon=folium.Icon(color=ICON_COLORS[row.cluster-1], icon='book')).add_to(chicago) chicago
_____no_output_____
MIT
Clustering Chicago Public Libraries.ipynb
KunyuHe/Clustering-Chicago-Public-Libraries
Simulation Test Introduction
import sys import random import numpy as np import pylab from scipy import stats sys.path.insert(0, '../simulation') from environment import Environment from predator import Predator params = { 'env_size': 1000, 'n_patches': 20, 'n_trials': 100, 'max_moves': 5000, 'max_entities_per_patch': 50, 'min_entities_per_patch': 5, } entity_results = [] captured_results = [] for trial in range(params['n_trials']): # Set up the environment env = Environment(params['env_size'], params['env_size'], params['n_patches']) entities = random.randint( params['min_entities_per_patch'], params['max_entities_per_patch'] ) for patch in env.children: patch.create_entities(entities) pred = Predator() pred.xpos = env.length / 2.0 pred.y_pos = env.width / 2.0 pred.parent = env for i in range(params['max_moves']): pred.move() entity = pred.detect() pred.capture(entity) entity_results.append(entities) captured_results.append(len(pred.captured)) x = np.array(entity_results) y = np.array(captured_results) slope, intercept, r_value, p_value, slope_std_error = stats.linregress(x, y) print "Slope, intercept:", slope, intercept print "R-squared:", r_value**2 # Calculate some additional outputs predict_y = intercept + slope * x pred_error = y - predict_y degrees_of_freedom = len(x) - 2 residual_std_error = np.sqrt(np.sum(pred_error**2) / degrees_of_freedom) print "Residual Std Error = ", residual_std_error # Plotting pylab.plot(x, y, 'o') pylab.plot(x, predict_y, 'k-') pylab.show() z = np.divide(np.multiply(y, 1.0), np.multiply(x, 1.0)) pylab.plot(x, z, 'o')
_____no_output_____
MIT
src/ipython/45 Simulation_Test.ipynb
rah/optimal-search
Testing a hypothesis -- non-stationary or time-reversible. We evaluate whether the GTR model is sufficient for a data set, compared with the GN (non-stationary general nucleotide model).
from cogent3.app import io, evo, sample loader = io.load_aligned(format="fasta", moltype="dna") aln = loader("../data/primate_brca1.fasta") tree = "../data/primate_brca1.tree" sm_args = dict(optimise_motif_probs=True) null = evo.model("GTR", tree=tree, sm_args=sm_args) alt = evo.model("GN", tree=tree, sm_args=sm_args) hyp = evo.hypothesis(null, alt) result = hyp(aln) type(result)
_____no_output_____
BSD-3-Clause
doc/app/evo-hypothesis.ipynb
GavinHuttley/c3test
`result` is a `hypothesis_result` object. The `repr()` displays the likelihood ratio test statistic, degrees of freedom and associated p-value.
result
_____no_output_____
BSD-3-Clause
doc/app/evo-hypothesis.ipynb
GavinHuttley/c3test
In this case, we accept the null given the p-value is > 0.05. We still use this object to demonstrate the properties of a `hypothesis_result`, which has attributes and keys. Accessing the test statistics:
result.LR, result.df, result.pvalue
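Because the fitted models are attached to the result, the p-value can also be used to pick a model programmatically. A small sketch using only the attributes shown in this notebook (`pvalue`, `null`, `alt`), with the conventional 0.05 threshold:

# Keep the alternate (GN) model only if the likelihood ratio test rejects GTR.
alpha = 0.05
selected = result.alt if result.pvalue < alpha else result.null
print("GN selected" if result.pvalue < alpha else "GTR is sufficient")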
_____no_output_____
BSD-3-Clause
doc/app/evo-hypothesis.ipynb
GavinHuttley/c3test
The null hypothesis. This model is accessed via the `null` attribute.
result.null result.null.lf
_____no_output_____
BSD-3-Clause
doc/app/evo-hypothesis.ipynb
GavinHuttley/c3test
The alternate hypothesis
result.alt.lf
_____no_output_____
BSD-3-Clause
doc/app/evo-hypothesis.ipynb
GavinHuttley/c3test
1. Converting words or sentences into numeric vectors is fundamental when working with text data. To make sure you are solid on how these vectors work, please generate the tf-idf vectors for the last three sentences of the example we gave at the beginning of this checkpoint. If you are feeling uncertain, have your mentor walk you through it.

* Sentence 4: 1.585, 1, 0, 1, 1.585, 0, 0, 0, 0
* Sentence 5: 0, 0, 0, 0, 0, 0.585, 1, 1.585, 1
* Sentence 6: 0, 0, 0, 0, 0, 0, 1, 0, 2
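As a quick check on where values like 1.585 and 0.585 come from: assuming the worked example has six sentences and scores each term as raw count times idf = log2(N / df) (consistent with the log-base-2 note in the code cell below), the entries above follow from a few small calculations:

import math

# Hedged reconstruction: tf-idf = tf * log2(N / df), with N = 6 sentences assumed.
def tfidf(tf, df, n_docs=6):
    return tf * math.log2(n_docs / df)

print(round(tfidf(1, 2), 3))  # term used once, appears in 2 of 6 sentences -> 1.585
print(round(tfidf(1, 3), 3))  # term used once, appears in 3 of 6 sentences -> 1.0
print(round(tfidf(1, 4), 3))  # term used once, appears in 4 of 6 sentences -> 0.585
print(round(tfidf(2, 3), 3))  # term used twice, appears in 3 of 6 sentences -> 2.0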
# utility function for standard text cleaning def text_cleaner(text): # visual inspection identifies a form of punctuation spaCy does not # recognize: the double dash '--'. Better get rid of it now! text = re.sub(r'--',' ',text) text = re.sub("[\[].*?[\]]", "", text) text = re.sub(r"(\b|\s+\-?|^\-?)(\d+|\d*\.\d+)\b", " ", text) text = ' '.join(text.split()) return text # load and clean the data. persuasion = gutenberg.raw('austen-persuasion.txt') alice = gutenberg.raw('carroll-alice.txt') # the chapter indicator is idiosyncratic persuasion = re.sub(r'Chapter \d+', '', persuasion) alice = re.sub(r'CHAPTER .*', '', alice) alice = text_cleaner(alice) persuasion = text_cleaner(persuasion) # parse the cleaned novels. this can take a bit nlp = spacy.load('en_core_web_sm') alice_doc = nlp(alice) persuasion_doc = nlp(persuasion) # group into sentences alice_sents = [[sent, "Carroll"] for sent in alice_doc.sents] persuasion_sents = [[sent, "Austen"] for sent in persuasion_doc.sents] # combine the sentences from the two novels into one data frame sentences = pd.DataFrame(alice_sents + persuasion_sents, columns = ["text", "author"]) sentences.head() # get rid off stop words and punctuation # and lemmatize the tokens for i, sentence in enumerate(sentences["text"]): sentences.loc[i, "text"] = " ".join( [token.lemma_ for token in sentence if not token.is_punct and not token.is_stop]) from sklearn.feature_extraction.text import TfidfVectorizer vectorizer = TfidfVectorizer( max_df=0.5, min_df=2, use_idf=True, norm=u'l2', smooth_idf=True) # applying the vectorizer X = vectorizer.fit_transform(sentences["text"]) tfidf_df = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names()) sentences = pd.concat([tfidf_df, sentences[["text", "author"]]], axis=1) # keep in mind that the log base 2 of 1 is 0, # so a tf-idf score of 0 indicates that the word was present once in that sentence. sentences.head() sentences.loc[4]
_____no_output_____
MIT
NLPFE2.ipynb
AsterLaoWhy/Thinkful
2. In the 2-grams example above, we only used 2-grams as our features. This time, use both 1-grams and 2-grams together as your feature set. Run the same models in the example and compare the results.
# utility function for standard text cleaning def text_cleaner(text): # visual inspection identifies a form of punctuation spaCy does not # recognize: the double dash '--'. Better get rid of it now! text = re.sub(r'--',' ',text) text = re.sub("[\[].*?[\]]", "", text) text = re.sub(r"(\b|\s+\-?|^\-?)(\d+|\d*\.\d+)\b", " ", text) text = ' '.join(text.split()) return text # load and clean the data. persuasion = gutenberg.raw('austen-persuasion.txt') alice = gutenberg.raw('carroll-alice.txt') # the chapter indicator is idiosyncratic persuasion = re.sub(r'Chapter \d+', '', persuasion) alice = re.sub(r'CHAPTER .*', '', alice) alice = text_cleaner(alice) persuasion = text_cleaner(persuasion) # parse the cleaned novels. this can take a bit nlp = spacy.load('en') alice_doc = nlp(alice) persuasion_doc = nlp(persuasion) # group into sentences alice_sents = [[sent, "Carroll"] for sent in alice_doc.sents] persuasion_sents = [[sent, "Austen"] for sent in persuasion_doc.sents] # combine the sentences from the two novels into one data frame sentences = pd.DataFrame(alice_sents + persuasion_sents, columns = ["text", "author"]) sentences.head() # get rid off stop words and punctuation # and lemmatize the tokens for i, sentence in enumerate(sentences["text"]): sentences.loc[i, "text"] = " ".join( [token.lemma_ for token in sentence if not token.is_punct and not token.is_stop]) from sklearn.feature_extraction.text import TfidfVectorizer vectorizer = TfidfVectorizer( max_df=0.5, min_df=2, use_idf=True, norm=u'l2', smooth_idf=True, ngram_range=(1,2)) # applying the vectorizer X = vectorizer.fit_transform(sentences["text"]) tfidf_df = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names()) sentences = pd.concat([tfidf_df, sentences[["text", "author"]]], axis=1) # keep in mind that the log base 2 of 1 is 0, # so a tf-idf score of 0 indicates that the word was present once in that sentence. sentences.head() from sklearn.linear_model import LogisticRegression from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier from sklearn.model_selection import train_test_split Y = sentences['author'] X = np.array(sentences.drop(['text','author'], 1)) # We split the dataset into train and test sets X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.4, random_state=123) # Models lr = LogisticRegression() rfc = RandomForestClassifier() gbc = GradientBoostingClassifier() lr.fit(X_train, y_train) rfc.fit(X_train, y_train) gbc.fit(X_train, y_train) print("----------------------Logistic Regression Scores----------------------") print('Training set score:', lr.score(X_train, y_train)) print('\nTest set score:', lr.score(X_test, y_test)) print("----------------------Random Forest Scores----------------------") print('Training set score:', rfc.score(X_train, y_train)) print('\nTest set score:', rfc.score(X_test, y_test)) print("----------------------Gradient Boosting Scores----------------------") print('Training set score:', gbc.score(X_train, y_train)) print('\nTest set score:', gbc.score(X_test, y_test))
----------------------Logistic Regression Scores---------------------- Training set score: 0.9036488027366021 Test set score: 0.8555555555555555 ----------------------Random Forest Scores---------------------- Training set score: 0.9694982896237172 Test set score: 0.8414529914529915 ----------------------Gradient Boosting Scores---------------------- Training set score: 0.8246864310148233 Test set score: 0.8102564102564103
MIT
NLPFE2.ipynb
AsterLaoWhy/Thinkful
Training Collaborative Experts on MSR-VTT

This notebook shows how to download code that trains a Collaborative Experts model with GPT-1 + NetVLAD on the MSR-VTT Dataset.

Setup
* Download Code and Dependencies
* Import Modules
* Download Language Model Weights
* Download Datasets
* Generate Encodings for Dataset Captions

Code Downloading and Dependency Downloading
* Specify tensorflow version
* Clone repository from Github
* `cd` into the correct directory
* Install the requirements
%tensorflow_version 2.x !git clone https://github.com/googleinterns/via-content-understanding.git %cd via-content-understanding/videoretrieval/ !pip install -r requirements.txt !pip install --upgrade tensorflow_addons
_____no_output_____
Apache-2.0
videoretrieval/Demo notebook GPT-1.ipynb
googleinterns/via-content-understanding
Importing Modules
import tensorflow as tf import languagemodels import train.encoder_datasets import train.language_model import experts import datasets import datasets.msrvtt.constants import os import models.components import models.encoder import helper.precomputed_features from tensorflow_addons.activations import mish import tensorflow_addons as tfa import metrics.loss
_____no_output_____
Apache-2.0
videoretrieval/Demo notebook GPT-1.ipynb
googleinterns/via-content-understanding
Language Model Downloading
* Download GPT-1
gpt_model = languagemodels.OpenAIGPTModel()
_____no_output_____
Apache-2.0
videoretrieval/Demo notebook GPT-1.ipynb
googleinterns/via-content-understanding
Dataset downloading
* Download Datasets
* Download Precomputed Features
datasets.msrvtt_dataset.download_dataset()
_____no_output_____
Apache-2.0
videoretrieval/Demo notebook GPT-1.ipynb
googleinterns/via-content-understanding
Note: the system `curl` is more memory efficient than the download function in our codebase, so `curl` is used here instead.
url = datasets.msrvtt.constants.features_tar_url path = datasets.msrvtt.constants.features_tar_path os.system(f"curl {url} > {path}") helper.precomputed_features.cache_features( datasets.msrvtt_dataset, datasets.msrvtt.constants.expert_to_features, datasets.msrvtt.constants.features_tar_path,)
_____no_output_____
Apache-2.0
videoretrieval/Demo notebook GPT-1.ipynb
googleinterns/via-content-understanding
Embeddings Generation
* Generate Embeddings for MSR-VTT
* **Note: this will take 20-30 minutes on a colab, depending on the GPU**
train.language_model.generate_and_cache_contextual_embeddings( gpt_model, datasets.msrvtt_dataset)
_____no_output_____
Apache-2.0
videoretrieval/Demo notebook GPT-1.ipynb
googleinterns/via-content-understanding
Training
* Build Train Datasets
* Initialize Models
* Compile Encoders
* Fit Model
* Test Model

Datasets Generation
experts_used = [ experts.i3d, experts.r2p1d, experts.resnext, experts.senet, experts.speech_expert, experts.ocr_expert, experts.audio_expert, experts.densenet, experts.face_expert] train_ds, valid_ds, test_ds = ( train.encoder_datasets.generate_encoder_datasets( gpt_model, datasets.msrvtt_dataset, experts_used))
_____no_output_____
Apache-2.0
videoretrieval/Demo notebook GPT-1.ipynb
googleinterns/via-content-understanding
Model Initialization
class MishLayer(tf.keras.layers.Layer): def call(self, inputs): return mish(inputs) mish(tf.Variable([1.0])) text_encoder = models.components.TextEncoder( len(experts_used), num_netvlad_clusters=28, ghost_clusters=1, language_model_dimensionality=768, encoded_expert_dimensionality=512, residual_cls_token=False, ) video_encoder = models.components.VideoEncoder( num_experts=len(experts_used), experts_use_netvlad=[False, False, False, False, True, True, True, False, False], experts_netvlad_shape=[None, None, None, None, 19, 43, 8, None, None], expert_aggregated_size=512, encoded_expert_dimensionality=512, g_mlp_layers=3, h_mlp_layers=0, make_activation_layer=MishLayer) encoder = models.encoder.EncoderForFrozenLanguageModel( video_encoder, text_encoder, 0.0938, [1, 5, 10, 50], 20)
_____no_output_____
Apache-2.0
videoretrieval/Demo notebook GPT-1.ipynb
googleinterns/via-content-understanding
Encoder Compilation
def build_optimizer(lr=0.001): learning_rate_scheduler = tf.keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=lr, decay_steps=101, decay_rate=0.95, staircase=True) return tf.keras.optimizers.Adam(learning_rate_scheduler) encoder.compile(build_optimizer(0.1), metrics.loss.bidirectional_max_margin_ranking_loss) train_ds_prepared = (train_ds .shuffle(1000) .batch(64, drop_remainder=True) .prefetch(tf.data.experimental.AUTOTUNE)) encoder.video_encoder.trainable = True encoder.text_encoder.trainable = True
_____no_output_____
Apache-2.0
videoretrieval/Demo notebook GPT-1.ipynb
googleinterns/via-content-understanding
Model fitting
encoder.fit( train_ds_prepared, epochs=100, )
_____no_output_____
Apache-2.0
videoretrieval/Demo notebook GPT-1.ipynb
googleinterns/via-content-understanding
Tests
captions_per_video = 20 num_videos_upper_bound = 100000 ranks = [] for caption_index in range(captions_per_video): batch = next(iter(test_ds.shard(captions_per_video, caption_index).batch( num_videos_upper_bound))) video_embeddings, text_embeddings, mixture_weights = encoder.forward_pass( batch, training=False) similarity_matrix = metrics.loss.build_similarity_matrix( video_embeddings, text_embeddings, mixture_weights, batch[-1]) rankings = metrics.rankings.compute_ranks(similarity_matrix) ranks += list(rankings.numpy()) def recall_at_k(ranks, k): return len(list(filter(lambda i: i <= k, ranks))) / len(ranks) median_rank = sorted(ranks)[len(ranks)//2] mean_rank = sum(ranks)/len(ranks) print(f"Median Rank: {median_rank}") print(f"Mean Rank: {mean_rank}") for k in [1, 5, 10, 50]: recall = recall_at_k(ranks, k) print(f"R@{k}: {recall}")
_____no_output_____
Apache-2.0
videoretrieval/Demo notebook GPT-1.ipynb
googleinterns/via-content-understanding
Exercise 4
import numpy as np
import math as math
import scipy.stats as stats
import matplotlib.pyplot as plt

def normalGenerator(media, desvio, nroMuestras):
    # Acceptance-rejection: propose t ~ Exponential(1) and accept with
    # probability fx(t) / (c * fy(t)), where c = sqrt(2e/pi).
    c = math.sqrt(2 * math.exp(1) / np.pi)
    t = np.random.exponential(scale=1, size=nroMuestras)
    p = [fx(i) / (c * fy(i)) for i in t]

    z = []
    for n in range(1, nroMuestras):
        r = np.random.uniform()
        if r < p[n]:
            # Accepted: flip a fair coin for the sign (half-normal -> normal),
            # then rescale to the requested mean and standard deviation.
            r2 = np.random.uniform()
            if r2 < 0.5:
                z.append(t[n] * desvio + media)
            else:
                z.append(t[n] * -1 * desvio + media)
    return z

def fx(x):
    # Standard normal density
    return math.exp(-x**2 / 2) / math.sqrt(2 * np.pi)

def fy(y):
    # Exponential(1) density
    return math.exp(-y)

results = normalGenerator(35, 5, 100000)
plt.hist(results, bins=200);
_____no_output_____
MIT
tp1/Ejercicio 4.ipynb
NicoGallegos/fiuba-simulacion-grupo6
VARIANCE
print(np.var(results));
25.0067772918
MIT
tp1/Ejercicio 4.ipynb
NicoGallegos/fiuba-simulacion-grupo6
MEAN
print(np.mean(results));
34.9540678007
MIT
tp1/Ejercicio 4.ipynb
NicoGallegos/fiuba-simulacion-grupo6
STANDARD DEVIATION
print(np.std(results));
5.00067768325
MIT
tp1/Ejercicio 4.ipynb
NicoGallegos/fiuba-simulacion-grupo6
A French-Indonesian dictionary. You have a tabular file in the *data* directory with a list of words in French and, next to each one, its Indonesian translation. Complete the program below so that, for any entry typed by a user, it returns the Indonesian translation or an error message.
#!/usr/bin/env python #-*- coding: utf-8 -*- # # Modules # import csv # # User functions # def load_data(path): """Loads a data in csv format path -- path to data """ lines = [] with open(path) as csvfile: reader = csv.reader(csvfile, delimiter='\t') for line in reader: lines.append(tuple(line)) return lines def main(path_to_data): """Main function. path_to_data -- csv file """ lines = load_data(path_to_data) # # Main procedure # if __name__ == "__main__": path_to_data = '../data/french-indonesian.tsv' main(path_to_data)
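One possible way to finish the exercise (a sketch, not the official answer): build a dict from the loaded rows, assuming each row is a (French, Indonesian) pair, and look the user's entry up with a fallback message.

def main(path_to_data):
    """Main function.

    path_to_data -- csv file
    """
    lines = load_data(path_to_data)
    # Assumes each row is a (french_word, indonesian_word) pair.
    dictionary = dict(lines)
    entry = input('French word: ')
    print(dictionary.get(entry, f'Sorry, "{entry}" is not in the dictionary.'))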
_____no_output_____
MIT
2.data-structures/exercises/5.french-indonesian-dictionary.ipynb
mdjamina/python-M1TAL
k-Nearest Neighbor (kNN) exercise

*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*

The kNN classifier consists of two stages:
- During training, the classifier takes the training data and simply remembers it
- During testing, kNN classifies every test image by comparing to all training images and transferring the labels of the k most similar training examples
- The value of k is cross-validated

In this exercise you will implement these steps, understand the basic Image Classification pipeline and cross-validation, and gain proficiency in writing efficient, vectorized code.
# Run some setup code for this notebook. import random import numpy as np from cs231n.data_utils import load_CIFAR10 import matplotlib.pyplot as plt from __future__ import print_function # This is a bit of magic to make matplotlib figures appear inline in the notebook # rather than in a new window. %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # Some more magic so that the notebook will reload external python modules; # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 # Load the raw CIFAR-10 data. cifar10_dir = 'cs231n/datasets/cifar-10-batches-py' X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir) # As a sanity check, we print out the size of the training and test data. print('Training data shape: ', X_train.shape) print('Training labels shape: ', y_train.shape) print('Test data shape: ', X_test.shape) print('Test labels shape: ', y_test.shape) # Visualize some examples from the dataset. # We show a few examples of training images from each class. classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] num_classes = len(classes) samples_per_class = 7 for y, cls in enumerate(classes): idxs = np.flatnonzero(y_train == y) idxs = np.random.choice(idxs, samples_per_class, replace=False) for i, idx in enumerate(idxs): plt_idx = i * num_classes + y + 1 plt.subplot(samples_per_class, num_classes, plt_idx) plt.imshow(X_train[idx].astype('uint8')) plt.axis('off') if i == 0: plt.title(cls) plt.show() # Subsample the data for more efficient code execution in this exercise num_training = 5000 mask = list(range(num_training)) X_train = X_train[mask] y_train = y_train[mask] num_test = 500 mask = list(range(num_test)) X_test = X_test[mask] y_test = y_test[mask] # Reshape the image data into rows X_train = np.reshape(X_train, (X_train.shape[0], -1)) X_test = np.reshape(X_test, (X_test.shape[0], -1)) print(X_train.shape, X_test.shape) from cs231n.classifiers import KNearestNeighbor # Create a kNN classifier instance. # Remember that training a kNN classifier is a noop: # the Classifier simply remembers the data and does no further processing classifier = KNearestNeighbor() classifier.train(X_train, y_train)
_____no_output_____
MIT
assignment1/.ipynb_checkpoints/knn-checkpoint.ipynb
billzhao1990/CS231n-Spring-2017
We would now like to classify the test data with the kNN classifier. Recall that we can break down this process into two steps:
1. First we must compute the distances between all test examples and all train examples.
2. Given these distances, for each test example we find the k nearest examples and have them vote for the label.

Let's begin with computing the distance matrix between all training and test examples. For example, if there are **Ntr** training examples and **Nte** test examples, this stage should result in an **Nte x Ntr** matrix where each element (i,j) is the distance between the i-th test and j-th train example.

First, open `cs231n/classifiers/k_nearest_neighbor.py` and implement the function `compute_distances_two_loops` that uses a (very inefficient) double loop over all pairs of (test, train) examples and computes the distance matrix one element at a time.
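Independently of the assignment's own `compute_distances_two_loops` / `compute_distances_no_loops` implementations (which you should still write yourself), the shape of the computation can be sketched in plain NumPy: expand the squared distance ||a - b||^2 as ||a||^2 - 2 a.b + ||b||^2 and let broadcasting build the full matrix.

import numpy as np

def pairwise_l2(test, train):
    # (Nte, 1) + (Ntr,) - 2 * (Nte, Ntr) broadcasts to an (Nte, Ntr) matrix.
    test_sq = np.sum(test ** 2, axis=1, keepdims=True)
    train_sq = np.sum(train ** 2, axis=1)
    sq_dists = test_sq - 2.0 * (test @ train.T) + train_sq
    return np.sqrt(np.maximum(sq_dists, 0))  # clip tiny negatives from rounding

# Sanity check against an explicit double loop on small random data.
a, b = np.random.rand(3, 5), np.random.rand(4, 5)
brute = np.array([[np.linalg.norm(x - y) for y in b] for x in a])
assert np.allclose(pairwise_l2(a, b), brute)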
# Open cs231n/classifiers/k_nearest_neighbor.py and implement # compute_distances_two_loops. # Test your implementation: dists = classifier.compute_distances_two_loops(X_test) print(dists.shape) # We can visualize the distance matrix: each row is a single test example and # its distances to training examples plt.imshow(dists, interpolation='none') plt.show()
_____no_output_____
MIT
assignment1/.ipynb_checkpoints/knn-checkpoint.ipynb
billzhao1990/CS231n-Spring-2017
**Inline Question 1:** Notice the structured patterns in the distance matrix, where some rows or columns are visibly brighter. (Note that with the default color scheme black indicates low distances while white indicates high distances.)
- What in the data is the cause behind the distinctly bright rows?
- What causes the columns?

**Your Answer**:
* A distinctly bright row means that the corresponding test example is far from every training example, i.e. it is an outlier with respect to the training set.
* A distinctly bright column means that the corresponding training example is far from every test example.
# Now implement the function predict_labels and run the code below: # We use k = 1 (which is Nearest Neighbor). y_test_pred = classifier.predict_labels(dists, k=1) # Compute and print the fraction of correctly predicted examples num_correct = np.sum(y_test_pred == y_test) accuracy = float(num_correct) / num_test print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
_____no_output_____
MIT
assignment1/.ipynb_checkpoints/knn-checkpoint.ipynb
billzhao1990/CS231n-Spring-2017
You should expect to see approximately `27%` accuracy. Now let's try out a larger `k`, say `k = 5`:
y_test_pred = classifier.predict_labels(dists, k=5) num_correct = np.sum(y_test_pred == y_test) accuracy = float(num_correct) / num_test print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
_____no_output_____
MIT
assignment1/.ipynb_checkpoints/knn-checkpoint.ipynb
billzhao1990/CS231n-Spring-2017
You should expect to see a slightly better performance than with `k = 1`.
# Now lets speed up distance matrix computation by using partial vectorization # with one loop. Implement the function compute_distances_one_loop and run the # code below: dists_one = classifier.compute_distances_one_loop(X_test) # To ensure that our vectorized implementation is correct, we make sure that it # agrees with the naive implementation. There are many ways to decide whether # two matrices are similar; one of the simplest is the Frobenius norm. In case # you haven't seen it before, the Frobenius norm of two matrices is the square # root of the squared sum of differences of all elements; in other words, reshape # the matrices into vectors and compute the Euclidean distance between them. difference = np.linalg.norm(dists - dists_one, ord='fro') print('Difference was: %f' % (difference, )) if difference < 0.001: print('Good! The distance matrices are the same') else: print('Uh-oh! The distance matrices are different') # Now implement the fully vectorized version inside compute_distances_no_loops # and run the code dists_two = classifier.compute_distances_no_loops(X_test) # check that the distance matrix agrees with the one we computed before: difference = np.linalg.norm(dists - dists_two, ord='fro') print('Difference was: %f' % (difference, )) if difference < 0.001: print('Good! The distance matrices are the same') else: print('Uh-oh! The distance matrices are different') # Let's compare how fast the implementations are def time_function(f, *args): """ Call a function f with args and return the time (in seconds) that it took to execute. """ import time tic = time.time() f(*args) toc = time.time() return toc - tic two_loop_time = time_function(classifier.compute_distances_two_loops, X_test) print('Two loop version took %f seconds' % two_loop_time) one_loop_time = time_function(classifier.compute_distances_one_loop, X_test) print('One loop version took %f seconds' % one_loop_time) no_loop_time = time_function(classifier.compute_distances_no_loops, X_test) print('No loop version took %f seconds' % no_loop_time) # you should see significantly faster performance with the fully vectorized implementation
_____no_output_____
MIT
assignment1/.ipynb_checkpoints/knn-checkpoint.ipynb
billzhao1990/CS231n-Spring-2017
Cross-validation. We have implemented the k-Nearest Neighbor classifier but we set the value k = 5 arbitrarily. We will now determine the best value of this hyperparameter with cross-validation.
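Before the assignment-specific cell below, it may help to see the bare mechanics of k-fold cross-validation in isolation. In this sketch, `train_fn` and `predict_fn` are placeholders standing in for the classifier's `train` and `predict_labels` calls.

import numpy as np

def cross_validate(train_fn, predict_fn, X, y, k_values, num_folds=5):
    # Split the data, then repeatedly hold one fold out for validation.
    X_folds = np.array_split(X, num_folds)
    y_folds = np.array_split(y, num_folds)
    scores = {k: [] for k in k_values}
    for i in range(num_folds):
        X_tr = np.concatenate(X_folds[:i] + X_folds[i + 1:])
        y_tr = np.concatenate(y_folds[:i] + y_folds[i + 1:])
        model = train_fn(X_tr, y_tr)
        for k in k_values:
            preds = predict_fn(model, X_folds[i], k)
            scores[k].append(np.mean(preds == y_folds[i]))
    return scores  # one accuracy per fold for every k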
num_folds = 5 k_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100] X_train_folds = [] y_train_folds = [] ################################################################################ # TODO: # # Split up the training data into folds. After splitting, X_train_folds and # # y_train_folds should each be lists of length num_folds, where # # y_train_folds[i] is the label vector for the points in X_train_folds[i]. # # Hint: Look up the numpy array_split function. # ################################################################################ #pass X_train_folds = np.array_split(X_train, num_folds) y_train_folds = np.array_split(y_train, num_folds) ################################################################################ # END OF YOUR CODE # ################################################################################ # A dictionary holding the accuracies for different values of k that we find # when running cross-validation. After running cross-validation, # k_to_accuracies[k] should be a list of length num_folds giving the different # accuracy values that we found when using that value of k. k_to_accuracies = {} ################################################################################ # TODO: # # Perform k-fold cross validation to find the best value of k. For each # # possible value of k, run the k-nearest-neighbor algorithm num_folds times, # # where in each case you use all but one of the folds as training data and the # # last fold as a validation set. Store the accuracies for all fold and all # # values of k in the k_to_accuracies dictionary. # ################################################################################ #pass for k in k_choices: inner_accuracies = np.zeros(num_folds) for i in range(num_folds): X_sub_train = np.concatenate(np.delete(X_train_folds, i, axis=0)) y_sub_train = np.concatenate(np.delete(y_train_folds, i, axis=0)) print(X_sub_train.shape,y_sub_train.shape) X_sub_test = X_train_folds[i] y_sub_test = y_train_folds[i] print(X_sub_test.shape,y_sub_test.shape) classifier = KNearestNeighbor() classifier.train(X_sub_train, y_sub_train) dists = classifier.compute_distances_no_loops(X_sub_test) pred_y = classifier.predict_labels(dists, k) num_correct = np.sum(y_sub_test == pred_y) inner_accuracies[i] = float(num_correct)/X_test.shape[0] k_to_accuracies[k] = np.sum(inner_accuracies)/num_folds ################################################################################ # END OF YOUR CODE # ################################################################################ # Print out the computed accuracies for k in sorted(k_to_accuracies): for accuracy in k_to_accuracies[k]: print('k = %d, accuracy = %f' % (k, accuracy)) X_train_folds = np.array_split(X_train, 5) t = np.delete(X_train_folds, 1,axis=0) print(X_train_folds) # plot the raw observations for k in k_choices: accuracies = k_to_accuracies[k] plt.scatter([k] * len(accuracies), accuracies) # plot the trend line with error bars that correspond to standard deviation accuracies_mean = np.array([np.mean(v) for k,v in sorted(k_to_accuracies.items())]) accuracies_std = np.array([np.std(v) for k,v in sorted(k_to_accuracies.items())]) plt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std) plt.title('Cross-validation on k') plt.xlabel('k') plt.ylabel('Cross-validation accuracy') plt.show() # Based on the cross-validation results above, choose the best value for k, # retrain the classifier using all the training data, and test it on the test # data. 
# You should be able to get above 28% accuracy on the test data.
best_k = 1

classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
y_test_pred = classifier.predict(X_test, k=best_k)

# Compute and display the accuracy
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
_____no_output_____
MIT
assignment1/.ipynb_checkpoints/knn-checkpoint.ipynb
billzhao1990/CS231n-Spring-2017
Callback> Basic callbacks for Learner Callback -
#export _inner_loop = "begin_batch after_pred after_loss after_backward after_step after_cancel_batch after_batch".split() #export class Callback(GetAttr): "Basic class handling tweaks of the training loop by changing a `Learner` in various events" _default,learn,run,run_train,run_valid = 'learn',None,True,True,True def __repr__(self): return type(self).__name__ def __call__(self, event_name): "Call `self.{event_name}` if it's defined" _run = (event_name not in _inner_loop or (self.run_train and getattr(self, 'training', True)) or (self.run_valid and not getattr(self, 'training', False))) if self.run and _run: getattr(self, event_name, noop)() if event_name=='after_fit': self.run=True #Reset self.run to True at each end of fit def __setattr__(self, name, value): if hasattr(self.learn,name): warn(f"You are setting an attribute ({name}) that also exists in the learner. Please be advised that you're not setting it in the learner but in the callback. Use `self.learn.{name}` if you would like to change it in the learner.") super().__setattr__(name, value) @property def name(self): "Name of the `Callback`, camel-cased and with '*Callback*' removed" return class2attr(self, 'Callback')
_____no_output_____
Apache-2.0
nbs/13_callback.core.ipynb
aquietlife/fastai2
The training loop is defined in `Learner` a bit below and consists in a minimal set of instructions: looping through the data we:- compute the output of the model from the input- calculate a loss between this output and the desired target- compute the gradients of this loss with respect to all the model parameters- update the parameters accordingly- zero all the gradientsAny tweak of this training loop is defined in a `Callback` to avoid over-complicating the code of the training loop, and to make it easy to mix and match different techniques (since they'll be defined in different callbacks). A callback can implement actions on the following events:- `begin_fit`: called before doing anything, ideal for initial setup.- `begin_epoch`: called at the beginning of each epoch, useful for any behavior you need to reset at each epoch.- `begin_train`: called at the beginning of the training part of an epoch.- `begin_batch`: called at the beginning of each batch, just after drawing said batch. It can be used to do any setup necessary for the batch (like hyper-parameter scheduling) or to change the input/target before it goes in the model (change of the input with techniques like mixup for instance).- `after_pred`: called after computing the output of the model on the batch. It can be used to change that output before it's fed to the loss.- `after_loss`: called after the loss has been computed, but before the backward pass. It can be used to add any penalty to the loss (AR or TAR in RNN training for instance).- `after_backward`: called after the backward pass, but before the update of the parameters. It can be used to do any change to the gradients before said update (gradient clipping for instance).- `after_step`: called after the step and before the gradients are zeroed.- `after_batch`: called at the end of a batch, for any clean-up before the next one.- `after_train`: called at the end of the training phase of an epoch.- `begin_validate`: called at the beginning of the validation phase of an epoch, useful for any setup needed specifically for validation.- `after_validate`: called at the end of the validation part of an epoch.- `after_epoch`: called at the end of an epoch, for any clean-up before the next one.- `after_fit`: called at the end of training, for final clean-up.
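As a minimal illustration of how these events are consumed (built only on the `Callback` dispatch defined above, and purely for demonstration), a callback that reports a few of the events listed here might look like:

class VerboseCallback(Callback):
    "Print a short message at a few of the events listed above."
    def begin_fit(self):   print("starting fit")
    def begin_epoch(self): print("starting an epoch")
    def after_fit(self):   print("finished fit")

cb = VerboseCallback()
cb('begin_fit')      # prints "starting fit"
cb('not_an_event')   # falls back to `noop`, so nothing happens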
show_doc(Callback.__call__) tst_cb = Callback() tst_cb.call_me = lambda: print("maybe") test_stdout(lambda: tst_cb("call_me"), "maybe") show_doc(Callback.__getattr__)
_____no_output_____
Apache-2.0
nbs/13_callback.core.ipynb
aquietlife/fastai2
This is a shortcut to avoid having to write `self.learn.bla` for any `bla` attribute we seek, and just write `self.bla`.
mk_class('TstLearner', 'a') class TstCallback(Callback): def batch_begin(self): print(self.a) learn,cb = TstLearner(1),TstCallback() cb.learn = learn test_stdout(lambda: cb('batch_begin'), "1")
_____no_output_____
Apache-2.0
nbs/13_callback.core.ipynb
aquietlife/fastai2
Note that this shortcut only works for reading the value of an attribute; if you want to change it, you have to access it explicitly with `self.learn.bla`. In the example below, `self.a += 1` creates an `a` attribute of 2 in the callback instead of setting the `a` of the learner to 2. It also issues a warning that something is probably wrong:
learn.a class TstCallback(Callback): def batch_begin(self): self.a += 1 learn,cb = TstLearner(1),TstCallback() cb.learn = learn cb('batch_begin') test_eq(cb.a, 2) test_eq(cb.learn.a, 1)
/home/sgugger/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:16: UserWarning: You are setting an attribute (a) that also exists in the learner. Please be advised that you're not setting it in the learner but in the callback. Use `self.learn.a` if you would like to change it in the learner. app.launch_new_instance()
Apache-2.0
nbs/13_callback.core.ipynb
aquietlife/fastai2
A proper version needs to write `self.learn.a = self.a + 1`:
class TstCallback(Callback): def batch_begin(self): self.learn.a = self.a + 1 learn,cb = TstLearner(1),TstCallback() cb.learn = learn cb('batch_begin') test_eq(cb.learn.a, 2) show_doc(Callback.name, name='Callback.name') test_eq(TstCallback().name, 'tst') class ComplicatedNameCallback(Callback): pass test_eq(ComplicatedNameCallback().name, 'complicated_name')
_____no_output_____
Apache-2.0
nbs/13_callback.core.ipynb
aquietlife/fastai2
TrainEvalCallback -
#export
class TrainEvalCallback(Callback):
    "`Callback` that tracks the number of iterations done and properly sets training/eval mode"
    run_valid = False
    def begin_fit(self):
        "Set the iter and epoch counters to 0, put the model on the right device"
        self.learn.train_iter,self.learn.pct_train = 0,0.
        self.model.to(self.dls.device)

    def after_batch(self):
        "Update the iter counter (in training mode)"
        self.learn.pct_train += 1./(self.n_iter*self.n_epoch)
        self.learn.train_iter += 1

    def begin_train(self):
        "Set the model in training mode"
        self.learn.pct_train=self.epoch/self.n_epoch
        self.model.train()
        self.learn.training=True

    def begin_validate(self):
        "Set the model in validation mode"
        self.model.eval()
        self.learn.training=False

show_doc(TrainEvalCallback, title_level=3)
_____no_output_____
Apache-2.0
nbs/13_callback.core.ipynb
aquietlife/fastai2
This `Callback` is automatically added in every `Learner` at initialization.
#hide #test of the TrainEvalCallback below in Learner.fit show_doc(TrainEvalCallback.begin_fit) show_doc(TrainEvalCallback.after_batch) show_doc(TrainEvalCallback.begin_train) show_doc(TrainEvalCallback.begin_validate) # export defaults.callbacks = [TrainEvalCallback]
_____no_output_____
Apache-2.0
nbs/13_callback.core.ipynb
aquietlife/fastai2
GatherPredsCallback -
#export #TODO: save_targs and save_preds only handle preds/targets that have one tensor, not tuples of tensors. class GatherPredsCallback(Callback): "`Callback` that saves the predictions and targets, optionally `with_loss`" def __init__(self, with_input=False, with_loss=False, save_preds=None, save_targs=None, concat_dim=0): store_attr(self, "with_input,with_loss,save_preds,save_targs,concat_dim") def begin_batch(self): if self.with_input: self.inputs.append((to_detach(self.xb))) def begin_validate(self): "Initialize containers" self.preds,self.targets = [],[] if self.with_input: self.inputs = [] if self.with_loss: self.losses = [] def after_batch(self): "Save predictions, targets and potentially losses" preds,targs = to_detach(self.pred),to_detach(self.yb) if self.save_preds is None: self.preds.append(preds) else: (self.save_preds/str(self.iter)).save_array(preds) if self.save_targs is None: self.targets.append(targs) else: (self.save_targs/str(self.iter)).save_array(targs[0]) if self.with_loss: bs = find_bs(self.yb) loss = self.loss if self.loss.numel() == bs else self.loss.view(bs,-1).mean(1) self.losses.append(to_detach(loss)) def after_validate(self): "Concatenate all recorded tensors" if self.with_input: self.inputs = detuplify(to_concat(self.inputs, dim=self.concat_dim)) if not self.save_preds: self.preds = detuplify(to_concat(self.preds, dim=self.concat_dim)) if not self.save_targs: self.targets = detuplify(to_concat(self.targets, dim=self.concat_dim)) if self.with_loss: self.losses = to_concat(self.losses) def all_tensors(self): res = [None if self.save_preds else self.preds, None if self.save_targs else self.targets] if self.with_input: res = [self.inputs] + res if self.with_loss: res.append(self.losses) return res show_doc(GatherPredsCallback, title_level=3) show_doc(GatherPredsCallback.begin_validate) show_doc(GatherPredsCallback.after_batch) show_doc(GatherPredsCallback.after_validate)
_____no_output_____
Apache-2.0
nbs/13_callback.core.ipynb
aquietlife/fastai2
Callbacks control flow It happens that we may want to skip some of the steps of the training loop: in gradient accumulation, for instance, we don't always want to do the step/zeroing of the grads. During an LR finder test, we don't want to do the validation phase of an epoch. Or if we're training with an early-stopping strategy, we want to be able to completely interrupt the training loop. This is made possible by raising specific exceptions that the training loop will look for (and properly catch).
#export
_ex_docs = dict(
    CancelBatchException="Skip the rest of this batch and go to `after_batch`",
    CancelTrainException="Skip the rest of the training part of the epoch and go to `after_train`",
    CancelValidException="Skip the rest of the validation part of the epoch and go to `after_validate`",
    CancelEpochException="Skip the rest of this epoch and go to `after_epoch`",
    CancelFitException="Interrupts training and go to `after_fit`")

for c,d in _ex_docs.items(): mk_class(c,sup=Exception,doc=d)

show_doc(CancelBatchException, title_level=3)
show_doc(CancelTrainException, title_level=3)
show_doc(CancelValidException, title_level=3)
show_doc(CancelEpochException, title_level=3)
show_doc(CancelFitException, title_level=3)
_____no_output_____
Apache-2.0
nbs/13_callback.core.ipynb
aquietlife/fastai2
You can detect that one of those exceptions has occurred and add code that executes right after with the following events (a minimal sketch follows this list):- `after_cancel_batch`: reached immediately after a `CancelBatchException` before proceeding to `after_batch`- `after_cancel_train`: reached immediately after a `CancelTrainException` before proceeding to `after_epoch`- `after_cancel_valid`: reached immediately after a `CancelValidException` before proceeding to `after_epoch`- `after_cancel_epoch`: reached immediately after a `CancelEpochException` before proceeding to `after_epoch`- `after_cancel_fit`: reached immediately after a `CancelFitException` before proceeding to `after_fit`
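Here is that minimal illustrative sketch (not library code; it assumes the `train_iter` counter maintained by `TrainEvalCallback` above is reachable through the learner) of a callback that interrupts training early and reacts to the cancellation.
class StopAfterNBatches(Callback):
    "Toy early-stopping callback: interrupt fitting after `n` training batches"
    def __init__(self, n=10): self.n = n
    def after_batch(self):
        # train_iter is maintained by TrainEvalCallback and read through the learner
        if self.train_iter >= self.n: raise CancelFitException()
    def after_cancel_fit(self): print("Fit was interrupted early")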
# export _events = L.split('begin_fit begin_epoch begin_train begin_batch after_pred after_loss \ after_backward after_step after_cancel_batch after_batch after_cancel_train \ after_train begin_validate after_cancel_validate after_validate after_cancel_epoch \ after_epoch after_cancel_fit after_fit') mk_class('event', **_events.map_dict(), doc="All possible events as attributes to get tab-completion and typo-proofing") # export _all_ = ['event'] show_doc(event, name='event', title_level=3) test_eq(event.after_backward, 'after_backward')
_____no_output_____
Apache-2.0
nbs/13_callback.core.ipynb
aquietlife/fastai2
Here's the full list: *begin_fit begin_epoch begin_train begin_batch after_pred after_loss after_backward after_step after_cancel_batch after_batch after_cancel_train after_train begin_validate after_cancel_validate after_validate after_cancel_epoch after_epoch after_cancel_fit after_fit*. Export -
#hide from nbdev.export import notebook2script notebook2script()
Converted 00_torch_core.ipynb. Converted 01_layers.ipynb. Converted 02_data.load.ipynb. Converted 03_data.core.ipynb. Converted 04_data.external.ipynb. Converted 05_data.transforms.ipynb. Converted 06_data.block.ipynb. Converted 07_vision.core.ipynb. Converted 08_vision.data.ipynb. Converted 09_vision.augment.ipynb. Converted 09b_vision.utils.ipynb. Converted 09c_vision.widgets.ipynb. Converted 10_tutorial.pets.ipynb. Converted 11_vision.models.xresnet.ipynb. Converted 12_optimizer.ipynb. Converted 13_callback.core.ipynb. Converted 13a_learner.ipynb. Converted 13b_metrics.ipynb. Converted 14_callback.schedule.ipynb. Converted 14a_callback.data.ipynb. Converted 15_callback.hook.ipynb. Converted 15a_vision.models.unet.ipynb. Converted 16_callback.progress.ipynb. Converted 17_callback.tracker.ipynb. Converted 18_callback.fp16.ipynb. Converted 19_callback.mixup.ipynb. Converted 20_interpret.ipynb. Converted 20a_distributed.ipynb. Converted 21_vision.learner.ipynb. Converted 22_tutorial.imagenette.ipynb. Converted 23_tutorial.transfer_learning.ipynb. Converted 30_text.core.ipynb. Converted 31_text.data.ipynb. Converted 32_text.models.awdlstm.ipynb. Converted 33_text.models.core.ipynb. Converted 34_callback.rnn.ipynb. Converted 35_tutorial.wikitext.ipynb. Converted 36_text.models.qrnn.ipynb. Converted 37_text.learner.ipynb. Converted 38_tutorial.ulmfit.ipynb. Converted 40_tabular.core.ipynb. Converted 41_tabular.data.ipynb. Converted 42_tabular.model.ipynb. Converted 43_tabular.learner.ipynb. Converted 45_collab.ipynb. Converted 50_datablock_examples.ipynb. Converted 60_medical.imaging.ipynb. Converted 65_medical.text.ipynb. Converted 70_callback.wandb.ipynb. Converted 71_callback.tensorboard.ipynb. Converted 72_callback.neptune.ipynb. Converted 97_test_utils.ipynb. Converted 99_pytorch_doc.ipynb. Converted index.ipynb.
Apache-2.0
nbs/13_callback.core.ipynb
aquietlife/fastai2
Calculating the Return of Indices *Suggested Answers follow (usually there are multiple ways to solve a problem in Python).* Consider three famous American market indices – Dow Jones, S&P 500, and the Nasdaq for the period of 1st of January 2000 until today.
import numpy as np import pandas as pd from pandas_datareader import data as wb import matplotlib.pyplot as plt tickers = ['^DJI', '^GSPC', '^IXIC'] ind_data = pd.DataFrame() for t in tickers: ind_data[t] = wb.DataReader(t, data_source='yahoo', start='2000-1-1')['Adj Close'] ind_data.head() ind_data.tail()
_____no_output_____
Apache-2.0
23 - Python for Finance/2_Calculating and Comparing Rates of Return in Python/11_Calculating the Rate of Return of Indices (5:03)/Calculating the Return of Indices - Solution_Yahoo_Py3.ipynb
olayinka04/365-data-science-courses
Normalize the data to 100 and plot the results on a graph.
(ind_data / ind_data.iloc[0] * 100).plot(figsize=(15, 6)); plt.show()
_____no_output_____
Apache-2.0
23 - Python for Finance/2_Calculating and Comparing Rates of Return in Python/11_Calculating the Rate of Return of Indices (5:03)/Calculating the Return of Indices - Solution_Yahoo_Py3.ipynb
olayinka04/365-data-science-courses
How would you explain the common and the different parts of the behavior of the three indices? ***** Obtain the simple returns of the indices.
ind_returns = (ind_data / ind_data.shift(1)) - 1 ind_returns.tail()
_____no_output_____
Apache-2.0
23 - Python for Finance/2_Calculating and Comparing Rates of Return in Python/11_Calculating the Rate of Return of Indices (5:03)/Calculating the Return of Indices - Solution_Yahoo_Py3.ipynb
olayinka04/365-data-science-courses
Estimate the average annual return of each index.
annual_ind_returns = ind_returns.mean() * 250 annual_ind_returns
_____no_output_____
Apache-2.0
23 - Python for Finance/2_Calculating and Comparing Rates of Return in Python/11_Calculating the Rate of Return of Indices (5:03)/Calculating the Return of Indices - Solution_Yahoo_Py3.ipynb
olayinka04/365-data-science-courses
Read the CSV and Perform Basic Data Cleaning
df = pd.read_csv("resources/exoplanet_data.csv") # Drop the null columns where all values are null df = df.dropna(axis='columns', how='all') # Drop the null rows df = df.dropna() df.head()
_____no_output_____
MIT
model_1 - DecisionTree.ipynb
RussellMcGrath/machine-learning-challenge
Select your features (columns)
# Set features. This will also be used as your x values. #selected_features = df[['names', 'of', 'selected', 'features', 'here']] feature_list = df.columns.to_list() feature_list.remove("koi_disposition") removal_list = [] for x in feature_list: if "err" in x: removal_list.append(x) print(removal_list) selected_features = df[feature_list].drop(columns=removal_list) selected_features.head()
['koi_period_err1', 'koi_period_err2', 'koi_time0bk_err1', 'koi_time0bk_err2', 'koi_impact_err1', 'koi_impact_err2', 'koi_duration_err1', 'koi_duration_err2', 'koi_depth_err1', 'koi_depth_err2', 'koi_prad_err1', 'koi_prad_err2', 'koi_insol_err1', 'koi_insol_err2', 'koi_steff_err1', 'koi_steff_err2', 'koi_slogg_err1', 'koi_slogg_err2', 'koi_srad_err1', 'koi_srad_err2']
MIT
model_1 - DecisionTree.ipynb
RussellMcGrath/machine-learning-challenge
Create a Train Test Split Use `koi_disposition` for the y values
from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(selected_features, df["koi_disposition"], random_state=13) X_train.head()
_____no_output_____
MIT
model_1 - DecisionTree.ipynb
RussellMcGrath/machine-learning-challenge
Pre-processing Scale the data using the MinMaxScaler and perform some feature selection
# Scale your data from sklearn.preprocessing import MinMaxScaler X_scaler = MinMaxScaler().fit(X_train) #y_scaler = MinMaxScaler().fit(y_train) X_train_scaled = X_scaler.transform(X_train) X_test_scaled = X_scaler.transform(X_test) #y_train_scaled = y_scaler.transform(y_train) #y_test_scaled = y_scaler.transform(y_train)
_____no_output_____
MIT
model_1 - DecisionTree.ipynb
RussellMcGrath/machine-learning-challenge
Train the Model
from sklearn import tree

# Fit and score on the same (scaled) feature representation; fitting on the
# unscaled data but scoring on the scaled data gives misleading results.
decision_tree_model = tree.DecisionTreeClassifier()
decision_tree_model = decision_tree_model.fit(X_train_scaled, y_train)
print(f"Training Data Score: {decision_tree_model.score(X_train_scaled, y_train)}")
print(f"Testing Data Score: {decision_tree_model.score(X_test_scaled, y_test)}")
Training Data Score: 0.6055693305359527 Testing Data Score: 0.5835240274599542
MIT
model_1 - DecisionTree.ipynb
RussellMcGrath/machine-learning-challenge
Hyperparameter Tuning Use `GridSearchCV` to tune the model's parameters
decision_tree_model.get_params()

# Create the GridSearchCV model
from sklearn.model_selection import GridSearchCV

# 'C' and 'gamma' are SVC hyperparameters and are not valid for a
# DecisionTreeClassifier; search over tree hyperparameters instead
# (the particular values below are just a reasonable example grid).
param_grid = {'max_depth': [3, 5, 10, None],
              'min_samples_split': [2, 5, 10],
              'criterion': ['gini', 'entropy']}
grid = GridSearchCV(decision_tree_model, param_grid, verbose=3)

# Train the model with GridSearch (use the same scaled features as above)
grid.fit(X_train_scaled, y_train)

print(grid.best_params_)
print(grid.best_score_)
_____no_output_____
MIT
model_1 - DecisionTree.ipynb
RussellMcGrath/machine-learning-challenge
Save the Model
# save your model by updating "your_name" with your name
# and, if needed, swapping the model variable below for the one you want to keep
# be sure to turn this in to BCS
# if joblib fails to import, try running the command to install in terminal/git-bash
import joblib
filename = 'your_name.sav'
joblib.dump(decision_tree_model, filename)  # or grid.best_estimator_ for the tuned model
_____no_output_____
MIT
model_1 - DecisionTree.ipynb
RussellMcGrath/machine-learning-challenge
Negative Binomial Regression (Students absence example) Negative binomial distribution review I always experience some kind of confusion when looking at the negative binomial distribution after a while of not working with it. There are so many different definitions that I usually need to read everything more than once. The definition I first learned, and the one I like the most, goes as follows: The negative binomial distribution is the distribution of a random variable that is defined as the number of independent Bernoulli trials until the k-th "success". In short, we repeat a Bernoulli experiment until we observe k successes and record the number of trials it required. $$Y \sim \text{NB}(k, p)$$ where $0 \le p \le 1$ is the probability of success in each Bernoulli trial, $k > 0$, usually integer, and $y \in \{k, k + 1, \cdots\}$ The probability mass function (pmf) is $$p(y | k, p)= \binom{y - 1}{y-k}(1 -p)^{y - k}p^k$$ If you, like me, find it hard to remember whether $y$ starts at $0$, $1$, or $k$, try to think twice about the definition of the variable. But how? First, recall we aim to have $k$ successes. And success is one of the two possible outcomes of a trial, so the number of trials can never be smaller than the number of successes. Thus, we can confidently say that $y \ge k$. But this is not the only way of defining the negative binomial distribution; there are plenty of options! One of the most interesting, and the one you see in [PyMC3](https://docs.pymc.io/api/distributions/discrete.html#pymc3.distributions.discrete.NegativeBinomial), the library we use in Bambi for the backend, is as a continuous mixture. The negative binomial distribution describes a Poisson random variable whose rate is also a random variable (not a fixed constant!) following a gamma distribution. Or in other words, conditional on a gamma-distributed variable $\mu$, the variable $Y$ has a Poisson distribution with mean $\mu$. Under this alternative definition, the pmf is $$\displaystyle p(y | \mu, \alpha) = \binom{y + \alpha - 1}{y} \left(\frac{\alpha}{\mu + \alpha}\right)^\alpha\left(\frac{\mu}{\mu + \alpha}\right)^y$$ where $\mu$ is the parameter of the Poisson distribution (the mean, and variance too!) and $\alpha$ is the shape parameter of the gamma.
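As a quick, purely illustrative sanity check of the first pmf above (using nothing beyond SciPy), we can compare it against `scipy.stats.nbinom`, which, as discussed right below, counts failures rather than trials and therefore needs a shift by $k$.
from scipy.stats import nbinom
from scipy.special import comb

k, p, y = 3, 0.5, 7                            # y = total number of trials, so y >= k
manual = comb(y - 1, y - k) * (1 - p) ** (y - k) * p ** k
via_scipy = nbinom.pmf(y - k, k, p)            # scipy works with the y - k failures
print(manual, via_scipy)                       # both should be ~0.1171875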
import arviz as az import bambi as bmb import matplotlib.pyplot as plt import numpy as np import pandas as pd from scipy.stats import nbinom az.style.use("arviz-darkgrid") import warnings warnings.simplefilter(action='ignore', category=FutureWarning)
_____no_output_____
MIT
docs/notebooks/negative_binomial.ipynb
PsychoinformaticsLab/bambi
In SciPy, the definition of the negative binomial distribution differs a little from the one in our introduction. They define $Y$ = Number of failures until k successes and then $y$ starts at 0. In the following plot, we have the probability of observing $y$ failures before we see $k=3$ successes.
y = np.arange(0, 30) k = 3 p1 = 0.5 p2 = 0.3 fig, ax = plt.subplots(1, 2, figsize=(12, 4), sharey=True) ax[0].bar(y, nbinom.pmf(y, k, p1)) ax[0].set_xticks(np.linspace(0, 30, num=11)) ax[0].set_title(f"k = {k}, p = {p1}") ax[1].bar(y, nbinom.pmf(y, k, p2)) ax[1].set_xticks(np.linspace(0, 30, num=11)) ax[1].set_title(f"k = {k}, p = {p2}") fig.suptitle("Y = Number of failures until k successes", fontsize=16);
_____no_output_____
MIT
docs/notebooks/negative_binomial.ipynb
PsychoinformaticsLab/bambi
For example, when $p=0.5$, the probability of seeing $y=0$ failures before 3 successes (or in other words, the probability of having 3 successes out of 3 trials) is 0.125, and the probability of seeing $y=3$ failures before 3 successes is 0.156.
print(nbinom.pmf(y, k, p1)[0]) print(nbinom.pmf(y, k, p1)[3])
0.12499999999999997 0.15624999999999992
MIT
docs/notebooks/negative_binomial.ipynb
PsychoinformaticsLab/bambi
Finally, if one wants to show this probability mass function as if we are following the first definition of negative binomial distribution we introduced, we just need to shift the whole thing to the right by adding $k$ to the $y$ values.
fig, ax = plt.subplots(1, 2, figsize=(12, 4), sharey=True) ax[0].bar(y + k, nbinom.pmf(y, k, p1)) ax[0].set_xticks(np.linspace(3, 30, num=10)) ax[0].set_title(f"k = {k}, p = {p1}") ax[1].bar(y + k, nbinom.pmf(y, k, p2)) ax[1].set_xticks(np.linspace(3, 30, num=10)) ax[1].set_title(f"k = {k}, p = {p2}") fig.suptitle("Y = Number of trials until k successes", fontsize=16);
_____no_output_____
MIT
docs/notebooks/negative_binomial.ipynb
PsychoinformaticsLab/bambi
Negative binomial in GLM The negative binomial distribution belongs to the exponential family, and the canonical link function is $$g(\mu_i) = \log\left(\frac{\mu_i}{k + \mu_i}\right) = -\log\left(\frac{k}{\mu_i} + 1\right)$$ but it is difficult to interpret. The log link is usually preferred because of the analogy with the Poisson model, and it also tends to give better results. Load and explore Students data This example is based on this [UCLA example](https://stats.idre.ucla.edu/r/dae/negative-binomial-regression/). School administrators study the attendance behavior of high school juniors at two schools. Predictors of the **number of days of absence** include the **type of program** in which the student is enrolled and a **standardized test in math**. We have attendance data on 314 high school juniors. The variables of interest in the dataset are* daysabs: The number of days of absence. It is our response variable.* prog: The type of program. Can be one of 'General', 'Academic', or 'Vocational'.* math: Score in a standardized math test.
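Before loading the data, here is a tiny illustrative sketch (with made-up numbers for the linear predictor) of what the log link implies: the linear predictor lives on the log scale, and the inverse link `exp` maps it back to a strictly positive mean.
eta = np.array([-1.0, 0.0, 1.5])   # hypothetical values of the linear predictor X @ beta
mu = np.exp(eta)                   # inverse of the log link: always a positive mean
print(mu)                          # roughly [0.37, 1.0, 4.48]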
data = pd.read_stata("https://stats.idre.ucla.edu/stat/stata/dae/nb_data.dta") data.head()
_____no_output_____
MIT
docs/notebooks/negative_binomial.ipynb
PsychoinformaticsLab/bambi
We assign categories to the values 1, 2, and 3 of our `"prog"` variable.
data["prog"] = data["prog"].map({1: "General", 2: "Academic", 3: "Vocational"}) data.head()
_____no_output_____
MIT
docs/notebooks/negative_binomial.ipynb
PsychoinformaticsLab/bambi
The Academic program is the most popular program (167/314) and General is the least popular one (40/314)
data["prog"].value_counts()
_____no_output_____
MIT
docs/notebooks/negative_binomial.ipynb
PsychoinformaticsLab/bambi
Let's explore the distributions of math score and days of absence for each of the three programs listed above. The vertical lines indicate the mean values.
fig, ax = plt.subplots(3, 2, figsize=(8, 6), sharex="col") programs = list(data["prog"].unique()) programs.sort() for idx, program in enumerate(programs): # Histogram ax[idx, 0].hist(data[data["prog"] == program]["math"], edgecolor='black', alpha=0.9) ax[idx, 0].axvline(data[data["prog"] == program]["math"].mean(), color="C1") # Barplot days = data[data["prog"] == program]["daysabs"] days_mean = days.mean() days_counts = days.value_counts() values = list(days_counts.index) count = days_counts.values ax[idx, 1].bar(values, count, edgecolor='black', alpha=0.9) ax[idx, 1].axvline(days_mean, color="C1") # Titles ax[idx, 0].set_title(program) ax[idx, 1].set_title(program) plt.setp(ax[-1, 0], xlabel="Math score") plt.setp(ax[-1, 1], xlabel="Days of absence");
_____no_output_____
MIT
docs/notebooks/negative_binomial.ipynb
PsychoinformaticsLab/bambi
The first impression we have is that the distribution of math scores is not equal for any of the programs. It looks right-skewed for students under the Academic program, left-skewed for students under the Vocational program, and roughly uniform for students in the General program (although there's a drop in the highest values). Clearly those in the Vocational program have the highest mean for the math score. On the other hand, the distribution of the days of absence is right-skewed in all cases. Students in the General program present the highest absence mean while the Vocational group is the one that misses the fewest classes on average. Models We are interested in measuring the association of both the type of the program and the math score with the days of absence. It's also of interest to see if the association between math score and days of absence is different in each type of program. In order to answer our questions, we are going to fit and compare two models. The first model uses the type of the program and the math score as predictors. The second model also includes the interaction between these two variables. The score in the math test is going to be standardized in both cases to make things easier for the sampler and save some seconds. A good idea to follow along is to run these models without scaling `math` and compare how long it takes to fit. We are going to use a negative binomial likelihood to model the days of absence. But let's stop here and think about why we use this likelihood. Earlier, we said that the negative binomial distribution arises when our variable represents the number of trials until we get $k$ successes. However, the number of trials is fixed, i.e. the number of school days in a given year is not a random variable. So if we stick to the definition, we could think of two alternative views of this problem* Each of the $n$ days is a trial, and we record whether the student is absent ($y=1$) or not ($y=0$). This corresponds to a binary regression setting, where we could think of logistic regression or something similar. A problem here is that we have the sum of $y$ for a student, but not the $n$.* The whole school year represents the space where events occur and we count how many absences we see in that space for each student. This gives us a Poisson regression setting (count of an event in a given space or time). We also know that when $n$ is large and $p$ is small, the Binomial distribution can be approximated with a Poisson distribution with $\lambda = n * p$. We don't know $n$ exactly in this scenario, but we know it is around 180, and we do know that $p$ is small because you can't skip classes all the time. So both modeling approaches should give similar results. But then, why negative binomial? Can't we just use a Poisson likelihood? Yes, we can. However, using a Poisson likelihood implies that the mean is equal to the variance, and that is usually an unrealistic assumption. If it turns out the variance is either substantially smaller or greater than the mean, the Poisson regression model results in a poor fit. Alternatively, if we use a negative binomial likelihood, the variance is not forced to be equal to the mean, there's more flexibility to handle a given dataset, and consequently, the fit tends to be better.
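As a quick, illustrative check of the overdispersion argument above, we can compare the sample mean and variance of `daysabs` in the data already loaded; if the variance is much larger than the mean, the Poisson assumption of equal mean and variance looks questionable and the negative binomial is the safer choice.
print("mean:", data["daysabs"].mean())
print("variance:", data["daysabs"].var())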
Model 1 $$\log{Y_i} = \beta_1 \text{Academic}_i + \beta_2 \text{General}_i + \beta_3 \text{Vocational}_i + \beta_4 \text{Math_std}_i$$ Model 2 $$\log{Y_i} = \beta_1 \text{Academic}_i + \beta_2 \text{General}_i + \beta_3 \text{Vocational}_i + \beta_4 \text{Math_std}_i + \beta_5 \text{General}_i \cdot \text{Math_std}_i + \beta_6 \text{Vocational}_i \cdot \text{Math_std}_i$$ In both cases we have the following dummy variables $$\text{Academic}_i = \left\{ \begin{array}{ll} 1 & \textrm{if student is under Academic program} \\ 0 & \textrm{other case} \end{array}\right.$$ $$\text{General}_i = \left\{ \begin{array}{ll} 1 & \textrm{if student is under General program} \\ 0 & \textrm{other case} \end{array}\right.$$ $$\text{Vocational}_i = \left\{ \begin{array}{ll} 1 & \textrm{if student is under Vocational program} \\ 0 & \textrm{other case} \end{array}\right.$$ and $Y$ represents the days of absence. So, for example, the first model for a student under the Vocational program reduces to $$\log{Y_i} = \beta_3 + \beta_4 \text{Math_std}_i$$ One last thing to note is that we've decided not to include an intercept term, which is why you don't see any $\beta_0$ above. This choice allows us to represent the effect of each program directly with $\beta_1$, $\beta_2$, and $\beta_3$. Model fit It's very easy to fit these models with Bambi. We just pass a formula describing the terms in the model and Bambi will know how to handle each of them correctly. The `0` on the right-hand side of `~` simply means we don't want the intercept term that is added by default. `scale(math)` tells Bambi we want to standardize `math` before it is included in the model. By default, Bambi uses a log link for negative binomial GLMs. We'll stick to this default here. Model 1
model_additive = bmb.Model("daysabs ~ 0 + prog + scale(math)", data, family="negativebinomial") idata_additive = model_additive.fit()
Auto-assigning NUTS sampler... Initializing NUTS using jitter+adapt_diag... Multiprocess sampling (4 chains in 4 jobs) NUTS: [prog, scale(math), daysabs_alpha]
MIT
docs/notebooks/negative_binomial.ipynb
PsychoinformaticsLab/bambi
Model 2 For this second model we just add `prog:scale(math)` to indicate the interaction. A shorthand would be to use `y ~ 0 + prog*scale(math)`, which uses the **full interaction** operator. In other words, it just means we want to include the interaction between `prog` and `scale(math)` as well as their main effects.
model_interaction = bmb.Model("daysabs ~ 0 + prog + scale(math) + prog:scale(math)", data, family="negativebinomial") idata_interaction = model_interaction.fit()
Auto-assigning NUTS sampler... Initializing NUTS using jitter+adapt_diag... Multiprocess sampling (4 chains in 4 jobs) NUTS: [prog, scale(math), prog:scale(math), daysabs_alpha]
MIT
docs/notebooks/negative_binomial.ipynb
PsychoinformaticsLab/bambi
Explore models The first thing we do is call `az.summary()`. Here we pass the `InferenceData` object that `.fit()` returned. This prints information about the marginal posteriors for each parameter in the model as well as convergence diagnostics.
az.summary(idata_additive) az.summary(idata_interaction)
_____no_output_____
MIT
docs/notebooks/negative_binomial.ipynb
PsychoinformaticsLab/bambi
The information in the two tables above can be visualized in a more concise manner using a forest plot. ArviZ provides us with `plot_forest()`. There we simply pass a list containing the `InferenceData` objects of the models we want to compare.
az.plot_forest( [idata_additive, idata_interaction], model_names=["Additive", "Interaction"], var_names=["prog", "scale(math)"], combined=True, figsize=(8, 4) );
_____no_output_____
MIT
docs/notebooks/negative_binomial.ipynb
PsychoinformaticsLab/bambi
One of the first things one can note when seeing this plot is the similarity between the marginal posteriors. Maybe one can conclude that the variability of the marginal posterior of `scale(math)` is slightly lower in the model that considers the interaction, but the difference is not significant. We can also make conclusions about the association between the program and the math score with the days of absence. First, we see the posterior for the Vocational group is to the left of the posterior for the two other programs, meaning it is associated with fewer absences (as we have seen when first exploring our data). There also seems to be a difference between General and Academic, where we may conclude the students in the General group tend to miss more classes.In addition, the marginal posterior for `math` shows negative values in both cases. This means that students with higher math scores tend to miss fewer classes. Below, we see a forest plot with the posteriors for the coefficients of the interaction effects. Both of them overlap with 0, which means the data does not give much evidence to support there is an interaction effect between program and math score (i.e., the association between math and days of absence is similar for all the programs).
az.plot_forest(idata_interaction, var_names=["prog:scale(math)"], combined=True, figsize=(8, 4)) plt.axvline(0);
_____no_output_____
MIT
docs/notebooks/negative_binomial.ipynb
PsychoinformaticsLab/bambi
Plot predicted mean response We finish this example by showing how we can get predictions for new data and plot the mean response for each program together with credible intervals.
math_score = np.arange(1, 100)

# This function takes a model and an InferenceData object.
# It returns a list of length 3 with predictions for each type of program.
def predict(model, idata):
    predictions = []
    for program in programs:
        new_data = pd.DataFrame({"math": math_score, "prog": [program] * len(math_score)})
        new_idata = model.predict(
            idata,
            data=new_data,
            inplace=False
        )
        prediction = new_idata.posterior.stack(sample=["chain", "draw"])["daysabs_mean"].values
        predictions.append(prediction)

    return predictions

prediction_additive = predict(model_additive, idata_additive)
prediction_interaction = predict(model_interaction, idata_interaction)

mu_additive = [prediction.mean(1) for prediction in prediction_additive]
mu_interaction = [prediction.mean(1) for prediction in prediction_interaction]

fig, ax = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(10, 4))

for idx, program in enumerate(programs):
    ax[0].plot(math_score, mu_additive[idx], label=f"{program}", color=f"C{idx}", lw=2)
    az.plot_hdi(math_score, prediction_additive[idx].T, color=f"C{idx}", ax=ax[0])

    ax[1].plot(math_score, mu_interaction[idx], label=f"{program}", color=f"C{idx}", lw=2)
    az.plot_hdi(math_score, prediction_interaction[idx].T, color=f"C{idx}", ax=ax[1])

ax[0].set_title("Additive")
ax[1].set_title("Interaction")
ax[0].set_xlabel("Math score")
ax[1].set_xlabel("Math score")
ax[0].set_ylim(0, 25)
ax[0].legend(loc="upper right");
_____no_output_____
MIT
docs/notebooks/negative_binomial.ipynb
PsychoinformaticsLab/bambi
As we can see in this plot, the interval for the mean response for the Vocational program does not overlap with the interval for the other two groups, representing the group of students who miss fewer classes. On the right panel we can also see that including interaction terms does not change the slopes significantly because the posterior distributions of these coefficients have a substantial overlap with 0. If you've made it to the end of this notebook and you're still curious about what else you can do with these two models, you're invited to use `az.compare()` to compare the fit of the two models. What do you expect before seeing the plot? Why? Is there anything else you could do to improve the fit of the model? Also, if you're still curious about what this model would have looked like with the Poisson likelihood, you just need to replace `family="negativebinomial"` with `family="poisson"` and then you're ready to compare results!
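If you want a starting point for that comparison, a possible sketch is the following; note that `az.compare()` needs the pointwise log likelihood, which, depending on the Bambi and ArviZ versions you use, may have to be requested explicitly when fitting.
models_dict = {"additive": idata_additive, "interaction": idata_interaction}
df_compare = az.compare(models_dict)   # ranks the models by expected predictive accuracy
az.plot_compare(df_compare);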
%load_ext watermark %watermark -n -u -v -iv -w
Last updated: Wed Jun 01 2022 Python implementation: CPython Python version : 3.9.7 IPython version : 8.3.0 sys : 3.9.7 (default, Sep 16 2021, 13:09:58) [GCC 7.5.0] pandas : 1.4.2 numpy : 1.21.5 arviz : 0.12.1 matplotlib: 3.5.1 bambi : 0.7.1 Watermark: 2.3.0
MIT
docs/notebooks/negative_binomial.ipynb
PsychoinformaticsLab/bambi
Self-Driving Car Engineer Nanodegree Project: **Finding Lane Lines on the Road** ***In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below. Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/!/rubrics/322/view) for this project.---Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image.**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".**--- **The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Tranform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**--- Your output should look something like this (above) after detecting line segments using the helper functions below Your goal is to connect/average/extrapolate line segments to get output like this **Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.** Import Packages
#importing some useful packages import matplotlib.pyplot as plt import matplotlib.image as mpimg import numpy as np import cv2 %matplotlib inline
_____no_output_____
MIT
P1.ipynb
owennottank/CarND-LaneLines-P1
Read in an Image
#reading in an image image = mpimg.imread('test_images/solidWhiteRight.jpg') #printing out some stats and plotting print('This image is:', type(image), 'with dimensions:', image.shape) plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
This image is: <class 'numpy.ndarray'> with dimensions: (540, 960, 3)
MIT
P1.ipynb
owennottank/CarND-LaneLines-P1
Ideas for Lane Detection Pipeline **Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**`cv2.inRange()` for color selection `cv2.fillPoly()` for region selection `cv2.line()` to draw lines on an image given endpoints `cv2.addWeighted()` to coadd / overlay two images `cv2.cvtColor()` to grayscale or change color `cv2.imwrite()` to output images to file `cv2.bitwise_and()` to apply a mask to an image **Check out the OpenCV documentation to learn about these and discover even more awesome functionality!** Helper Functions Below are some helper functions to help get you started. They should look familiar from the lesson!
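Before the helper functions, here is a small illustrative sketch of the first item in that list (the thresholds are just example values and this is not part of the provided helpers): `cv2.inRange()` can keep only near-white pixels before looking for lane lines.
# keep pixels whose R, G and B values are all above ~200 (example threshold)
white_mask = cv2.inRange(image, np.array([200, 200, 200]), np.array([255, 255, 255]))
white_only = cv2.bitwise_and(image, image, mask=white_mask)
plt.imshow(white_only)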
import math def grayscale(img): """Applies the Grayscale transform This will return an image with only one color channel but NOTE: to see the returned image as grayscale (assuming your grayscaled image is called 'gray') you should call plt.imshow(gray, cmap='gray')""" return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) # Or use BGR2GRAY if you read an image with cv2.imread() # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) def canny(img, low_threshold, high_threshold): """Applies the Canny transform""" return cv2.Canny(img, low_threshold, high_threshold) def gaussian_blur(img, kernel_size): """Applies a Gaussian Noise kernel""" return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0) def region_of_interest(img, vertices): """ Applies an image mask. Only keeps the region of the image defined by the polygon formed from `vertices`. The rest of the image is set to black. `vertices` should be a numpy array of integer points. """ #defining a blank mask to start with mask = np.zeros_like(img) #defining a 3 channel or 1 channel color to fill the mask with depending on the input image if len(img.shape) > 2: channel_count = img.shape[2] # i.e. 3 or 4 depending on your image ignore_mask_color = (255,) * channel_count else: ignore_mask_color = 255 #filling pixels inside the polygon defined by "vertices" with the fill color cv2.fillPoly(mask, vertices, ignore_mask_color) #returning the image only where mask pixels are nonzero masked_image = cv2.bitwise_and(img, mask) return masked_image def draw_lines(img, lines, color=[255, 0, 0], thickness=2): """ NOTE: this is the function you might want to use as a starting point once you want to average/extrapolate the line segments you detect to map out the full extent of the lane (going from the result shown in raw-lines-example.mp4 to that shown in P1_example.mp4). Think about things like separating line segments by their slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left line vs. the right line. Then, you can average the position of each of the lines and extrapolate to the top and bottom of the lane. This function draws `lines` with `color` and `thickness`. Lines are drawn on the image inplace (mutates the image). If you want to make the lines semi-transparent, think about combining this function with the weighted_img() function below """ for line in lines: for x1,y1,x2,y2 in line: cv2.line(img, (x1, y1), (x2, y2), color, thickness) def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap): """ `img` should be the output of a Canny transform. Returns an image with hough lines drawn. """ lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap) line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8) draw_lines(line_img, lines) return line_img # Python 3 has support for cool math symbols. def weighted_img(img, initial_img, α=0.8, β=1., γ=0.): """ `img` is the output of the hough_lines(), An image with lines drawn on it. Should be a blank image (all black) with lines drawn on it. `initial_img` should be the image before any processing. The result image is computed as follows: initial_img * α + img * β + γ NOTE: initial_img and img must be the same shape! """ return cv2.addWeighted(initial_img, α, img, β, γ)
_____no_output_____
MIT
P1.ipynb
owennottank/CarND-LaneLines-P1
Test Images Build your pipeline to work on the images in the directory "test_images" **You should make sure your pipeline works well on these images before you try the videos.**
import os list_img = os.listdir("test_images/") os.listdir("test_images/")
_____no_output_____
MIT
P1.ipynb
owennottank/CarND-LaneLines-P1
Build a Lane Finding Pipeline Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
# TODO: Build your pipeline that will draw lane lines on the test_images # then save them to the test_images_output directory. def lane_finding(image): # 1. convert image to grayscale gray = grayscale(image) cv2.imwrite('test_images_output/gray.jpg',gray) # 2. Gaussian smoothing of gray image kernel_size = 5 gray_blur = gaussian_blur(gray,kernel_size) cv2.imwrite('test_images_output/gray_blur.jpg',gray_blur) # 3. canny edge detection low_threshold = 50 high_threshold = 110 edges = canny(gray_blur, low_threshold,high_threshold) cv2.imwrite('test_images_output/edges.jpg',edges) # 4. region selection (masking) imshape = image.shape lb = [0,imshape[0]] rb = [imshape[1],imshape[0]] lu = [400, 350] ru = [600, 350] #vertices = np.array([[(0,imshape[0]),(400, 350), (600, 350), (imshape[1],imshape[0])]], dtype=np.int32) vertices = np.array([[lb,lu, ru, rb]], dtype=np.int32) plt.imshow(image) x = [lb[0], rb[0], ru[0], lu[0],lb[0]] y = [lb[1], rb[1], ru[1], lu[1],lb[1]] plt.plot(x, y, 'b--', lw=2) plt.savefig('test_images_output/region_interest.jpg') masked_edges = region_of_interest(edges, vertices) # 5. Hough transform for lane lines rho = 1 theta = np.pi/180 threshold = 10 min_line_len = 50 max_line_gap = 100 line_image = hough_lines(masked_edges, rho, theta, threshold, min_line_len, max_line_gap) # 6. show lanes in original image lane_image = weighted_img(line_image, image, α=0.8, β=1., γ=0.) plt.imshow(lane_image) return lane_image #lane_image = lane_finding(image) #plt.imshow(lane_image) # output_dir = "test_images_output/" # for img in list_img: # image = mpimg.imread('test_images/'+img) # lane_image = lane_finding(image) # img_name = output_dir + img # status = cv2.imwrite(img_name, cv2.cvtColor(lane_image, cv2.COLOR_RGB2BGR)) # caution: # 1. destination folder must exist, or image cannot be saved! # 2. cv2.imwrite changes RGB channels, which need to be converted, or the saved image has different colors # print("Image written to file-system : ",status)
_____no_output_____
MIT
P1.ipynb
owennottank/CarND-LaneLines-P1
Test on Videos You know what's cooler than drawing lanes over images? Drawing lanes over video! We can test our solution on two provided videos: `solidWhiteRight.mp4` `solidYellowLeft.mp4` **Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.** **If you get an error that looks like this:**```NeedDownloadError: Need ffmpeg exe. You can download it by calling: imageio.plugins.ffmpeg.download()```**Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**
# Import everything needed to edit/save/watch video clips from moviepy.editor import VideoFileClip from IPython.display import HTML def process_image(image): # NOTE: The output you return should be a color image (3 channel) for processing video below # TODO: put your pipeline here, # you should return the final output (image where lines are drawn on lanes) result = lane_finding(image) return result
_____no_output_____
MIT
P1.ipynb
owennottank/CarND-LaneLines-P1
Let's try the one with the solid white lane on the right first ...
white_output = 'test_videos_output/solidWhiteRight.mp4' ## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video ## To do so add .subclip(start_second,end_second) to the end of the line below ## Where start_second and end_second are integer values representing the start and end of the subclip ## You may also uncomment the following line for a subclip of the first 5 seconds ##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5) clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4") white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!! %time white_clip.write_videofile(white_output, audio=False)
t: 0%| | 0/221 [00:00<?, ?it/s, now=None]
MIT
P1.ipynb
owennottank/CarND-LaneLines-P1