path stringlengths 13-17 | screenshot_names sequencelengths 1-873 | code stringlengths 0-40.4k | cell_type stringclasses 1 value |
---|---|---|---|
2001733/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
museums = pd.read_csv('../input/museums.csv').dropna(subset=['Revenue'])
museums = museums[museums.Revenue != 0]
zoos = museums['Revenue'][museums['Museum Type'] == 'ZOO, AQUARIUM, OR WILDLIFE CONSERVATION']
other = museums['Revenue'][museums['Museum Type'] != 'ZOO, AQUARIUM, OR WILDLIFE CONSERVATION']
print('Mean revenue for zoos:')
print(zoos.mean())
print('Mean revenue for others:')
print(other.mean()) | code |
2001733/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
museums = pd.read_csv('../input/museums.csv').dropna(subset=['Revenue'])
museums = museums[museums.Revenue != 0]
museums.head(5) | code |
2001733/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
museums = pd.read_csv('../input/museums.csv').dropna(subset=['Revenue'])
museums = museums[museums.Revenue != 0]
zoos = museums['Revenue'][museums['Museum Type'] == 'ZOO, AQUARIUM, OR WILDLIFE CONSERVATION']
other = museums['Revenue'][museums['Museum Type'] != 'ZOO, AQUARIUM, OR WILDLIFE CONSERVATION']
other.describe() | code |
2001733/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
museums = pd.read_csv('../input/museums.csv').dropna(subset=['Revenue'])
museums = museums[museums.Revenue != 0]
museums['Museum Type'].unique() | code |
2025203/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
air = pd.read_csv('../input/air_reserve.csv')
airvisit = pd.read_csv('../input/air_visit_data.csv')
airvisit['visit_date'] = pd.to_datetime(airvisit['visit_date']).dt.date
airinfo = pd.read_csv('../input/air_store_info.csv')
airinfo.head() | code |
2025203/cell_25 | [
"text_plain_output_1.png"
] | from mpl_toolkits.basemap import Basemap
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
air = pd.read_csv('../input/air_reserve.csv')
airvisit = pd.read_csv('../input/air_visit_data.csv')
airvisit['visit_date'] = pd.to_datetime(airvisit['visit_date']).dt.date
airinfo = pd.read_csv('../input/air_store_info.csv')
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
kmeans = KMeans(n_clusters=10, random_state=0).fit(airinfo[['longitude', 'latitude']])
airinfo['cluster'] = kmeans.predict(airinfo[['longitude', 'latitude']])
from mpl_toolkits.basemap import Basemap
m = Basemap(projection='aeqd',width=2000000,height=2000000, lat_0=37.5, lon_0=138.2)
cx = [c[0] for c in kmeans.cluster_centers_]
cy = [c[1] for c in kmeans.cluster_centers_]
cm = plt.get_cmap('gist_rainbow')
colors = [cm(2.*i/10) for i in range(10)]
colored = [colors[k] for k in airinfo['cluster']]
f,axa = plt.subplots(1,1,figsize=(15,16))
m.drawcoastlines()
m.fillcontinents(color='lightgray',lake_color='aqua',zorder=1)
m.scatter(airinfo.longitude.values,airinfo.latitude.values,color=colored,s=20,alpha=1,zorder=999,latlon=True)
m.scatter(cx,cy,color='Black',s=50,alpha=1,latlon=True,zorder=9999)
plt.setp(axa.get_yticklabels(), visible=True)
plt.annotate('Fukuoka', xy=(0.04, 0.32), xycoords='axes fraction',fontsize=20)
plt.annotate('Shikoku', xy=(0.25, 0.25), xycoords='axes fraction',fontsize=20)
plt.annotate('Hiroshima', xy=(0.2, 0.36), xycoords='axes fraction',fontsize=20)
plt.annotate('Osaka', xy=(0.40, 0.30), xycoords='axes fraction',fontsize=20)
plt.annotate('Tokyo', xy=(0.60, 0.4), xycoords='axes fraction',fontsize=20)
plt.annotate('Shizoku', xy=(0.50, 0.32), xycoords='axes fraction',fontsize=20)
for i in range(len(cx)):
xpt,ypt = m(cx[i],cy[i])
plt.annotate(i, (xpt+500,ypt+500),zorder=99999,fontsize=16)
plt.show()
airinfo.head() | code |
2025203/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
air = pd.read_csv('../input/air_reserve.csv')
air.head() | code |
2025203/cell_34 | [
"text_html_output_1.png"
] | from mpl_toolkits.basemap import Basemap
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
air = pd.read_csv('../input/air_reserve.csv')
airvisit = pd.read_csv('../input/air_visit_data.csv')
airvisit['visit_date'] = pd.to_datetime(airvisit['visit_date']).dt.date
airinfo = pd.read_csv('../input/air_store_info.csv')
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
kmeans = KMeans(n_clusters=10, random_state=0).fit(airinfo[['longitude', 'latitude']])
airinfo['cluster'] = kmeans.predict(airinfo[['longitude', 'latitude']])
from mpl_toolkits.basemap import Basemap
m = Basemap(projection='aeqd',width=2000000,height=2000000, lat_0=37.5, lon_0=138.2)
cx = [c[0] for c in kmeans.cluster_centers_]
cy = [c[1] for c in kmeans.cluster_centers_]
cm = plt.get_cmap('gist_rainbow')
colors = [cm(2.*i/10) for i in range(10)]
colored = [colors[k] for k in airinfo['cluster']]
f,axa = plt.subplots(1,1,figsize=(15,16))
m.drawcoastlines()
m.fillcontinents(color='lightgray',lake_color='aqua',zorder=1)
m.scatter(airinfo.longitude.values,airinfo.latitude.values,color=colored,s=20,alpha=1,zorder=999,latlon=True)
m.scatter(cx,cy,color='Black',s=50,alpha=1,latlon=True,zorder=9999)
plt.setp(axa.get_yticklabels(), visible=True)
plt.annotate('Fukuoka', xy=(0.04, 0.32), xycoords='axes fraction',fontsize=20)
plt.annotate('Shikoku', xy=(0.25, 0.25), xycoords='axes fraction',fontsize=20)
plt.annotate('Hiroshima', xy=(0.2, 0.36), xycoords='axes fraction',fontsize=20)
plt.annotate('Osaka', xy=(0.40, 0.30), xycoords='axes fraction',fontsize=20)
plt.annotate('Tokyo', xy=(0.60, 0.4), xycoords='axes fraction',fontsize=20)
plt.annotate('Shizoku', xy=(0.50, 0.32), xycoords='axes fraction',fontsize=20)
for i in range(len(cx)):
xpt,ypt = m(cx[i],cy[i])
plt.annotate(i, (xpt+500,ypt+500),zorder=99999,fontsize=16)
plt.show()
final = pd.merge(airvisit, airinfo).drop(['latitude', 'longitude'], axis=1)
dates = pd.read_csv('../input/date_info.csv')
vdt = pd.to_datetime(final.visit_date)
final['vd'] = vdt.dt.date
final['yday'] = vdt.dt.dayofyear
final['wday'] = vdt.dt.dayofweek
final = final.drop(['vd'], axis=1)
dts = pd.to_datetime(dates.calendar_date)
days = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']
dates['calendar_date'] = pd.to_datetime(dates['calendar_date']).dt.date
dates['dw'] = [days.index(dw) for dw in dates.day_of_week]
final = pd.merge(final, dates, left_on='visit_date', right_on='calendar_date')
sub = pd.read_csv('../input/sample_submission.csv')
sub.head() | code |
2025203/cell_30 | [
"text_html_output_1.png"
] | from mpl_toolkits.basemap import Basemap
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
air = pd.read_csv('../input/air_reserve.csv')
airvisit = pd.read_csv('../input/air_visit_data.csv')
airvisit['visit_date'] = pd.to_datetime(airvisit['visit_date']).dt.date
airinfo = pd.read_csv('../input/air_store_info.csv')
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
kmeans = KMeans(n_clusters=10, random_state=0).fit(airinfo[['longitude', 'latitude']])
airinfo['cluster'] = kmeans.predict(airinfo[['longitude', 'latitude']])
from mpl_toolkits.basemap import Basemap
m = Basemap(projection='aeqd',width=2000000,height=2000000, lat_0=37.5, lon_0=138.2)
cx = [c[0] for c in kmeans.cluster_centers_]
cy = [c[1] for c in kmeans.cluster_centers_]
cm = plt.get_cmap('gist_rainbow')
colors = [cm(2.*i/10) for i in range(10)]
colored = [colors[k] for k in airinfo['cluster']]
f,axa = plt.subplots(1,1,figsize=(15,16))
m.drawcoastlines()
m.fillcontinents(color='lightgray',lake_color='aqua',zorder=1)
m.scatter(airinfo.longitude.values,airinfo.latitude.values,color=colored,s=20,alpha=1,zorder=999,latlon=True)
m.scatter(cx,cy,color='Black',s=50,alpha=1,latlon=True,zorder=9999)
plt.setp(axa.get_yticklabels(), visible=True)
plt.annotate('Fukuoka', xy=(0.04, 0.32), xycoords='axes fraction',fontsize=20)
plt.annotate('Shikoku', xy=(0.25, 0.25), xycoords='axes fraction',fontsize=20)
plt.annotate('Hiroshima', xy=(0.2, 0.36), xycoords='axes fraction',fontsize=20)
plt.annotate('Osaka', xy=(0.40, 0.30), xycoords='axes fraction',fontsize=20)
plt.annotate('Tokyo', xy=(0.60, 0.4), xycoords='axes fraction',fontsize=20)
plt.annotate('Shizoku', xy=(0.50, 0.32), xycoords='axes fraction',fontsize=20)
for i in range(len(cx)):
xpt,ypt = m(cx[i],cy[i])
plt.annotate(i, (xpt+500,ypt+500),zorder=99999,fontsize=16)
plt.show()
final = pd.merge(airvisit, airinfo).drop(['latitude', 'longitude'], axis=1)
dates = pd.read_csv('../input/date_info.csv')
vdt = pd.to_datetime(final.visit_date)
final['vd'] = vdt.dt.date
final['yday'] = vdt.dt.dayofyear
final['wday'] = vdt.dt.dayofweek
final = final.drop(['vd'], axis=1)
dts = pd.to_datetime(dates.calendar_date)
days = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']
dates['calendar_date'] = pd.to_datetime(dates['calendar_date']).dt.date
dates['dw'] = [days.index(dw) for dw in dates.day_of_week]
final = pd.merge(final, dates, left_on='visit_date', right_on='calendar_date')
traindf = final.copy()
traindf = traindf.drop(['air_area_name', 'wday', 'air_store_id', 'visit_date', 'day_of_week', 'calendar_date'], axis=1)
traindf.head() | code |
2025203/cell_33 | [
"text_html_output_1.png"
] | from mpl_toolkits.basemap import Basemap
from sklearn.cluster import KMeans
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
air = pd.read_csv('../input/air_reserve.csv')
airvisit = pd.read_csv('../input/air_visit_data.csv')
airvisit['visit_date'] = pd.to_datetime(airvisit['visit_date']).dt.date
airinfo = pd.read_csv('../input/air_store_info.csv')
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
kmeans = KMeans(n_clusters=10, random_state=0).fit(airinfo[['longitude', 'latitude']])
airinfo['cluster'] = kmeans.predict(airinfo[['longitude', 'latitude']])
from mpl_toolkits.basemap import Basemap
m = Basemap(projection='aeqd',width=2000000,height=2000000, lat_0=37.5, lon_0=138.2)
cx = [c[0] for c in kmeans.cluster_centers_]
cy = [c[1] for c in kmeans.cluster_centers_]
cm = plt.get_cmap('gist_rainbow')
colors = [cm(2.*i/10) for i in range(10)]
colored = [colors[k] for k in airinfo['cluster']]
f,axa = plt.subplots(1,1,figsize=(15,16))
m.drawcoastlines()
m.fillcontinents(color='lightgray',lake_color='aqua',zorder=1)
m.scatter(airinfo.longitude.values,airinfo.latitude.values,color=colored,s=20,alpha=1,zorder=999,latlon=True)
m.scatter(cx,cy,color='Black',s=50,alpha=1,latlon=True,zorder=9999)
plt.setp(axa.get_yticklabels(), visible=True)
plt.annotate('Fukuoka', xy=(0.04, 0.32), xycoords='axes fraction',fontsize=20)
plt.annotate('Shikoku', xy=(0.25, 0.25), xycoords='axes fraction',fontsize=20)
plt.annotate('Hiroshima', xy=(0.2, 0.36), xycoords='axes fraction',fontsize=20)
plt.annotate('Osaka', xy=(0.40, 0.30), xycoords='axes fraction',fontsize=20)
plt.annotate('Tokyo', xy=(0.60, 0.4), xycoords='axes fraction',fontsize=20)
plt.annotate('Shizoku', xy=(0.50, 0.32), xycoords='axes fraction',fontsize=20)
for i in range(len(cx)):
xpt,ypt = m(cx[i],cy[i])
plt.annotate(i, (xpt+500,ypt+500),zorder=99999,fontsize=16)
plt.show()
final = pd.merge(airvisit, airinfo).drop(['latitude', 'longitude'], axis=1)
dates = pd.read_csv('../input/date_info.csv')
vdt = pd.to_datetime(final.visit_date)
final['vd'] = vdt.dt.date
final['yday'] = vdt.dt.dayofyear
final['wday'] = vdt.dt.dayofweek
final = final.drop(['vd'], axis=1)
dts = pd.to_datetime(dates.calendar_date)
days = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']
dates['calendar_date'] = pd.to_datetime(dates['calendar_date']).dt.date
dates['dw'] = [days.index(dw) for dw in dates.day_of_week]
final = pd.merge(final, dates, left_on='visit_date', right_on='calendar_date')
traindf = final.copy()
traindf = traindf.drop(['air_area_name', 'wday', 'air_store_id', 'visit_date', 'day_of_week', 'calendar_date'], axis=1)
reg = GradientBoostingRegressor(n_estimators=100)
scores = cross_val_score(reg, traindf.drop(['visitors'], axis=1), traindf['visitors'])
scores
reg.fit(traindf.drop(['visitors'], axis=1), traindf['visitors']) | code |
2025203/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
air = pd.read_csv('../input/air_reserve.csv')
air.tail() | code |
2025203/cell_26 | [
"text_plain_output_1.png"
] | from mpl_toolkits.basemap import Basemap
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
air = pd.read_csv('../input/air_reserve.csv')
airvisit = pd.read_csv('../input/air_visit_data.csv')
airvisit['visit_date'] = pd.to_datetime(airvisit['visit_date']).dt.date
airinfo = pd.read_csv('../input/air_store_info.csv')
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
kmeans = KMeans(n_clusters=10, random_state=0).fit(airinfo[['longitude', 'latitude']])
airinfo['cluster'] = kmeans.predict(airinfo[['longitude', 'latitude']])
from mpl_toolkits.basemap import Basemap
m = Basemap(projection='aeqd',width=2000000,height=2000000, lat_0=37.5, lon_0=138.2)
cx = [c[0] for c in kmeans.cluster_centers_]
cy = [c[1] for c in kmeans.cluster_centers_]
cm = plt.get_cmap('gist_rainbow')
colors = [cm(2.*i/10) for i in range(10)]
colored = [colors[k] for k in airinfo['cluster']]
f,axa = plt.subplots(1,1,figsize=(15,16))
m.drawcoastlines()
m.fillcontinents(color='lightgray',lake_color='aqua',zorder=1)
m.scatter(airinfo.longitude.values,airinfo.latitude.values,color=colored,s=20,alpha=1,zorder=999,latlon=True)
m.scatter(cx,cy,color='Black',s=50,alpha=1,latlon=True,zorder=9999)
plt.setp(axa.get_yticklabels(), visible=True)
plt.annotate('Fukuoka', xy=(0.04, 0.32), xycoords='axes fraction',fontsize=20)
plt.annotate('Shikoku', xy=(0.25, 0.25), xycoords='axes fraction',fontsize=20)
plt.annotate('Hiroshima', xy=(0.2, 0.36), xycoords='axes fraction',fontsize=20)
plt.annotate('Osaka', xy=(0.40, 0.30), xycoords='axes fraction',fontsize=20)
plt.annotate('Tokyo', xy=(0.60, 0.4), xycoords='axes fraction',fontsize=20)
plt.annotate('Shizoku', xy=(0.50, 0.32), xycoords='axes fraction',fontsize=20)
for i in range(len(cx)):
xpt,ypt = m(cx[i],cy[i])
plt.annotate(i, (xpt+500,ypt+500),zorder=99999,fontsize=16)
plt.show()
final = pd.merge(airvisit, airinfo).drop(['latitude', 'longitude'], axis=1)
final.head() | code |
2025203/cell_2 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2025203/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
air = pd.read_csv('../input/air_reserve.csv')
airvisit = pd.read_csv('../input/air_visit_data.csv')
airvisit['visit_date'] = pd.to_datetime(airvisit['visit_date']).dt.date
len(airvisit['air_store_id'].unique()) | code |
2025203/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
air = pd.read_csv('../input/air_reserve.csv')
airvisit = pd.read_csv('../input/air_visit_data.csv')
airvisit['visit_date'] = pd.to_datetime(airvisit['visit_date']).dt.date
airinfo = pd.read_csv('../input/air_store_info.csv')
len(airinfo['air_genre_name'].unique()) | code |
2025203/cell_32 | [
"text_html_output_1.png"
] | from mpl_toolkits.basemap import Basemap
from sklearn.cluster import KMeans
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
air = pd.read_csv('../input/air_reserve.csv')
airvisit = pd.read_csv('../input/air_visit_data.csv')
airvisit['visit_date'] = pd.to_datetime(airvisit['visit_date']).dt.date
airinfo = pd.read_csv('../input/air_store_info.csv')
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
kmeans = KMeans(n_clusters=10, random_state=0).fit(airinfo[['longitude', 'latitude']])
airinfo['cluster'] = kmeans.predict(airinfo[['longitude', 'latitude']])
from mpl_toolkits.basemap import Basemap
m = Basemap(projection='aeqd',width=2000000,height=2000000, lat_0=37.5, lon_0=138.2)
cx = [c[0] for c in kmeans.cluster_centers_]
cy = [c[1] for c in kmeans.cluster_centers_]
cm = plt.get_cmap('gist_rainbow')
colors = [cm(2.*i/10) for i in range(10)]
colored = [colors[k] for k in airinfo['cluster']]
f,axa = plt.subplots(1,1,figsize=(15,16))
m.drawcoastlines()
m.fillcontinents(color='lightgray',lake_color='aqua',zorder=1)
m.scatter(airinfo.longitude.values,airinfo.latitude.values,color=colored,s=20,alpha=1,zorder=999,latlon=True)
m.scatter(cx,cy,color='Black',s=50,alpha=1,latlon=True,zorder=9999)
plt.setp(axa.get_yticklabels(), visible=True)
plt.annotate('Fukuoka', xy=(0.04, 0.32), xycoords='axes fraction',fontsize=20)
plt.annotate('Shikoku', xy=(0.25, 0.25), xycoords='axes fraction',fontsize=20)
plt.annotate('Hiroshima', xy=(0.2, 0.36), xycoords='axes fraction',fontsize=20)
plt.annotate('Osaka', xy=(0.40, 0.30), xycoords='axes fraction',fontsize=20)
plt.annotate('Tokyo', xy=(0.60, 0.4), xycoords='axes fraction',fontsize=20)
plt.annotate('Shizoku', xy=(0.50, 0.32), xycoords='axes fraction',fontsize=20)
for i in range(len(cx)):
xpt,ypt = m(cx[i],cy[i])
plt.annotate(i, (xpt+500,ypt+500),zorder=99999,fontsize=16)
plt.show()
final = pd.merge(airvisit, airinfo).drop(['latitude', 'longitude'], axis=1)
dates = pd.read_csv('../input/date_info.csv')
vdt = pd.to_datetime(final.visit_date)
final['vd'] = vdt.dt.date
final['yday'] = vdt.dt.dayofyear
final['wday'] = vdt.dt.dayofweek
final = final.drop(['vd'], axis=1)
dts = pd.to_datetime(dates.calendar_date)
days = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']
dates['calendar_date'] = pd.to_datetime(dates['calendar_date']).dt.date
dates['dw'] = [days.index(dw) for dw in dates.day_of_week]
final = pd.merge(final, dates, left_on='visit_date', right_on='calendar_date')
traindf = final.copy()
traindf = traindf.drop(['air_area_name', 'wday', 'air_store_id', 'visit_date', 'day_of_week', 'calendar_date'], axis=1)
reg = GradientBoostingRegressor(n_estimators=100)
scores = cross_val_score(reg, traindf.drop(['visitors'], axis=1), traindf['visitors'])
scores | code |
2025203/cell_28 | [
"text_html_output_1.png"
] | from mpl_toolkits.basemap import Basemap
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
air = pd.read_csv('../input/air_reserve.csv')
airvisit = pd.read_csv('../input/air_visit_data.csv')
airvisit['visit_date'] = pd.to_datetime(airvisit['visit_date']).dt.date
airinfo = pd.read_csv('../input/air_store_info.csv')
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
kmeans = KMeans(n_clusters=10, random_state=0).fit(airinfo[['longitude', 'latitude']])
airinfo['cluster'] = kmeans.predict(airinfo[['longitude', 'latitude']])
from mpl_toolkits.basemap import Basemap
m = Basemap(projection='aeqd',width=2000000,height=2000000, lat_0=37.5, lon_0=138.2)
cx = [c[0] for c in kmeans.cluster_centers_]
cy = [c[1] for c in kmeans.cluster_centers_]
cm = plt.get_cmap('gist_rainbow')
colors = [cm(2.*i/10) for i in range(10)]
colored = [colors[k] for k in airinfo['cluster']]
f,axa = plt.subplots(1,1,figsize=(15,16))
m.drawcoastlines()
m.fillcontinents(color='lightgray',lake_color='aqua',zorder=1)
m.scatter(airinfo.longitude.values,airinfo.latitude.values,color=colored,s=20,alpha=1,zorder=999,latlon=True)
m.scatter(cx,cy,color='Black',s=50,alpha=1,latlon=True,zorder=9999)
plt.setp(axa.get_yticklabels(), visible=True)
plt.annotate('Fukuoka', xy=(0.04, 0.32), xycoords='axes fraction',fontsize=20)
plt.annotate('Shikoku', xy=(0.25, 0.25), xycoords='axes fraction',fontsize=20)
plt.annotate('Hiroshima', xy=(0.2, 0.36), xycoords='axes fraction',fontsize=20)
plt.annotate('Osaka', xy=(0.40, 0.30), xycoords='axes fraction',fontsize=20)
plt.annotate('Tokyo', xy=(0.60, 0.4), xycoords='axes fraction',fontsize=20)
plt.annotate('Shizoku', xy=(0.50, 0.32), xycoords='axes fraction',fontsize=20)
for i in range(len(cx)):
xpt,ypt = m(cx[i],cy[i])
plt.annotate(i, (xpt+500,ypt+500),zorder=99999,fontsize=16)
plt.show()
final = pd.merge(airvisit, airinfo).drop(['latitude', 'longitude'], axis=1)
dates = pd.read_csv('../input/date_info.csv')
vdt = pd.to_datetime(final.visit_date)
final['vd'] = vdt.dt.date
final['yday'] = vdt.dt.dayofyear
final['wday'] = vdt.dt.dayofweek
final = final.drop(['vd'], axis=1)
dts = pd.to_datetime(dates.calendar_date)
days = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']
dates['calendar_date'] = pd.to_datetime(dates['calendar_date']).dt.date
dates['dw'] = [days.index(dw) for dw in dates.day_of_week]
final = pd.merge(final, dates, left_on='visit_date', right_on='calendar_date')
final.head() | code |
2025203/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
air = pd.read_csv('../input/air_reserve.csv')
airvisit = pd.read_csv('../input/air_visit_data.csv')
airvisit['visit_date'] = pd.to_datetime(airvisit['visit_date']).dt.date
airvisit.tail() | code |
2025203/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
air = pd.read_csv('../input/air_reserve.csv')
airvisit = pd.read_csv('../input/air_visit_data.csv')
airvisit['visit_date'] = pd.to_datetime(airvisit['visit_date']).dt.date
airinfo = pd.read_csv('../input/air_store_info.csv')
len(airinfo['air_store_id'].unique()) | code |
2025203/cell_22 | [
"text_html_output_1.png"
] | from mpl_toolkits.basemap import Basemap
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
air = pd.read_csv('../input/air_reserve.csv')
airvisit = pd.read_csv('../input/air_visit_data.csv')
airvisit['visit_date'] = pd.to_datetime(airvisit['visit_date']).dt.date
airinfo = pd.read_csv('../input/air_store_info.csv')
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
kmeans = KMeans(n_clusters=10, random_state=0).fit(airinfo[['longitude', 'latitude']])
airinfo['cluster'] = kmeans.predict(airinfo[['longitude', 'latitude']])
from mpl_toolkits.basemap import Basemap
m = Basemap(projection='aeqd', width=2000000, height=2000000, lat_0=37.5, lon_0=138.2)
cx = [c[0] for c in kmeans.cluster_centers_]
cy = [c[1] for c in kmeans.cluster_centers_]
cm = plt.get_cmap('gist_rainbow')
colors = [cm(2.0 * i / 10) for i in range(10)]
colored = [colors[k] for k in airinfo['cluster']]
f, axa = plt.subplots(1, 1, figsize=(15, 16))
m.drawcoastlines()
m.fillcontinents(color='lightgray', lake_color='aqua', zorder=1)
m.scatter(airinfo.longitude.values, airinfo.latitude.values, color=colored, s=20, alpha=1, zorder=999, latlon=True)
m.scatter(cx, cy, color='Black', s=50, alpha=1, latlon=True, zorder=9999)
plt.setp(axa.get_yticklabels(), visible=True)
plt.annotate('Fukuoka', xy=(0.04, 0.32), xycoords='axes fraction', fontsize=20)
plt.annotate('Shikoku', xy=(0.25, 0.25), xycoords='axes fraction', fontsize=20)
plt.annotate('Hiroshima', xy=(0.2, 0.36), xycoords='axes fraction', fontsize=20)
plt.annotate('Osaka', xy=(0.4, 0.3), xycoords='axes fraction', fontsize=20)
plt.annotate('Tokyo', xy=(0.6, 0.4), xycoords='axes fraction', fontsize=20)
plt.annotate('Shizoku', xy=(0.5, 0.32), xycoords='axes fraction', fontsize=20)
for i in range(len(cx)):
xpt, ypt = m(cx[i], cy[i])
plt.annotate(i, (xpt + 500, ypt + 500), zorder=99999, fontsize=16)
plt.show() | code |
2025203/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
air = pd.read_csv('../input/air_reserve.csv')
len(air['air_store_id'].unique()) | code |
2025203/cell_27 | [
"image_output_1.png"
] | from mpl_toolkits.basemap import Basemap
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
air = pd.read_csv('../input/air_reserve.csv')
airvisit = pd.read_csv('../input/air_visit_data.csv')
airvisit['visit_date'] = pd.to_datetime(airvisit['visit_date']).dt.date
airinfo = pd.read_csv('../input/air_store_info.csv')
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
kmeans = KMeans(n_clusters=10, random_state=0).fit(airinfo[['longitude', 'latitude']])
airinfo['cluster'] = kmeans.predict(airinfo[['longitude', 'latitude']])
from mpl_toolkits.basemap import Basemap
m = Basemap(projection='aeqd',width=2000000,height=2000000, lat_0=37.5, lon_0=138.2)
cx = [c[0] for c in kmeans.cluster_centers_]
cy = [c[1] for c in kmeans.cluster_centers_]
cm = plt.get_cmap('gist_rainbow')
colors = [cm(2.*i/10) for i in range(10)]
colored = [colors[k] for k in airinfo['cluster']]
f,axa = plt.subplots(1,1,figsize=(15,16))
m.drawcoastlines()
m.fillcontinents(color='lightgray',lake_color='aqua',zorder=1)
m.scatter(airinfo.longitude.values,airinfo.latitude.values,color=colored,s=20,alpha=1,zorder=999,latlon=True)
m.scatter(cx,cy,color='Black',s=50,alpha=1,latlon=True,zorder=9999)
plt.setp(axa.get_yticklabels(), visible=True)
plt.annotate('Fukuoka', xy=(0.04, 0.32), xycoords='axes fraction',fontsize=20)
plt.annotate('Shikoku', xy=(0.25, 0.25), xycoords='axes fraction',fontsize=20)
plt.annotate('Hiroshima', xy=(0.2, 0.36), xycoords='axes fraction',fontsize=20)
plt.annotate('Osaka', xy=(0.40, 0.30), xycoords='axes fraction',fontsize=20)
plt.annotate('Tokyo', xy=(0.60, 0.4), xycoords='axes fraction',fontsize=20)
plt.annotate('Shizoku', xy=(0.50, 0.32), xycoords='axes fraction',fontsize=20)
for i in range(len(cx)):
xpt,ypt = m(cx[i],cy[i])
plt.annotate(i, (xpt+500,ypt+500),zorder=99999,fontsize=16)
plt.show()
final = pd.merge(airvisit, airinfo).drop(['latitude', 'longitude'], axis=1)
dates = pd.read_csv('../input/date_info.csv')
vdt = pd.to_datetime(final.visit_date)
final['vd'] = vdt.dt.date
final['yday'] = vdt.dt.dayofyear
final['wday'] = vdt.dt.dayofweek
final = final.drop(['vd'], axis=1)
dts = pd.to_datetime(dates.calendar_date)
days = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']
dates['calendar_date'] = pd.to_datetime(dates['calendar_date']).dt.date
dates['dw'] = [days.index(dw) for dw in dates.day_of_week]
final = pd.merge(final, dates, left_on='visit_date', right_on='calendar_date')
dates.head() | code |
2025203/cell_37 | [
"text_plain_output_1.png"
] | from mpl_toolkits.basemap import Basemap
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
air = pd.read_csv('../input/air_reserve.csv')
airvisit = pd.read_csv('../input/air_visit_data.csv')
airvisit['visit_date'] = pd.to_datetime(airvisit['visit_date']).dt.date
airinfo = pd.read_csv('../input/air_store_info.csv')
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
kmeans = KMeans(n_clusters=10, random_state=0).fit(airinfo[['longitude', 'latitude']])
airinfo['cluster'] = kmeans.predict(airinfo[['longitude', 'latitude']])
from mpl_toolkits.basemap import Basemap
m = Basemap(projection='aeqd',width=2000000,height=2000000, lat_0=37.5, lon_0=138.2)
cx = [c[0] for c in kmeans.cluster_centers_]
cy = [c[1] for c in kmeans.cluster_centers_]
cm = plt.get_cmap('gist_rainbow')
colors = [cm(2.*i/10) for i in range(10)]
colored = [colors[k] for k in airinfo['cluster']]
f,axa = plt.subplots(1,1,figsize=(15,16))
m.drawcoastlines()
m.fillcontinents(color='lightgray',lake_color='aqua',zorder=1)
m.scatter(airinfo.longitude.values,airinfo.latitude.values,color=colored,s=20,alpha=1,zorder=999,latlon=True)
m.scatter(cx,cy,color='Black',s=50,alpha=1,latlon=True,zorder=9999)
plt.setp(axa.get_yticklabels(), visible=True)
plt.annotate('Fukuoka', xy=(0.04, 0.32), xycoords='axes fraction',fontsize=20)
plt.annotate('Shikoku', xy=(0.25, 0.25), xycoords='axes fraction',fontsize=20)
plt.annotate('Hiroshima', xy=(0.2, 0.36), xycoords='axes fraction',fontsize=20)
plt.annotate('Osaka', xy=(0.40, 0.30), xycoords='axes fraction',fontsize=20)
plt.annotate('Tokyo', xy=(0.60, 0.4), xycoords='axes fraction',fontsize=20)
plt.annotate('Shizoku', xy=(0.50, 0.32), xycoords='axes fraction',fontsize=20)
for i in range(len(cx)):
xpt,ypt = m(cx[i],cy[i])
plt.annotate(i, (xpt+500,ypt+500),zorder=99999,fontsize=16)
plt.show()
final = pd.merge(airvisit, airinfo).drop(['latitude', 'longitude'], axis=1)
dates = pd.read_csv('../input/date_info.csv')
vdt = pd.to_datetime(final.visit_date)
final['vd'] = vdt.dt.date
final['yday'] = vdt.dt.dayofyear
final['wday'] = vdt.dt.dayofweek
final = final.drop(['vd'], axis=1)
dts = pd.to_datetime(dates.calendar_date)
days = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']
dates['calendar_date'] = pd.to_datetime(dates['calendar_date']).dt.date
dates['dw'] = [days.index(dw) for dw in dates.day_of_week]
final = pd.merge(final, dates, left_on='visit_date', right_on='calendar_date')
sub = pd.read_csv('../input/sample_submission.csv')
base = pd.to_datetime('2017-04-23')
date_list = pd.date_range(base, periods=39)
k = 1
datedf = pd.DataFrame({'key': k, 'date': date_list})
ids = airinfo.air_store_id
k = 1
ids = pd.DataFrame({'key': k, 'air_store_id': ids})
testdf = pd.merge(ids, datedf, on='key')
testdf['date'] = pd.to_datetime(testdf['date']).dt.date
testdf['yday'] = pd.to_datetime(testdf['date']).dt.dayofyear
finalt = pd.merge(testdf, airinfo).drop(['air_area_name', 'latitude', 'longitude'], axis=1)
finalt = pd.merge(finalt, dates, left_on='date', right_on='calendar_date').drop(['day_of_week', 'calendar_date'], axis=1)
finalt = finalt.drop(['date', 'key'], axis=1)
finalt.head() | code |
2031459/cell_4 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
test_df.head() | code |
2031459/cell_7 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train = train_df[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', 'SaleCondition']]
test = test_df[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', 'SaleCondition']]
train.head() | code |
2031459/cell_18 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import learning_curve
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train = train_df[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', 'SaleCondition']]
test = test_df[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', 'SaleCondition']]
farms = [train, test]
whole_data = pd.concat(farms)
whole_data = whole_data.replace(np.nan, whole_data.mean()).head(5)
from sklearn.model_selection import train_test_split
X = whole_data[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch']]
y = whole_data[['SaleCondition']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=1, train_sizes=np.linspace(0.1, 1.0, 5)):
plt.figure(figsize=(10, 6))
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel('Training examples')
plt.ylabel('Score')
train_sizes, train_scores, test_scores = learning_curve(estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color='r')
plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color='g')
plt.plot(train_sizes, train_scores_mean, 'o-', color='r', label='Training score')
plt.plot(train_sizes, test_scores_mean, 'o-', color='g', label='Cross-validation score')
plt.legend(loc='best')
return plt | code |
2031459/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train = train_df[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', 'SaleCondition']]
test = test_df[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', 'SaleCondition']]
test.head() | code |
2031459/cell_15 | [
"text_html_output_1.png"
] | from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train = train_df[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', 'SaleCondition']]
test = test_df[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', 'SaleCondition']]
farms = [train, test]
whole_data = pd.concat(farms)
whole_data = whole_data.replace(np.nan, whole_data.mean()).head(5)
from sklearn.model_selection import train_test_split
X = whole_data[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch']]
y = whole_data[['SaleCondition']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)
X_std = sc.transform(X)
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
knn = KNeighborsClassifier(n_neighbors=3, weights='uniform')
knn.fit(X_train_std, y_train)
print(metrics.classification_report(y_test, knn.predict(X_test_std)))
print(metrics.confusion_matrix(y_test, knn.predict(X_test_std), labels=['Normal', 'Abnorml'])) | code |
2031459/cell_16 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train = train_df[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', 'SaleCondition']]
test = test_df[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', 'SaleCondition']]
farms = [train, test]
whole_data = pd.concat(farms)
whole_data = whole_data.replace(np.nan, whole_data.mean()).head(5)
from sklearn.model_selection import train_test_split
X = whole_data[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch']]
y = whole_data[['SaleCondition']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)
X_std = sc.transform(X)
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(X_train_std, y_train) | code |
2031459/cell_3 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df.head() | code |
2031459/cell_17 | [
"text_html_output_1.png"
] | from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train = train_df[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', 'SaleCondition']]
test = test_df[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', 'SaleCondition']]
farms = [train, test]
whole_data = pd.concat(farms)
whole_data = whole_data.replace(np.nan, whole_data.mean()).head(5)
from sklearn.model_selection import train_test_split
X = whole_data[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch']]
y = whole_data[['SaleCondition']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)
X_std = sc.transform(X)
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
knn = KNeighborsClassifier(n_neighbors=3, weights='uniform')
knn.fit(X_train_std, y_train)
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(X_train_std, y_train)
print(metrics.classification_report(y_test, gnb.predict(X_test_std)))
print(metrics.confusion_matrix(y_test, gnb.predict(X_test_std), labels=['Normal', 'Abnorml'])) | code |
2031459/cell_14 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
import numpy as np
import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train = train_df[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', 'SaleCondition']]
test = test_df[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', 'SaleCondition']]
farms = [train, test]
whole_data = pd.concat(farms)
whole_data = whole_data.replace(np.nan, whole_data.mean()).head(5)
from sklearn.model_selection import train_test_split
X = whole_data[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch']]
y = whole_data[['SaleCondition']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)
X_std = sc.transform(X)
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
knn = KNeighborsClassifier(n_neighbors=3, weights='uniform')
knn.fit(X_train_std, y_train) | code |
2031459/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train = train_df[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', 'SaleCondition']]
test = test_df[['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'MasVnrArea', 'BsmtFinSF1', 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', 'SaleCondition']]
farms = [train, test]
whole_data = pd.concat(farms)
len(whole_data) | code |
2031459/cell_5 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
train_df.describe() | code |
17111741/cell_4 | [
"text_plain_output_1.png"
] | from PIL import Image
train_cat = '../input/training_set/training_set/cats'
train_dog = '../input/training_set/training_set/dogs'
test_cat = '../input/test_set/test_set/cats'
test_dog = '../input/test_set/test_set/dogs'
image_size = 128
Image.open(train_cat + '/' + 'cat.1.jpg')
Image.open('../input/training_set/training_set/dogs/dog.1.jpg') | code |
17111741/cell_1 | [
"text_plain_output_1.png"
] | import os
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
import warnings
warnings.filterwarnings('ignore')
import os
print(os.listdir('../input')) | code |
17111741/cell_3 | [
"image_output_1.png"
] | from PIL import Image
train_cat = '../input/training_set/training_set/cats'
train_dog = '../input/training_set/training_set/dogs'
test_cat = '../input/test_set/test_set/cats'
test_dog = '../input/test_set/test_set/dogs'
image_size = 128
Image.open(train_cat + '/' + 'cat.1.jpg') | code |
17111741/cell_12 | [
"image_output_1.png"
] | from PIL import Image
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np # linear algebra
train_cat = '../input/training_set/training_set/cats'
train_dog = '../input/training_set/training_set/dogs'
test_cat = '../input/test_set/test_set/cats'
test_dog = '../input/test_set/test_set/dogs'
image_size = 128
Image.open(train_cat + '/' + 'cat.1.jpg')
Image.open('../input/training_set/training_set/dogs/dog.1.jpg')
minh, minv = (100000, 100000)
for p in range(1, 4001):
pic = Image.open(train_cat + '/' + 'cat.' + str(p) + '.jpg')
if pic.size[0] < minh:
minh = pic.size[0]
if pic.size[1] < minv:
minv = pic.size[1]
for u in range(1, 4001):
pic = Image.open(train_dog + '/' + 'dog.' + str(u) + '.jpg')
if pic.size[0] < minh:
minh = pic.size[0]
if pic.size[1] < minv:
minv = pic.size[1]
train_cat_list = []
for p in range(1, 4001):
image = Image.open(train_cat + '/' + 'cat.' + str(p) + '.jpg')
image = image.resize((minh, minv))
image = image.convert(mode='L')
train_cat_list.append(image)
train_dog_list = []
for u in range(1, 4001):
image = Image.open(train_dog + '/' + 'dog.' + str(u) + '.jpg')
image = image.resize((minh, minv))
image = image.convert(mode='L')
train_dog_list.append(image)
x = np.empty((4001 + 4001, minh * minv))
index = 0
for pl in train_cat_list:
x[index] = np.array(pl).reshape(minh * minv)
index += 1
for ul in train_dog_list:
x[index] = np.array(ul).reshape(minh * minv)
index += 1
p = np.ones(4001)
u = np.zeros(4001)
y = np.concatenate((p, u), axis=0).reshape(x.shape[0], 1)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
x_train = x_train.T
x_test = x_test.T
y_train = y_train.T
y_test = y_test.T
x_train = (x_train - np.min(x_train)) / (np.max(x_train) - np.min(x_train))
x_test = (x_test - np.min(x_test)) / (np.max(x_test) - np.min(x_test))
def initialize_weights_and_bias(dimension):
w = np.full((dimension, 1), 0.01)
b = 0.0
return (w, b)
def sigmoid(z):
y_head = 1 / (1 + np.exp(-z))
return y_head
def forward_backward_propagation(w, b, x_train, y_train):
z = np.dot(w.T, x_train) + b
y_head = sigmoid(z)
loss = -y_train * np.log(y_head) - (1 - y_train) * np.log(1 - y_head)
cost = np.sum(loss) / x_train.shape[1]
derivative_weight = np.dot(x_train, (y_head - y_train).T) / x_train.shape[1]
derivative_bias = np.sum(y_head - y_train) / x_train.shape[1]
gradients = {'derivative_weight': derivative_weight, 'derivative_bias': derivative_bias}
return (cost, gradients)
def update(w, b, x_train, y_train, learning_rate, number_of_iterarion):
cost_list = []
cost_list2 = []
index = []
for i in range(number_of_iterarion):
cost, gradients = forward_backward_propagation(w, b, x_train, y_train)
cost_list.append(cost)
w = w - learning_rate * gradients['derivative_weight']
b = b - learning_rate * gradients['derivative_bias']
if i % 250 == 0:
cost_list2.append(cost)
index.append(i)
parameters = {'weight': w, 'bias': b}
plt.xticks(index, rotation='vertical')
return (parameters, gradients, cost_list)
def predict(w, b, x_test):
z = sigmoid(np.dot(w.T, x_test) + b)
Y_prediction = np.zeros((1, x_test.shape[1]))
for i in range(z.shape[1]):
if z[0, i] <= 0.5:
Y_prediction[0, i] = 0
else:
Y_prediction[0, i] = 1
return Y_prediction
def logistic_regression(x_train, y_train, x_test, y_test, learning_rate, num_iterations):
dimension = x_train.shape[0]
w, b = initialize_weights_and_bias(dimension)
parameters, gradients, cost_list = update(w, b, x_train, y_train, learning_rate, num_iterations)
y_prediction_test = predict(parameters['weight'], parameters['bias'], x_test)
y_prediction_train = predict(parameters['weight'], parameters['bias'], x_train)
logistic_regression(x_train, y_train, x_test, y_test, learning_rate=0.002, num_iterations=5001) | code |
17111741/cell_5 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | from PIL import Image
train_cat = '../input/training_set/training_set/cats'
train_dog = '../input/training_set/training_set/dogs'
test_cat = '../input/test_set/test_set/cats'
test_dog = '../input/test_set/test_set/dogs'
image_size = 128
Image.open(train_cat + '/' + 'cat.1.jpg')
Image.open('../input/training_set/training_set/dogs/dog.1.jpg')
minh, minv = (100000, 100000)
for p in range(1, 4001):
pic = Image.open(train_cat + '/' + 'cat.' + str(p) + '.jpg')
if pic.size[0] < minh:
minh = pic.size[0]
if pic.size[1] < minv:
minv = pic.size[1]
for u in range(1, 4001):
pic = Image.open(train_dog + '/' + 'dog.' + str(u) + '.jpg')
if pic.size[0] < minh:
minh = pic.size[0]
if pic.size[1] < minv:
minv = pic.size[1]
print(minh)
print(minv) | code |
49124471/cell_55 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
import nltk
import re
import string
def clean_text(text):
text = text.lower().strip()
text = ' '.join([w for w in text.split() if len(w) > 2])
text = re.sub('\\[.*?\\]', '', text)
text = re.sub('https?://\\S+|www\\.\\S+', '', text)
text = re.sub('<.*?>+', '', text)
text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
text = re.sub('\n', '', text)
text = re.sub('\\w*\\d\\w*', '', text)
return text
tokenizer = nltk.tokenize.RegexpTokenizer('\\w+')
train['comment'] = train['comment'].apply(tokenizer.tokenize)
def remove_stopwords(word_tokens):
stop_words = set(stopwords.words('english'))
filtered_sentence = [w for w in word_tokens if not w in stop_words]
filtered_sentence = []
for w in word_tokens:
if w not in stop_words:
filtered_sentence.append(w)
return filtered_sentence
train_word = train.explode('comment')
word_all_rate = train_word.comment.value_counts(ascending=True)
word_all_rate = word_all_rate[word_all_rate > 10]
word_rate_1 = train_word.loc[(train_word['rating'] >= 0) & (train_word['rating'] <= 1.999)]
word_rate_1 = word_rate_1.comment.value_counts(ascending=True)
word_rate_1 = word_rate_1[word_rate_1 > 10]
word_rate_2 = train_word.loc[(train_word['rating'] >= 2) & (train_word['rating'] <= 2.999)]
word_rate_2 = word_rate_2.comment.value_counts(ascending=True)
word_rate_2 = word_rate_2[word_rate_2 > 10]
word_rate_3 = train_word.loc[(train_word['rating'] >= 3) & (train_word['rating'] <= 3.999)]
word_rate_3 = word_rate_3.comment.value_counts(ascending=True)
word_rate_3 = word_rate_3[word_rate_3 > 10]
word_rate_4 = train_word.loc[(train_word['rating'] >= 4) & (train_word['rating'] <= 4.999)]
word_rate_4 = word_rate_4.comment.value_counts(ascending=True)
word_rate_4 = word_rate_4[word_rate_4 > 10]
word_rate_5 = train_word.loc[(train_word['rating'] >= 5) & (train_word['rating'] <= 5.999)]
word_rate_5 = word_rate_5.comment.value_counts(ascending=True)
word_rate_5 = word_rate_5[word_rate_5 > 10]
word_rate_6 = train_word.loc[(train_word['rating'] >= 6) & (train_word['rating'] <= 6.999)]
word_rate_6 = word_rate_6.comment.value_counts(ascending=True)
word_rate_6 = word_rate_6[word_rate_6 > 10]
word_rate_7 = train_word.loc[(train_word['rating'] >= 7) & (train_word['rating'] <= 7.999)]
word_rate_7 = word_rate_7.comment.value_counts(ascending=True)
word_rate_7 = word_rate_7[word_rate_7 > 10]
word_rate_8 = train_word.loc[(train_word['rating'] >= 8) & (train_word['rating'] <= 8.999)]
word_rate_8 = word_rate_8.comment.value_counts(ascending=True)
word_rate_8 = word_rate_8[word_rate_8 > 10]
word_rate_9 = train_word.loc[(train_word['rating'] >= 9) & (train_word['rating'] <= 9.999)]
word_rate_9 = word_rate_9.comment.value_counts(ascending=True)
word_rate_9 = word_rate_9[word_rate_9 > 10]
word_rate_10 = train_word.loc[train_word['rating'] >= 10]
word_rate_10 = word_rate_10.comment.value_counts(ascending=True)
word_rate_10 = word_rate_10[word_rate_10 > 10]
word_rate_list = [word_rate_1, word_rate_2, word_rate_3, word_rate_4, word_rate_5, word_rate_6, word_rate_7, word_rate_8, word_rate_9, word_rate_10]
rate = ['1_', '2_', '3_', '4_', '5_', '6_', '7_', '8_', '9_', '10_', 'predict']
def naive_bayes(text, word_all_rate, word_rate_1, smoothing):
if smoothing != True:
if (text in word_all_rate) & (text in word_rate_1):
return word_rate_1[text] / word_rate_1.size
else:
return 0
elif (text in word_all_rate) & (text in word_rate_1):
return (word_rate_1[text] + 1) / (word_rate_1.size + 10)
else:
return 1 / (word_rate_1.size + 10)
def predict_rate(df, word_all_rate, word_rate_list, rate):
df['comment'] = df['comment'].apply(tokenizer.tokenize)
df['comment'] = df['comment'].apply(remove_stopwords)
exploded = df.explode('comment')
for i in range(10):
exploded[i + 1] = exploded['comment'].apply(lambda x: naive_bayes(x, word_all_rate, word_rate_list[i - 1], 1))
for i in df.index:
ff = exploded.loc[exploded.index == i].prod()
max_ = -1
position = 0
for j, k in zip(range(10), rate):
df.loc[df.index == i, k] = ff[j + 1]
if max_ < ff[j + 1]:
max_ = ff[j + 1]
position = j + 1
df.loc[df.index == i, 'predict'] = position
return df
test_reduce_num = test[:500]
test_reduce_num = predict_rate(test_reduce_num, word_all_rate, word_rate_list, rate)
accuracy = test_reduce_num.loc[test_reduce_num['correct'] == 1].shape[0] / test_reduce_num.shape[0]
print('accuracy = ', accuracy, '\n\n\n') | code |
49124471/cell_29 | [
"text_plain_output_1.png"
] | train_word = train.explode('comment')
word_all_rate = train_word.comment.value_counts(ascending=True)
word_all_rate = word_all_rate[word_all_rate > 10]
word_all_rate | code |
49124471/cell_52 | [
"text_html_output_1.png"
] | from nltk.corpus import stopwords
import nltk
import re
import string
def clean_text(text):
text = text.lower().strip()
text = ' '.join([w for w in text.split() if len(w) > 2])
text = re.sub('\\[.*?\\]', '', text)
text = re.sub('https?://\\S+|www\\.\\S+', '', text)
text = re.sub('<.*?>+', '', text)
text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
text = re.sub('\n', '', text)
text = re.sub('\\w*\\d\\w*', '', text)
return text
tokenizer = nltk.tokenize.RegexpTokenizer('\\w+')
train['comment'] = train['comment'].apply(tokenizer.tokenize)
def remove_stopwords(word_tokens):
stop_words = set(stopwords.words('english'))
filtered_sentence = [w for w in word_tokens if not w in stop_words]
filtered_sentence = []
for w in word_tokens:
if w not in stop_words:
filtered_sentence.append(w)
return filtered_sentence
train_word = train.explode('comment')
word_all_rate = train_word.comment.value_counts(ascending=True)
word_all_rate = word_all_rate[word_all_rate > 10]
word_rate_1 = train_word.loc[(train_word['rating'] >= 0) & (train_word['rating'] <= 1.999)]
word_rate_1 = word_rate_1.comment.value_counts(ascending=True)
word_rate_1 = word_rate_1[word_rate_1 > 10]
word_rate_2 = train_word.loc[(train_word['rating'] >= 2) & (train_word['rating'] <= 2.999)]
word_rate_2 = word_rate_2.comment.value_counts(ascending=True)
word_rate_2 = word_rate_2[word_rate_2 > 10]
word_rate_3 = train_word.loc[(train_word['rating'] >= 3) & (train_word['rating'] <= 3.999)]
word_rate_3 = word_rate_3.comment.value_counts(ascending=True)
word_rate_3 = word_rate_3[word_rate_3 > 10]
word_rate_4 = train_word.loc[(train_word['rating'] >= 4) & (train_word['rating'] <= 4.999)]
word_rate_4 = word_rate_4.comment.value_counts(ascending=True)
word_rate_4 = word_rate_4[word_rate_4 > 10]
word_rate_5 = train_word.loc[(train_word['rating'] >= 5) & (train_word['rating'] <= 5.999)]
word_rate_5 = word_rate_5.comment.value_counts(ascending=True)
word_rate_5 = word_rate_5[word_rate_5 > 10]
word_rate_6 = train_word.loc[(train_word['rating'] >= 6) & (train_word['rating'] <= 6.999)]
word_rate_6 = word_rate_6.comment.value_counts(ascending=True)
word_rate_6 = word_rate_6[word_rate_6 > 10]
word_rate_7 = train_word.loc[(train_word['rating'] >= 7) & (train_word['rating'] <= 7.999)]
word_rate_7 = word_rate_7.comment.value_counts(ascending=True)
word_rate_7 = word_rate_7[word_rate_7 > 10]
word_rate_8 = train_word.loc[(train_word['rating'] >= 8) & (train_word['rating'] <= 8.999)]
word_rate_8 = word_rate_8.comment.value_counts(ascending=True)
word_rate_8 = word_rate_8[word_rate_8 > 10]
word_rate_9 = train_word.loc[(train_word['rating'] >= 9) & (train_word['rating'] <= 9.999)]
word_rate_9 = word_rate_9.comment.value_counts(ascending=True)
word_rate_9 = word_rate_9[word_rate_9 > 10]
word_rate_10 = train_word.loc[train_word['rating'] >= 10]
word_rate_10 = word_rate_10.comment.value_counts(ascending=True)
word_rate_10 = word_rate_10[word_rate_10 > 10]
word_rate_list = [word_rate_1, word_rate_2, word_rate_3, word_rate_4, word_rate_5, word_rate_6, word_rate_7, word_rate_8, word_rate_9, word_rate_10]
rate = ['1_', '2_', '3_', '4_', '5_', '6_', '7_', '8_', '9_', '10_', 'predict']
def naive_bayes(text, word_all_rate, word_rate_1, smoothing):
if smoothing != True:
if (text in word_all_rate) & (text in word_rate_1):
return word_rate_1[text] / word_rate_1.size
else:
return 0
elif (text in word_all_rate) & (text in word_rate_1):
return (word_rate_1[text] + 1) / (word_rate_1.size + 10)
else:
return 1 / (word_rate_1.size + 10)
def predict_rate(df, word_all_rate, word_rate_list, rate):
df['comment'] = df['comment'].apply(tokenizer.tokenize)
df['comment'] = df['comment'].apply(remove_stopwords)
exploded = df.explode('comment')
for i in range(10):
exploded[i + 1] = exploded['comment'].apply(lambda x: naive_bayes(x, word_all_rate, word_rate_list[i - 1], 1))
for i in df.index:
ff = exploded.loc[exploded.index == i].prod()
max_ = -1
position = 0
for j, k in zip(range(10), rate):
df.loc[df.index == i, k] = ff[j + 1]
if max_ < ff[j + 1]:
max_ = ff[j + 1]
position = j + 1
df.loc[df.index == i, 'predict'] = position
return df
test_reduce_num = test[:500]
test_reduce_num = predict_rate(test_reduce_num, word_all_rate, word_rate_list, rate)
test_reduce_num | code |
49124471/cell_64 | [
"text_plain_output_1.png"
] | !pip install wordcloud | code |
49124471/cell_68 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from nltk.corpus import stopwords
import nltk
import pandas as pd
import re
import string
import numpy as np
import pandas as pd
import re
import string
import nltk
pd.options.mode.chained_assignment = None
original_data = pd.read_csv('../input/boardgamegeek-reviews/bgg-15m-reviews.csv')
comment_rate = pd.DataFrame(original_data, columns=['comment', 'rating']).dropna()
def clean_text(text):
text = text.lower().strip()
text = ' '.join([w for w in text.split() if len(w) > 2])
text = re.sub('\\[.*?\\]', '', text)
text = re.sub('https?://\\S+|www\\.\\S+', '', text)
text = re.sub('<.*?>+', '', text)
text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
text = re.sub('\n', '', text)
text = re.sub('\\w*\\d\\w*', '', text)
return text
tokenizer = nltk.tokenize.RegexpTokenizer('\\w+')
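# 'train' is assumed to be defined in an earlier notebook cell as the training split of comment_rate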
train['comment'] = train['comment'].apply(tokenizer.tokenize)
def remove_stopwords(word_tokens):
stop_words = set(stopwords.words('english'))
filtered_sentence = [w for w in word_tokens if not w in stop_words]
filtered_sentence = []
for w in word_tokens:
if w not in stop_words:
filtered_sentence.append(w)
return filtered_sentence
train_word = train.explode('comment')
word_all_rate = train_word.comment.value_counts(ascending=True)
word_all_rate = word_all_rate[word_all_rate > 10]
word_rate_1 = train_word.loc[(train_word['rating'] >= 0) & (train_word['rating'] <= 1.999)]
word_rate_1 = word_rate_1.comment.value_counts(ascending=True)
word_rate_1 = word_rate_1[word_rate_1 > 10]
word_rate_2 = train_word.loc[(train_word['rating'] >= 2) & (train_word['rating'] <= 2.999)]
word_rate_2 = word_rate_2.comment.value_counts(ascending=True)
word_rate_2 = word_rate_2[word_rate_2 > 10]
word_rate_3 = train_word.loc[(train_word['rating'] >= 3) & (train_word['rating'] <= 3.999)]
word_rate_3 = word_rate_3.comment.value_counts(ascending=True)
word_rate_3 = word_rate_3[word_rate_3 > 10]
word_rate_4 = train_word.loc[(train_word['rating'] >= 4) & (train_word['rating'] <= 4.999)]
word_rate_4 = word_rate_4.comment.value_counts(ascending=True)
word_rate_4 = word_rate_4[word_rate_4 > 10]
word_rate_5 = train_word.loc[(train_word['rating'] >= 5) & (train_word['rating'] <= 5.999)]
word_rate_5 = word_rate_5.comment.value_counts(ascending=True)
word_rate_5 = word_rate_5[word_rate_5 > 10]
word_rate_6 = train_word.loc[(train_word['rating'] >= 6) & (train_word['rating'] <= 6.999)]
word_rate_6 = word_rate_6.comment.value_counts(ascending=True)
word_rate_6 = word_rate_6[word_rate_6 > 10]
word_rate_7 = train_word.loc[(train_word['rating'] >= 7) & (train_word['rating'] <= 7.999)]
word_rate_7 = word_rate_7.comment.value_counts(ascending=True)
word_rate_7 = word_rate_7[word_rate_7 > 10]
word_rate_8 = train_word.loc[(train_word['rating'] >= 8) & (train_word['rating'] <= 8.999)]
word_rate_8 = word_rate_8.comment.value_counts(ascending=True)
word_rate_8 = word_rate_8[word_rate_8 > 10]
word_rate_9 = train_word.loc[(train_word['rating'] >= 9) & (train_word['rating'] <= 9.999)]
word_rate_9 = word_rate_9.comment.value_counts(ascending=True)
word_rate_9 = word_rate_9[word_rate_9 > 10]
word_rate_10 = train_word.loc[train_word['rating'] >= 10]
word_rate_10 = word_rate_10.comment.value_counts(ascending=True)
word_rate_10 = word_rate_10[word_rate_10 > 10]
word_rate_list = [word_rate_1, word_rate_2, word_rate_3, word_rate_4, word_rate_5, word_rate_6, word_rate_7, word_rate_8, word_rate_9, word_rate_10]
rate = ['1_', '2_', '3_', '4_', '5_', '6_', '7_', '8_', '9_', '10_', 'predict']
def naive_bayes(text, word_all_rate, word_rate_1, smoothing):
if smoothing != True:
if (text in word_all_rate) & (text in word_rate_1):
return word_rate_1[text] / word_rate_1.size
else:
return 0
elif (text in word_all_rate) & (text in word_rate_1):
return (word_rate_1[text] + 1) / (word_rate_1.size + 10)
else:
return 1 / (word_rate_1.size + 10)
def predict_rate(df, word_all_rate, word_rate_list, rate):
df['comment'] = df['comment'].apply(tokenizer.tokenize)
df['comment'] = df['comment'].apply(remove_stopwords)
exploded = df.explode('comment')
for i in range(10):
exploded[i + 1] = exploded['comment'].apply(lambda x: naive_bayes(x, word_all_rate, word_rate_list[i], 1))
for i in df.index:
ff = exploded.loc[exploded.index == i].prod()
max_ = -1
position = 0
for j, k in zip(range(10), rate):
df.loc[df.index == i, k] = ff[j + 1]
if max_ < ff[j + 1]:
max_ = ff[j + 1]
position = j + 1
df.loc[df.index == i, 'predict'] = position
return df
text = word_all_rate.sort_values(ascending=False)
input_test = input()
df = pd.DataFrame([[input_test]], columns=['comment'])
print('Processing your comment\n\n\n')
result = predict_rate(df, word_all_rate, word_rate_list, rate)
print('The rate prediction for your comment is ', result['predict'][0]) | code |
49124471/cell_66 | [
"image_output_1.png"
] | from wordcloud import WordCloud, STOPWORDS
import re
import string
def clean_text(text):
text = text.lower().strip()
text = ' '.join([w for w in text.split() if len(w) > 2])
text = re.sub('\\[.*?\\]', '', text)
text = re.sub('https?://\\S+|www\\.\\S+', '', text)
text = re.sub('<.*?>+', '', text)
text = re.sub('[%s]' % re.escape(string.punctuation), '', text)
text = re.sub('\n', '', text)
text = re.sub('\\w*\\d\\w*', '', text)
return text
train_word = train.explode('comment')
word_all_rate = train_word.comment.value_counts(ascending=True)
word_all_rate = word_all_rate[word_all_rate > 10]
text = word_all_rate.sort_values(ascending=False)
text = text[:100]
text = text.index.map(str)
listToStr = ' '.join(map(str, text.format()))
from wordcloud import WordCloud, STOPWORDS
wordcloud = WordCloud(width=3000, height=2000, random_state=1, background_color='salmon', colormap='Pastel1', collocations=False, stopwords=STOPWORDS).generate(listToStr)
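# plot_cloud is assumed to be a plotting helper defined in an earlier notebook cell (e.g. plt.imshow with the axis turned off)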
plot_cloud(wordcloud) | code |
32070892/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/russian-passenger-air-service-20072020/russian_passenger_air_service_2.csv')
df_y = df.groupby('Year')
df_a = df.groupby('Airport name').sum()
df_a
df[df['Whole year'] == df['Whole year'].max()]['Airport name'] | code |
32070892/cell_9 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/russian-passenger-air-service-20072020/russian_passenger_air_service_2.csv')
df_y = df.groupby('Year')
df_ys = df_y.sum().drop('Whole year', axis=1)
df_ys
plt.figure(figsize=(12, 10))
sns.heatmap(df_ys / 1000000, annot=True)
plt.title('Tot Pax Number per month') | code |
32070892/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/russian-passenger-air-service-20072020/russian_passenger_air_service_2.csv')
df.head() | code |
32070892/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/russian-passenger-air-service-20072020/russian_passenger_air_service_2.csv')
df.describe().transpose() | code |
32070892/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/russian-passenger-air-service-20072020/russian_passenger_air_service_2.csv')
df_y = df.groupby('Year')
df['Airport name'].nunique() | code |
32070892/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32070892/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/russian-passenger-air-service-20072020/russian_passenger_air_service_2.csv')
df_y = df.groupby('Year')
df_a = df.groupby('Airport name').sum()
df_a
df.iloc[353]
df.iloc[601]
df_a = df.groupby('Airport name')
df_a.head() | code |
32070892/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/russian-passenger-air-service-20072020/russian_passenger_air_service_2.csv')
df_y = df.groupby('Year')
df_ys = df_y.sum().drop('Whole year', axis=1)
df_ys | code |
32070892/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/russian-passenger-air-service-20072020/russian_passenger_air_service_2.csv')
df_y = df.groupby('Year')
df_a = df.groupby('Airport name').sum()
df_a
df_asort = df['Whole year'].sort_values(ascending=False)
df_asort.head() | code |
32070892/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/russian-passenger-air-service-20072020/russian_passenger_air_service_2.csv')
df_y = df.groupby('Year')
df_a = df.groupby('Airport name').sum()
df_a
df.iloc[353] | code |
32070892/cell_17 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/russian-passenger-air-service-20072020/russian_passenger_air_service_2.csv')
df_y = df.groupby('Year')
df_a = df.groupby('Airport name').sum()
df_a
df.iloc[353]
df.iloc[601] | code |
32070892/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/russian-passenger-air-service-20072020/russian_passenger_air_service_2.csv')
df_y = df.groupby('Year')
df_a = df.groupby('Airport name').sum()
df_a
df_a1 = df_a['Whole year'].sort_values(ascending=False)
df_a1.head(10) | code |
32070892/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/russian-passenger-air-service-20072020/russian_passenger_air_service_2.csv')
df_y = df.groupby('Year')
df_a = df.groupby('Airport name').sum()
df_a | code |
32070892/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/russian-passenger-air-service-20072020/russian_passenger_air_service_2.csv')
df.info() | code |
88078658/cell_3 | [
"text_plain_output_1.png"
] | import math
import math
a = 123456
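# floor(log10(a)) + 1 gives the number of decimal digits of a positive integer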
n_digit = math.floor(math.log10(a) + 1)
print(n_digit) | code |
104118983/cell_6 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/polynomial/HeightVsWeight.csv')
X = df.iloc[:, :-1].values
Y = df.iloc[:, -1].values
from sklearn.preprocessing import PolynomialFeatures
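# Expand the single input feature into polynomial terms up to degree 10, then fit ordinary least squares on the expanded design matrix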
poly_reg = PolynomialFeatures(degree=10)
X_poly = poly_reg.fit_transform(X)
lin_reg = LinearRegression()
lin_reg.fit(X_poly, Y) | code |
104118983/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
104118983/cell_7 | [
"image_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
df = pd.read_csv('../input/polynomial/HeightVsWeight.csv')
X = df.iloc[:, :-1].values
Y = df.iloc[:, -1].values
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree=10)
X_poly = poly_reg.fit_transform(X)
lin_reg = LinearRegression()
lin_reg.fit(X_poly, Y)
plt.scatter(X, Y, color='red')
plt.plot(X, lin_reg.predict(poly_reg.fit_transform(X)), color='blue', linewidth='5', alpha=0.7)
plt.title('Age v/s Height')
plt.xlabel('Age')
plt.ylabel('Height')
plt.show() | code |
104118983/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/polynomial/HeightVsWeight.csv')
X = df.iloc[:, :-1].values
Y = df.iloc[:, -1].values
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree=10)
X_poly = poly_reg.fit_transform(X)
lin_reg = LinearRegression()
lin_reg.fit(X_poly, Y)
lin_reg.predict(poly_reg.fit_transform([[15]])) | code |
104118983/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/polynomial/HeightVsWeight.csv')
df.head(6) | code |
128029773/cell_1 | [
"text_plain_output_5.png",
"text_plain_output_4.png",
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | model_path = "/kaggle/working/models/hydra/"
!pip install chardet | code |
129018278/cell_4 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
x_1 = 2 * np.random.rand(100, 1)
x_2 = 50 * np.random.rand(100, 1)
x_3 = 1000 * np.random.rand(100, 1)
y = 3 + 500 * x_1 + 20 * x_2 + x_3
fig, axs = plt.subplots(2, 2)
fig.tight_layout(h_pad=2, w_pad=2)
axs[0, 0].plot(x_1, y, 'k.')
axs[0, 0].set(xlabel='$x_1$', ylabel='y')
axs[0, 1].plot(x_2, y, 'k.')
axs[0, 1].set(xlabel='$x_2$', ylabel='y')
axs[1, 0].plot(x_3, y, 'k.')
axs[1, 0].set(xlabel='$x_3$', ylabel='y')
axs[1, 1].remove()
plt.show() | code |
129018278/cell_7 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
# Create simulated data
x_1 = 2 * np.random.rand(100, 1)
x_2 = 50 * np.random.rand(100, 1)
x_3 = 1000 * np.random.rand(100, 1)
# Calculate target variable based on y = 3 + 500x1 + 20x2 + x3 formula
y = 3 + 500 * x_1 + 20 * x_2 + x_3
# Plot the simulated data
fig, axs = plt.subplots(2, 2)
fig.tight_layout(h_pad=2, w_pad=2)
axs[0, 0].plot(x_1, y, 'k.')
axs[0, 0].set(xlabel='$x_1$', ylabel='y')
axs[0, 1].plot(x_2, y, 'k.')
axs[0, 1].set(xlabel='$x_2$', ylabel='y')
axs[1, 0].plot(x_3, y, 'k.')
axs[1, 0].set(xlabel='$x_3$', ylabel='y')
axs[1, 1].remove()
plt.show()
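# Closed-form least-squares fit via the normal equation: theta = (X^T X)^(-1) X^T y, with a leading column of ones for the intercept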
x_bias = np.ones((100, 1))
X = np.concatenate([x_bias, x_1, x_2, x_3], axis=1)
Xt = X.T
Xt_X_inv = np.linalg.inv(Xt.dot(X))
Xt_y = Xt.dot(y)
theta = Xt_X_inv.dot(Xt_y)
theta | code |
129018278/cell_5 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
# Create simulated data
x_1 = 2 * np.random.rand(100, 1)
x_2 = 50 * np.random.rand(100, 1)
x_3 = 1000 * np.random.rand(100, 1)
# Calculate target variable based on y = 3 + 500x1 + 20x2 + x3 formula
y = 3 + 500 * x_1 + 20 * x_2 + x_3
# Plot the simulated data
fig, axs = plt.subplots(2, 2)
fig.tight_layout(h_pad=2, w_pad=2)
axs[0, 0].plot(x_1, y, 'k.')
axs[0, 0].set(xlabel='$x_1$', ylabel='y')
axs[0, 1].plot(x_2, y, 'k.')
axs[0, 1].set(xlabel='$x_2$', ylabel='y')
axs[1, 0].plot(x_3, y, 'k.')
axs[1, 0].set(xlabel='$x_3$', ylabel='y')
axs[1, 1].remove()
plt.show()
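# Prepend a bias column of ones so the fitted coefficient vector includes an intercept term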
x_bias = np.ones((100, 1))
X = np.concatenate([x_bias, x_1, x_2, x_3], axis=1)
print(X[:5]) | code |
2044063/cell_13 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
plt.figure(figsize=(12, 8))
sns.countplot(data=df, y='username') | code |
2044063/cell_25 | [
"text_html_output_1.png"
] | from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10]
topretweets = df.groupby('username')[['retweets']].sum()
topretweets.sort_values('retweets', ascending=False)[:10]
corpus = ' '.join(df['tweet '])
corpus = corpus.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpus)
plt.axis('off')
mest = df[df['username'] == 'MESTAfrica']
corpu = ' '.join(df['tweet '])
corpu = corpu.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpu)
plt.axis('off')
tony = df[df['username'] == 'TonyElumeluFDN']
corp = ' '.join(df['tweet '])
corp = corp.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corp)
plt.figure(figsize=(12, 15))
plt.imshow(wordcloud)
plt.axis('off')
plt.show() | code |
2044063/cell_4 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
df.head() | code |
2044063/cell_23 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10]
topretweets = df.groupby('username')[['retweets']].sum()
topretweets.sort_values('retweets', ascending=False)[:10]
corpus = ' '.join(df['tweet '])
corpus = corpus.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpus)
plt.axis('off')
mest = df[df['username'] == 'MESTAfrica']
corpu = ' '.join(df['tweet '])
corpu = corpu.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpu)
plt.figure(figsize=(12, 15))
plt.imshow(wordcloud)
plt.axis('off')
plt.show() | code |
2044063/cell_20 | [
"text_html_output_1.png"
] | from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10]
topretweets = df.groupby('username')[['retweets']].sum()
topretweets.sort_values('retweets', ascending=False)[:10]
corpus = ' '.join(df['tweet '])
corpus = corpus.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpus)
plt.figure(figsize=(12, 15))
plt.imshow(wordcloud)
plt.axis('off')
plt.show() | code |
2044063/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
pd.isnull(df).any() | code |
2044063/cell_29 | [
"image_output_1.png"
] | from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10]
topretweets = df.groupby('username')[['retweets']].sum()
topretweets.sort_values('retweets', ascending=False)[:10]
corpus = ' '.join(df['tweet '])
corpus = corpus.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpus)
plt.axis('off')
mest = df[df['username'] == 'MESTAfrica']
corpu = ' '.join(df['tweet '])
corpu = corpu.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpu)
plt.axis('off')
mest[mest['retweets'] == 2157] | code |
2044063/cell_2 | [
"text_html_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from wordcloud import WordCloud, STOPWORDS
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2044063/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
df.describe() | code |
2044063/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10] | code |
2044063/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10]
topretweets = df.groupby('username')[['retweets']].sum()
topretweets.sort_values('retweets', ascending=False)[:10] | code |
2044063/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
df[df['retweets'] == 79537] | code |
2044063/cell_27 | [
"text_html_output_1.png"
] | from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
toptweeps = df.groupby('username')[['tweet ']].count()
toptweeps.sort_values('tweet ', ascending=False)[:10]
topretweets = df.groupby('username')[['retweets']].sum()
topretweets.sort_values('retweets', ascending=False)[:10]
corpus = ' '.join(df['tweet '])
corpus = corpus.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpus)
plt.axis('off')
mest = df[df['username'] == 'MESTAfrica']
corpu = ' '.join(df['tweet '])
corpu = corpu.replace('.', '. ')
wordcloud = WordCloud(stopwords=STOPWORDS, background_color='white', width=2400, height=2000).generate(corpu)
plt.axis('off')
mest.describe() | code |
2044063/cell_5 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_excel('../input/tweets.xlsx', sheet_name='tweets')
df.info() | code |
74040076/cell_11 | [
"image_output_1.png"
] | from matplotlib.colors import ListedColormap
from sklearn.datasets import make_classification, make_blobs,make_gaussian_quantiles, make_circles,make_moons
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np # linear algebra
n_sam = 250
n_fea = 8
n_clas = 2
df = make_classification(n_samples=n_sam, n_features=n_fea, n_classes=n_clas, random_state=10)
df2 = make_blobs(n_samples=n_sam, centers=2, random_state=10)
df3 = make_circles(n_samples=n_sam, random_state=10)
df4 = make_moons(n_samples=n_sam, random_state=10)
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
df1_PCA = pca.fit_transform(df[0])
y = df[1]
df1 = (df1_PCA, y)
datasets = [df1, df2, df3, df4]
from sklearn.svm import SVC
classifiers = [SVC(kernel='linear', C=0.025), SVC(C=100, kernel='poly', degree=4), SVC(gamma=2, C=1)]
names = ['Linear SVM', 'Polynomial SVM', 'RBF SVM']
h = 0.02
from matplotlib.colors import ListedColormap
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
figure = plt.figure(figsize=(15, 9))
i = 1
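# For each dataset: standardize, train/test split, scatter the raw points in the first column, then shade each SVM's decision function over a mesh grid and annotate its test accuracy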
for ds_cnt, ds in enumerate(datasets):
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=42)
x_min, x_max = (X[:, 0].min() - 0.5, X[:, 0].max() + 0.5)
y_min, y_max = (X[:, 1].min() - 0.5, X[:, 1].max() + 0.5)
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
if ds_cnt == 0:
ax.set_title('Input data')
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors='k')
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6, edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
if hasattr(clf, 'decision_function'):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=0.8)
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors='k')
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, edgecolors='k', alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title(name)
ax.text(xx.max() - 0.3, yy.min() + 0.3, ('%.2f' % score).lstrip('0'), size=15, horizontalalignment='right')
i += 1
plt.tight_layout()
plt.show() | code |
104114403/cell_42 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
pima = pd.read_csv('../input/pimacsv/pima.csv')
pima.shape
#Your code (first create a correlation matrix and store it in 'corm' variable, and uncomment lines below)
corm = pima.iloc[:,:-1].corr()
masko = np.zeros_like(corm, dtype = np.bool)
masko[np.triu_indices_from(masko)] = True
fig, ax = plt.subplots(figsize = (10,5))
sns.heatmap(corm, mask = masko, cmap = 'coolwarm', annot=True)
X = pima.loc[:, pima.columns != 'Outcome']
y = pima.loc[:, 'Outcome']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=10)
from sklearn.linear_model import LogisticRegression
logr = LogisticRegression(random_state=0)
logr.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
ypred_train_logr = logr.predict(X_train)
ypred_test_logr = logr.predict(X_test)
print('confusion matrix for training data is : \n', confusion_matrix(y_train, ypred_train_logr), '\n', '\n')
print('confusion_matrix for test data is : \n', confusion_matrix(y_test, ypred_test_logr), '\n') | code |
104114403/cell_21 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
pima = pd.read_csv('../input/pimacsv/pima.csv')
pima.shape
#Your code (first create a correlation matrix and store it in 'corm' variable, and uncomment lines below)
corm = pima.iloc[:,:-1].corr()
masko = np.zeros_like(corm, dtype = np.bool)
masko[np.triu_indices_from(masko)] = True
fig, ax = plt.subplots(figsize = (10,5))
sns.heatmap(corm, mask = masko, cmap = 'coolwarm', annot=True)
X = pima.loc[:, pima.columns != 'Outcome']
X.head() | code |
104114403/cell_25 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
pima = pd.read_csv('../input/pimacsv/pima.csv')
pima.shape
#Your code (first create a correlation matrix and store it in 'corm' variable, and uncomment lines below)
corm = pima.iloc[:,:-1].corr()
masko = np.zeros_like(corm, dtype = np.bool)
masko[np.triu_indices_from(masko)] = True
fig, ax = plt.subplots(figsize = (10,5))
sns.heatmap(corm, mask = masko, cmap = 'coolwarm', annot=True)
X = pima.loc[:, pima.columns != 'Outcome']
y = pima.loc[:, 'Outcome']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=10)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape) | code |
104114403/cell_34 | [
"text_plain_output_1.png"
] | from sklearn.dummy import DummyClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import recall_score, precision_score, accuracy_score
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
pima = pd.read_csv('../input/pimacsv/pima.csv')
pima.shape
#Your code (first create a correlation matrix and store it in 'corm' variable, and uncomment lines below)
corm = pima.iloc[:,:-1].corr()
masko = np.zeros_like(corm, dtype = np.bool)
masko[np.triu_indices_from(masko)] = True
fig, ax = plt.subplots(figsize = (10,5))
sns.heatmap(corm, mask = masko, cmap = 'coolwarm', annot=True)
X = pima.loc[:, pima.columns != 'Outcome']
y = pima.loc[:, 'Outcome']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=10)
from sklearn.dummy import DummyClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import recall_score, precision_score, accuracy_score
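# Majority-class baseline: always predicts the most frequent outcome, giving the accuracy floor a real classifier should beat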
dummy = DummyClassifier(strategy='most_frequent', random_state=0)
dummy.fit(X_train, y_train)
ydummy_train = dummy.predict(X_train)
ydummy_test = dummy.predict(X_test)
print('Accuracy score for DummyClassifier is : \n \n', accuracy_score(y_test, ydummy_test)) | code |
104114403/cell_30 | [
"text_plain_output_1.png"
] | from sklearn.dummy import DummyClassifier
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
pima = pd.read_csv('../input/pimacsv/pima.csv')
pima.shape
#Your code (first create a correlation matrix and store it in 'corm' variable, and uncomment lines below)
corm = pima.iloc[:,:-1].corr()
masko = np.zeros_like(corm, dtype = np.bool)
masko[np.triu_indices_from(masko)] = True
fig, ax = plt.subplots(figsize = (10,5))
sns.heatmap(corm, mask = masko, cmap = 'coolwarm', annot=True)
X = pima.loc[:, pima.columns != 'Outcome']
y = pima.loc[:, 'Outcome']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=10)
from sklearn.dummy import DummyClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import recall_score, precision_score, accuracy_score
dummy = DummyClassifier(strategy='most_frequent', random_state=0)
dummy.fit(X_train, y_train)
ydummy_train = dummy.predict(X_train)
print('Confusion matrix for DummyClassifier is : \n \n', confusion_matrix(y_train, ydummy_train)) | code |
104114403/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
pima = pd.read_csv('../input/pimacsv/pima.csv')
pima.info() | code |
104114403/cell_40 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
pima = pd.read_csv('../input/pimacsv/pima.csv')
pima.shape
#Your code (first create a correlation matrix and store it in 'corm' variable, and uncomment lines below)
corm = pima.iloc[:,:-1].corr()
masko = np.zeros_like(corm, dtype = np.bool)
masko[np.triu_indices_from(masko)] = True
fig, ax = plt.subplots(figsize = (10,5))
sns.heatmap(corm, mask = masko, cmap = 'coolwarm', annot=True)
X = pima.loc[:, pima.columns != 'Outcome']
y = pima.loc[:, 'Outcome']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=10)
from sklearn.linear_model import LogisticRegression
logr = LogisticRegression(random_state=0)
logr.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
ypred_train_logr = logr.predict(X_train)
ypred_test_logr = logr.predict(X_test)
print('First 8 Predictions for training data are: ', ypred_train_logr[:8])
print('First 8 Predictions for test data are: ', ypred_test_logr[:8]) | code |
104114403/cell_29 | [
"text_plain_output_1.png"
] | from sklearn.dummy import DummyClassifier
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
pima = pd.read_csv('../input/pimacsv/pima.csv')
pima.shape
#Your code (first create a correlation matrix and store it in 'corm' variable, and uncomment lines below)
corm = pima.iloc[:,:-1].corr()
masko = np.zeros_like(corm, dtype = np.bool)
masko[np.triu_indices_from(masko)] = True
fig, ax = plt.subplots(figsize = (10,5))
sns.heatmap(corm, mask = masko, cmap = 'coolwarm', annot=True)
X = pima.loc[:, pima.columns != 'Outcome']
y = pima.loc[:, 'Outcome']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=10)
from sklearn.dummy import DummyClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import recall_score, precision_score, accuracy_score
dummy = DummyClassifier(strategy='most_frequent', random_state=0)
dummy.fit(X_train, y_train) | code |
104114403/cell_48 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
from sklearn.metrics import recall_score, precision_score, accuracy_score
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
pima = pd.read_csv('../input/pimacsv/pima.csv')
pima.shape
#Your code (first create a correlation matrix and store it in 'corm' variable, and uncomment lines below)
corm = pima.iloc[:,:-1].corr()
masko = np.zeros_like(corm, dtype = np.bool)
masko[np.triu_indices_from(masko)] = True
fig, ax = plt.subplots(figsize = (10,5))
sns.heatmap(corm, mask = masko, cmap = 'coolwarm', annot=True)
X = pima.loc[:, pima.columns != 'Outcome']
y = pima.loc[:, 'Outcome']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=10)
from sklearn.linear_model import LogisticRegression
logr = LogisticRegression(random_state=0)
logr.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
ypred_train_logr = logr.predict(X_train)
ypred_test_logr = logr.predict(X_test)
print('Accuracy for test data is : \n', accuracy_score(y_test, ypred_test_logr), '\n')
print('Recall for test data is : \n', recall_score(y_test, ypred_test_logr), '\n')
print('Precision for test data is : \n', precision_score(y_test, ypred_test_logr), '\n')
print('f1-score for test data is : \n', f1_score(y_test, ypred_test_logr), '\n') | code |
104114403/cell_50 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
pima = pd.read_csv('../input/pimacsv/pima.csv')
pima.shape
#Your code (first create a correlation matrix and store it in 'corm' variable, and uncomment lines below)
corm = pima.iloc[:,:-1].corr()
masko = np.zeros_like(corm, dtype = np.bool)
masko[np.triu_indices_from(masko)] = True
fig, ax = plt.subplots(figsize = (10,5))
sns.heatmap(corm, mask = masko, cmap = 'coolwarm', annot=True)
X = pima.loc[:, pima.columns != 'Outcome']
y = pima.loc[:, 'Outcome']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=10)
from sklearn.linear_model import LogisticRegression
logr = LogisticRegression(random_state=0)
logr.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
ypred_train_logr = logr.predict(X_train)
ypred_test_logr = logr.predict(X_test)
yprob_test_logr = logr.predict_proba(X_test)
yprob_test_logr[0:5, :].round(3) | code |
104114403/cell_32 | [
"text_plain_output_1.png"
] | from sklearn.dummy import DummyClassifier
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
pima = pd.read_csv('../input/pimacsv/pima.csv')
pima.shape
#Your code (first create a correlation matrix and store it in 'corm' variable, and uncomment lines below)
corm = pima.iloc[:,:-1].corr()
masko = np.zeros_like(corm, dtype = np.bool)
masko[np.triu_indices_from(masko)] = True
fig, ax = plt.subplots(figsize = (10,5))
sns.heatmap(corm, mask = masko, cmap = 'coolwarm', annot=True)
X = pima.loc[:, pima.columns != 'Outcome']
y = pima.loc[:, 'Outcome']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=10)
from sklearn.dummy import DummyClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import recall_score, precision_score, accuracy_score
dummy = DummyClassifier(strategy='most_frequent', random_state=0)
dummy.fit(X_train, y_train)
ydummy_train = dummy.predict(X_train)
ydummy_test = dummy.predict(X_test)
print('Confusion matrix for DummyClassifier is : \n \n', confusion_matrix(y_test, ydummy_test)) | code |
104114403/cell_51 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
from sklearn.metrics import recall_score, precision_score, accuracy_score
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
pima = pd.read_csv('../input/pimacsv/pima.csv')
pima.shape
#Your code (first create a correlation matrix and store it in 'corm' variable, and uncomment lines below)
corm = pima.iloc[:,:-1].corr()
masko = np.zeros_like(corm, dtype = np.bool)
masko[np.triu_indices_from(masko)] = True
fig, ax = plt.subplots(figsize = (10,5))
sns.heatmap(corm, mask = masko, cmap = 'coolwarm', annot=True)
X = pima.loc[:, pima.columns != 'Outcome']
y = pima.loc[:, 'Outcome']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=10)
from sklearn.linear_model import LogisticRegression
logr = LogisticRegression(random_state=0)
logr.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
ypred_train_logr = logr.predict(X_train)
ypred_test_logr = logr.predict(X_test)
yprob_test_logr = logr.predict_proba(X_test)
yprob_test_logr[0:5, :].round(3)
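# Re-score with a 0.2 probability threshold instead of the default 0.5: predict positive whenever P(Outcome=1) > 0.2, trading precision for higher recall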
print('Scores for threshold value of: ', 0.2, '\n')
print('Accuracy for test data is : \n', accuracy_score(y_test, yprob_test_logr[:, 1] > 0.2), '\n')
print('Recall for test data is : \n', recall_score(y_test, yprob_test_logr[:, 1] > 0.2), '\n')
print('Precision for test data is : \n', precision_score(y_test, yprob_test_logr[:, 1] > 0.2), '\n')
print('f1 score for test data is : \n', f1_score(y_test, yprob_test_logr[:, 1] > 0.2), '\n') | code |
104114403/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
pima = pd.read_csv('../input/pimacsv/pima.csv')
pima.shape
corm = pima.iloc[:, :-1].corr()
masko = np.zeros_like(corm, dtype=np.bool)
masko[np.triu_indices_from(masko)] = True
fig, ax = plt.subplots(figsize=(10, 5))
sns.heatmap(corm, mask=masko, cmap='coolwarm', annot=True) | code |
104114403/cell_46 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
from sklearn.metrics import recall_score, precision_score, accuracy_score
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
pima = pd.read_csv('../input/pimacsv/pima.csv')
pima.shape
#Your code (first create a correlation matrix and store it in 'corm' variable, and uncomment lines below)
corm = pima.iloc[:,:-1].corr()
masko = np.zeros_like(corm, dtype = np.bool)
masko[np.triu_indices_from(masko)] = True
fig, ax = plt.subplots(figsize = (10,5))
sns.heatmap(corm, mask = masko, cmap = 'coolwarm', annot=True)
X = pima.loc[:, pima.columns != 'Outcome']
y = pima.loc[:, 'Outcome']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=10)
from sklearn.linear_model import LogisticRegression
logr = LogisticRegression(random_state=0)
logr.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
ypred_train_logr = logr.predict(X_train)
ypred_test_logr = logr.predict(X_test)
print('Accuracy for train data is : \n', accuracy_score(y_train, ypred_train_logr), '\n')
print('Recall for train data is : \n', recall_score(y_train, ypred_train_logr), '\n')
print('Precision for train data is : \n', precision_score(y_train, ypred_train_logr), '\n')
print('f1-score for train data is : \n', f1_score(y_train, ypred_train_logr), '\n') | code |
104114403/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
pima = pd.read_csv('../input/pimacsv/pima.csv')
pima.shape
pima.describe() | code |
104114403/cell_22 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
pima = pd.read_csv('../input/pimacsv/pima.csv')
pima.shape
#Your code (first create a correlation matrix and store it in 'corm' variable, and uncomment lines below)
corm = pima.iloc[:,:-1].corr()
masko = np.zeros_like(corm, dtype = np.bool)
masko[np.triu_indices_from(masko)] = True
fig, ax = plt.subplots(figsize = (10,5))
sns.heatmap(corm, mask = masko, cmap = 'coolwarm', annot=True)
X = pima.loc[:, pima.columns != 'Outcome']
y = pima.loc[:, 'Outcome']
y.head() | code |