path: stringlengths 13-17
screenshot_names: sequencelengths 1-873
code: stringlengths 0-40.4k
cell_type: stringclasses 1 value
2008232/cell_11
[ "image_output_1.png" ]
from mpl_toolkits.basemap import Basemap import matplotlib.pyplot as plt import matplotlib.ticker as ticker import numpy as np import pandas as pd import sqlite3 input = sqlite3.connect('../input/FPA_FOD_20170508.sqlite') df = pd.read_sql_query("SELECT * FROM 'Fires'", input) epoch = pd.to_datetime(0, unit='s').to_julian_date() df.DISCOVERY_DATE = pd.to_datetime(df.DISCOVERY_DATE - epoch, unit='D') df.CONT_DATE = pd.to_datetime(df.CONT_DATE - epoch, unit='D') df.index = pd.to_datetime(df.DISCOVERY_DATE) df_wa = df[df.STATE == 'WA'] # analysis for yearly burn area y=df_wa.FIRE_SIZE.resample('AS').sum().fillna(0) ax = y.plot(kind='bar',figsize=(10,6)) # set xaxis major labels # Make most of the ticklabels empty so the labels don't get too crowded ticklabels = ['']*len(y.index) # Every 4th ticklabel shows the month and day #ticklabels[::5] = [item.strftime('%b %d') for item in y.index[::4]] # Every 12th ticklabel includes the year #ticklabels[::5] = [item.strftime('%b %d\n%Y') for item in y.index[::5]] ticklabels[::1] = [item.strftime('%Y') for item in y.index[::1]] ax.xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels)) plt.gcf().autofmt_xdate() plt.xlabel('Year') plt.ylabel('Acres Burned'); plt.title('Acres Burned by Year'); # Extract the data we're interested in lat = df_wa['LATITUDE'].values lon = df_wa['LONGITUDE'].values fsize = df_wa['FIRE_SIZE'].values # Draw the map background fig = plt.figure(figsize=(17, 10)) m = Basemap(projection='mill',llcrnrlon=-124. ,llcrnrlat=45.3,urcrnrlon=-117 ,urcrnrlat=49.1, resolution = 'h', epsg = 4269) # I do not know how to download the following background image within a Kaggle kernel, so I had to # comment out the command #m.arcgisimage(service='World_Physical_Map', xpixels = 5000, verbose= False) m.drawcoastlines(color='blue') m.drawcountries(color='blue') m.drawstates(color='blue') # scatter plot m.scatter(lon, lat, latlon=True, c=np.log10(fsize), s=fsize*.01, cmap='Set1', alpha=0.5) # create colorbar and legend plt.colorbar(label=r'$\log_{10}({\rm Size Acres})$',fraction=0.02, pad=0.04) plt.clim(3, 7) cause = df_wa.STAT_CAUSE_DESCR.value_counts() fig, ax = plt.subplots(figsize=(10, 10)) ax.pie(x=cause, labels=cause.index, rotatelabels=False, autopct='%.2f%%') plt.title('Fire Cause Distribution')
code
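A note on the epoch arithmetic used throughout the 2008232 cells: the FPA FOD date fields are stored as Julian day numbers, and pd.to_datetime(0, unit='s').to_julian_date() is the Julian day of the Unix epoch, 1970-01-01 (2440587.5). A minimal standalone sketch of the same conversion, with an invented sample value:
import pandas as pd

# Julian day number of the Unix epoch, 1970-01-01 (2440587.5)
epoch = pd.to_datetime(0, unit='s').to_julian_date()

jd = pd.Series([2453021.5])                  # hypothetical Julian day
print(pd.to_datetime(jd - epoch, unit='D'))  # 2004-01-17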
2008232/cell_7
[ "image_output_1.png" ]
from mpl_toolkits.basemap import Basemap import matplotlib.pyplot as plt import matplotlib.ticker as ticker import numpy as np import pandas as pd import sqlite3 input = sqlite3.connect('../input/FPA_FOD_20170508.sqlite') df = pd.read_sql_query("SELECT * FROM 'Fires'", input) epoch = pd.to_datetime(0, unit='s').to_julian_date() df.DISCOVERY_DATE = pd.to_datetime(df.DISCOVERY_DATE - epoch, unit='D') df.CONT_DATE = pd.to_datetime(df.CONT_DATE - epoch, unit='D') df.index = pd.to_datetime(df.DISCOVERY_DATE) df_wa = df[df.STATE == 'WA'] # analysis for yearly burn area y=df_wa.FIRE_SIZE.resample('AS').sum().fillna(0) ax = y.plot(kind='bar',figsize=(10,6)) # set xaxis major labels # Make most of the ticklabels empty so the labels don't get too crowded ticklabels = ['']*len(y.index) # Every 4th ticklabel shows the month and day #ticklabels[::5] = [item.strftime('%b %d') for item in y.index[::4]] # Every 12th ticklabel includes the year #ticklabels[::5] = [item.strftime('%b %d\n%Y') for item in y.index[::5]] ticklabels[::1] = [item.strftime('%Y') for item in y.index[::1]] ax.xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels)) plt.gcf().autofmt_xdate() plt.xlabel('Year') plt.ylabel('Acres Burned'); plt.title('Acres Burned by Year'); lat = df_wa['LATITUDE'].values lon = df_wa['LONGITUDE'].values fsize = df_wa['FIRE_SIZE'].values fig = plt.figure(figsize=(17, 10)) m = Basemap(projection='mill', llcrnrlon=-124.0, llcrnrlat=45.3, urcrnrlon=-117, urcrnrlat=49.1, resolution='h', epsg=4269) m.drawcoastlines(color='blue') m.drawcountries(color='blue') m.drawstates(color='blue') m.scatter(lon, lat, latlon=True, c=np.log10(fsize), s=fsize * 0.01, cmap='Set1', alpha=0.5) plt.colorbar(label='$\\log_{10}({\\rm Size Acres})$', fraction=0.02, pad=0.04) plt.clim(3, 7)
code
2008232/cell_16
[ "image_output_1.png" ]
from mpl_toolkits.basemap import Basemap import matplotlib.pyplot as plt import matplotlib.ticker as ticker import numpy as np import pandas as pd import sqlite3 input = sqlite3.connect('../input/FPA_FOD_20170508.sqlite') df = pd.read_sql_query("SELECT * FROM 'Fires'", input) epoch = pd.to_datetime(0, unit='s').to_julian_date() df.DISCOVERY_DATE = pd.to_datetime(df.DISCOVERY_DATE - epoch, unit='D') df.CONT_DATE = pd.to_datetime(df.CONT_DATE - epoch, unit='D') df.index = pd.to_datetime(df.DISCOVERY_DATE) df_wa = df[df.STATE == 'WA'] # analysis for yearly burn area y=df_wa.FIRE_SIZE.resample('AS').sum().fillna(0) ax = y.plot(kind='bar',figsize=(10,6)) # set xaxis major labels # Make most of the ticklabels empty so the labels don't get too crowded ticklabels = ['']*len(y.index) # Every 4th ticklabel shows the month and day #ticklabels[::5] = [item.strftime('%b %d') for item in y.index[::4]] # Every 12th ticklabel includes the year #ticklabels[::5] = [item.strftime('%b %d\n%Y') for item in y.index[::5]] ticklabels[::1] = [item.strftime('%Y') for item in y.index[::1]] ax.xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels)) plt.gcf().autofmt_xdate() plt.xlabel('Year') plt.ylabel('Acres Burned'); plt.title('Acres Burned by Year'); # Extract the data we're interested in lat = df_wa['LATITUDE'].values lon = df_wa['LONGITUDE'].values fsize = df_wa['FIRE_SIZE'].values # Draw the map background fig = plt.figure(figsize=(17, 10)) m = Basemap(projection='mill',llcrnrlon=-124. ,llcrnrlat=45.3,urcrnrlon=-117 ,urcrnrlat=49.1, resolution = 'h', epsg = 4269) # I do not know how to download the following background image within a Kaggle kernel, so I had to # comment out the command #m.arcgisimage(service='World_Physical_Map', xpixels = 5000, verbose= False) m.drawcoastlines(color='blue') m.drawcountries(color='blue') m.drawstates(color='blue') # scatter plot m.scatter(lon, lat, latlon=True, c=np.log10(fsize), s=fsize*.01, cmap='Set1', alpha=0.5) # create colorbar and legend plt.colorbar(label=r'$\log_{10}({\rm Size Acres})$',fraction=0.02, pad=0.04) plt.clim(3, 7) cause = df_wa.STAT_CAUSE_DESCR.value_counts() # plot pie chart for cause distribution fig,ax = plt.subplots(figsize=(10,10)) ax.pie(x=cause,labels=cause.index,rotatelabels=False, autopct='%.2f%%'); plt.title('Fire Cause Distribution'); # group the cause column into 2-year segments df_wa_cause = df_wa.groupby(pd.Grouper(key='DISCOVERY_DATE', freq='2AS'))['STAT_CAUSE_DESCR'].value_counts() ticklabels = ['1992 - 1993','1994 - 1995','1996 - 1997','1998 - 1999','2000 - 2001','2002 - 2003','2004 - 2005', '2006 - 2007','2008 - 2009','2010 - 2011','2012 - 2013','2014 - 2015'] df_wa_cause # Fire Cause Distribution 2 Year Windows df_wa_cause_us = df_wa_cause.unstack() ax = df_wa_cause_us.plot(kind='bar',x=df_wa_cause_us.index,stacked=True,figsize=(10,6)) plt.title('Fire Cause Distribution 2 Year Window') plt.xlabel('2 Year Window') plt.ylabel('Number of Fires') ax.xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels)) ax.yaxis.grid(False,'minor') # turn off minor tick grid lines ax.yaxis.grid(True,'major') # turn on major tick grid lines; plt.gcf().autofmt_xdate() fig = plt.figure() fig.set_figheight(10) fig.set_figwidth(15) plt.subplots_adjust(hspace=0.5) plt.subplot(211) plt.title('Lightning Caused') plt.xlabel('Fire Size') plt.grid() plt.ylabel('Number of Wildfires') plt.hist(df_wa[df_wa['STAT_CAUSE_DESCR'] == 'Lightning']['FIRE_SIZE'], bins=20, bottom=0.1) plt.semilogy() plt.subplot(212) plt.title('Equipment Use Caused') plt.xlabel('Fire Size')
plt.ylabel('Number of Wildfires') plt.grid() plt.hist(df_wa[df_wa['STAT_CAUSE_DESCR'] == 'Equipment Use']['FIRE_SIZE'], bins=20, bottom=0.1) plt.semilogy()
code
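The 2-year cause breakdown in cell_16 hinges on pd.Grouper plus value_counts and unstack; a toy sketch of the same pattern (data invented for illustration; newer pandas spells the frequency alias '2YS'):
import pandas as pd

df = pd.DataFrame({
    'DISCOVERY_DATE': pd.to_datetime(['1992-05-01', '1993-07-01', '1994-06-01']),
    'STAT_CAUSE_DESCR': ['Lightning', 'Arson', 'Lightning'],
})
counts = (df.groupby(pd.Grouper(key='DISCOVERY_DATE', freq='2AS'))['STAT_CAUSE_DESCR']
            .value_counts()
            .unstack(fill_value=0))
print(counts)  # one row per 2-year window, one column per cause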
2008232/cell_5
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import matplotlib.ticker as ticker import pandas as pd import sqlite3 input = sqlite3.connect('../input/FPA_FOD_20170508.sqlite') df = pd.read_sql_query("SELECT * FROM 'Fires'", input) epoch = pd.to_datetime(0, unit='s').to_julian_date() df.DISCOVERY_DATE = pd.to_datetime(df.DISCOVERY_DATE - epoch, unit='D') df.CONT_DATE = pd.to_datetime(df.CONT_DATE - epoch, unit='D') df.index = pd.to_datetime(df.DISCOVERY_DATE) df_wa = df[df.STATE == 'WA'] y = df_wa.FIRE_SIZE.resample('AS').sum().fillna(0) ax = y.plot(kind='bar', figsize=(10, 6)) ticklabels = [''] * len(y.index) ticklabels[::1] = [item.strftime('%Y') for item in y.index[::1]] ax.xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels)) plt.gcf().autofmt_xdate() plt.xlabel('Year') plt.ylabel('Acres Burned') plt.title('Acres Burned by Year')
code
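cell_5's bar chart rests on resample('AS'), the year-start frequency alias; a compact sketch of the resample step alone (toy data; pandas 2.2+ prefers the spelling 'YS'):
import numpy as np
import pandas as pd

s = pd.Series(np.arange(4.0),
              index=pd.date_range('1992-06-01', periods=4, freq='6MS'))
print(s.resample('AS').sum())  # one total per calendar year, indexed at Jan 1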
16164281/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split, StratifiedKFold import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sn dataset = pd.read_csv('../input/predictnav-beta/dataset_beta.csv') dataset.drop(['ip_hash', 'fecha', 'lang', 'country'], axis=1, inplace=True) X, y = (dataset.iloc[:, 0:-1].values, dataset.iloc[:, -1].values) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) from sklearn.metrics import confusion_matrix import seaborn as sn import matplotlib.pyplot as plt def showConfusionMat(pipe, X_test, y_test): y_pred = pipe.predict(X_test) confmat = confusion_matrix(y_true=y_test, y_pred=y_pred) df_cm = pd.DataFrame(confmat, ['F', 'T'], ['F', 'T']) sn.set(font_scale=1.4) sn.heatmap(df_cm, annot=True, fmt='d') # presumed completion: the dump drops the call that renders df_cm plt.show() target_count = dataset.target.value_counts() print('Class 0:', target_count[0]) print('Class 1:', target_count[1]) print('Proportion:', round(target_count[0] / target_count[1], 2), ': 1') target_count.plot(kind='bar', title='Count (target)')
code
16164281/cell_6
[ "text_plain_output_1.png" ]
from sklearn.decomposition import PCA from sklearn.linear_model import LogisticRegression from sklearn.model_selection import cross_val_score from sklearn.model_selection import train_test_split, StratifiedKFold from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC import numpy as np # linear algebra pipeline_1 = Pipeline([('scl', StandardScaler()), ('pca', PCA(n_components=2)), ('clf', LogisticRegression(random_state=1))]) pipeline_1.fit(X_train, y_train) from sklearn.svm import SVC pipeline_svm = Pipeline([('scl', StandardScaler()), ('clf', SVC(kernel='linear', C=0.05, random_state=1))]) pipeline_svm.fit(X_train, y_train) skf = StratifiedKFold(n_splits=2, shuffle=True, random_state=1) # newer scikit-learn requires shuffle=True when random_state is set resultados = [] for train, test in skf.split(X_train, y_train): pipeline_1.fit(X_train[train], y_train[train]) resultado = pipeline_1.score(X_train[test], y_train[test]) resultados.append(resultado) from sklearn.model_selection import cross_val_score resultados = cross_val_score(estimator=pipeline_1, X=X_train, y=y_train, cv=5, n_jobs=1) print('CV accuracy scores: %s' % resultados) print('CV accuracy: %.3f +/- %.3f' % (np.mean(resultados), np.std(resultados)))
code
16164281/cell_11
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from imblearn.over_sampling import SMOTE from sklearn.decomposition import PCA from sklearn.linear_model import LogisticRegression from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split, StratifiedKFold from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sn dataset = pd.read_csv('../input/predictnav-beta/dataset_beta.csv') dataset.drop(['ip_hash', 'fecha', 'lang', 'country'], axis=1, inplace=True) X, y = (dataset.iloc[:, 0:-1].values, dataset.iloc[:, -1].values) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) from sklearn.metrics import confusion_matrix import seaborn as sn import matplotlib.pyplot as plt def showConfusionMat(pipe, X_test, y_test): y_pred = pipe.predict(X_test) confmat = confusion_matrix(y_true=y_test, y_pred=y_pred) df_cm = pd.DataFrame(confmat, ['F', 'T'], ['F', 'T']) sn.set(font_scale=1.4) pipeline_1 = Pipeline([('scl', StandardScaler()), ('pca', PCA(n_components=2)), ('clf', LogisticRegression(random_state=1))]) pipeline_1.fit(X_train, y_train) from sklearn.svm import SVC pipeline_svm = Pipeline([('scl', StandardScaler()), ('clf', SVC(kernel='linear', C=0.05, random_state=1))]) pipeline_svm.fit(X_train, y_train) skf = StratifiedKFold(n_splits=2, shuffle=True, random_state=1) # newer scikit-learn requires shuffle=True when random_state is set resultados = [] for train, test in skf.split(X_train, y_train): pipeline_1.fit(X_train[train], y_train[train]) resultado = pipeline_1.score(X_train[test], y_train[test]) resultados.append(resultado) from imblearn.over_sampling import SMOTE sm = SMOTE(random_state=1, sampling_strategy=1.0) # 'ratio' was renamed sampling_strategy in imbalanced-learn 0.4 X_train_res, y_train_res = sm.fit_resample(X_train, y_train) # fit_sample was removed in newer imbalanced-learn pipeline_1.fit(X_train_res, y_train_res) print('Result: %.3f' % pipeline_1.score(X_test, y_test)) showConfusionMat(pipeline_1, X_test, y_test) pipeline_svm.fit(X_train_res, y_train_res) print('Result: %.3f' % pipeline_svm.score(X_test, y_test)) showConfusionMat(pipeline_svm, X_test, y_test)
code
16164281/cell_19
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.metrics import confusion_matrix from sklearn.model_selection import GridSearchCV from sklearn.model_selection import train_test_split, StratifiedKFold from sklearn.pipeline import Pipeline import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sn import xgboost as xgb dataset = pd.read_csv('../input/predictnav-beta/dataset_beta.csv') dataset.drop(['ip_hash', 'fecha', 'lang', 'country'], axis=1, inplace=True) X, y = (dataset.iloc[:, 0:-1].values, dataset.iloc[:, -1].values) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) from sklearn.metrics import confusion_matrix import seaborn as sn import matplotlib.pyplot as plt def showConfusionMat(pipe, X_test, y_test): y_pred = pipe.predict(X_test) confmat = confusion_matrix(y_true=y_test, y_pred=y_pred) df_cm = pd.DataFrame(confmat, ['F', 'T'], ['F', 'T']) sn.set(font_scale=1.4) import xgboost as xgb gbm = xgb.XGBClassifier(max_depth=4, n_estimators=300, learning_rate=0.1) gbm.fit(X_train_res, y_train_res) from sklearn.model_selection import GridSearchCV pipe = Pipeline([('gbm', xgb.XGBClassifier())]) param_grid = [{'gbm__max_depth': [3, 4, 5], 'gbm__n_estimators': [250, 300, 350], 'gbm__learning_rate': [0.2, 0.1, 0.5]}] gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring='accuracy', cv=5, n_jobs=-1) gs = gs.fit(X_train_res, y_train_res) showConfusionMat(gs, X_test, y_test)
code
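cell_19 and several later rows in 16164281 read X_train_res / y_train_res, which were produced by the SMOTE cell (cell_11) but are not repeated in every dump row. A hedged reconstruction so such a row runs standalone, using the current imbalanced-learn spellings:
from imblearn.over_sampling import SMOTE

# Rebuild the oversampled training set that gbm.fit() above expects
sm = SMOTE(random_state=1, sampling_strategy=1.0)
X_train_res, y_train_res = sm.fit_resample(X_train, y_train)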
16164281/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd print(os.listdir('../input')) from sklearn.model_selection import train_test_split, StratifiedKFold from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA from sklearn.linear_model import LogisticRegression from sklearn.pipeline import Pipeline
code
16164281/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.decomposition import PCA from sklearn.linear_model import LogisticRegression from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split, StratifiedKFold from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sn dataset = pd.read_csv('../input/predictnav-beta/dataset_beta.csv') dataset.drop(['ip_hash', 'fecha', 'lang', 'country'], axis=1, inplace=True) X, y = (dataset.iloc[:, 0:-1].values, dataset.iloc[:, -1].values) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) from sklearn.metrics import confusion_matrix import seaborn as sn import matplotlib.pyplot as plt def showConfusionMat(pipe, X_test, y_test): y_pred = pipe.predict(X_test) confmat = confusion_matrix(y_true=y_test, y_pred=y_pred) df_cm = pd.DataFrame(confmat, ['F', 'T'], ['F', 'T']) sn.set(font_scale=1.4) pipeline_1 = Pipeline([('scl', StandardScaler()), ('pca', PCA(n_components=2)), ('clf', LogisticRegression(random_state=1))]) pipeline_1.fit(X_train, y_train) from sklearn.svm import SVC pipeline_svm = Pipeline([('scl', StandardScaler()), ('clf', SVC(kernel='linear', C=0.05, random_state=1))]) pipeline_svm.fit(X_train, y_train) skf = StratifiedKFold(n_splits=2, shuffle=True, random_state=1) # newer scikit-learn requires shuffle=True when random_state is set resultados = [] for train, test in skf.split(X_train, y_train): pipeline_1.fit(X_train[train], y_train[train]) resultado = pipeline_1.score(X_train[test], y_train[test]) resultados.append(resultado) print('Training:') print(y_train) print('Test:') print(y_test) showConfusionMat(pipeline_1, X_test, y_test) showConfusionMat(pipeline_svm, X_test, y_test)
code
16164281/cell_18
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_2.png", "image_output_1.png" ]
from sklearn.metrics import confusion_matrix from sklearn.model_selection import GridSearchCV from sklearn.model_selection import train_test_split, StratifiedKFold from sklearn.pipeline import Pipeline import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sn import xgboost as xgb dataset = pd.read_csv('../input/predictnav-beta/dataset_beta.csv') dataset.drop(['ip_hash', 'fecha', 'lang', 'country'], axis=1, inplace=True) X, y = (dataset.iloc[:, 0:-1].values, dataset.iloc[:, -1].values) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) from sklearn.metrics import confusion_matrix import seaborn as sn import matplotlib.pyplot as plt def showConfusionMat(pipe, X_test, y_test): y_pred = pipe.predict(X_test) confmat = confusion_matrix(y_true=y_test, y_pred=y_pred) df_cm = pd.DataFrame(confmat, ['F', 'T'], ['F', 'T']) sn.set(font_scale=1.4) import xgboost as xgb gbm = xgb.XGBClassifier(max_depth=4, n_estimators=300, learning_rate=0.1) gbm.fit(X_train_res, y_train_res) from sklearn.model_selection import GridSearchCV pipe = Pipeline([('gbm', xgb.XGBClassifier())]) param_grid = [{'gbm__max_depth': [3, 4, 5], 'gbm__n_estimators': [250, 300, 350], 'gbm__learning_rate': [0.2, 0.1, 0.5]}] gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring='accuracy', cv=5, n_jobs=-1) gs = gs.fit(X_train_res, y_train_res) print(gs.best_score_) print(gs.best_params_)
code
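Since GridSearchCV defaults to refit=True, the winning parameter combination from cell_18 is already refit on the full resampled training set; a short usage sketch continuing the names above:
best_model = gs.best_estimator_  # the Pipeline refit with gs.best_params_
print('held-out accuracy: %.3f' % best_model.score(X_test, y_test))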
16164281/cell_15
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split, StratifiedKFold import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sn import xgboost as xgb dataset = pd.read_csv('../input/predictnav-beta/dataset_beta.csv') dataset.drop(['ip_hash', 'fecha', 'lang', 'country'], axis=1, inplace=True) X, y = (dataset.iloc[:, 0:-1].values, dataset.iloc[:, -1].values) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) from sklearn.metrics import confusion_matrix import seaborn as sn import matplotlib.pyplot as plt def showConfusionMat(pipe, X_test, y_test): y_pred = pipe.predict(X_test) confmat = confusion_matrix(y_true=y_test, y_pred=y_pred) df_cm = pd.DataFrame(confmat, ['F', 'T'], ['F', 'T']) sn.set(font_scale=1.4) import xgboost as xgb gbm = xgb.XGBClassifier(max_depth=4, n_estimators=300, learning_rate=0.1) gbm.fit(X_train_res, y_train_res) print('Result: %.3f' % gbm.score(X_test, y_test)) showConfusionMat(gbm, X_test, y_test)
code
16164281/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split, StratifiedKFold import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sn import xgboost as xgb dataset = pd.read_csv('../input/predictnav-beta/dataset_beta.csv') dataset.drop(['ip_hash', 'fecha', 'lang', 'country'], axis=1, inplace=True) X, y = (dataset.iloc[:, 0:-1].values, dataset.iloc[:, -1].values) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) from sklearn.metrics import confusion_matrix import seaborn as sn import matplotlib.pyplot as plt def showConfusionMat(pipe, X_test, y_test): y_pred = pipe.predict(X_test) confmat = confusion_matrix(y_true=y_test, y_pred=y_pred) df_cm = pd.DataFrame(confmat, ['F', 'T'], ['F', 'T']) sn.set(font_scale=1.4) target_count = dataset.target.value_counts() import xgboost as xgb gbm = xgb.XGBClassifier(max_depth=4, n_estimators=300, learning_rate=0.1) gbm.fit(X_train_res, y_train_res) print(list(dataset.columns.values[0:-1])) print(X[0]) print(list(gbm.feature_importances_))
code
16164281/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.decomposition import PCA from sklearn.linear_model import LogisticRegression from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC pipeline_1 = Pipeline([('scl', StandardScaler()), ('pca', PCA(n_components=2)), ('clf', LogisticRegression(random_state=1))]) pipeline_1.fit(X_train, y_train) print('Result: %.3f' % pipeline_1.score(X_test, y_test)) from sklearn.svm import SVC pipeline_svm = Pipeline([('scl', StandardScaler()), ('clf', SVC(kernel='linear', C=0.05, random_state=1))]) pipeline_svm.fit(X_train, y_train) print('Result: %.3f' % pipeline_svm.score(X_test, y_test))
code
16164281/cell_14
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split, StratifiedKFold import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sn dataset = pd.read_csv('../input/predictnav-beta/dataset_beta.csv') dataset.drop(['ip_hash', 'fecha', 'lang', 'country'], axis=1, inplace=True) X, y = (dataset.iloc[:, 0:-1].values, dataset.iloc[:, -1].values) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) from sklearn.metrics import confusion_matrix import seaborn as sn import matplotlib.pyplot as plt def showConfusionMat(pipe, X_test, y_test): y_pred = pipe.predict(X_test) confmat = confusion_matrix(y_true=y_test, y_pred=y_pred) df_cm = pd.DataFrame(confmat, ['F', 'T'], ['F', 'T']) sn.set(font_scale=1.4) from sklearn.ensemble import RandomForestClassifier rmfc = RandomForestClassifier(n_estimators=100) rmfc = rmfc.fit(X_train_res, y_train_res) print('Result: %.3f' % rmfc.score(X_test, y_test)) showConfusionMat(rmfc, X_test, y_test)
code
16164281/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.metrics import confusion_matrix from sklearn.model_selection import GridSearchCV from sklearn.model_selection import GridSearchCV from sklearn.model_selection import train_test_split, StratifiedKFold from sklearn.pipeline import Pipeline import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sn import xgboost as xgb dataset = pd.read_csv('../input/predictnav-beta/dataset_beta.csv') dataset.drop(['ip_hash', 'fecha', 'lang', 'country'], axis=1, inplace=True) X, y = (dataset.iloc[:, 0:-1].values, dataset.iloc[:, -1].values) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) from sklearn.metrics import confusion_matrix import seaborn as sn import matplotlib.pyplot as plt def showConfusionMat(pipe, X_test, y_test): y_pred = pipe.predict(X_test) confmat = confusion_matrix(y_true=y_test, y_pred=y_pred) df_cm = pd.DataFrame(confmat, ['F', 'T'], ['F', 'T']) sn.set(font_scale=1.4) import xgboost as xgb gbm = xgb.XGBClassifier(max_depth=4, n_estimators=300, learning_rate=0.1) gbm.fit(X_train_res, y_train_res) from sklearn.model_selection import GridSearchCV pipe = Pipeline([('gbm', xgb.XGBClassifier())]) param_grid = [{'gbm__max_depth': [3, 4, 5], 'gbm__n_estimators': [250, 300, 350], 'gbm__learning_rate': [0.2, 0.1, 0.5]}] gs = GridSearchCV(estimator=pipe, param_grid=param_grid, scoring='accuracy', cv=5, n_jobs=-1) gs = gs.fit(X_train_res, y_train_res) from sklearn.model_selection import GridSearchCV pipe_gbm = Pipeline([('gbm', xgb.XGBClassifier())]) param_grid = [{'gbm__max_depth': [3, 4, 5], 'gbm__n_estimators': [250, 300, 350], 'gbm__learning_rate': [0.2, 0.1, 0.5]}] gs2 = GridSearchCV(estimator=pipe_gbm, param_grid=param_grid, scoring='f1', cv=5, n_jobs=-1) gs2 = gs2.fit(X_train_res, y_train_res) print(gs2.best_score_) print(gs2.best_params_) showConfusionMat(gs2, X_test, y_test)
code
16164281/cell_12
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from imblearn.over_sampling import SMOTE from sklearn.decomposition import PCA from sklearn.linear_model import LogisticRegression from sklearn.metrics import confusion_matrix from sklearn.model_selection import cross_val_score from sklearn.model_selection import train_test_split, StratifiedKFold from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sn dataset = pd.read_csv('../input/predictnav-beta/dataset_beta.csv') dataset.drop(['ip_hash', 'fecha', 'lang', 'country'], axis=1, inplace=True) X, y = (dataset.iloc[:, 0:-1].values, dataset.iloc[:, -1].values) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) from sklearn.metrics import confusion_matrix import seaborn as sn import matplotlib.pyplot as plt def showConfusionMat(pipe, X_test, y_test): y_pred = pipe.predict(X_test) confmat = confusion_matrix(y_true=y_test, y_pred=y_pred) df_cm = pd.DataFrame(confmat, ['F', 'T'], ['F', 'T']) sn.set(font_scale=1.4) pipeline_1 = Pipeline([('scl', StandardScaler()), ('pca', PCA(n_components=2)), ('clf', LogisticRegression(random_state=1))]) pipeline_1.fit(X_train, y_train) from sklearn.svm import SVC pipeline_svm = Pipeline([('scl', StandardScaler()), ('clf', SVC(kernel='linear', C=0.05, random_state=1))]) pipeline_svm.fit(X_train, y_train) skf = StratifiedKFold(n_splits=2, shuffle=True, random_state=1) # newer scikit-learn requires shuffle=True when random_state is set resultados = [] for train, test in skf.split(X_train, y_train): pipeline_1.fit(X_train[train], y_train[train]) resultado = pipeline_1.score(X_train[test], y_train[test]) resultados.append(resultado) from sklearn.model_selection import cross_val_score resultados = cross_val_score(estimator=pipeline_1, X=X_train, y=y_train, cv=5, n_jobs=1) from imblearn.over_sampling import SMOTE sm = SMOTE(random_state=1, sampling_strategy=1.0) # 'ratio' was renamed sampling_strategy in imbalanced-learn 0.4 X_train_res, y_train_res = sm.fit_resample(X_train, y_train) # fit_sample was removed in newer imbalanced-learn pipeline_1.fit(X_train_res, y_train_res) pipeline_svm.fit(X_train_res, y_train_res) from sklearn.model_selection import cross_val_score resultados = cross_val_score(estimator=pipeline_1, X=X_train_res, y=y_train_res, cv=5) print('CV accuracy scores: %s' % resultados) print('CV accuracy: %.3f +/- %.3f' % (np.mean(resultados), np.std(resultados)))
code
16164281/cell_5
[ "text_plain_output_1.png" ]
from sklearn.decomposition import PCA from sklearn.linear_model import LogisticRegression from sklearn.model_selection import train_test_split, StratifiedKFold from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.svm import SVC pipeline_1 = Pipeline([('scl', StandardScaler()), ('pca', PCA(n_components=2)), ('clf', LogisticRegression(random_state=1))]) pipeline_1.fit(X_train, y_train) from sklearn.svm import SVC pipeline_svm = Pipeline([('scl', StandardScaler()), ('clf', SVC(kernel='linear', C=0.05, random_state=1))]) pipeline_svm.fit(X_train, y_train) skf = StratifiedKFold(n_splits=2, shuffle=True, random_state=1) # newer scikit-learn requires shuffle=True when random_state is set resultados = [] for train, test in skf.split(X_train, y_train): pipeline_1.fit(X_train[train], y_train[train]) resultado = pipeline_1.score(X_train[test], y_train[test]) resultados.append(resultado) print(resultado)
code
73067458/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import sklearn as sk import tensorflow as tf import xgboost as xgb X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) X_full.isnull().sum() categorical_features = [cname for cname in X_full.columns if X_full[cname].nunique() <= 15 and X_full[cname].dtype == 'object'] numeric_features = [cname for cname in X_full.columns if X_full[cname].dtype in ['int64', 'float64']] my_features = categorical_features + numeric_features numeric_features.remove('target') ## Correlations correlations = X_full[my_features].corr() f, ax = plt.subplots(figsize=(12, 12)) sns.heatmap(correlations, square=True, cbar=True, annot=True, vmax=.9); ## Box Plot for Outliers fig = plt.figure(figsize=(18,6)) sns.boxplot(data=X_full[numeric_features], orient="h", palette="Set2"); plt.xticks(fontsize= 14) plt.title('Box plot of numerical columns', fontsize=16); plt.xticks(fontsize=14) from scipy import stats def treatoutliers(df=None, columns=None, factor=1.5, method='IQR', treatment='cap'): for column in columns: if method == 'STD': permissable_std = factor * df[column].std() col_mean = df[column].mean() floor, ceil = (col_mean - permissable_std, col_mean + permissable_std) elif method == 'IQR': Q1 = df[column].quantile(0.25) Q3 = df[column].quantile(0.75) IQR = Q3 - Q1 floor, ceil = (Q1 - factor * IQR, Q3 + factor * IQR) if treatment == 'remove': df = df[(df[column] >= floor) & (df[column] <= ceil)] elif treatment == 'cap': df[column] = df[column].clip(floor, ceil) return df for colName in [['target', 'cont0', 'cont6', 'cont8']]: X_full = treatoutliers(df=X_full, columns=colName, treatment='cap') plt.xticks(fontsize=14) fig = plt.figure(figsize=(18, 6)) sns.boxplot(data=X_full[numeric_features], orient='h', palette='Set2') plt.xticks(fontsize=14) plt.title('Box plot of numerical columns after handling Outliers', fontsize=16)
code
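treatoutliers' IQR branch computes the usual Tukey fences, Q1 - 1.5*IQR and Q3 + 1.5*IQR; a worked numeric check with invented values:
import pandas as pd

s = pd.Series([1, 2, 3, 4, 100])
q1, q3 = s.quantile(0.25), s.quantile(0.75)    # 2.0 and 4.0
iqr = q3 - q1                                  # 2.0
floor, ceil = q1 - 1.5 * iqr, q3 + 1.5 * iqr   # -1.0 and 7.0
print(s.clip(floor, ceil).tolist())            # the outlier 100 is capped to 7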
73067458/cell_9
[ "image_output_1.png" ]
import matplotlib as mpl import numpy as np import pandas as pd import seaborn as sns import sklearn as sk import tensorflow as tf import xgboost as xgb X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) X_full.isnull().sum() categorical_features = [cname for cname in X_full.columns if X_full[cname].nunique() <= 15 and X_full[cname].dtype == 'object'] numeric_features = [cname for cname in X_full.columns if X_full[cname].dtype in ['int64', 'float64']] my_features = categorical_features + numeric_features print('categorical_features:', categorical_features) print('numeric_features:', numeric_features) print('my_features:', my_features) numeric_features.remove('target') print('numeric_features minus target column:', numeric_features)
code
73067458/cell_25
[ "image_output_1.png" ]
import matplotlib as mpl import numpy as np import pandas as pd import seaborn as sns import sklearn as sk import tensorflow as tf import xgboost as xgb X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) X_full.isnull().sum() categorical_features = [cname for cname in X_full.columns if X_full[cname].nunique() <= 15 and X_full[cname].dtype == 'object'] numeric_features = [cname for cname in X_full.columns if X_full[cname].dtype in ['int64', 'float64']] my_features = categorical_features + numeric_features numeric_features.remove('target') categorical_features = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() <= 15 and X_train_full[cname].dtype == 'object'] numeric_features = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']] my_features = categorical_features + numeric_features print('categorical_features', categorical_features) print('numeric_features', numeric_features) print('my_features', my_features)
code
73067458/cell_23
[ "image_output_1.png" ]
import matplotlib as mpl import numpy as np import pandas as pd import seaborn as sns import sklearn as sk import tensorflow as tf import xgboost as xgb X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) X_full.isnull().sum() categorical_features = [cname for cname in X_full.columns if X_full[cname].nunique() <= 15 and X_full[cname].dtype == 'object'] numeric_features = [cname for cname in X_full.columns if X_full[cname].dtype in ['int64', 'float64']] my_features = categorical_features + numeric_features numeric_features.remove('target') from scipy import stats def treatoutliers(df=None, columns=None, factor=1.5, method='IQR', treatment='cap'): for column in columns: if method == 'STD': permissable_std = factor * df[column].std() col_mean = df[column].mean() floor, ceil = (col_mean - permissable_std, col_mean + permissable_std) elif method == 'IQR': Q1 = df[column].quantile(0.25) Q3 = df[column].quantile(0.75) IQR = Q3 - Q1 floor, ceil = (Q1 - factor * IQR, Q3 + factor * IQR) if treatment == 'remove': df = df[(df[column] >= floor) & (df[column] <= ceil)] elif treatment == 'cap': df[column] = df[column].clip(floor, ceil) return df for colName in [['target', 'cont0', 'cont6', 'cont8']]: X_full = treatoutliers(df=X_full, columns=colName, treatment='cap') X_full.dropna(axis=0, subset=['target'], inplace=True) y = X_full['target'] X_full.drop(['target'], axis=1, inplace=True) X_full.head()
code
73067458/cell_20
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import sklearn as sk import tensorflow as tf import xgboost as xgb X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) X_full.isnull().sum() categorical_features = [cname for cname in X_full.columns if X_full[cname].nunique() <= 15 and X_full[cname].dtype == 'object'] numeric_features = [cname for cname in X_full.columns if X_full[cname].dtype in ['int64', 'float64']] my_features = categorical_features + numeric_features numeric_features.remove('target') ## Correlations correlations = X_full[my_features].corr() f, ax = plt.subplots(figsize=(12, 12)) sns.heatmap(correlations, square=True, cbar=True, annot=True, vmax=.9); ## Box Plot for Outliers fig = plt.figure(figsize=(18,6)) sns.boxplot(data=X_full[numeric_features], orient="h", palette="Set2"); plt.xticks(fontsize= 14) plt.title('Box plot of numerical columns', fontsize=16); plt.xticks(fontsize=14) from scipy import stats def treatoutliers(df=None, columns=None, factor=1.5, method='IQR', treatment='cap'): for column in columns: if method == 'STD': permissable_std = factor * df[column].std() col_mean = df[column].mean() floor, ceil = (col_mean - permissable_std, col_mean + permissable_std) elif method == 'IQR': Q1 = df[column].quantile(0.25) Q3 = df[column].quantile(0.75) IQR = Q3 - Q1 floor, ceil = (Q1 - factor * IQR, Q3 + factor * IQR) if treatment == 'remove': df = df[(df[column] >= floor) & (df[column] <= ceil)] elif treatment == 'cap': df[column] = df[column].clip(floor, ceil) return df for colName in [['target', 'cont0', 'cont6', 'cont8']]: X_full = treatoutliers(df=X_full, columns=colName, treatment='cap') sns.boxplot(data=X_full[['target']], orient='h', palette='Set2') plt.xticks(fontsize=14) plt.title('Box plot of target column after handling Outliers', fontsize=16)
code
73067458/cell_6
[ "text_plain_output_1.png" ]
import matplotlib as mpl import numpy as np import pandas as pd import seaborn as sns import sklearn as sk import tensorflow as tf import xgboost as xgb X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) X_full.info() print('*' * 100) X_full.isnull().sum()
code
73067458/cell_29
[ "image_output_1.png" ]
import matplotlib as mpl import numpy as np import pandas as pd import seaborn as sns import sklearn as sk import tensorflow as tf import xgboost as xgb X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) X_full.isnull().sum() categorical_features = [cname for cname in X_full.columns if X_full[cname].nunique() <= 15 and X_full[cname].dtype == 'object'] numeric_features = [cname for cname in X_full.columns if X_full[cname].dtype in ['int64', 'float64']] my_features = categorical_features + numeric_features numeric_features.remove('target') categorical_features = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() <= 15 and X_train_full[cname].dtype == 'object'] numeric_features = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']] my_features = categorical_features + numeric_features X_train = X_train_full[my_features] X_valid = X_valid_full[my_features] X_test = X_test_full[my_features] X_train.shape
code
73067458/cell_11
[ "text_html_output_1.png" ]
import matplotlib as mpl import numpy as np import pandas as pd import seaborn as sns import sklearn as sk import tensorflow as tf import xgboost as xgb X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) X_full.isnull().sum() categorical_features = [cname for cname in X_full.columns if X_full[cname].nunique() <= 15 and X_full[cname].dtype == 'object'] numeric_features = [cname for cname in X_full.columns if X_full[cname].dtype in ['int64', 'float64']] my_features = categorical_features + numeric_features numeric_features.remove('target') X_full[numeric_features].hist(figsize=(24, 12))
code
73067458/cell_19
[ "image_output_1.png" ]
import matplotlib as mpl import numpy as np import pandas as pd import seaborn as sns import sklearn as sk import tensorflow as tf import xgboost as xgb X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) X_full.isnull().sum() categorical_features = [cname for cname in X_full.columns if X_full[cname].nunique() <= 15 and X_full[cname].dtype == 'object'] numeric_features = [cname for cname in X_full.columns if X_full[cname].dtype in ['int64', 'float64']] my_features = categorical_features + numeric_features numeric_features.remove('target') from scipy import stats def treatoutliers(df=None, columns=None, factor=1.5, method='IQR', treatment='cap'): for column in columns: if method == 'STD': permissable_std = factor * df[column].std() col_mean = df[column].mean() floor, ceil = (col_mean - permissable_std, col_mean + permissable_std) elif method == 'IQR': Q1 = df[column].quantile(0.25) Q3 = df[column].quantile(0.75) IQR = Q3 - Q1 floor, ceil = (Q1 - factor * IQR, Q3 + factor * IQR) if treatment == 'remove': print(treatment, column) df = df[(df[column] >= floor) & (df[column] <= ceil)] elif treatment == 'cap': print(treatment, column) df[column] = df[column].clip(floor, ceil) return df for colName in [['target', 'cont0', 'cont6', 'cont8']]: X_full = treatoutliers(df=X_full, columns=colName, treatment='cap') X_full.info()
code
73067458/cell_32
[ "text_plain_output_1.png" ]
import numpy as np from sklearn.compose import ColumnTransformer from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.preprocessing import FunctionTransformer, OneHotEncoder, StandardScaler rans = 42 def log_transform(x): return np.log(x + 1) transformer = FunctionTransformer(log_transform) numerical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='mean')), ('scaler', StandardScaler())]) categorical_transformer = Pipeline(steps=[('imputer', SimpleImputer(strategy='most_frequent')), ('onehot', OneHotEncoder(handle_unknown='ignore'))]) preprocessor = ColumnTransformer(transformers=[('num', numerical_transformer, numeric_features), ('cat', categorical_transformer, categorical_features)], remainder='passthrough')
code
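cell_32 defines the log transformer but never wires it into a pipeline; if it was meant for the skewed numeric columns seen in the histograms, one hedged way to slot it in (placement assumed, not shown in the notebook):
numerical_transformer = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='mean')),
    ('log', transformer),         # hypothetical placement of log_transform
    ('scaler', StandardScaler()),
])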
73067458/cell_28
[ "image_output_1.png" ]
import matplotlib as mpl import numpy as np import pandas as pd import seaborn as sns import sklearn as sk import tensorflow as tf import xgboost as xgb X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) X_full.isnull().sum() categorical_features = [cname for cname in X_full.columns if X_full[cname].nunique() <= 15 and X_full[cname].dtype == 'object'] numeric_features = [cname for cname in X_full.columns if X_full[cname].dtype in ['int64', 'float64']] my_features = categorical_features + numeric_features numeric_features.remove('target') categorical_features = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() <= 15 and X_train_full[cname].dtype == 'object'] numeric_features = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']] my_features = categorical_features + numeric_features X_train = X_train_full[my_features] X_valid = X_valid_full[my_features] X_test = X_test_full[my_features] X_train.describe(include='all')
code
73067458/cell_8
[ "text_plain_output_1.png" ]
import matplotlib as mpl import numpy as np import pandas as pd import seaborn as sns import sklearn as sk import tensorflow as tf import xgboost as xgb X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) X_full.isnull().sum() X_full.describe(include='all')
code
73067458/cell_15
[ "text_html_output_1.png" ]
import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import sklearn as sk import tensorflow as tf import xgboost as xgb X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) X_full.isnull().sum() categorical_features = [cname for cname in X_full.columns if X_full[cname].nunique() <= 15 and X_full[cname].dtype == 'object'] numeric_features = [cname for cname in X_full.columns if X_full[cname].dtype in ['int64', 'float64']] my_features = categorical_features + numeric_features numeric_features.remove('target') ## Correlations correlations = X_full[my_features].corr() f, ax = plt.subplots(figsize=(12, 12)) sns.heatmap(correlations, square=True, cbar=True, annot=True, vmax=.9); fig = plt.figure(figsize=(18, 6)) sns.boxplot(data=X_full[numeric_features], orient='h', palette='Set2') plt.xticks(fontsize=14) plt.title('Box plot of numerical columns', fontsize=16)
code
73067458/cell_3
[ "text_html_output_1.png" ]
import matplotlib as mpl import numpy as np import pandas as pd import seaborn as sns import sklearn as sk import tensorflow as tf import xgboost as xgb print('Tensor Flow:', tf.__version__) print('SciKit Learn:', sk.__version__) print('Pandas:', pd.__version__) print('Numpy:', np.__version__) print('Seaborn:', sns.__version__) print('MatPlot Library:', mpl.__version__) print('XG Boost:', xgb.__version__)
code
73067458/cell_17
[ "text_plain_output_1.png" ]
import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import sklearn as sk import tensorflow as tf import xgboost as xgb X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) X_full.isnull().sum() categorical_features = [cname for cname in X_full.columns if X_full[cname].nunique() <= 15 and X_full[cname].dtype == 'object'] numeric_features = [cname for cname in X_full.columns if X_full[cname].dtype in ['int64', 'float64']] my_features = categorical_features + numeric_features numeric_features.remove('target') ## Correlations correlations = X_full[my_features].corr() f, ax = plt.subplots(figsize=(12, 12)) sns.heatmap(correlations, square=True, cbar=True, annot=True, vmax=.9); ## Box Plot for Outliers fig = plt.figure(figsize=(18,6)) sns.boxplot(data=X_full[numeric_features], orient="h", palette="Set2"); plt.xticks(fontsize= 14) plt.title('Box plot of numerical columns', fontsize=16); sns.boxplot(data=X_full[['target']], orient='h', palette='Set2') plt.xticks(fontsize=14) plt.title('Box plot of target column', fontsize=16)
code
73067458/cell_10
[ "text_plain_output_1.png" ]
import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import sklearn as sk import tensorflow as tf import xgboost as xgb X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) X_full.isnull().sum() categorical_features = [cname for cname in X_full.columns if X_full[cname].nunique() <= 15 and X_full[cname].dtype == 'object'] numeric_features = [cname for cname in X_full.columns if X_full[cname].dtype in ['int64', 'float64']] my_features = categorical_features + numeric_features numeric_features.remove('target') correlations = X_full[my_features].corr() f, ax = plt.subplots(figsize=(12, 12)) sns.heatmap(correlations, square=True, cbar=True, annot=True, vmax=0.9)
code
73067458/cell_27
[ "text_plain_output_1.png" ]
import matplotlib as mpl import numpy as np import pandas as pd import seaborn as sns import sklearn as sk import tensorflow as tf import xgboost as xgb X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) X_full.isnull().sum() categorical_features = [cname for cname in X_full.columns if X_full[cname].nunique() <= 15 and X_full[cname].dtype == 'object'] numeric_features = [cname for cname in X_full.columns if X_full[cname].dtype in ['int64', 'float64']] my_features = categorical_features + numeric_features numeric_features.remove('target') categorical_features = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() <= 15 and X_train_full[cname].dtype == 'object'] numeric_features = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']] my_features = categorical_features + numeric_features X_train = X_train_full[my_features] X_valid = X_valid_full[my_features] X_test = X_test_full[my_features] X_train.head()
code
73067458/cell_37
[ "text_html_output_1.png" ]
import matplotlib as mpl import numpy as np import pandas as pd import seaborn as sns import sklearn as sk import tensorflow as tf import xgboost as xgb X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) result_df = pd.DataFrame({'Actual': y_valid, 'Predicted': preds_valid, 'Diff': preds_valid - y_valid}) result_df['Diff'].round().value_counts()
code
73067458/cell_12
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import matplotlib as mpl import numpy as np import pandas as pd import seaborn as sns import sklearn as sk import tensorflow as tf import xgboost as xgb X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) X_full.isnull().sum() categorical_features = [cname for cname in X_full.columns if X_full[cname].nunique() <= 15 and X_full[cname].dtype == 'object'] numeric_features = [cname for cname in X_full.columns if X_full[cname].dtype in ['int64', 'float64']] my_features = categorical_features + numeric_features numeric_features.remove('target') X_full[numeric_features].hist(figsize=(24, 12), log=True)
code
73067458/cell_5
[ "text_plain_output_1.png" ]
import matplotlib as mpl import numpy as np import pandas as pd import seaborn as sns import sklearn as sk import tensorflow as tf import xgboost as xgb X_full = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) X_full.head()
code
73067458/cell_36
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.pipeline import Pipeline # preprocessor and model are built in earlier cells of this notebook clf = Pipeline(steps=[('preprocessor', preprocessor), ('model', model)]) final_model = clf.fit(X_train, y_train) preds_valid = final_model.predict(X_valid) print('MAE:', mean_absolute_error(y_valid, preds_valid)) print('RMSE:', mean_squared_error(y_valid, preds_valid, squared=False))
code
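mean_squared_error(..., squared=False) returns the RMSE directly; an equivalent spelling, plus the dedicated helper that scikit-learn 1.4 added:
import numpy as np
from sklearn.metrics import mean_squared_error

rmse = np.sqrt(mean_squared_error(y_valid, preds_valid))
# in scikit-learn >= 1.4:
# from sklearn.metrics import root_mean_squared_error
# rmse = root_mean_squared_error(y_valid, preds_valid)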
32062582/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') df_test.head()
code
32062582/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') def add_daily_measures(df): df.loc[0, 'Daily Cases'] = df.loc[0, 'ConfirmedCases'] df.loc[0, 'Daily Deaths'] = df.loc[0, 'Fatalities'] for i in range(1, len(df)): # iterate over the argument, not the global df_world df.loc[i, 'Daily Cases'] = df.loc[i, 'ConfirmedCases'] - df.loc[i - 1, 'ConfirmedCases'] df.loc[i, 'Daily Deaths'] = df.loc[i, 'Fatalities'] - df.loc[i - 1, 'Fatalities'] df.loc[0, 'Daily Cases'] = 0 df.loc[0, 'Daily Deaths'] = 0 return df df_world = df_train.copy() df_world = df_world.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum() # double brackets: tuple column selection was removed in pandas 2.0 df_world = add_daily_measures(df_world) df_world.head()
code
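add_daily_measures is a row-by-row first difference; pandas' diff() expresses the same thing in two vectorized lines:
df_world['Daily Cases'] = df_world['ConfirmedCases'].diff().fillna(0)
df_world['Daily Deaths'] = df_world['Fatalities'].diff().fillna(0)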
32062582/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') print('Minimum date from test set: {}'.format(df_test['Date'].min())) print('Maximum date from test set: {}'.format(df_test['Date'].max()))
code
32062582/cell_26
[ "text_html_output_1.png" ]
from xgboost import XGBRegressor import pandas as pd df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') def add_daily_measures(df): df.loc[0, 'Daily Cases'] = df.loc[0, 'ConfirmedCases'] df.loc[0, 'Daily Deaths'] = df.loc[0, 'Fatalities'] for i in range(1, len(df)): # iterate over the argument, not the global df_world df.loc[i, 'Daily Cases'] = df.loc[i, 'ConfirmedCases'] - df.loc[i - 1, 'ConfirmedCases'] df.loc[i, 'Daily Deaths'] = df.loc[i, 'Fatalities'] - df.loc[i - 1, 'Fatalities'] df.loc[0, 'Daily Cases'] = 0 df.loc[0, 'Daily Deaths'] = 0 return df df_world = df_train.copy() df_world = df_world.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum() # double brackets: tuple column selection was removed in pandas 2.0 df_world = add_daily_measures(df_world) df_map = df_train.copy() df_map['Date'] = df_map['Date'].astype(str) df_map = df_map.groupby(['Date', 'Country_Region'], as_index=False)[['ConfirmedCases', 'Fatalities']].sum() df_train[df_train.Country_Region == 'India'].Date.min() def create_features(df): df['Date'] = pd.to_datetime(df['Date']) # ensure Date is datetime; the parsing cell is not captured in this dump df['day'] = df['Date'].dt.day df['month'] = df['Date'].dt.month df['dayofweek'] = df['Date'].dt.dayofweek df['dayofyear'] = df['Date'].dt.dayofyear df['quarter'] = df['Date'].dt.quarter df['weekofyear'] = df['Date'].dt.isocalendar().week.astype(int) # .dt.weekofyear was removed in pandas 2.0 return df df_train = create_features(df_train) columns = ['day', 'month', 'dayofweek', 'dayofyear', 'quarter', 'weekofyear', 'Province_State', 'Country_Region', 'ConfirmedCases', 'Fatalities'] df_train = df_train[columns] df_dev = df_dev[columns] # df_dev is created in a cell not captured in this dump df_train.Province_State.fillna('NaN', inplace=True) df_test.Province_State.fillna('NaN', inplace=True) df_test = create_features(df_test) columns = ['day', 'month', 'dayofweek', 'dayofyear', 'quarter', 'weekofyear'] df_train.dtypes submission = [] for country in df_train.Country_Region.unique(): df_train1 = df_train[df_train['Country_Region'] == country] for state in df_train1.Province_State.unique(): df_train2 = df_train1[df_train1['Province_State'] == state] df_train3 = df_train2.drop(['Country_Region', 'Province_State'], axis=1) train = df_train3.values X_train, y_train = (train[:, :-2], train[:, -2:]) model1 = XGBRegressor(random_state=1, n_estimators=1000) model1.fit(X_train, y_train[:, 0]) model2 = XGBRegressor(random_state=1, n_estimators=1000) model2.fit(X_train, y_train[:, 1]) df_test1 = df_test[(df_test['Country_Region'] == country) & (df_test['Province_State'] == state)] ForecastId = df_test1.ForecastId.values df_test2 = df_test1[columns] y_pred1 = model1.predict(df_test2.values).astype(int) y_pred2 = model2.predict(df_test2.values).astype(int) for i in range(len(y_pred1)): d = {'ForecastId': ForecastId[i], 'ConfirmedCases': y_pred1[i], 'Fatalities': y_pred2[i]} submission.append(d) len(submission)
code
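The loop in cell_26 accumulates one dict per ForecastId; the typical follow-up (assumed here, not shown in this row) materializes it as the competition submission file:
import pandas as pd

df_submit = pd.DataFrame(submission,
                         columns=['ForecastId', 'ConfirmedCases', 'Fatalities'])
df_submit.to_csv('submission.csv', index=False)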
32062582/cell_19
[ "text_html_output_2.png" ]
import pandas as pd df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') def add_daily_measures(df): df.loc[0, 'Daily Cases'] = df.loc[0, 'ConfirmedCases'] df.loc[0, 'Daily Deaths'] = df.loc[0, 'Fatalities'] for i in range(1, len(df_world)): df.loc[i, 'Daily Cases'] = df.loc[i, 'ConfirmedCases'] - df.loc[i - 1, 'ConfirmedCases'] df.loc[i, 'Daily Deaths'] = df.loc[i, 'Fatalities'] - df.loc[i - 1, 'Fatalities'] df.loc[0, 'Daily Cases'] = 0 df.loc[0, 'Daily Deaths'] = 0 return df df_world = df_train.copy() df_world = df_world.groupby('Date', as_index=False)['ConfirmedCases', 'Fatalities'].sum() df_world = add_daily_measures(df_world) df_map = df_train.copy() df_map['Date'] = df_map['Date'].astype(str) df_map = df_map.groupby(['Date', 'Country_Region'], as_index=False)['ConfirmedCases', 'Fatalities'].sum() df_train[df_train.Country_Region == 'India'].Date.min() def create_features(df): df['day'] = df['Date'].dt.day df['month'] = df['Date'].dt.month df['dayofweek'] = df['Date'].dt.dayofweek df['dayofyear'] = df['Date'].dt.dayofyear df['quarter'] = df['Date'].dt.quarter df['weekofyear'] = df['Date'].dt.weekofyear return df df_train = create_features(df_train) df_train.head()
code
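create_features assumes Date is already a datetime column (pd.to_datetime was presumably applied in a cell this dump omits), and Series.dt.weekofyear was removed in pandas 2.0; the modern replacement:
df['weekofyear'] = df['Date'].dt.isocalendar().week.astype(int)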
32062582/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') display(df_train.head()) display(df_train.describe()) df_train.info() # info() prints directly and returns None, so it is not wrapped in display()
code
32062582/cell_14
[ "text_html_output_1.png" ]
import pandas as pd df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') def add_daily_measures(df): df.loc[0, 'Daily Cases'] = df.loc[0, 'ConfirmedCases'] df.loc[0, 'Daily Deaths'] = df.loc[0, 'Fatalities'] for i in range(1, len(df_world)): df.loc[i, 'Daily Cases'] = df.loc[i, 'ConfirmedCases'] - df.loc[i - 1, 'ConfirmedCases'] df.loc[i, 'Daily Deaths'] = df.loc[i, 'Fatalities'] - df.loc[i - 1, 'Fatalities'] df.loc[0, 'Daily Cases'] = 0 df.loc[0, 'Daily Deaths'] = 0 return df df_world = df_train.copy() df_world = df_world.groupby('Date', as_index=False)['ConfirmedCases', 'Fatalities'].sum() df_world = add_daily_measures(df_world) df_map = df_train.copy() df_map['Date'] = df_map['Date'].astype(str) df_map = df_map.groupby(['Date', 'Country_Region'], as_index=False)['ConfirmedCases', 'Fatalities'].sum() df_train[df_train.Country_Region == 'India'].Date.min()
code
32062582/cell_22
[ "text_html_output_1.png" ]
import pandas as pd df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') def add_daily_measures(df): df.loc[0, 'Daily Cases'] = df.loc[0, 'ConfirmedCases'] df.loc[0, 'Daily Deaths'] = df.loc[0, 'Fatalities'] for i in range(1, len(df_world)): df.loc[i, 'Daily Cases'] = df.loc[i, 'ConfirmedCases'] - df.loc[i - 1, 'ConfirmedCases'] df.loc[i, 'Daily Deaths'] = df.loc[i, 'Fatalities'] - df.loc[i - 1, 'Fatalities'] df.loc[0, 'Daily Cases'] = 0 df.loc[0, 'Daily Deaths'] = 0 return df df_world = df_train.copy() df_world = df_world.groupby('Date', as_index=False)['ConfirmedCases', 'Fatalities'].sum() df_world = add_daily_measures(df_world) df_map = df_train.copy() df_map['Date'] = df_map['Date'].astype(str) df_map = df_map.groupby(['Date', 'Country_Region'], as_index=False)['ConfirmedCases', 'Fatalities'].sum() df_train[df_train.Country_Region == 'India'].Date.min() def create_features(df): df['day'] = df['Date'].dt.day df['month'] = df['Date'].dt.month df['dayofweek'] = df['Date'].dt.dayofweek df['dayofyear'] = df['Date'].dt.dayofyear df['quarter'] = df['Date'].dt.quarter df['weekofyear'] = df['Date'].dt.weekofyear return df df_train = create_features(df_train) columns = ['day', 'month', 'dayofweek', 'dayofyear', 'quarter', 'weekofyear', 'Province_State', 'Country_Region', 'ConfirmedCases', 'Fatalities'] df_train = df_train[columns] df_dev = df_dev[columns] df_train.Province_State.fillna('NaN', inplace=True) df_test.Province_State.fillna('NaN', inplace=True) df_test = create_features(df_test) columns = ['day', 'month', 'dayofweek', 'dayofyear', 'quarter', 'weekofyear'] df_train.dtypes
code
32062582/cell_10
[ "text_html_output_2.png", "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
import plotly.graph_objects as go
df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')

def add_daily_measures(df):
    # Turn the cumulative columns into day-over-day differences; row 0 has no
    # previous day, so its daily values are pinned to 0 after the loop.
    for i in range(1, len(df)):
        df.loc[i, 'Daily Cases'] = df.loc[i, 'ConfirmedCases'] - df.loc[i - 1, 'ConfirmedCases']
        df.loc[i, 'Daily Deaths'] = df.loc[i, 'Fatalities'] - df.loc[i - 1, 'Fatalities']
    df.loc[0, 'Daily Cases'] = 0
    df.loc[0, 'Daily Deaths'] = 0
    return df
df_world = df_train.copy()
df_world = df_world.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_world = add_daily_measures(df_world)
fig = go.Figure(data=[go.Bar(name='Cases', x=df_world['Date'], y=df_world['Daily Cases']), go.Bar(name='Deaths', x=df_world['Date'], y=df_world['Daily Deaths'])])
fig.update_layout(barmode='overlay', title='Worldwide daily Case and Death count')
fig.show()
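# Aside (editor's note, not part of the original): with barmode='overlay' the
# Deaths bars are drawn on top of the much taller Cases bars; 'stack' or 'group'
# avoid the overlap if the two series should stay visually separate:
#     fig.update_layout(barmode='stack')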
code
32062582/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')

def add_daily_measures(df):
    # Turn the cumulative columns into day-over-day differences; row 0 has no
    # previous day, so its daily values are pinned to 0 after the loop.
    for i in range(1, len(df)):
        df.loc[i, 'Daily Cases'] = df.loc[i, 'ConfirmedCases'] - df.loc[i - 1, 'ConfirmedCases']
        df.loc[i, 'Daily Deaths'] = df.loc[i, 'Fatalities'] - df.loc[i - 1, 'Fatalities']
    df.loc[0, 'Daily Cases'] = 0
    df.loc[0, 'Daily Deaths'] = 0
    return df
df_world = df_train.copy()
df_world = df_world.groupby('Date', as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_world = add_daily_measures(df_world)
df_map = df_train.copy()
df_map['Date'] = df_map['Date'].astype(str)
df_map = df_map.groupby(['Date', 'Country_Region'], as_index=False)[['ConfirmedCases', 'Fatalities']].sum()
df_map.head()
code
32062582/cell_5
[ "text_html_output_1.png" ]
import pandas as pd df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv') df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv') print('Minimum date from training set: {}'.format(df_train['Date'].min())) print('Maximum date from training set: {}'.format(df_train['Date'].max()))
code
128019479/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
Data_train = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
Data_test = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
Data_train.shape
Data_test.shape
Data_train.columns
Data_test.columns
Data_train.isnull().sum().sum()
Data_test.isnull().sum().sum()
Data_train.corr()
feature_name = list(Data_train.columns[1:-1])
Data_train.drop('id', inplace=True, axis=1)
id = Data_test['id']  # note: shadows the built-in id()
Data_test.drop('id', inplace=True, axis=1)
Data_train.skew()
feature_name = ['MedInc', 'AveRooms', 'AveBedrms', 'Population', 'AveOccup', 'MedHouseVal']
for col in feature_name:
    # Winsorize at the quartiles: values below Q1 are raised to Q1 and values
    # above Q3 are lowered to Q3 (an aggressive cap that flattens both tails).
    q1 = Data_train[col].quantile(0.25)
    q3 = Data_train[col].quantile(0.75)
    Data_train[col] = np.where(Data_train[col] < q1, q1, Data_train[col])
    Data_train[col] = np.where(Data_train[col] > q3, q3, Data_train[col])
Data_train.skew()
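# Aside (editor's sketch, not part of the original): Series.clip expresses the same
# cap in one call, and the conventional Tukey fences (1.5 * IQR beyond the
# quartiles) trim only genuine outliers instead of flattening both tails:
#     q1, q3 = Data_train[col].quantile([0.25, 0.75])
#     iqr = q3 - q1
#     Data_train[col] = Data_train[col].clip(q1 - 1.5 * iqr, q3 + 1.5 * iqr)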
code
128019479/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd Data_train = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv') Data_test = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv') Data_test.shape Data_test.columns Data_test.isnull().sum().sum()
code
128019479/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd Data_train = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv') Data_test = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv') Data_train.shape Data_train.columns Data_train.describe()
code
128019479/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
Data_train = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv')
Data_test = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv')
Data_train.shape
Data_test.shape
Data_train.columns
Data_test.columns
Data_train.isnull().sum().sum()
Data_test.isnull().sum().sum()
Data_train.corr()
feature_name = list(Data_train.columns[1:-1])
Data_train.drop('id', inplace=True, axis=1)
id = Data_test['id']  # note: shadows the built-in id()
Data_test.drop('id', inplace=True, axis=1)
Data_train.skew()
feature_name = ['MedInc', 'AveRooms', 'AveBedrms', 'Population', 'AveOccup', 'MedHouseVal']
for col in feature_name:
    # Winsorize the training features (and target) at the quartiles.
    q1 = Data_train[col].quantile(0.25)
    q3 = Data_train[col].quantile(0.75)
    Data_train[col] = np.where(Data_train[col] < q1, q1, Data_train[col])
    Data_train[col] = np.where(Data_train[col] > q3, q3, Data_train[col])
Data_train.skew()
Data_test.skew()
feature_name = ['MedInc', 'AveRooms', 'AveBedrms', 'Population', 'AveOccup']
for col in feature_name:
    # Apply the same quartile cap to the test features (no target column here).
    q1 = Data_test[col].quantile(0.25)
    q3 = Data_test[col].quantile(0.75)
    Data_test[col] = np.where(Data_test[col] < q1, q1, Data_test[col])
    Data_test[col] = np.where(Data_test[col] > q3, q3, Data_test[col])
Data_test.skew()
code
128019479/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns Data_train = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv') Data_test = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv') Data_train.shape Data_test.shape Data_train.columns Data_test.columns Data_train.isnull().sum().sum() Data_test.isnull().sum().sum() Data_train.corr() feature_name = list(Data_train.columns[1:-1]) Data_train.drop('id', inplace=True, axis=1) id = Data_test['id'] Data_test.drop('id', inplace=True, axis=1) Data_train.skew()
code
128019479/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd Data_train = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv') Data_test = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv') Data_test.shape
code
128019479/cell_2
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from sklearn.preprocessing import MinMaxScaler from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression, Ridge from sklearn.svm import SVR from sklearn.tree import DecisionTreeRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.model_selection import GridSearchCV from sklearn.metrics import mean_squared_error
code
128019479/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd Data_train = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv') Data_test = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv') Data_train.shape Data_train.columns
code
128019479/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd Data_train = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv') Data_test = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv') Data_test.shape Data_test.columns
code
128019479/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd Data_train = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv') Data_test = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv') Data_train.shape Data_train.columns Data_train.isnull().sum().sum() Data_train.corr()
code
128019479/cell_16
[ "text_html_output_1.png" ]
import pandas as pd import seaborn as sns Data_train = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv') Data_test = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv') Data_train.shape Data_train.columns Data_train.isnull().sum().sum() Data_train.corr() sns.heatmap(Data_train.corr(), cmap='hot')
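# Aside (editor's sketch, not part of the original): annot=True prints the
# coefficients inside the cells, which makes the heatmap readable at a glance:
#     sns.heatmap(Data_train.corr(), cmap='hot', annot=True, fmt='.2f')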
code
128019479/cell_17
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns Data_train = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv') Data_test = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv') Data_train.shape Data_train.columns Data_train.isnull().sum().sum() Data_train.corr() feature_name = list(Data_train.columns[1:-1]) plt.figure(figsize=(15, 15)) for i in range(len(feature_name)): plt.subplot(4, 4, i + 1) sns.scatterplot(x=Data_train[feature_name[i]], y=Data_train['MedHouseVal'])
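# Aside (editor's note, not part of the original): with 8 features in the 4x4
# grid, calling plt.tight_layout() after the loop keeps axis labels from overlapping.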
code
128019479/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd Data_train = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv') Data_test = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv') Data_train.shape Data_train.columns Data_train.isnull().sum().sum() Data_train.hist(figsize=(30, 30))
code
128019479/cell_22
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns Data_train = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv') Data_test = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv') Data_train.shape Data_test.shape Data_train.columns Data_test.columns Data_train.isnull().sum().sum() Data_test.isnull().sum().sum() Data_train.corr() feature_name = list(Data_train.columns[1:-1]) Data_train.drop('id', inplace=True, axis=1) id = Data_test['id'] Data_test.drop('id', inplace=True, axis=1) Data_test.skew()
code
128019479/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd Data_train = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv') Data_test = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv') Data_test.shape Data_test.columns Data_test.describe()
code
128019479/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd Data_train = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv') Data_test = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv') Data_train.shape Data_train.columns Data_train.isnull().sum().sum()
code
128019479/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd Data_train = pd.read_csv('/kaggle/input/playground-series-s3e1/train.csv') Data_test = pd.read_csv('/kaggle/input/playground-series-s3e1/test.csv') Data_train.shape
code
89140329/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df1 = pd.read_csv('../input/email-spam-dataset/completeSpamAssassin.csv')
df1 = df1[['Body', 'Label']]
df2 = pd.read_csv('../input/email-spam-dataset/enronSpamSubset.csv')
df2 = df2[['Body', 'Label']]
df3 = pd.read_csv('../input/email-spam-dataset/lingSpam.csv')
df3 = df3[['Body', 'Label']]
data = pd.concat([df1, df2, df3])  # DataFrame.append was removed in pandas 2.0
data = data.reset_index(drop=True)
data.dtypes
data['extracted_body'] = data['Body'].apply(lambda x: re.sub('http\\S+', '', str(x)))
data['extracted_body'] = data['extracted_body'].apply(lambda x: re.sub('\\W+', ' ', x))
data['extracted_body'][10]
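# Aside (editor's sketch, not part of the original): the two apply() calls can be
# replaced with pandas' vectorized string methods:
#     data['extracted_body'] = (data['Body'].astype(str)
#                               .str.replace(r'http\S+', '', regex=True)
#                               .str.replace(r'\W+', ' ', regex=True))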
code
89140329/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df1 = pd.read_csv('../input/email-spam-dataset/completeSpamAssassin.csv')
df1 = df1[['Body', 'Label']]
df2 = pd.read_csv('../input/email-spam-dataset/enronSpamSubset.csv')
df2 = df2[['Body', 'Label']]
df3 = pd.read_csv('../input/email-spam-dataset/lingSpam.csv')
df3 = df3[['Body', 'Label']]
data = pd.concat([df1, df2, df3])  # DataFrame.append was removed in pandas 2.0
data = data.reset_index(drop=True)
data.dtypes
data['extracted_body'] = data['Body'].apply(lambda x: re.sub('http\\S+', '', str(x)))
data.head()
code
89140329/cell_25
[ "text_plain_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS len(STOPWORDS)
code
89140329/cell_4
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df1 = pd.read_csv('../input/email-spam-dataset/completeSpamAssassin.csv')
df1 = df1[['Body', 'Label']]
df2 = pd.read_csv('../input/email-spam-dataset/enronSpamSubset.csv')
df2 = df2[['Body', 'Label']]
df3 = pd.read_csv('../input/email-spam-dataset/lingSpam.csv')
df3 = df3[['Body', 'Label']]
data = pd.concat([df1, df2, df3])  # DataFrame.append was removed in pandas 2.0
data = data.reset_index(drop=True)
data['Label'].value_counts()
code
89140329/cell_34
[ "text_plain_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df1 = pd.read_csv('../input/email-spam-dataset/completeSpamAssassin.csv')
df1 = df1[['Body', 'Label']]
df2 = pd.read_csv('../input/email-spam-dataset/enronSpamSubset.csv')
df2 = df2[['Body', 'Label']]
df3 = pd.read_csv('../input/email-spam-dataset/lingSpam.csv')
df3 = df3[['Body', 'Label']]
data = pd.concat([df1, df2, df3])  # DataFrame.append was removed in pandas 2.0
data = data.reset_index(drop=True)
data.dtypes
data["len"] = data["Body"].apply(lambda x: len(str(x)))
data
#------------------------------------------------------------------
print("Quantiles of data : ")
print(data["len"].quantile([0,0.25,0.5,0.75,0.9,0.95,0.99,1]),"\n")
#------------------------------------------------------------------
fig = plt.figure(figsize=(10, 7))
plt.ylim(-50,5000)
#plt.hist(data["len"])
plt.boxplot(data["len"])
data['extracted_body'] = data['Body'].apply(lambda x: re.sub('http\\S+', '', str(x)))
data['extracted_body'] = data['extracted_body'].apply(lambda x: re.sub('\\W+', ' ', x))
count_ham = len(data) - data['Label'].sum()
count_spam = data['Label'].sum()
subset = data.copy()

def word_cloud_plot(subset):
    # Build and render one word cloud from the 'extracted_body' column of the passed subset.
    comment_words = ''
    stopwords = set(STOPWORDS)
    for val in subset['extracted_body']:
        val = str(val)
        tokens = val.split()
        for i in range(len(tokens)):
            tokens[i] = tokens[i].lower()
        comment_words += ' '.join(tokens) + ' '
    wordcloud = WordCloud(width=800, height=800, background_color='white', stopwords=stopwords, min_font_size=6).generate(comment_words)
    plt.figure(figsize=(8, 8))
    plt.imshow(wordcloud)  # render the generated cloud
    plt.axis('off')
    plt.tight_layout(pad=0)
    plt.show()
data.head()
code
89140329/cell_29
[ "text_plain_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df1 = pd.read_csv('../input/email-spam-dataset/completeSpamAssassin.csv')
df1 = df1[['Body', 'Label']]
df2 = pd.read_csv('../input/email-spam-dataset/enronSpamSubset.csv')
df2 = df2[['Body', 'Label']]
df3 = pd.read_csv('../input/email-spam-dataset/lingSpam.csv')
df3 = df3[['Body', 'Label']]
data = pd.concat([df1, df2, df3])  # DataFrame.append was removed in pandas 2.0
data = data.reset_index(drop=True)
data.dtypes
data["len"] = data["Body"].apply(lambda x: len(str(x)))
data
#------------------------------------------------------------------
print("Quantiles of data : ")
print(data["len"].quantile([0,0.25,0.5,0.75,0.9,0.95,0.99,1]),"\n")
#------------------------------------------------------------------
fig = plt.figure(figsize=(10, 7))
plt.ylim(-50,5000)
#plt.hist(data["len"])
plt.boxplot(data["len"])
data['extracted_body'] = data['Body'].apply(lambda x: re.sub('http\\S+', '', str(x)))
data['extracted_body'] = data['extracted_body'].apply(lambda x: re.sub('\\W+', ' ', x))
count_ham = len(data) - data['Label'].sum()
count_spam = data['Label'].sum()
subset = data.copy()

def word_cloud_plot(subset):
    # Build and render one word cloud from the 'extracted_body' column of the passed subset.
    comment_words = ''
    stopwords = set(STOPWORDS)
    for val in subset['extracted_body']:
        val = str(val)
        tokens = val.split()
        for i in range(len(tokens)):
            tokens[i] = tokens[i].lower()
        comment_words += ' '.join(tokens) + ' '
    wordcloud = WordCloud(width=800, height=800, background_color='white', stopwords=stopwords, min_font_size=6).generate(comment_words)
    plt.figure(figsize=(8, 8))
    plt.imshow(wordcloud)  # render the generated cloud
    plt.axis('off')
    plt.tight_layout(pad=0)
    plt.show()
word_cloud_plot(subset=data[data['Label'] == 0])
code
89140329/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df1 = pd.read_csv('../input/email-spam-dataset/completeSpamAssassin.csv')
df1 = df1[['Body', 'Label']]
df2 = pd.read_csv('../input/email-spam-dataset/enronSpamSubset.csv')
df2 = df2[['Body', 'Label']]
df3 = pd.read_csv('../input/email-spam-dataset/lingSpam.csv')
df3 = df3[['Body', 'Label']]
data = pd.concat([df1, df2, df3])  # DataFrame.append was removed in pandas 2.0
data = data.reset_index(drop=True)
data.dtypes
data['extracted_body'] = data['Body'].apply(lambda x: re.sub('http\\S+', '', str(x)))
data['extracted_body'] = data['extracted_body'].apply(lambda x: re.sub('\\W+', ' ', x))
data['extracted_body'][10].split()
code
89140329/cell_41
[ "text_html_output_1.png" ]
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df1 = pd.read_csv('../input/email-spam-dataset/completeSpamAssassin.csv')
df1 = df1[['Body', 'Label']]
df2 = pd.read_csv('../input/email-spam-dataset/enronSpamSubset.csv')
df2 = df2[['Body', 'Label']]
df3 = pd.read_csv('../input/email-spam-dataset/lingSpam.csv')
df3 = df3[['Body', 'Label']]
data = pd.concat([df1, df2, df3])  # DataFrame.append was removed in pandas 2.0
data = data.reset_index(drop=True)
data.dtypes
data["len"] = data["Body"].apply(lambda x: len(str(x)))
data
#------------------------------------------------------------------
print("Quantiles of data : ")
print(data["len"].quantile([0,0.25,0.5,0.75,0.9,0.95,0.99,1]),"\n")
#------------------------------------------------------------------
fig = plt.figure(figsize=(10, 7))
plt.ylim(-50,5000)
#plt.hist(data["len"])
plt.boxplot(data["len"])
data['extracted_body'] = data['Body'].apply(lambda x: re.sub('http\\S+', '', str(x)))
data['extracted_body'] = data['extracted_body'].apply(lambda x: re.sub('\\W+', ' ', x))
count_ham = len(data) - data['Label'].sum()
count_spam = data['Label'].sum()
subset = data.copy()

def word_cloud_plot(subset):
    # Build and render one word cloud from the 'extracted_body' column of the passed subset.
    comment_words = ''
    stopwords = set(STOPWORDS)
    for val in subset['extracted_body']:
        val = str(val)
        tokens = val.split()
        for i in range(len(tokens)):
            tokens[i] = tokens[i].lower()
        comment_words += ' '.join(tokens) + ' '
    wordcloud = WordCloud(width=800, height=800, background_color='white', stopwords=stopwords, min_font_size=6).generate(comment_words)
    plt.figure(figsize=(8, 8))
    plt.imshow(wordcloud)  # render the generated cloud
    plt.axis('off')
    plt.tight_layout(pad=0)
    plt.show()
data['extracted_body_tk'] = data['extracted_body'].apply(lambda x: x.split())
stopwords = list(set(STOPWORDS))
lemmatizer = WordNetLemmatizer()
data['extracted_body_tk_lm'] = data['extracted_body_tk'].apply(lambda x: ' '.join([lemmatizer.lemmatize(elem) for elem in x if elem not in stopwords]))
vectorizer = CountVectorizer(max_features=1000)
X = vectorizer.fit_transform(data['extracted_body_tk_lm'])
df_bow_sklearn = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names_out())  # get_feature_names() was removed in scikit-learn 1.2
df_bow_sklearn.shape
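# Aside (editor's sketch, not part of the original): TfidfVectorizer is a drop-in
# alternative to raw counts that down-weights words common to every email:
#     from sklearn.feature_extraction.text import TfidfVectorizer
#     X_tfidf = TfidfVectorizer(max_features=1000).fit_transform(data['extracted_body_tk_lm'])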
code
89140329/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df1 = pd.read_csv('../input/email-spam-dataset/completeSpamAssassin.csv')
df1 = df1[['Body', 'Label']]
print('#----------------------------------------------------#')
print(df1.head(3))
df2 = pd.read_csv('../input/email-spam-dataset/enronSpamSubset.csv')
df2 = df2[['Body', 'Label']]
print('#----------------------------------------------------#')
print(df2.head(3))
df3 = pd.read_csv('../input/email-spam-dataset/lingSpam.csv')
df3 = df3[['Body', 'Label']]
print('#----------------------------------------------------#')
print(df3.head(3))
data = pd.concat([df1, df2, df3])  # DataFrame.append was removed in pandas 2.0
data = data.reset_index(drop=True)
print(data.shape)
data.head(20)
code
89140329/cell_19
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df1 = pd.read_csv('../input/email-spam-dataset/completeSpamAssassin.csv')
df1 = df1[['Body', 'Label']]
df2 = pd.read_csv('../input/email-spam-dataset/enronSpamSubset.csv')
df2 = df2[['Body', 'Label']]
df3 = pd.read_csv('../input/email-spam-dataset/lingSpam.csv')
df3 = df3[['Body', 'Label']]
data = pd.concat([df1, df2, df3])  # DataFrame.append was removed in pandas 2.0
data = data.reset_index(drop=True)
data.dtypes
data['extracted_body'] = data['Body'].apply(lambda x: re.sub('http\\S+', '', str(x)))
data['extracted_body'] = data['extracted_body'].apply(lambda x: re.sub('\\W+', ' ', x))
data['extracted_body'][10]
code
89140329/cell_1
[ "text_plain_output_1.png" ]
import os
import warnings
import numpy as np
import pandas as pd
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
from wordcloud import WordCloud, STOPWORDS
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
89140329/cell_7
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df1 = pd.read_csv('../input/email-spam-dataset/completeSpamAssassin.csv')
df1 = df1[['Body', 'Label']]
df2 = pd.read_csv('../input/email-spam-dataset/enronSpamSubset.csv')
df2 = df2[['Body', 'Label']]
df3 = pd.read_csv('../input/email-spam-dataset/lingSpam.csv')
df3 = df3[['Body', 'Label']]
data = pd.concat([df1, df2, df3])  # DataFrame.append was removed in pandas 2.0
data = data.reset_index(drop=True)
data.dtypes
data['len'] = data['Body'].apply(lambda x: len(str(x)))
data
print('Quantiles of data : ')
print(data['len'].quantile([0, 0.25, 0.5, 0.75, 0.9, 0.95, 0.99, 1]), '\n')
fig = plt.figure(figsize=(10, 7))
plt.ylim(-50, 5000)
plt.boxplot(data['len'])
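# Aside (editor's sketch, not part of the original): the length distribution is
# heavily right-skewed (the 99th percentile sits far above the median), so a
# log-scale histogram shows it more fully than the clipped boxplot:
#     import numpy as np
#     plt.hist(np.log10(data['len'] + 1), bins=50)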
code
89140329/cell_16
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df1 = pd.read_csv('../input/email-spam-dataset/completeSpamAssassin.csv')
df1 = df1[['Body', 'Label']]
df2 = pd.read_csv('../input/email-spam-dataset/enronSpamSubset.csv')
df2 = df2[['Body', 'Label']]
df3 = pd.read_csv('../input/email-spam-dataset/lingSpam.csv')
df3 = df3[['Body', 'Label']]
data = pd.concat([df1, df2, df3])  # DataFrame.append was removed in pandas 2.0
data = data.reset_index(drop=True)
data.dtypes
data['extracted_body'] = data['Body'].apply(lambda x: re.sub('http\\S+', '', str(x)))
data['extracted_body'] = data['extracted_body'].apply(lambda x: re.sub('\\W+', ' ', x))
data['extracted_body'][10]
code
89140329/cell_38
[ "image_output_1.png" ]
from nltk.stem import WordNetLemmatizer
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df1 = pd.read_csv('../input/email-spam-dataset/completeSpamAssassin.csv')
df1 = df1[['Body', 'Label']]
df2 = pd.read_csv('../input/email-spam-dataset/enronSpamSubset.csv')
df2 = df2[['Body', 'Label']]
df3 = pd.read_csv('../input/email-spam-dataset/lingSpam.csv')
df3 = df3[['Body', 'Label']]
data = pd.concat([df1, df2, df3])  # DataFrame.append was removed in pandas 2.0
data = data.reset_index(drop=True)
data.dtypes
data["len"] = data["Body"].apply(lambda x: len(str(x)))
data
#------------------------------------------------------------------
print("Quantiles of data : ")
print(data["len"].quantile([0,0.25,0.5,0.75,0.9,0.95,0.99,1]),"\n")
#------------------------------------------------------------------
fig = plt.figure(figsize=(10, 7))
plt.ylim(-50,5000)
#plt.hist(data["len"])
plt.boxplot(data["len"])
data['extracted_body'] = data['Body'].apply(lambda x: re.sub('http\\S+', '', str(x)))
data['extracted_body'] = data['extracted_body'].apply(lambda x: re.sub('\\W+', ' ', x))
count_ham = len(data) - data['Label'].sum()
count_spam = data['Label'].sum()
subset = data.copy()

def word_cloud_plot(subset):
    # Build and render one word cloud from the 'extracted_body' column of the passed subset.
    comment_words = ''
    stopwords = set(STOPWORDS)
    for val in subset['extracted_body']:
        val = str(val)
        tokens = val.split()
        for i in range(len(tokens)):
            tokens[i] = tokens[i].lower()
        comment_words += ' '.join(tokens) + ' '
    wordcloud = WordCloud(width=800, height=800, background_color='white', stopwords=stopwords, min_font_size=6).generate(comment_words)
    plt.figure(figsize=(8, 8))
    plt.imshow(wordcloud)  # render the generated cloud
    plt.axis('off')
    plt.tight_layout(pad=0)
    plt.show()
data['extracted_body_tk'] = data['extracted_body'].apply(lambda x: x.split())
stopwords = list(set(STOPWORDS))
lemmatizer = WordNetLemmatizer()
data['extracted_body_tk_lm'] = data['extracted_body_tk'].apply(lambda x: ' '.join([lemmatizer.lemmatize(elem) for elem in x if elem not in stopwords]))
data[['extracted_body_tk', 'extracted_body_tk_lm']][10:11]
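# Aside (editor's note, not part of the original): WordNetLemmatizer requires the
# WordNet corpus; if it is missing locally, run: import nltk; nltk.download('wordnet').
# Supplying a part of speech changes the output, e.g.
#     lemmatizer.lemmatize('running', pos='v')  # -> 'run'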
code
89140329/cell_35
[ "text_plain_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df1 = pd.read_csv('../input/email-spam-dataset/completeSpamAssassin.csv')
df1 = df1[['Body', 'Label']]
df2 = pd.read_csv('../input/email-spam-dataset/enronSpamSubset.csv')
df2 = df2[['Body', 'Label']]
df3 = pd.read_csv('../input/email-spam-dataset/lingSpam.csv')
df3 = df3[['Body', 'Label']]
data = pd.concat([df1, df2, df3])  # DataFrame.append was removed in pandas 2.0
data = data.reset_index(drop=True)
data.dtypes
data["len"] = data["Body"].apply(lambda x: len(str(x)))
data
#------------------------------------------------------------------
print("Quantiles of data : ")
print(data["len"].quantile([0,0.25,0.5,0.75,0.9,0.95,0.99,1]),"\n")
#------------------------------------------------------------------
fig = plt.figure(figsize=(10, 7))
plt.ylim(-50,5000)
#plt.hist(data["len"])
plt.boxplot(data["len"])
data['extracted_body'] = data['Body'].apply(lambda x: re.sub('http\\S+', '', str(x)))
data['extracted_body'] = data['extracted_body'].apply(lambda x: re.sub('\\W+', ' ', x))
count_ham = len(data) - data['Label'].sum()
count_spam = data['Label'].sum()
subset = data.copy()

def word_cloud_plot(subset):
    # Build and render one word cloud from the 'extracted_body' column of the passed subset.
    comment_words = ''
    stopwords = set(STOPWORDS)
    for val in subset['extracted_body']:
        val = str(val)
        tokens = val.split()
        for i in range(len(tokens)):
            tokens[i] = tokens[i].lower()
        comment_words += ' '.join(tokens) + ' '
    wordcloud = WordCloud(width=800, height=800, background_color='white', stopwords=stopwords, min_font_size=6).generate(comment_words)
    plt.figure(figsize=(8, 8))
    plt.imshow(wordcloud)  # render the generated cloud
    plt.axis('off')
    plt.tight_layout(pad=0)
    plt.show()
data['extracted_body_tk'] = data['extracted_body'].apply(lambda x: x.split())
data.head()
code
89140329/cell_31
[ "text_plain_output_1.png", "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df1 = pd.read_csv('../input/email-spam-dataset/completeSpamAssassin.csv')
df1 = df1[['Body', 'Label']]
df2 = pd.read_csv('../input/email-spam-dataset/enronSpamSubset.csv')
df2 = df2[['Body', 'Label']]
df3 = pd.read_csv('../input/email-spam-dataset/lingSpam.csv')
df3 = df3[['Body', 'Label']]
data = pd.concat([df1, df2, df3])  # DataFrame.append was removed in pandas 2.0
data = data.reset_index(drop=True)
data.dtypes
data["len"] = data["Body"].apply(lambda x: len(str(x)))
data
#------------------------------------------------------------------
print("Quantiles of data : ")
print(data["len"].quantile([0,0.25,0.5,0.75,0.9,0.95,0.99,1]),"\n")
#------------------------------------------------------------------
fig = plt.figure(figsize=(10, 7))
plt.ylim(-50,5000)
#plt.hist(data["len"])
plt.boxplot(data["len"])
data['extracted_body'] = data['Body'].apply(lambda x: re.sub('http\\S+', '', str(x)))
data['extracted_body'] = data['extracted_body'].apply(lambda x: re.sub('\\W+', ' ', x))
count_ham = len(data) - data['Label'].sum()
count_spam = data['Label'].sum()
subset = data.copy()

def word_cloud_plot(subset):
    # Build and render one word cloud from the 'extracted_body' column of the passed subset.
    comment_words = ''
    stopwords = set(STOPWORDS)
    for val in subset['extracted_body']:
        val = str(val)
        tokens = val.split()
        for i in range(len(tokens)):
            tokens[i] = tokens[i].lower()
        comment_words += ' '.join(tokens) + ' '
    wordcloud = WordCloud(width=800, height=800, background_color='white', stopwords=stopwords, min_font_size=6).generate(comment_words)
    plt.figure(figsize=(8, 8))
    plt.imshow(wordcloud)  # render the generated cloud
    plt.axis('off')
    plt.tight_layout(pad=0)
    plt.show()
word_cloud_plot(subset=data[data['Label'] == 1])
code
89140329/cell_24
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df1 = pd.read_csv('../input/email-spam-dataset/completeSpamAssassin.csv')
df1 = df1[['Body', 'Label']]
df2 = pd.read_csv('../input/email-spam-dataset/enronSpamSubset.csv')
df2 = df2[['Body', 'Label']]
df3 = pd.read_csv('../input/email-spam-dataset/lingSpam.csv')
df3 = df3[['Body', 'Label']]
data = pd.concat([df1, df2, df3])  # DataFrame.append was removed in pandas 2.0
data = data.reset_index(drop=True)
data.dtypes
data["len"] = data["Body"].apply(lambda x: len(str(x)))
data
#------------------------------------------------------------------
print("Quantiles of data : ")
print(data["len"].quantile([0,0.25,0.5,0.75,0.9,0.95,0.99,1]),"\n")
#------------------------------------------------------------------
fig = plt.figure(figsize=(10, 7))
plt.ylim(-50,5000)
#plt.hist(data["len"])
plt.boxplot(data["len"])
data['extracted_body'] = data['Body'].apply(lambda x: re.sub('http\\S+', '', str(x)))
data['extracted_body'] = data['extracted_body'].apply(lambda x: re.sub('\\W+', ' ', x))
count_ham = len(data) - data['Label'].sum()
count_spam = data['Label'].sum()
plt.title('Bar plot of Ham vs Spam frequencies in the data')
plt.xlabel('Labels')
plt.ylabel('Frequency')
plt.bar(['Ham', 'Spam'], [count_ham, count_spam], color='green')
code
89140329/cell_10
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df1 = pd.read_csv('../input/email-spam-dataset/completeSpamAssassin.csv')
df1 = df1[['Body', 'Label']]
df2 = pd.read_csv('../input/email-spam-dataset/enronSpamSubset.csv')
df2 = df2[['Body', 'Label']]
df3 = pd.read_csv('../input/email-spam-dataset/lingSpam.csv')
df3 = df3[['Body', 'Label']]
data = pd.concat([df1, df2, df3])  # DataFrame.append was removed in pandas 2.0
data = data.reset_index(drop=True)
data.dtypes
data['extracted_body'] = data['Body'].apply(lambda x: re.sub('http\\S+', '', str(x)))
data['extracted_body'][10]
code
89140329/cell_37
[ "image_output_1.png" ]
from nltk.stem import WordNetLemmatizer
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df1 = pd.read_csv('../input/email-spam-dataset/completeSpamAssassin.csv')
df1 = df1[['Body', 'Label']]
df2 = pd.read_csv('../input/email-spam-dataset/enronSpamSubset.csv')
df2 = df2[['Body', 'Label']]
df3 = pd.read_csv('../input/email-spam-dataset/lingSpam.csv')
df3 = df3[['Body', 'Label']]
data = pd.concat([df1, df2, df3])  # DataFrame.append was removed in pandas 2.0
data = data.reset_index(drop=True)
data.dtypes
data["len"] = data["Body"].apply(lambda x: len(str(x)))
data
#------------------------------------------------------------------
print("Quantiles of data : ")
print(data["len"].quantile([0,0.25,0.5,0.75,0.9,0.95,0.99,1]),"\n")
#------------------------------------------------------------------
fig = plt.figure(figsize=(10, 7))
plt.ylim(-50,5000)
#plt.hist(data["len"])
plt.boxplot(data["len"])
data['extracted_body'] = data['Body'].apply(lambda x: re.sub('http\\S+', '', str(x)))
data['extracted_body'] = data['extracted_body'].apply(lambda x: re.sub('\\W+', ' ', x))
count_ham = len(data) - data['Label'].sum()
count_spam = data['Label'].sum()
subset = data.copy()

def word_cloud_plot(subset):
    # Build and render one word cloud from the 'extracted_body' column of the passed subset.
    comment_words = ''
    stopwords = set(STOPWORDS)
    for val in subset['extracted_body']:
        val = str(val)
        tokens = val.split()
        for i in range(len(tokens)):
            tokens[i] = tokens[i].lower()
        comment_words += ' '.join(tokens) + ' '
    wordcloud = WordCloud(width=800, height=800, background_color='white', stopwords=stopwords, min_font_size=6).generate(comment_words)
    plt.figure(figsize=(8, 8))
    plt.imshow(wordcloud)  # render the generated cloud
    plt.axis('off')
    plt.tight_layout(pad=0)
    plt.show()
data['extracted_body_tk'] = data['extracted_body'].apply(lambda x: x.split())
stopwords = list(set(STOPWORDS))
lemmatizer = WordNetLemmatizer()
data['extracted_body_tk_lm'] = data['extracted_body_tk'].apply(lambda x: ' '.join([lemmatizer.lemmatize(elem) for elem in x if elem not in stopwords]))
data.head()
code
89140329/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
df1 = pd.read_csv('../input/email-spam-dataset/completeSpamAssassin.csv')
df1 = df1[['Body', 'Label']]
df2 = pd.read_csv('../input/email-spam-dataset/enronSpamSubset.csv')
df2 = df2[['Body', 'Label']]
df3 = pd.read_csv('../input/email-spam-dataset/lingSpam.csv')
df3 = df3[['Body', 'Label']]
data = pd.concat([df1, df2, df3])  # DataFrame.append was removed in pandas 2.0
data = data.reset_index(drop=True)
data.dtypes
data['extracted_body'] = data['Body'].apply(lambda x: re.sub('http\\S+', '', str(x)))
data['extracted_body'] = data['extracted_body'].apply(lambda x: re.sub('\\W+', ' ', x))
data.head()
code
89140329/cell_5
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df1 = pd.read_csv('../input/email-spam-dataset/completeSpamAssassin.csv')
df1 = df1[['Body', 'Label']]
df2 = pd.read_csv('../input/email-spam-dataset/enronSpamSubset.csv')
df2 = df2[['Body', 'Label']]
df3 = pd.read_csv('../input/email-spam-dataset/lingSpam.csv')
df3 = df3[['Body', 'Label']]
data = pd.concat([df1, df2, df3])  # DataFrame.append was removed in pandas 2.0
data = data.reset_index(drop=True)
data.dtypes
code
17135990/cell_13
[ "text_html_output_1.png" ]
from IPython.display import display
from fastai.structured import add_datepart  # fastai 0.7 helper used below; the import was missing
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
PATH = '../input/'
df_raw = pd.read_csv(f'{PATH}train/Train.csv', low_memory=False, parse_dates=['saledate'])

def display_all(df):
    # Render a frame without pandas' usual row/column truncation.
    with pd.option_context('display.max_rows', 1000, 'display.max_columns', 1000):
        display(df)
df_raw.SalePrice = np.log(df_raw.SalePrice)
add_datepart(df_raw, 'saledate')
df_raw.saleYear.head()
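# Aside (editor's sketch, not part of the original cell): in fastai 0.7,
# add_datepart(df_raw, 'saledate') expands the date into numeric columns named
# saleYear, saleMonth, saleWeek, saleDay, ... plus saleElapsed, and drops the raw
# 'saledate' column. The hand-rolled equivalent of one such column would be:
#     df_raw['saleYear'] = df_raw['saledate'].dt.year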
code
17135990/cell_20
[ "text_plain_output_1.png" ]
from IPython.display import display
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
PATH = '../input/'
df_raw = pd.read_csv(f'{PATH}train/Train.csv', low_memory=False, parse_dates=['saledate'])

def display_all(df):
    # Render a frame without pandas' usual row/column truncation.
    with pd.option_context('display.max_rows', 1000, 'display.max_columns', 1000):
        display(df)
df_raw = pd.read_feather('tmp/bulldozers-raw')  # reloads a frame saved with to_feather in a cell not captured here
code
17135990/cell_2
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os print(os.listdir('../input'))
code
17135990/cell_11
[ "text_plain_output_1.png" ]
from IPython.display import display
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
PATH = '../input/'
df_raw = pd.read_csv(f'{PATH}train/Train.csv', low_memory=False, parse_dates=['saledate'])

def display_all(df):
    # Render a frame without pandas' usual row/column truncation.
    with pd.option_context('display.max_rows', 1000, 'display.max_columns', 1000):
        display(df)
display_all(df_raw.describe(include='all').T)
code
17135990/cell_1
[ "text_plain_output_1.png" ]
!pip install fastai==0.7.0 !pip install torchtext==0.2.3
code
17135990/cell_7
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
!ls {PATH}
code
17135990/cell_18
[ "text_plain_output_1.png" ]
from IPython.display import display
from fastai.structured import add_datepart, train_cats  # fastai 0.7 helpers used below; the imports were missing
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
PATH = '../input/'
df_raw = pd.read_csv(f'{PATH}train/Train.csv', low_memory=False, parse_dates=['saledate'])

def display_all(df):
    # Render a frame without pandas' usual row/column truncation.
    with pd.option_context('display.max_rows', 1000, 'display.max_columns', 1000):
        display(df)
df_raw.SalePrice = np.log(df_raw.SalePrice)
add_datepart(df_raw, 'saledate')
train_cats(df_raw)  # converts string columns to categoricals; without it UsageBand has no .cat accessor (presumably run in a cell not captured here)
df_raw.UsageBand.cat.categories
df_raw.UsageBand.cat.set_categories(['High', 'Medium', 'Low'], ordered=True, inplace=True)
df_raw.UsageBand = df_raw.UsageBand.cat.codes
display_all(df_raw.isnull().sum().sort_index() / len(df_raw))
code
17135990/cell_15
[ "text_html_output_1.png" ]
from IPython.display import display
from fastai.structured import add_datepart, train_cats  # fastai 0.7 helpers used below; the imports were missing
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
PATH = '../input/'
df_raw = pd.read_csv(f'{PATH}train/Train.csv', low_memory=False, parse_dates=['saledate'])

def display_all(df):
    # Render a frame without pandas' usual row/column truncation.
    with pd.option_context('display.max_rows', 1000, 'display.max_columns', 1000):
        display(df)
df_raw.SalePrice = np.log(df_raw.SalePrice)
add_datepart(df_raw, 'saledate')
train_cats(df_raw)  # converts string columns to categoricals; without it UsageBand has no .cat accessor (presumably run in a cell not captured here)
df_raw.UsageBand.cat.categories
code
17135990/cell_22
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
# df (processed features) and y (log SalePrice) come from an earlier notebook cell
# not captured here (in fastai 0.7 this is typically produced by proc_df).
m = RandomForestRegressor(n_jobs=-1)
m.fit(df, y)
m.score(df, y)  # R^2 on the training rows themselves, so this overstates generalization
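# Aside (editor's sketch, not part of the original): scoring on held-out rows gives
# an honest estimate; the bulldozer sales are time-ordered, so a temporal hold-out
# is the usual check (sizes are illustrative):
#     n_valid = 12000
#     X_train, X_valid = df[:-n_valid], df[-n_valid:]
#     y_train, y_valid = y[:-n_valid], y[-n_valid:]
#     m.fit(X_train, y_train)
#     m.score(X_valid, y_valid)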
code
17135990/cell_10
[ "text_plain_output_1.png" ]
from IPython.display import display
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
PATH = '../input/'
df_raw = pd.read_csv(f'{PATH}train/Train.csv', low_memory=False, parse_dates=['saledate'])

def display_all(df):
    # Render a frame without pandas' usual row/column truncation.
    with pd.option_context('display.max_rows', 1000, 'display.max_columns', 1000):
        display(df)
display_all(df_raw.tail().T)
code
121150522/cell_4
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/spaceship-titanic/train.csv')
df.info()
code
121150522/cell_23
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/spaceship-titanic/train.csv')
# Cross-tab of passenger counts per (CryoSleep, Transported) pair; only a cell's
# last expression is auto-displayed, so this result is computed and discarded.
df.pivot_table(index='CryoSleep', columns='Transported', aggfunc={'Transported': 'count'})
df_count = df[['Age']].apply(pd.value_counts)
df_count.plot(kind='bar', color='Orange', figsize=(12, 12))
plt.xticks(rotation=85)
plt.title('Most Common Ages')
plt.show()
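# Aside (editor's sketch, not part of the original): value_counts() sorts by
# frequency, so the bars above appear in most-common-first order; for an
# age-ordered axis instead:
#     df['Age'].value_counts().sort_index().plot(kind='bar', color='Orange', figsize=(12, 12))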
code