path: string (lengths 13–17)
screenshot_names: sequence (lengths 1–873)
code: string (lengths 0–40.4k)
cell_type: string (1 class: "code")
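Each row below pairs a notebook cell path with the names of its rendered output screenshots and the cell's source code. A minimal sketch of reading such a dump with the `datasets` library, assuming it is published as a Hugging Face dataset — the repository id 'user/kaggle-notebook-cells' is hypothetical:

from datasets import load_dataset

ds = load_dataset('user/kaggle-notebook-cells', split='train')  # hypothetical repo id
row = ds[0]
print(row['path'])              # e.g. '74067279/cell_4'
print(row['screenshot_names'])  # list of rendered output images
print(row['code'][:200])        # first part of the cell's source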
74067279/cell_4
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.model_selection import cross_val_score
import numpy as np
import pandas as pd
import re

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
example = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train.columns = train.columns.str.lower()
test.columns = test.columns.str.lower()
general = pd.concat([train, test], axis=0)
# Impute missing values within groups, falling back to coarser groups.
general['fare'] = general.groupby('pclass')['fare'].transform(lambda x: x.fillna(x.median()))
general['embarked'] = general.groupby(['pclass'])['embarked'].transform(lambda x: x.fillna(x.value_counts().idxmax()))
general['age'] = general.groupby(['pclass', 'parch', 'sibsp'])['age'].transform(lambda x: x.fillna(x.median()))
general['age'] = general.groupby(['pclass'])['age'].transform(lambda x: x.fillna(x.median()))
# Encode categoricals and engineer simple features.
genders = {'male': 1, 'female': 0}
general['sex_id'] = general['sex'].apply(lambda x: genders.get(x))
general['relatives'] = general['parch'] + general['sibsp']
embarkments = {'S': 1, 'C': 2, 'Q': 3}
general['embarked_id'] = general['embarked'].apply(lambda x: embarkments.get(x))
general['is_single'] = general['relatives'].apply(lambda x: 1 if x == 0 else 0)
decks = {'U': 0, 'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7, 'T': 8}
general['deck_id'] = general['cabin'].fillna('U').apply(lambda c: decks.get(c[0]))
general['title'] = general['name'].apply(lambda x: re.search(' ([A-Za-z]+)\\.', x).group(1).lower())
general['cabin_id'] = general['cabin'].str.extract('(\\d+)').astype('float').fillna(0)
general['title_id'] = LabelEncoder().fit_transform(general['title'])
general.loc[general['deck_id'] == 0, 'deck_id'] = np.nan
general['deck_id'] = general.groupby(['pclass'])['deck_id'].transform(lambda x: x.fillna(x.median()))
general.info()
code
74067279/cell_6
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.model_selection import cross_val_score
import numpy as np
import pandas as pd
import re

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
example = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train.columns = train.columns.str.lower()
test.columns = test.columns.str.lower()
general = pd.concat([train, test], axis=0)
general['fare'] = general.groupby('pclass')['fare'].transform(lambda x: x.fillna(x.median()))
general['embarked'] = general.groupby(['pclass'])['embarked'].transform(lambda x: x.fillna(x.value_counts().idxmax()))
general['age'] = general.groupby(['pclass', 'parch', 'sibsp'])['age'].transform(lambda x: x.fillna(x.median()))
general['age'] = general.groupby(['pclass'])['age'].transform(lambda x: x.fillna(x.median()))
genders = {'male': 1, 'female': 0}
general['sex_id'] = general['sex'].apply(lambda x: genders.get(x))
general['relatives'] = general['parch'] + general['sibsp']
embarkments = {'S': 1, 'C': 2, 'Q': 3}
general['embarked_id'] = general['embarked'].apply(lambda x: embarkments.get(x))
general['is_single'] = general['relatives'].apply(lambda x: 1 if x == 0 else 0)
decks = {'U': 0, 'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7, 'T': 8}
general['deck_id'] = general['cabin'].fillna('U').apply(lambda c: decks.get(c[0]))
general['title'] = general['name'].apply(lambda x: re.search(' ([A-Za-z]+)\\.', x).group(1).lower())
general['cabin_id'] = general['cabin'].str.extract('(\\d+)').astype('float').fillna(0)
general['title_id'] = LabelEncoder().fit_transform(general['title'])
general.loc[general['deck_id'] == 0, 'deck_id'] = np.nan
general['deck_id'] = general.groupby(['pclass'])['deck_id'].transform(lambda x: x.fillna(x.median()))
# Split the combined frame back into the original train/test rows.
train_filled = general.head(891)
test_filled = general.tail(418)
test_filled = test_filled.drop('survived', axis=1)  # positional axis was removed in pandas 2.x
train_filled.info()
test_final = test_filled[['passengerid', 'pclass', 'age', 'sibsp', 'parch', 'fare', 'sex_id', 'relatives', 'embarked_id', 'is_single', 'deck_id', 'cabin_id', 'title_id']]
train_final = train_filled[['passengerid', 'survived', 'pclass', 'age', 'sibsp', 'parch', 'fare', 'sex_id', 'relatives', 'embarked_id', 'is_single', 'deck_id', 'cabin_id', 'title_id']]
train_final1 = train_final.drop('survived', axis=1)
train_final1.info()
code
74067279/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import re
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.model_selection import cross_val_score

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
example = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train.columns = train.columns.str.lower()
test.columns = test.columns.str.lower()
general = pd.concat([train, test], axis=0)
general['fare'] = general.groupby('pclass')['fare'].transform(lambda x: x.fillna(x.median()))
general['embarked'] = general.groupby(['pclass'])['embarked'].transform(lambda x: x.fillna(x.value_counts().idxmax()))
general['age'] = general.groupby(['pclass', 'parch', 'sibsp'])['age'].transform(lambda x: x.fillna(x.median()))
general['age'] = general.groupby(['pclass'])['age'].transform(lambda x: x.fillna(x.median()))
general.info()
code
74067279/cell_7
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import cross_val_score
import numpy as np
import pandas as pd
import re

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
example = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train.columns = train.columns.str.lower()
test.columns = test.columns.str.lower()
general = pd.concat([train, test], axis=0)
general['fare'] = general.groupby('pclass')['fare'].transform(lambda x: x.fillna(x.median()))
general['embarked'] = general.groupby(['pclass'])['embarked'].transform(lambda x: x.fillna(x.value_counts().idxmax()))
general['age'] = general.groupby(['pclass', 'parch', 'sibsp'])['age'].transform(lambda x: x.fillna(x.median()))
general['age'] = general.groupby(['pclass'])['age'].transform(lambda x: x.fillna(x.median()))
genders = {'male': 1, 'female': 0}
general['sex_id'] = general['sex'].apply(lambda x: genders.get(x))
general['relatives'] = general['parch'] + general['sibsp']
embarkments = {'S': 1, 'C': 2, 'Q': 3}
general['embarked_id'] = general['embarked'].apply(lambda x: embarkments.get(x))
general['is_single'] = general['relatives'].apply(lambda x: 1 if x == 0 else 0)
decks = {'U': 0, 'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7, 'T': 8}
general['deck_id'] = general['cabin'].fillna('U').apply(lambda c: decks.get(c[0]))
general['title'] = general['name'].apply(lambda x: re.search(' ([A-Za-z]+)\\.', x).group(1).lower())
general['cabin_id'] = general['cabin'].str.extract('(\\d+)').astype('float').fillna(0)
general['title_id'] = LabelEncoder().fit_transform(general['title'])
general.loc[general['deck_id'] == 0, 'deck_id'] = np.nan
general['deck_id'] = general.groupby(['pclass'])['deck_id'].transform(lambda x: x.fillna(x.median()))
train_filled = general.head(891)
test_filled = general.tail(418)
test_filled = test_filled.drop('survived', axis=1)  # positional axis was removed in pandas 2.x
train_filled.info()
test_final = test_filled[['passengerid', 'pclass', 'age', 'sibsp', 'parch', 'fare', 'sex_id', 'relatives', 'embarked_id', 'is_single', 'deck_id', 'cabin_id', 'title_id']]
train_final = train_filled[['passengerid', 'survived', 'pclass', 'age', 'sibsp', 'parch', 'fare', 'sex_id', 'relatives', 'embarked_id', 'is_single', 'deck_id', 'cabin_id', 'title_id']]
train_final1 = train_final.drop('survived', axis=1)
alg_test = RandomForestClassifier(random_state=1, n_estimators=350, min_samples_split=6, min_samples_leaf=2)
alg_test.fit(train_final1, train_final['survived'])
# Training-set accuracy; this is optimistic compared to held-out performance.
predict_y = alg_test.predict(train_final1)
metrics.accuracy_score(train_final['survived'], predict_y)
code
74067279/cell_8
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import cross_val_score
import numpy as np
import pandas as pd
import re

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
example = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train.columns = train.columns.str.lower()
test.columns = test.columns.str.lower()
general = pd.concat([train, test], axis=0)
general['fare'] = general.groupby('pclass')['fare'].transform(lambda x: x.fillna(x.median()))
general['embarked'] = general.groupby(['pclass'])['embarked'].transform(lambda x: x.fillna(x.value_counts().idxmax()))
general['age'] = general.groupby(['pclass', 'parch', 'sibsp'])['age'].transform(lambda x: x.fillna(x.median()))
general['age'] = general.groupby(['pclass'])['age'].transform(lambda x: x.fillna(x.median()))
genders = {'male': 1, 'female': 0}
general['sex_id'] = general['sex'].apply(lambda x: genders.get(x))
general['relatives'] = general['parch'] + general['sibsp']
embarkments = {'S': 1, 'C': 2, 'Q': 3}
general['embarked_id'] = general['embarked'].apply(lambda x: embarkments.get(x))
general['is_single'] = general['relatives'].apply(lambda x: 1 if x == 0 else 0)
decks = {'U': 0, 'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7, 'T': 8}
general['deck_id'] = general['cabin'].fillna('U').apply(lambda c: decks.get(c[0]))
general['title'] = general['name'].apply(lambda x: re.search(' ([A-Za-z]+)\\.', x).group(1).lower())
general['cabin_id'] = general['cabin'].str.extract('(\\d+)').astype('float').fillna(0)
general['title_id'] = LabelEncoder().fit_transform(general['title'])
general.loc[general['deck_id'] == 0, 'deck_id'] = np.nan
general['deck_id'] = general.groupby(['pclass'])['deck_id'].transform(lambda x: x.fillna(x.median()))
train_filled = general.head(891)
test_filled = general.tail(418)
test_filled = test_filled.drop('survived', axis=1)  # positional axis was removed in pandas 2.x
train_filled.info()
test_final = test_filled[['passengerid', 'pclass', 'age', 'sibsp', 'parch', 'fare', 'sex_id', 'relatives', 'embarked_id', 'is_single', 'deck_id', 'cabin_id', 'title_id']]
train_final = train_filled[['passengerid', 'survived', 'pclass', 'age', 'sibsp', 'parch', 'fare', 'sex_id', 'relatives', 'embarked_id', 'is_single', 'deck_id', 'cabin_id', 'title_id']]
train_final1 = train_final.drop('survived', axis=1)
alg_test = RandomForestClassifier(random_state=1, n_estimators=350, min_samples_split=6, min_samples_leaf=2)
alg_test.fit(train_final1, train_final['survived'])
predict_y = alg_test.predict(train_final1)
metrics.accuracy_score(train_final['survived'], predict_y)
alg_test = RandomForestClassifier(random_state=1, n_estimators=350, min_samples_split=6, min_samples_leaf=2)
alg_test.fit(train_final1, train_final['survived'])
predictions = alg_test.predict(test_final)
submission = pd.DataFrame({'PassengerId': test_final['passengerid'], 'Survived': predictions})
submission['Survived'] = submission['Survived'].astype(int)
print(submission.head(20))
print(example.head(20))
submission.info()  # info() prints directly and returns None, so no print() wrapper
example.info()
code
74067279/cell_3
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.model_selection import cross_val_score
import pandas as pd
import numpy as np
import re

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
example = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train.columns = train.columns.str.lower()
test.columns = test.columns.str.lower()
general = pd.concat([train, test], axis=0)
general['fare'] = general.groupby('pclass')['fare'].transform(lambda x: x.fillna(x.median()))
general['embarked'] = general.groupby(['pclass'])['embarked'].transform(lambda x: x.fillna(x.value_counts().idxmax()))
general['age'] = general.groupby(['pclass', 'parch', 'sibsp'])['age'].transform(lambda x: x.fillna(x.median()))
general['age'] = general.groupby(['pclass'])['age'].transform(lambda x: x.fillna(x.median()))
genders = {'male': 1, 'female': 0}
general['sex_id'] = general['sex'].apply(lambda x: genders.get(x))
general['relatives'] = general['parch'] + general['sibsp']
embarkments = {'S': 1, 'C': 2, 'Q': 3}
general['embarked_id'] = general['embarked'].apply(lambda x: embarkments.get(x))
general['is_single'] = general['relatives'].apply(lambda x: 1 if x == 0 else 0)
decks = {'U': 0, 'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7, 'T': 8}
general['deck_id'] = general['cabin'].fillna('U').apply(lambda c: decks.get(c[0]))
general['title'] = general['name'].apply(lambda x: re.search(' ([A-Za-z]+)\\.', x).group(1).lower())
general['cabin_id'] = general['cabin'].str.extract('(\\d+)').astype('float').fillna(0)
general['title_id'] = LabelEncoder().fit_transform(general['title'])
general.info()
code
74067279/cell_5
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.model_selection import cross_val_score
import numpy as np
import pandas as pd
import re

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
example = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train.columns = train.columns.str.lower()
test.columns = test.columns.str.lower()
general = pd.concat([train, test], axis=0)
general['fare'] = general.groupby('pclass')['fare'].transform(lambda x: x.fillna(x.median()))
general['embarked'] = general.groupby(['pclass'])['embarked'].transform(lambda x: x.fillna(x.value_counts().idxmax()))
general['age'] = general.groupby(['pclass', 'parch', 'sibsp'])['age'].transform(lambda x: x.fillna(x.median()))
general['age'] = general.groupby(['pclass'])['age'].transform(lambda x: x.fillna(x.median()))
genders = {'male': 1, 'female': 0}
general['sex_id'] = general['sex'].apply(lambda x: genders.get(x))
general['relatives'] = general['parch'] + general['sibsp']
embarkments = {'S': 1, 'C': 2, 'Q': 3}
general['embarked_id'] = general['embarked'].apply(lambda x: embarkments.get(x))
general['is_single'] = general['relatives'].apply(lambda x: 1 if x == 0 else 0)
decks = {'U': 0, 'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7, 'T': 8}
general['deck_id'] = general['cabin'].fillna('U').apply(lambda c: decks.get(c[0]))
general['title'] = general['name'].apply(lambda x: re.search(' ([A-Za-z]+)\\.', x).group(1).lower())
general['cabin_id'] = general['cabin'].str.extract('(\\d+)').astype('float').fillna(0)
general['title_id'] = LabelEncoder().fit_transform(general['title'])
general.loc[general['deck_id'] == 0, 'deck_id'] = np.nan
general['deck_id'] = general.groupby(['pclass'])['deck_id'].transform(lambda x: x.fillna(x.median()))
train_filled = general.head(891)
test_filled = general.tail(418)
test_filled = test_filled.drop('survived', axis=1)  # positional axis was removed in pandas 2.x
train_filled.info()
code
89128640/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import cudf as pd
import cupy as np
from sklearn.model_selection import cross_val_score
import numpy
code
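The cell above aliases the RAPIDS drop-in replacements (cudf for pandas, cupy for numpy) so downstream notebook code can run unchanged on GPU; the stderr screenshot suggests the import can fail where RAPIDS is not installed. A minimal sketch of a guarded fallback — the try/except is an editorial assumption, not part of the notebook:

# Fall back to the CPU libraries when RAPIDS is unavailable (hypothetical guard).
try:
    import cudf as pd  # GPU DataFrame with a pandas-like API
    import cupy as np  # GPU arrays with a numpy-like API
except ImportError:
    import pandas as pd
    import numpy as np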
73071065/cell_9
[ "text_html_output_1.png" ]
from scipy.stats.mstats import winsorize
from sklearn.linear_model import TweedieRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler, KBinsDiscretizer
import numpy as np
import pandas as pd

X_train = pd.read_csv('../input/30-days-of-ml/train.csv')
X_test = pd.read_csv('../input/30-days-of-ml/test.csv')
y_train = X_train.target
X_train = X_train.set_index('id').drop('target', axis='columns')
X_test = X_test.set_index('id')
# Rank-based bins of the continuous target, used to stratify the folds.
y_stratified = pd.cut(y_train.rank(method='first'), bins=10, labels=False)
categoricals = [item for item in X_train.columns if 'cat' in item]
# DataFrame.append was removed in pandas 2.x; pd.concat is equivalent here.
dummies = pd.get_dummies(pd.concat([X_train, X_test])[categoricals])
X_train[dummies.columns] = dummies.iloc[:len(X_train), :]
X_test[dummies.columns] = dummies.iloc[len(X_train):, :]
del dummies
important_features = ['cat8_E', 'cont0', 'cont5', 'cont7', 'cont8', 'cat1_A', 'cont2', 'cont13', 'cont3', 'cont10', 'cont1', 'cont9', 'cont11', 'cat1', 'cat8_C', 'cont6', 'cont12', 'cat5', 'cat3_C', 'cont4', 'cat8']
X_train = X_train[important_features]
X_test = X_test[important_features]
folds = 10
skf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=0)
predictions = np.zeros(len(X_test))
score = list()
for k, (train_idx, val_idx) in enumerate(skf.split(X_train, y_stratified)):
    ss = StandardScaler()
    X = ss.fit_transform(X_train.iloc[train_idx, :]).astype(np.float32)
    Xv = ss.transform(X_train.iloc[val_idx, :]).astype(np.float32)
    Xt = ss.transform(X_test).astype(np.float32)
    # Clip the lowest 0.2% of the training target to tame outliers.
    y_train_w = np.array(winsorize(y_train[train_idx], [0.002, 0.0]))
    glm = TweedieRegressor(power=1, alpha=0.0001, max_iter=10000)
    glm.fit(X, y_train_w)
    val_preds = glm.predict(Xv)
    val_rmse = mean_squared_error(y_true=y_train[val_idx], y_pred=val_preds, squared=False)
    print(f'Fold {k} RMSE: {val_rmse:0.5f}')
    predictions += glm.predict(Xt).ravel()
    score.append(val_rmse)
predictions /= folds
print(f'CV RMSE {np.mean(score):0.5f} ({np.std(score):0.5f})')
code
73071065/cell_11
[ "text_plain_output_1.png" ]
from scipy.stats.mstats import winsorize
from sklearn.linear_model import TweedieRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler, KBinsDiscretizer
import numpy as np
import pandas as pd

X_train = pd.read_csv('../input/30-days-of-ml/train.csv')
X_test = pd.read_csv('../input/30-days-of-ml/test.csv')
y_train = X_train.target
X_train = X_train.set_index('id').drop('target', axis='columns')
X_test = X_test.set_index('id')
y_stratified = pd.cut(y_train.rank(method='first'), bins=10, labels=False)
categoricals = [item for item in X_train.columns if 'cat' in item]
# DataFrame.append was removed in pandas 2.x; pd.concat is equivalent here.
dummies = pd.get_dummies(pd.concat([X_train, X_test])[categoricals])
X_train[dummies.columns] = dummies.iloc[:len(X_train), :]
X_test[dummies.columns] = dummies.iloc[len(X_train):, :]
del dummies
important_features = ['cat8_E', 'cont0', 'cont5', 'cont7', 'cont8', 'cat1_A', 'cont2', 'cont13', 'cont3', 'cont10', 'cont1', 'cont9', 'cont11', 'cat1', 'cat8_C', 'cont6', 'cont12', 'cat5', 'cat3_C', 'cont4', 'cat8']
X_train = X_train[important_features]
X_test = X_test[important_features]
folds = 10
skf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=0)
predictions = np.zeros(len(X_test))
score = list()
for k, (train_idx, val_idx) in enumerate(skf.split(X_train, y_stratified)):
    ss = StandardScaler()
    X = ss.fit_transform(X_train.iloc[train_idx, :]).astype(np.float32)
    Xv = ss.transform(X_train.iloc[val_idx, :]).astype(np.float32)
    Xt = ss.transform(X_test).astype(np.float32)
    y_train_w = np.array(winsorize(y_train[train_idx], [0.002, 0.0]))
    glm = TweedieRegressor(power=1, alpha=0.0001, max_iter=10000)
    glm.fit(X, y_train_w)
    val_preds = glm.predict(Xv)
    val_rmse = mean_squared_error(y_true=y_train[val_idx], y_pred=val_preds, squared=False)
    predictions += glm.predict(Xt).ravel()
    score.append(val_rmse)
predictions /= folds
submission = pd.DataFrame({'id': X_test.index, 'target': predictions})
submission.to_csv('submission.csv', index=False)
submission
code
128011630/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/d/salauddintapu/house-prices-advanced-regression-techniques/train.csv')
df_train.shape
pd.options.display.min_rows = 500
df_train.isnull().sum()
pd.options.display.min_rows = 500
df_train.isna().sum()
# Drop columns dominated by missing values.
df_train.drop(['LotFrontage', 'Alley', 'PoolQC', 'Fence', 'MiscFeature'], axis=1, inplace=True)
pd.options.display.min_rows = 500
pd.set_option('display.max_columns', None)
df_train.isnull().sum()
df_train.drop(['BsmtQual', 'BsmtCond', 'FireplaceQu', 'GarageType', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond'], axis=1, inplace=True)
pd.options.display.min_rows = 500
pd.set_option('display.max_columns', None)
df_train.loc[:, 'BsmtExposure':'Electrical'].isnull().sum()
df_train.drop(['BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'], axis=1, inplace=True)
df_train.shape
df_train.dropna(inplace=True)
df_train.shape
df_train.duplicated().sum()
df_train.describe(include='all')
code
128011630/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/d/salauddintapu/house-prices-advanced-regression-techniques/train.csv')
df_train.shape
pd.options.display.min_rows = 500
df_train.isnull().sum()
pd.options.display.min_rows = 500
df_train.isna().sum()
df_train.drop(['LotFrontage', 'Alley', 'PoolQC', 'Fence', 'MiscFeature'], axis=1, inplace=True)
pd.options.display.min_rows = 500
pd.set_option('display.max_columns', None)
df_train.isnull().sum()
code
128011630/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/d/salauddintapu/house-prices-advanced-regression-techniques/train.csv')
df_train.shape
pd.options.display.min_rows = 500
df_train.isnull().sum()
pd.options.display.min_rows = 500
df_train.isna().sum()
df_train.drop(['LotFrontage', 'Alley', 'PoolQC', 'Fence', 'MiscFeature'], axis=1, inplace=True)
pd.options.display.min_rows = 500
pd.set_option('display.max_columns', None)
df_train.isnull().sum()
df_train.drop(['BsmtQual', 'BsmtCond', 'FireplaceQu', 'GarageType', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond'], axis=1, inplace=True)
pd.options.display.min_rows = 500
pd.set_option('display.max_columns', None)
df_train.loc[:, 'BsmtExposure':'Electrical'].isnull().sum()
df_train.drop(['BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'], axis=1, inplace=True)
df_train.shape
df_train.dropna(inplace=True)
df_train.shape
df_train.duplicated().sum()
df_train.dtypes
# Cast every object column to pandas' category dtype.
for i, j in df_train.dtypes.items():
    if j == 'object':
        df_train[i] = df_train[i].astype('category')
df_train.dtypes
code
128011630/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/d/salauddintapu/house-prices-advanced-regression-techniques/train.csv')
df_train.shape
pd.options.display.min_rows = 500
df_train.isnull().sum()
pd.options.display.min_rows = 500
df_train.isna().sum()
df_train.drop(['LotFrontage', 'Alley', 'PoolQC', 'Fence', 'MiscFeature'], axis=1, inplace=True)
pd.options.display.min_rows = 500
pd.set_option('display.max_columns', None)
df_train.isnull().sum()
df_train.drop(['BsmtQual', 'BsmtCond', 'FireplaceQu', 'GarageType', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond'], axis=1, inplace=True)
pd.options.display.min_rows = 500
pd.set_option('display.max_columns', None)
df_train.loc[:, 'BsmtExposure':'Electrical'].isnull().sum()
df_train.drop(['BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'], axis=1, inplace=True)
df_train.shape
df_train.dropna(inplace=True)
df_train.shape
df_train.duplicated().sum()
df_train.dtypes
code
128011630/cell_20
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/d/salauddintapu/house-prices-advanced-regression-techniques/train.csv')
df_train.shape
pd.options.display.min_rows = 500
df_train.isnull().sum()
pd.options.display.min_rows = 500
df_train.isna().sum()
df_train.drop(['LotFrontage', 'Alley', 'PoolQC', 'Fence', 'MiscFeature'], axis=1, inplace=True)
pd.options.display.min_rows = 500
pd.set_option('display.max_columns', None)
df_train.isnull().sum()
df_train.drop(['BsmtQual', 'BsmtCond', 'FireplaceQu', 'GarageType', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond'], axis=1, inplace=True)
pd.options.display.min_rows = 500
pd.set_option('display.max_columns', None)
df_train.loc[:, 'BsmtExposure':'Electrical'].isnull().sum()
df_train.drop(['BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'], axis=1, inplace=True)
df_train.shape
df_train.dropna(inplace=True)
df_train.shape
df_train.duplicated().sum()
code
128011630/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/d/salauddintapu/house-prices-advanced-regression-techniques/train.csv')
df_train.shape
df_train.describe(include='all')
code
128011630/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/d/salauddintapu/house-prices-advanced-regression-techniques/train.csv')
df_train.shape
pd.options.display.min_rows = 500
df_train.isnull().sum()
pd.options.display.min_rows = 500
df_train.isna().sum()
code
128011630/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/d/salauddintapu/house-prices-advanced-regression-techniques/train.csv')
df_train.shape
pd.options.display.min_rows = 500
df_train.isnull().sum()
pd.options.display.min_rows = 500
df_train.isna().sum()
df_train.drop(['LotFrontage', 'Alley', 'PoolQC', 'Fence', 'MiscFeature'], axis=1, inplace=True)
pd.options.display.min_rows = 500
pd.set_option('display.max_columns', None)
df_train.isnull().sum()
df_train.drop(['BsmtQual', 'BsmtCond', 'FireplaceQu', 'GarageType', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond'], axis=1, inplace=True)
pd.options.display.min_rows = 500
pd.set_option('display.max_columns', None)
df_train.loc[:, 'BsmtExposure':'Electrical'].isnull().sum()
df_train.drop(['BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'], axis=1, inplace=True)
df_train.shape
df_train.dropna(inplace=True)
df_train.shape
code
128011630/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.decomposition import PCA
code
128011630/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/d/salauddintapu/house-prices-advanced-regression-techniques/train.csv')
df_train.shape
pd.options.display.min_rows = 500
df_train.isnull().sum()
pd.options.display.min_rows = 500
df_train.isna().sum()
df_train.drop(['LotFrontage', 'Alley', 'PoolQC', 'Fence', 'MiscFeature'], axis=1, inplace=True)
pd.options.display.min_rows = 500
pd.set_option('display.max_columns', None)
df_train.isnull().sum()
df_train.drop(['BsmtQual', 'BsmtCond', 'FireplaceQu', 'GarageType', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond'], axis=1, inplace=True)
pd.options.display.min_rows = 500
pd.set_option('display.max_columns', None)
df_train.loc[:, 'BsmtExposure':'Electrical'].isnull().sum()
df_train.drop(['BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'], axis=1, inplace=True)
df_train.shape
code
128011630/cell_8
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/d/salauddintapu/house-prices-advanced-regression-techniques/train.csv')
df_train.shape
for i in df_train.columns:
    print(i)
code
128011630/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/d/salauddintapu/house-prices-advanced-regression-techniques/train.csv')
df_train.shape
pd.options.display.min_rows = 500
df_train.isnull().sum()
pd.options.display.min_rows = 500
df_train.isna().sum()
df_train.drop(['LotFrontage', 'Alley', 'PoolQC', 'Fence', 'MiscFeature'], axis=1, inplace=True)
pd.options.display.min_rows = 500
pd.set_option('display.max_columns', None)
df_train.isnull().sum()
df_train.drop(['BsmtQual', 'BsmtCond', 'FireplaceQu', 'GarageType', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond'], axis=1, inplace=True)
pd.options.display.min_rows = 500
pd.set_option('display.max_columns', None)
df_train.loc[:, 'BsmtExposure':'Electrical'].isnull().sum()
code
128011630/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/d/salauddintapu/house-prices-advanced-regression-techniques/train.csv')
df_train.shape
pd.options.display.min_rows = 500
df_train.isnull().sum()
pd.options.display.min_rows = 500
df_train.isna().sum()
df_train.drop(['LotFrontage', 'Alley', 'PoolQC', 'Fence', 'MiscFeature'], axis=1, inplace=True)
pd.options.display.min_rows = 500
pd.set_option('display.max_columns', None)
df_train.isnull().sum()
df_train.drop(['BsmtQual', 'BsmtCond', 'FireplaceQu', 'GarageType', 'GarageYrBlt', 'GarageFinish', 'GarageQual', 'GarageCond'], axis=1, inplace=True)
pd.options.display.min_rows = 500
pd.set_option('display.max_columns', None)
df_train.loc[:, 'BsmtExposure':'Electrical'].isnull().sum()
df_train.drop(['BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'], axis=1, inplace=True)
df_train.describe(include='all')
code
128011630/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/d/salauddintapu/house-prices-advanced-regression-techniques/train.csv')
df_train.shape
pd.options.display.min_rows = 500
df_train.isnull().sum()
code
128011630/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/d/salauddintapu/house-prices-advanced-regression-techniques/train.csv')
df_train.shape
code
1003108/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
from collections import Counter
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import numpy as np
import operator
import pandas as pd
import seaborn as sns

def clean_ts(df):
    # Keep commits with plausible author timestamps (roughly 2005-2017).
    return df[(df['author_timestamp'] > 1104600000) & (df['author_timestamp'] < 1487807212)]

df = clean_ts(pd.read_csv('../input/linux_kernel_git_revlog.csv'))
df['author_dt'] = pd.to_datetime(df['author_timestamp'], unit='s')
time_df = df.groupby(['author_timestamp', 'author_dt'])[['n_additions', 'n_deletions']].agg(np.sum).reset_index().sort_values('author_timestamp', ascending=True)
time_df['diff'] = time_df['n_additions'] - time_df['n_deletions']
t = pd.Series(time_df['diff'].values, index=time_df['author_dt'])
commits_over_time = df.groupby('author_dt')['commit_hash'].nunique().reset_index().sort_values('author_dt', ascending=True)
commits_series = pd.Series(commits_over_time['commit_hash'].values, index=commits_over_time['author_dt'])
files_changed_per_commit = df.groupby(['author_dt', 'commit_hash'])['filename'].agg('count').reset_index().sort_values('author_dt', ascending=True)
files_changed_per_commit = pd.Series(files_changed_per_commit['filename'].values, index=files_changed_per_commit['author_dt'])
files_changed_per_utc_offset = df.groupby('commit_utc_offset_hours')['filename'].agg('count').reset_index().sort_values('filename', ascending=False)
n_authors_by_offset = df.groupby('commit_utc_offset_hours')['author_id'].nunique().reset_index().sort_values('author_id', ascending=False)

n_rows = 10000  # originally 10000.0; .iloc needs an integer bound
subject_words = []
for row_number, row in df.iloc[:n_rows].iterrows():  # df.ix was removed from pandas
    ws = row['subject'].split(' ')
    subject_words = subject_words + [w.lower() for w in ws]
words = []
counts = []
for word, count in sorted(Counter(subject_words).items(), key=operator.itemgetter(1), reverse=True):
    words.append(word)
    counts.append(count)
wordcloud = WordCloud().generate(' '.join(subject_words))
plt.figure(figsize=(12, 8))
plt.imshow(wordcloud)
plt.axis('off')
code
1003108/cell_13
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns

def clean_ts(df):
    return df[(df['author_timestamp'] > 1104600000) & (df['author_timestamp'] < 1487807212)]

df = clean_ts(pd.read_csv('../input/linux_kernel_git_revlog.csv'))
df['author_dt'] = pd.to_datetime(df['author_timestamp'], unit='s')
time_df = df.groupby(['author_timestamp', 'author_dt'])[['n_additions', 'n_deletions']].agg(np.sum).reset_index().sort_values('author_timestamp', ascending=True)
time_df['diff'] = time_df['n_additions'] - time_df['n_deletions']
t = pd.Series(time_df['diff'].values, index=time_df['author_dt'])
commits_over_time = df.groupby('author_dt')['commit_hash'].nunique().reset_index().sort_values('author_dt', ascending=True)
commits_series = pd.Series(commits_over_time['commit_hash'].values, index=commits_over_time['author_dt'])
files_changed_per_commit = df.groupby(['author_dt', 'commit_hash'])['filename'].agg('count').reset_index().sort_values('author_dt', ascending=True)
files_changed_per_commit = pd.Series(files_changed_per_commit['filename'].values, index=files_changed_per_commit['author_dt'])
files_changed_per_utc_offset = df.groupby('commit_utc_offset_hours')['filename'].agg('count').reset_index().sort_values('filename', ascending=False)
sns.barplot(x='commit_utc_offset_hours', y='filename', data=files_changed_per_utc_offset)
code
1003108/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd

def clean_ts(df):
    return df[(df['author_timestamp'] > 1104600000) & (df['author_timestamp'] < 1487807212)]

df = clean_ts(pd.read_csv('../input/linux_kernel_git_revlog.csv'))
df['author_dt'] = pd.to_datetime(df['author_timestamp'], unit='s')
time_df = df.groupby(['author_timestamp', 'author_dt'])[['n_additions', 'n_deletions']].agg(np.sum).reset_index().sort_values('author_timestamp', ascending=True)
time_df['diff'] = time_df['n_additions'] - time_df['n_deletions']
t = pd.Series(time_df['diff'].values, index=time_df['author_dt'])
commits_over_time = df.groupby('author_dt')['commit_hash'].nunique().reset_index().sort_values('author_dt', ascending=True)
commits_series = pd.Series(commits_over_time['commit_hash'].values, index=commits_over_time['author_dt'])
commits_series.resample('M').mean().plot(title='number of commits on monthly resampled data')
code
1003108/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd

def clean_ts(df):
    return df[(df['author_timestamp'] > 1104600000) & (df['author_timestamp'] < 1487807212)]

df = clean_ts(pd.read_csv('../input/linux_kernel_git_revlog.csv'))
df['author_dt'] = pd.to_datetime(df['author_timestamp'], unit='s')
time_df = df.groupby(['author_timestamp', 'author_dt'])[['n_additions', 'n_deletions']].agg(np.sum).reset_index().sort_values('author_timestamp', ascending=True)
time_df['diff'] = time_df['n_additions'] - time_df['n_deletions']
t = pd.Series(time_df['diff'].values, index=time_df['author_dt'])
t.plot(title='lines of code added')
code
1003108/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

print(check_output(['ls', '../input']).decode('utf8'))
code
1003108/cell_11
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

def clean_ts(df):
    return df[(df['author_timestamp'] > 1104600000) & (df['author_timestamp'] < 1487807212)]

df = clean_ts(pd.read_csv('../input/linux_kernel_git_revlog.csv'))
df['author_dt'] = pd.to_datetime(df['author_timestamp'], unit='s')
time_df = df.groupby(['author_timestamp', 'author_dt'])[['n_additions', 'n_deletions']].agg(np.sum).reset_index().sort_values('author_timestamp', ascending=True)
time_df['diff'] = time_df['n_additions'] - time_df['n_deletions']
t = pd.Series(time_df['diff'].values, index=time_df['author_dt'])
commits_over_time = df.groupby('author_dt')['commit_hash'].nunique().reset_index().sort_values('author_dt', ascending=True)
commits_series = pd.Series(commits_over_time['commit_hash'].values, index=commits_over_time['author_dt'])
files_changed_per_commit = df.groupby(['author_dt', 'commit_hash'])['filename'].agg('count').reset_index().sort_values('author_dt', ascending=True)
files_changed_per_commit = pd.Series(files_changed_per_commit['filename'].values, index=files_changed_per_commit['author_dt'])
files_changed_per_commit.plot(title='number files changed per commit')
code
1003108/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
from collections import Counter
import numpy as np
import operator
import pandas as pd
import seaborn as sns

def clean_ts(df):
    return df[(df['author_timestamp'] > 1104600000) & (df['author_timestamp'] < 1487807212)]

df = clean_ts(pd.read_csv('../input/linux_kernel_git_revlog.csv'))
df['author_dt'] = pd.to_datetime(df['author_timestamp'], unit='s')
time_df = df.groupby(['author_timestamp', 'author_dt'])[['n_additions', 'n_deletions']].agg(np.sum).reset_index().sort_values('author_timestamp', ascending=True)
time_df['diff'] = time_df['n_additions'] - time_df['n_deletions']
t = pd.Series(time_df['diff'].values, index=time_df['author_dt'])
commits_over_time = df.groupby('author_dt')['commit_hash'].nunique().reset_index().sort_values('author_dt', ascending=True)
commits_series = pd.Series(commits_over_time['commit_hash'].values, index=commits_over_time['author_dt'])
files_changed_per_commit = df.groupby(['author_dt', 'commit_hash'])['filename'].agg('count').reset_index().sort_values('author_dt', ascending=True)
files_changed_per_commit = pd.Series(files_changed_per_commit['filename'].values, index=files_changed_per_commit['author_dt'])
files_changed_per_utc_offset = df.groupby('commit_utc_offset_hours')['filename'].agg('count').reset_index().sort_values('filename', ascending=False)
n_authors_by_offset = df.groupby('commit_utc_offset_hours')['author_id'].nunique().reset_index().sort_values('author_id', ascending=False)

n_rows = 10000  # originally 10000.0; .iloc needs an integer bound
subject_words = []
for row_number, row in df.iloc[:n_rows].iterrows():  # df.ix was removed from pandas
    ws = row['subject'].split(' ')
    subject_words = subject_words + [w.lower() for w in ws]
words = []
counts = []
for word, count in sorted(Counter(subject_words).items(), key=operator.itemgetter(1), reverse=True):
    words.append(word)
    counts.append(count)
wcdf = pd.DataFrame({'word': words, 'count': counts})
sns.barplot(y='word', x='count', data=wcdf[0:20])
code
1003108/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd

def clean_ts(df):
    return df[(df['author_timestamp'] > 1104600000) & (df['author_timestamp'] < 1487807212)]

df = clean_ts(pd.read_csv('../input/linux_kernel_git_revlog.csv'))
df['author_dt'] = pd.to_datetime(df['author_timestamp'], unit='s')
time_df = df.groupby(['author_timestamp', 'author_dt'])[['n_additions', 'n_deletions']].agg(np.sum).reset_index().sort_values('author_timestamp', ascending=True)
time_df['diff'] = time_df['n_additions'] - time_df['n_deletions']
t = pd.Series(time_df['diff'].values, index=time_df['author_dt'])
commits_over_time = df.groupby('author_dt')['commit_hash'].nunique().reset_index().sort_values('author_dt', ascending=True)
commits_series = pd.Series(commits_over_time['commit_hash'].values, index=commits_over_time['author_dt'])
commits_series.plot(title='number of commits on original time series')
code
1003108/cell_15
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns

def clean_ts(df):
    return df[(df['author_timestamp'] > 1104600000) & (df['author_timestamp'] < 1487807212)]

df = clean_ts(pd.read_csv('../input/linux_kernel_git_revlog.csv'))
df['author_dt'] = pd.to_datetime(df['author_timestamp'], unit='s')
time_df = df.groupby(['author_timestamp', 'author_dt'])[['n_additions', 'n_deletions']].agg(np.sum).reset_index().sort_values('author_timestamp', ascending=True)
time_df['diff'] = time_df['n_additions'] - time_df['n_deletions']
t = pd.Series(time_df['diff'].values, index=time_df['author_dt'])
commits_over_time = df.groupby('author_dt')['commit_hash'].nunique().reset_index().sort_values('author_dt', ascending=True)
commits_series = pd.Series(commits_over_time['commit_hash'].values, index=commits_over_time['author_dt'])
files_changed_per_commit = df.groupby(['author_dt', 'commit_hash'])['filename'].agg('count').reset_index().sort_values('author_dt', ascending=True)
files_changed_per_commit = pd.Series(files_changed_per_commit['filename'].values, index=files_changed_per_commit['author_dt'])
files_changed_per_utc_offset = df.groupby('commit_utc_offset_hours')['filename'].agg('count').reset_index().sort_values('filename', ascending=False)
n_authors_by_offset = df.groupby('commit_utc_offset_hours')['author_id'].nunique().reset_index().sort_values('author_id', ascending=False)
sns.barplot(x='commit_utc_offset_hours', y='author_id', data=n_authors_by_offset)
code
1003108/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

def clean_ts(df):
    return df[(df['author_timestamp'] > 1104600000) & (df['author_timestamp'] < 1487807212)]

df = clean_ts(pd.read_csv('../input/linux_kernel_git_revlog.csv'))
df['author_dt'] = pd.to_datetime(df['author_timestamp'], unit='s')
df.head()
code
1003108/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd

def clean_ts(df):
    return df[(df['author_timestamp'] > 1104600000) & (df['author_timestamp'] < 1487807212)]

df = clean_ts(pd.read_csv('../input/linux_kernel_git_revlog.csv'))
df['author_dt'] = pd.to_datetime(df['author_timestamp'], unit='s')
time_df = df.groupby(['author_timestamp', 'author_dt'])[['n_additions', 'n_deletions']].agg(np.sum).reset_index().sort_values('author_timestamp', ascending=True)
time_df['diff'] = time_df['n_additions'] - time_df['n_deletions']
time_df.head()
code
128031687/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/employees-evaluation-for-promotion/employee_promotion.csv', encoding='ISO-8859-1', engine='python')
data.columns
data.shape
code
128031687/cell_13
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/employees-evaluation-for-promotion/employee_promotion.csv', encoding='ISO-8859-1', engine='python')
data.head()
code
128031687/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/employees-evaluation-for-promotion/employee_promotion.csv', encoding='ISO-8859-1', engine='python')
data.columns
data.shape
data.dtypes
code
128031687/cell_34
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd

data = pd.read_csv('/kaggle/input/employees-evaluation-for-promotion/employee_promotion.csv', encoding='ISO-8859-1', engine='python')
data.columns
data.shape
data.dtypes
np.sum(data.isnull().any(axis=1))
data.isnull().sum()
data.describe()
code
128031687/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/employees-evaluation-for-promotion/employee_promotion.csv', encoding='ISO-8859-1', engine='python')
data.columns
data.shape
data.info()
code
128031687/cell_44
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

data = pd.read_csv('/kaggle/input/employees-evaluation-for-promotion/employee_promotion.csv', encoding='ISO-8859-1', engine='python')
data.columns
data.shape
data.dtypes
np.sum(data.isnull().any(axis=1))
data.isnull().sum()
plt.figure(figsize=(12, 10))
sns.countplot(x=data['region'])  # newer seaborn versions require keyword arguments
plt.xticks(rotation=90)
code
128031687/cell_40
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

data = pd.read_csv('/kaggle/input/employees-evaluation-for-promotion/employee_promotion.csv', encoding='ISO-8859-1', engine='python')
data.columns
data.shape
data.dtypes
np.sum(data.isnull().any(axis=1))
data.isnull().sum()
sns.countplot(data=data, x='department')
plt.show()
code
128031687/cell_29
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

data = pd.read_csv('/kaggle/input/employees-evaluation-for-promotion/employee_promotion.csv', encoding='ISO-8859-1', engine='python')
data.columns
data.shape
data.dtypes
np.sum(data.isnull().any(axis=1))
data.isnull().sum()
code
128031687/cell_48
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

data = pd.read_csv('/kaggle/input/employees-evaluation-for-promotion/employee_promotion.csv', encoding='ISO-8859-1', engine='python')
data.columns
data.shape
data.dtypes
np.sum(data.isnull().any(axis=1))
data.isnull().sum()
plt.xticks(rotation=90)
sns.countplot(data=data, x='education')
plt.show()
code
128031687/cell_41
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

data = pd.read_csv('/kaggle/input/employees-evaluation-for-promotion/employee_promotion.csv', encoding='ISO-8859-1', engine='python')
data.columns
data.shape
data.dtypes
np.sum(data.isnull().any(axis=1))
data.isnull().sum()
data['department'].value_counts().head(10).plot(kind='pie', autopct='%1.1f%%', figsize=(10, 10)).legend()
code
128031687/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/employees-evaluation-for-promotion/employee_promotion.csv', encoding='ISO-8859-1', engine='python')
data.columns
print('length of data is', len(data))
code
128031687/cell_52
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

data = pd.read_csv('/kaggle/input/employees-evaluation-for-promotion/employee_promotion.csv', encoding='ISO-8859-1', engine='python')
data.columns
data.shape
data.dtypes
np.sum(data.isnull().any(axis=1))
data.isnull().sum()
plt.xticks(rotation=90)
sns.countplot(data=data, x='gender')
plt.show()
code
128031687/cell_45
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

data = pd.read_csv('/kaggle/input/employees-evaluation-for-promotion/employee_promotion.csv', encoding='ISO-8859-1', engine='python')
data.columns
data.shape
data.dtypes
np.sum(data.isnull().any(axis=1))
data.isnull().sum()
plt.xticks(rotation=90)
data['region'].value_counts().head(10).plot(kind='pie', autopct='%1.1f%%', figsize=(10, 10), startangle=0).legend()
code
128031687/cell_49
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

data = pd.read_csv('/kaggle/input/employees-evaluation-for-promotion/employee_promotion.csv', encoding='ISO-8859-1', engine='python')
data.columns
data.shape
data.dtypes
np.sum(data.isnull().any(axis=1))
data.isnull().sum()
plt.xticks(rotation=90)
data['education'].value_counts().head(7).plot(kind='pie', autopct='%1.1f%%', figsize=(10, 10), startangle=0).legend()
code
128031687/cell_32
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

data = pd.read_csv('/kaggle/input/employees-evaluation-for-promotion/employee_promotion.csv', encoding='ISO-8859-1', engine='python')
data.columns
data.shape
data.dtypes
np.sum(data.isnull().any(axis=1))
data.isnull().sum()
print('Count of rows in the data is: ', len(data))
code
128031687/cell_15
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/employees-evaluation-for-promotion/employee_promotion.csv', encoding='ISO-8859-1', engine='python')
data.tail()
code
128031687/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/employees-evaluation-for-promotion/employee_promotion.csv', encoding='ISO-8859-1', engine='python')
data.columns
code
128031687/cell_31
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

data = pd.read_csv('/kaggle/input/employees-evaluation-for-promotion/employee_promotion.csv', encoding='ISO-8859-1', engine='python')
data.columns
data.shape
data.dtypes
np.sum(data.isnull().any(axis=1))
data.isnull().sum()
print('Count of columns in the data is: ', len(data.columns))
code
128031687/cell_53
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

data = pd.read_csv('/kaggle/input/employees-evaluation-for-promotion/employee_promotion.csv', encoding='ISO-8859-1', engine='python')
data.columns
data.shape
data.dtypes
np.sum(data.isnull().any(axis=1))
data.isnull().sum()
plt.xticks(rotation=90)
data['gender'].value_counts().head(7).plot(kind='pie', autopct='%1.1f%%', figsize=(10, 10), startangle=0).legend()
code
128031687/cell_27
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

data = pd.read_csv('/kaggle/input/employees-evaluation-for-promotion/employee_promotion.csv', encoding='ISO-8859-1', engine='python')
data.columns
data.shape
data.dtypes
np.sum(data.isnull().any(axis=1))
code
128031687/cell_37
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

data = pd.read_csv('/kaggle/input/employees-evaluation-for-promotion/employee_promotion.csv', encoding='ISO-8859-1', engine='python')
data.columns
data.shape
data.dtypes
np.sum(data.isnull().any(axis=1))
data.isnull().sum()
data.hist(figsize=(20, 20), bins=20, color='#107009AA')
plt.title('Numeric Features Distribution')
plt.show()
code
105186524/cell_13
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.prod(youtube.shape)  # np.product was removed in NumPy 2.0
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
youtube.loc[pd.isna(youtube['description']), :].index
youtube = youtube.fillna('no description available for this video')
youtube.loc[91]
code
105186524/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
youtube.info()
code
105186524/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
code
105186524/cell_11
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.prod(youtube.shape)  # np.product was removed in NumPy 2.0
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
youtube.loc[pd.isna(youtube['description']), :].index
code
105186524/cell_19
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.prod(youtube.shape)  # np.product was removed in NumPy 2.0
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
youtube.loc[pd.isna(youtube['description']), :].index
youtube = youtube.fillna('no description available for this video')
youtube.loc[91]
column_names = ['title', 'trending_date']
duplicates2 = youtube.duplicated(subset=column_names, keep=False)
youtube[duplicates2].sort_values(by='title')
youtube = youtube.drop_duplicates(['title', 'trending_date'], keep='first')
youtube[youtube['title'] == '13 Reasons Why: Season 2 | Official Trailer [HD] | Netflix']
youtube['trending_date'] = pd.to_datetime(youtube['trending_date'], format='%y.%d.%m')
youtube['publish_time'] = pd.to_datetime(youtube['publish_time'])
youtube.describe()
code
105186524/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105186524/cell_7
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.prod(youtube.shape)  # np.product was removed in NumPy 2.0
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
code
105186524/cell_18
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.prod(youtube.shape)  # np.product was removed in NumPy 2.0
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
youtube.loc[pd.isna(youtube['description']), :].index
youtube = youtube.fillna('no description available for this video')
youtube.loc[91]
column_names = ['title', 'trending_date']
duplicates2 = youtube.duplicated(subset=column_names, keep=False)
youtube[duplicates2].sort_values(by='title')
youtube = youtube.drop_duplicates(['title', 'trending_date'], keep='first')
youtube[youtube['title'] == '13 Reasons Why: Season 2 | Official Trailer [HD] | Netflix']
youtube['trending_date'] = pd.to_datetime(youtube['trending_date'], format='%y.%d.%m')
youtube['publish_time'] = pd.to_datetime(youtube['publish_time'])
youtube.info()
code
105186524/cell_15
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.prod(youtube.shape)  # np.product was removed in NumPy 2.0
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
youtube.loc[pd.isna(youtube['description']), :].index
youtube = youtube.fillna('no description available for this video')
youtube.loc[91]
column_names = ['title', 'trending_date']
duplicates2 = youtube.duplicated(subset=column_names, keep=False)
youtube[duplicates2].sort_values(by='title')
code
105186524/cell_16
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.prod(youtube.shape)  # np.product was removed in NumPy 2.0
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
youtube.loc[pd.isna(youtube['description']), :].index
youtube = youtube.fillna('no description available for this video')
youtube.loc[91]
column_names = ['title', 'trending_date']
duplicates2 = youtube.duplicated(subset=column_names, keep=False)
youtube[duplicates2].sort_values(by='title')
youtube['title'].loc[34137]
code
105186524/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
youtube.head()
code
105186524/cell_17
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.prod(youtube.shape)  # np.product was removed in NumPy 2.0
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
youtube.loc[pd.isna(youtube['description']), :].index
youtube = youtube.fillna('no description available for this video')
youtube.loc[91]
column_names = ['title', 'trending_date']
duplicates2 = youtube.duplicated(subset=column_names, keep=False)
youtube[duplicates2].sort_values(by='title')
youtube = youtube.drop_duplicates(['title', 'trending_date'], keep='first')
youtube[youtube['title'] == '13 Reasons Why: Season 2 | Official Trailer [HD] | Netflix']
code
105186524/cell_14
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.prod(youtube.shape)  # np.product was removed in NumPy 2.0
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
youtube.loc[pd.isna(youtube['description']), :].index
youtube = youtube.fillna('no description available for this video')
youtube.loc[91]
youtube.info()
code
105186524/cell_10
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.product(youtube.shape)
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
indices = list(np.where(youtube['description'].isnull())[0])
indices
code
122244852/cell_2
[ "text_html_output_1.png" ]
!pip install mlflow dagshub
code
122244852/cell_7
[ "text_plain_output_1.png" ]
import mlflow
mlflow.set_tracking_uri('https://dagshub.com/ChiragChauhan4579/MLflow-integration.mlflow')
mlflow.set_experiment(experiment_name='wine-quality')
code
122244852/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/red-wine-quality-cortez-et-al-2009/winequality-red.csv')
df.head()
code
122244852/cell_16
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import ElasticNet
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.model_selection import train_test_split, GridSearchCV
import mlflow
import numpy as np
mlflow.set_tracking_uri('https://dagshub.com/ChiragChauhan4579/MLflow-integration.mlflow')
mlflow.set_experiment(experiment_name='wine-quality')
y_train.value_counts()
alpha = 0.6
l1_ratio = 0.9
with mlflow.start_run():
    mlflow.set_tag('model', 'elastic-net')
    mlflow.log_param('alpha', alpha)
    mlflow.log_param('l1_ratio', l1_ratio)
    lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio)  # pass l1_ratio here (the original mistakenly reused alpha)
    lr.fit(x_train, y_train)
    y_pred = lr.predict(x_test)
    rmse = np.sqrt(mean_squared_error(y_test, y_pred))
    mae = mean_absolute_error(y_test, y_pred)
    r2 = r2_score(y_test, y_pred)
    mlflow.log_metric('rmse', rmse)
    mlflow.log_metric('mae', mae)
    mlflow.log_metric('r2', r2)
    mlflow.sklearn.log_model(lr, 'elastic-net-base')
rf = RandomForestRegressor()
with mlflow.start_run():
    mlflow.set_tag('hyperparameter tuning', 'random forest')
    # max_features must be >= 1 for RandomForestRegressor, so the grid starts at 1 (the original started at 0)
    params = [{'n_estimators': [100, 250, 500, 1000], 'max_depth': list(range(3, 7)), 'max_features': list(range(1, 14))}]
    reg = GridSearchCV(rf, params, cv=10, scoring='neg_mean_absolute_error')
    reg.fit(x_train, y_train)
    y_pred = reg.predict(x_test)
    rmse = np.sqrt(mean_squared_error(y_test, y_pred))
    mae = mean_absolute_error(y_test, y_pred)
    r2 = r2_score(y_test, y_pred)
    mlflow.log_metric('rmse', rmse)
    mlflow.log_metric('mae', mae)
    mlflow.log_metric('r2', r2)
    mlflow.log_param('n_estimators', reg.best_params_['n_estimators'])
    mlflow.log_param('max_depth', reg.best_params_['max_depth'])  # logged under its real name (was mislabelled 'max_leaf_nodes')
    mlflow.sklearn.log_model(reg.best_estimator_, 'best_rf_model')
code
122244852/cell_10
[ "text_plain_output_1.png" ]
y_train.value_counts()
code
122244852/cell_12
[ "text_html_output_2.png", "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png", "text_html_output_3.png" ]
from sklearn.linear_model import ElasticNet
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
import mlflow
import numpy as np
mlflow.set_tracking_uri('https://dagshub.com/ChiragChauhan4579/MLflow-integration.mlflow')
mlflow.set_experiment(experiment_name='wine-quality')
y_train.value_counts()
alpha = 0.6
l1_ratio = 0.9
with mlflow.start_run():
    mlflow.set_tag('model', 'elastic-net')
    mlflow.log_param('alpha', alpha)
    mlflow.log_param('l1_ratio', l1_ratio)
    lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio)  # pass l1_ratio here (the original mistakenly reused alpha)
    lr.fit(x_train, y_train)
    y_pred = lr.predict(x_test)
    rmse = np.sqrt(mean_squared_error(y_test, y_pred))
    mae = mean_absolute_error(y_test, y_pred)
    r2 = r2_score(y_test, y_pred)
    mlflow.log_metric('rmse', rmse)
    mlflow.log_metric('mae', mae)
    mlflow.log_metric('r2', r2)
    mlflow.sklearn.log_model(lr, 'elastic-net-base')
    print(f'Elastic net Params: alpha: {alpha}, l1_ratio: {l1_ratio}')
    print(f'Elastic net metric: rmse: {rmse}, mae: {mae}, r2: {r2}')
code
122244852/cell_5
[ "text_plain_output_1.png" ]
import dagshub
dagshub.init('MLflow-integration', 'ChiragChauhan4579', mlflow=True)
code
73070733/cell_4
[ "text_html_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
test.head()
code
73070733/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
X = train.drop(['target'], axis=1)
X.head()
code
73070733/cell_18
[ "text_html_output_1.png" ]
from lightgbm import LGBMRegressor
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_validate, cross_val_predict
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.preprocessing import OrdinalEncoder
import numpy as np
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
X = train.drop(['target'], axis=1)
categorical_cols = [col for col in X.columns if 'cat' in col]
numerical_cols = [col for col in X.columns if 'cont' in col]
numerical_transformer = StandardScaler()
# OrdinalEncoder accepts handle_unknown='error' or 'use_encoded_value' (not 'ignore'),
# and np.int has been removed from numpy, so the original call would fail as written
categorical_transformer = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1, dtype=int)
preprocessor = ColumnTransformer(transformers=[('num', numerical_transformer, numerical_cols), ('cat', categorical_transformer, categorical_cols)])
model = LGBMRegressor(device='gpu', gpu_platform_id=0, gpu_device_id=0)
pipeline = Pipeline(steps=[('preprocessor', preprocessor), ('model', model)])
kfold = KFold(n_splits=5, shuffle=True, random_state=42)
results = cross_validate(pipeline, X=X, y=y, cv=kfold, scoring='neg_root_mean_squared_error', n_jobs=-1)
print('RMSE: %f (%f)' % (-results['test_score'].mean(), results['test_score'].std()))
code
330183/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
n_train = df_train.shape[0]
n_test = df_test.shape[0]
ratio = round(n_train / (n_train + n_test), 1)
df_train.info()
code
330183/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
n_train = df_train.shape[0]
n_test = df_test.shape[0]
ratio = round(n_train / (n_train + n_test), 1)
print('Number of observations in the training set: %d (%d%%)' % (n_train, ratio * 100))
print('Number of observations in the test set: %d (%d%%)' % (n_test, (1 - ratio) * 100))
code
330183/cell_25
[ "text_html_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
n_train = df_train.shape[0]
n_test = df_test.shape[0]
ratio = round(n_train / (n_train + n_test), 1)
df_train.sample(10)
df_titanic = df_train.drop(['Ticket', 'Cabin', 'Name'], axis=1)
df_titanic_na = df_titanic.dropna()
df_titanic_na.Sex = df_titanic.Sex.map({'female': 0, 'male': 1})
df_titanic_na.Embarked = df_titanic.Embarked.map({'C': 0, 'Q': 1, 'S': 2})
df_titanic_na.head()
code
330183/cell_83
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(n_estimators=1000, n_jobs=-1)
forest.fit(X_train, y_train)
code
330183/cell_34
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
n_train = df_train.shape[0]
n_test = df_test.shape[0]
ratio = round(n_train / (n_train + n_test), 1)
df_train.sample(10)
df_titanic = df_train.drop(['Ticket', 'Cabin', 'Name'], axis=1)
df_titanic_na = df_titanic.dropna()
df_titanic_na.Sex = df_titanic.Sex.map({'female': 0, 'male': 1})
df_titanic_na.Embarked = df_titanic.Embarked.map({'C': 0, 'Q': 1, 'S': 2})
g = sns.distplot(df_titanic_na.Survived, color="red", hist_kws={"alpha": 0.3}, kde=None)
g.set_xticks([0,1])
g.autoscale()
g.set_xticklabels(["Dead", "Survived"])
corrmat = df_titanic_na[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']].corr()
f, ax = plt.subplots(figsize=(10, 7))
sns.heatmap(corrmat, vmax=0.8, square=True)
code
330183/cell_90
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn import model_selection as cross_validation  # sklearn.cross_validation was renamed model_selection
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
n_train = df_train.shape[0]
n_test = df_test.shape[0]
ratio = round(n_train / (n_train + n_test), 1)
df_train.sample(10)
df_titanic = df_train.drop(['Ticket', 'Cabin', 'Name'], axis=1)
df_titanic_na = df_titanic.dropna()
df_titanic_na.Sex = df_titanic.Sex.map({'female': 0, 'male': 1})
df_titanic_na.Embarked = df_titanic.Embarked.map({'C': 0, 'Q': 1, 'S': 2})
g = sns.distplot(df_titanic_na.Survived, color="red", hist_kws={"alpha": 0.3}, kde=None)
g.set_xticks([0,1])
g.autoscale()
g.set_xticklabels(["Dead", "Survived"])
corrmat = df_titanic_na[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']].corr()
f, ax = plt.subplots(figsize=(10, 7))
sns.heatmap(corrmat, vmax=.8, square=True)
df_titanic_ml = df_titanic.copy()
df_titanic_ml.Embarked = df_titanic_ml.Embarked.fillna('Southampton')
df_titanic_ml[df_titanic_ml.Embarked.isnull()].shape
null_age = df_titanic_ml.Age.isnull()
df_titanic_ml[null_age].shape
df_titanic_ml = df_titanic_ml[np.invert(null_age)]
forest = RandomForestClassifier(n_estimators=1000, n_jobs=-1)
forest.fit(X_train, y_train)

def forest_metrics(X_test, y_test, clf):
    f_preds = clf.predict_proba(X_test)[:, 1]
    f_fpr, f_tpr, _ = metrics.roc_curve(y_test, f_preds)
    fig, ax = plt.subplots()
    ax.plot(f_fpr, f_tpr)
    lims = [
        np.min([ax.get_xlim(), ax.get_ylim()]),  # min of both axes
        np.max([ax.get_xlim(), ax.get_ylim()]),  # max of both axes
    ]
    print("Model Accuracy: %.1f%%" % (clf.score(X_test, y_test) * 100))
    print("Model ROC AUC: %.1f%%" % (metrics.roc_auc_score(y_test, f_preds) * 100))
    print("ROC Curve")
    ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)

print(metrics.classification_report(y_test, forest.predict(X_test)))
code
330183/cell_87
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn import model_selection as cross_validation  # sklearn.cross_validation was renamed model_selection
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
n_train = df_train.shape[0]
n_test = df_test.shape[0]
ratio = round(n_train / (n_train + n_test), 1)
df_train.sample(10)
df_titanic = df_train.drop(['Ticket', 'Cabin', 'Name'], axis=1)
df_titanic_na = df_titanic.dropna()
df_titanic_na.Sex = df_titanic.Sex.map({'female': 0, 'male': 1})
df_titanic_na.Embarked = df_titanic.Embarked.map({'C': 0, 'Q': 1, 'S': 2})
g = sns.distplot(df_titanic_na.Survived, color="red", hist_kws={"alpha": 0.3}, kde=None)
g.set_xticks([0,1])
g.autoscale()
g.set_xticklabels(["Dead", "Survived"])
corrmat = df_titanic_na[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']].corr()
f, ax = plt.subplots(figsize=(10, 7))
sns.heatmap(corrmat, vmax=.8, square=True)
df_titanic_ml = df_titanic.copy()
df_titanic_ml.Embarked = df_titanic_ml.Embarked.fillna('Southampton')
df_titanic_ml[df_titanic_ml.Embarked.isnull()].shape
null_age = df_titanic_ml.Age.isnull()
df_titanic_ml[null_age].shape
df_titanic_ml = df_titanic_ml[np.invert(null_age)]
forest = RandomForestClassifier(n_estimators=1000, n_jobs=-1)
forest.fit(X_train, y_train)

def forest_metrics(X_test, y_test, clf):
    f_preds = clf.predict_proba(X_test)[:, 1]
    f_fpr, f_tpr, _ = metrics.roc_curve(y_test, f_preds)
    fig, ax = plt.subplots()
    ax.plot(f_fpr, f_tpr)
    lims = [
        np.min([ax.get_xlim(), ax.get_ylim()]),  # min of both axes
        np.max([ax.get_xlim(), ax.get_ylim()]),  # max of both axes
    ]
    print("Model Accuracy: %.1f%%" % (clf.score(X_test, y_test) * 100))
    print("Model ROC AUC: %.1f%%" % (metrics.roc_auc_score(y_test, f_preds) * 100))
    print("ROC Curve")
    ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)

forest_metrics(X_test, y_test, forest)
code
330183/cell_44
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
n_train = df_train.shape[0]
n_test = df_test.shape[0]
ratio = round(n_train / (n_train + n_test), 1)
df_train.sample(10)
df_titanic = df_train.drop(['Ticket', 'Cabin', 'Name'], axis=1)
df_titanic_na = df_titanic.dropna()
df_titanic_na.Sex = df_titanic.Sex.map({'female': 0, 'male': 1})
df_titanic_na.Embarked = df_titanic.Embarked.map({'C': 0, 'Q': 1, 'S': 2})
g = sns.distplot(df_titanic_na.Survived, color="red", hist_kws={"alpha": 0.3}, kde=None)
g.set_xticks([0,1])
g.autoscale()
g.set_xticklabels(["Dead", "Survived"])
corrmat = df_titanic_na[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']].corr()
f, ax = plt.subplots(figsize=(10, 7))
sns.heatmap(corrmat, vmax=.8, square=True)
survived = df_titanic_na.Survived == 1
died = df_titanic_na.Survived == 0
g = sns.distplot(df_titanic_na.Embarked, color="darkgreen", hist_kws={"alpha": 0.3}, kde=None)
g.set_xticklabels(["Cherbourg", "", "Queenstown", "", "Southampton"])
sns.distplot(df_titanic_na[survived].Age, color='darkgreen', hist_kws={'alpha': 0.3})
sns.distplot(df_titanic_na[died].Age, color='darkred', hist_kws={'alpha': 0.3})
code
330183/cell_73
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
n_train = df_train.shape[0]
n_test = df_test.shape[0]
ratio = round(n_train / (n_train + n_test), 1)
df_train.sample(10)
df_titanic = df_train.drop(['Ticket', 'Cabin', 'Name'], axis=1)
df_titanic_na = df_titanic.dropna()
df_titanic_na.Sex = df_titanic.Sex.map({'female': 0, 'male': 1})
df_titanic_na.Embarked = df_titanic.Embarked.map({'C': 0, 'Q': 1, 'S': 2})
df_titanic_ml = df_titanic.copy()
df_titanic_ml.Embarked = df_titanic_ml.Embarked.fillna('Southampton')
df_titanic_ml[df_titanic_ml.Embarked.isnull()].shape
null_age = df_titanic_ml.Age.isnull()
df_titanic_ml[null_age].shape
df_titanic_ml = df_titanic_ml[np.invert(null_age)]
df_titanic_ml.Sex = df_titanic.Sex.map({'female': 0, 'male': 1})
df_titanic_ml.Embarked = df_titanic.Embarked.map({'C': 0, 'Q': 1, 'S': 2})
emb_dummies = pd.get_dummies(df_titanic_ml.Embarked, prefix='Embarked')
df_titanic_ml = df_titanic_ml.join(emb_dummies)
df_titanic_ml.drop('Embarked', axis=1, inplace=True)
df_titanic_ml.head()
code
330183/cell_41
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
n_train = df_train.shape[0]
n_test = df_test.shape[0]
ratio = round(n_train / (n_train + n_test), 1)
df_train.sample(10)
df_titanic = df_train.drop(['Ticket', 'Cabin', 'Name'], axis=1)
df_titanic_na = df_titanic.dropna()
df_titanic_na.Sex = df_titanic.Sex.map({'female': 0, 'male': 1})
df_titanic_na.Embarked = df_titanic.Embarked.map({'C': 0, 'Q': 1, 'S': 2})
g = sns.distplot(df_titanic_na.Survived, color="red", hist_kws={"alpha": 0.3}, kde=None)
g.set_xticks([0,1])
g.autoscale()
g.set_xticklabels(["Dead", "Survived"])
corrmat = df_titanic_na[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']].corr()
f, ax = plt.subplots(figsize=(10, 7))
sns.heatmap(corrmat, vmax=.8, square=True)
survived = df_titanic_na.Survived == 1
died = df_titanic_na.Survived == 0
g = sns.distplot(df_titanic_na.Embarked, color='darkgreen', hist_kws={'alpha': 0.3}, kde=None)
g.set_xticklabels(['Cherbourg', '', 'Queenstown', '', 'Southampton'])
code
330183/cell_50
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
n_train = df_train.shape[0]
n_test = df_test.shape[0]
ratio = round(n_train / (n_train + n_test), 1)
df_train.sample(10)
df_titanic = df_train.drop(['Ticket', 'Cabin', 'Name'], axis=1)
df_titanic_na = df_titanic.dropna()
df_titanic_na.Sex = df_titanic.Sex.map({'female': 0, 'male': 1})
df_titanic_na.Embarked = df_titanic.Embarked.map({'C': 0, 'Q': 1, 'S': 2})
g = sns.distplot(df_titanic_na.Survived, color="red", hist_kws={"alpha": 0.3}, kde=None)
g.set_xticks([0,1])
g.autoscale()
g.set_xticklabels(["Dead", "Survived"])
corrmat = df_titanic_na[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']].corr()
f, ax = plt.subplots(figsize=(10, 7))
sns.heatmap(corrmat, vmax=.8, square=True)
survived = df_titanic_na.Survived == 1
died = df_titanic_na.Survived == 0
g = sns.distplot(df_titanic_na.Embarked, color="darkgreen", hist_kws={"alpha": 0.3}, kde=None)
g.set_xticklabels(["Cherbourg", "", "Queenstown", "", "Southampton"])
g = sns.factorplot(x="Pclass", y="Survived", hue="Sex", data=df_titanic_na, size=6, kind="bar", palette="muted", ci=None)
g.despine(left=True)
g.set_ylabels("Survival Probability")
g.set_xlabels("Passenger Class")
g = sns.factorplot(x='Embarked', y='Survived', hue='Pclass', data=df_titanic_na, size=6, kind='bar', palette='muted', ci=None)
g.despine(left=True)
g.set_ylabels('Survival Probability')
g.set_xlabels('Embarkation Port')
g.set_xticklabels(['Cherbourg', 'Queenstown', 'Southampton'])
code
330183/cell_64
[ "text_plain_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
n_train = df_train.shape[0]
n_test = df_test.shape[0]
ratio = round(n_train / (n_train + n_test), 1)
df_train.sample(10)
df_titanic = df_train.drop(['Ticket', 'Cabin', 'Name'], axis=1)
df_titanic_na = df_titanic.dropna()
df_titanic_na.Sex = df_titanic.Sex.map({'female': 0, 'male': 1})
df_titanic_na.Embarked = df_titanic.Embarked.map({'C': 0, 'Q': 1, 'S': 2})
df_titanic_ml = df_titanic.copy()
df_titanic_ml.Embarked = df_titanic_ml.Embarked.fillna('Southampton')
df_titanic_ml[df_titanic_ml.Embarked.isnull()].shape
null_age = df_titanic_ml.Age.isnull()
df_titanic_ml[null_age].shape
code
330183/cell_89
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import metrics
from sklearn import model_selection as cross_validation  # sklearn.cross_validation was renamed model_selection
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
n_train = df_train.shape[0]
n_test = df_test.shape[0]
ratio = round(n_train / (n_train + n_test), 1)
df_train.sample(10)
df_titanic = df_train.drop(['Ticket', 'Cabin', 'Name'], axis=1)
df_titanic_na = df_titanic.dropna()
df_titanic_na.Sex = df_titanic.Sex.map({'female': 0, 'male': 1})
df_titanic_na.Embarked = df_titanic.Embarked.map({'C': 0, 'Q': 1, 'S': 2})
g = sns.distplot(df_titanic_na.Survived, color="red", hist_kws={"alpha": 0.3}, kde=None)
g.set_xticks([0,1])
g.autoscale()
g.set_xticklabels(["Dead", "Survived"])
corrmat = df_titanic_na[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']].corr()
f, ax = plt.subplots(figsize=(10, 7))
sns.heatmap(corrmat, vmax=.8, square=True)
df_titanic_ml = df_titanic.copy()
df_titanic_ml.Embarked = df_titanic_ml.Embarked.fillna('Southampton')
df_titanic_ml[df_titanic_ml.Embarked.isnull()].shape
null_age = df_titanic_ml.Age.isnull()
df_titanic_ml[null_age].shape
df_titanic_ml = df_titanic_ml[np.invert(null_age)]
forest = RandomForestClassifier(n_estimators=1000, n_jobs=-1)
forest.fit(X_train, y_train)

def forest_metrics(X_test, y_test, clf):
    f_preds = clf.predict_proba(X_test)[:, 1]
    f_fpr, f_tpr, _ = metrics.roc_curve(y_test, f_preds)
    fig, ax = plt.subplots()
    ax.plot(f_fpr, f_tpr)
    lims = [
        np.min([ax.get_xlim(), ax.get_ylim()]),  # min of both axes
        np.max([ax.get_xlim(), ax.get_ylim()]),  # max of both axes
    ]
    print("Model Accuracy: %.1f%%" % (clf.score(X_test, y_test) * 100))
    print("Model ROC AUC: %.1f%%" % (metrics.roc_auc_score(y_test, f_preds) * 100))
    print("ROC Curve")
    ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)

print(metrics.confusion_matrix(y_test, forest.predict(X_test)))
code
330183/cell_68
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
n_train = df_train.shape[0]
n_test = df_test.shape[0]
ratio = round(n_train / (n_train + n_test), 1)
df_train.sample(10)
df_titanic = df_train.drop(['Ticket', 'Cabin', 'Name'], axis=1)
df_titanic_na = df_titanic.dropna()
df_titanic_na.Sex = df_titanic.Sex.map({'female': 0, 'male': 1})
df_titanic_na.Embarked = df_titanic.Embarked.map({'C': 0, 'Q': 1, 'S': 2})
df_titanic_ml = df_titanic.copy()
df_titanic_ml.Embarked = df_titanic_ml.Embarked.fillna('Southampton')
df_titanic_ml[df_titanic_ml.Embarked.isnull()].shape
null_age = df_titanic_ml.Age.isnull()
df_titanic_ml[null_age].shape
df_titanic_ml = df_titanic_ml[np.invert(null_age)]
df_titanic_ml.info()
code
330183/cell_62
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
n_train = df_train.shape[0]
n_test = df_test.shape[0]
ratio = round(n_train / (n_train + n_test), 1)
df_train.sample(10)
df_titanic = df_train.drop(['Ticket', 'Cabin', 'Name'], axis=1)
df_titanic_na = df_titanic.dropna()
df_titanic_na.Sex = df_titanic.Sex.map({'female': 0, 'male': 1})
df_titanic_na.Embarked = df_titanic.Embarked.map({'C': 0, 'Q': 1, 'S': 2})
df_titanic_ml = df_titanic.copy()
df_titanic_ml.Embarked = df_titanic_ml.Embarked.fillna('Southampton')
df_titanic_ml[df_titanic_ml.Embarked.isnull()].shape
code
330183/cell_80
[ "text_html_output_1.png" ]
from sklearn import metrics
from sklearn import model_selection as cross_validation  # sklearn.cross_validation was renamed model_selection
from sklearn.ensemble import RandomForestClassifier
code
330183/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
n_train = df_train.shape[0]
n_test = df_test.shape[0]
ratio = round(n_train / (n_train + n_test), 1)
df_train.sample(10)
code
330183/cell_47
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
n_train = df_train.shape[0]
n_test = df_test.shape[0]
ratio = round(n_train / (n_train + n_test), 1)
df_train.sample(10)
df_titanic = df_train.drop(['Ticket', 'Cabin', 'Name'], axis=1)
df_titanic_na = df_titanic.dropna()
df_titanic_na.Sex = df_titanic.Sex.map({'female': 0, 'male': 1})
df_titanic_na.Embarked = df_titanic.Embarked.map({'C': 0, 'Q': 1, 'S': 2})
g = sns.distplot(df_titanic_na.Survived, color="red", hist_kws={"alpha": 0.3}, kde=None)
g.set_xticks([0,1])
g.autoscale()
g.set_xticklabels(["Dead", "Survived"])
corrmat = df_titanic_na[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']].corr()
f, ax = plt.subplots(figsize=(10, 7))
sns.heatmap(corrmat, vmax=.8, square=True)
survived = df_titanic_na.Survived == 1
died = df_titanic_na.Survived == 0
g = sns.distplot(df_titanic_na.Embarked, color="darkgreen", hist_kws={"alpha": 0.3}, kde=None)
g.set_xticklabels(["Cherbourg", "", "Queenstown", "", "Southampton"])
g = sns.factorplot(x='Pclass', y='Survived', hue='Sex', data=df_titanic_na, size=6, kind='bar', palette='muted', ci=None)
g.despine(left=True)
g.set_ylabels('Survival Probability')
g.set_xlabels('Passenger Class')
code
330183/cell_31
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
n_train = df_train.shape[0]
n_test = df_test.shape[0]
ratio = round(n_train / (n_train + n_test), 1)
df_train.sample(10)
df_titanic = df_train.drop(['Ticket', 'Cabin', 'Name'], axis=1)
df_titanic_na = df_titanic.dropna()
df_titanic_na.Sex = df_titanic.Sex.map({'female': 0, 'male': 1})
df_titanic_na.Embarked = df_titanic.Embarked.map({'C': 0, 'Q': 1, 'S': 2})
g = sns.distplot(df_titanic_na.Survived, color='red', hist_kws={'alpha': 0.3}, kde=None)
g.set_xticks([0, 1])
g.autoscale()
g.set_xticklabels(['Dead', 'Survived'])
code