path: string (13 to 17 chars)
screenshot_names: sequence (1 to 873 items)
code: string (0 to 40.4k chars)
cell_type: string (1 distinct value)
90129163/cell_32
[ "text_plain_output_1.png" ]
trn_data = name_ext(trn_data)
tst_data = name_ext(tst_data)
code
90129163/cell_51
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

def one_hot(df, one_hot_categ):
    for col in one_hot_categ:
        tmp = pd.get_dummies(df[col], prefix=col)
        df = pd.concat([df, tmp], axis=1)
    df = df.drop(columns=one_hot_categ)
    return df

trn_data = one_hot(trn_data, categorical_features_onehot)
tst_data = one_hot(tst_data, categorical_features_onehot)
trn_data.info(verbose=True)
code
90129163/cell_68
[ "text_plain_output_1.png", "image_output_1.png" ]
sub['Transported'] = preds
sub.to_csv('submission_simple_split_03112022.csv', index=False)
code
90129163/cell_62
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score

val_preds = cls.predict(X_valid[features])
val_preds = val_preds.astype('bool')
accuracy = accuracy_score(val_preds, y_valid)
code
90129163/cell_59
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from xgboost import XGBClassifier
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier
code
90129163/cell_58
[ "text_plain_output_1.png" ]
X_train.shape
code
90129163/cell_28
[ "text_plain_output_1.png" ]
trn_data = fill_missing(trn_data)
tst_data = fill_missing(tst_data)
code
90129163/cell_78
[ "text_plain_output_1.png" ]
scores = []
y_probs = []
for fold, (trn_id, val_id) in enumerate(folds.split(trn_data[features], trn_data[target_feature])):
    X_train, y_train = (trn_data[features].iloc[trn_id], trn_data[target_feature].iloc[trn_id])
    X_valid, y_valid = (trn_data[features].iloc[val_id], trn_data[target_feature].iloc[val_id])
    model = XGBClassifier(**optuna_params)
    model.fit(X_train, y_train, eval_set=[(X_valid, y_valid)], eval_metric=['logloss'], early_stopping_rounds=50, verbose=False)
    valid_pred = model.predict(X_valid)
    valid_score = accuracy_score(y_valid, valid_pred)
    print('Fold:', fold, 'Accuracy:', valid_score)
    scores.append(valid_score)
    y_probs.append(model.predict_proba(tst_data[features]))
code
90129163/cell_8
[ "text_plain_output_1.png" ]
pd.options.display.float_format = '{:,.2f}'.format
pd.set_option('display.max_columns', NCOLS)
pd.set_option('display.max_rows', NROWS)
code
90129163/cell_15
[ "text_plain_output_1.png" ]
def describe_categ(df):
    for col in df.columns:
        unique_samples = list(df[col].unique())
        unique_values = df[col].nunique()
        print(f' {col}: {unique_values} Unique Values, Data Sample >> {unique_samples[:5]}')
    print(' ...')
    return None
code
90129163/cell_16
[ "text_plain_output_1.png" ]
describe_categ(trn_data)
code
90129163/cell_38
[ "text_plain_output_1.png" ]
def route(df):
    """
    Combine origin and destination into a new 'Route' feature for training.

    Args:
        df: input DataFrame with 'HomePlanet' and 'Destination' columns.
    Returns:
        The DataFrame with an added 'Route' column.
    """
    df['Route'] = df['HomePlanet'] + df['Destination']
    return df
code
90129163/cell_75
[ "text_plain_output_1.png" ]
import optuna
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
code
90129163/cell_47
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder

def encode_categorical(train_df, test_df, categ_feat=categorical_features):
    """
    Label-encode the given categorical features, fitting each encoder on the
    concatenation of the train and test columns so both share the same mapping.
    """
    encoder_dict = {}
    concat_data = pd.concat([trn_data[categ_feat], tst_data[categ_feat]])
    for col in concat_data.columns:
        print('Encoding: ', col, '...')
        encoder = LabelEncoder()
        encoder.fit(concat_data[col])
        encoder_dict[col] = encoder
        train_df[col + '_Enc'] = encoder.transform(train_df[col])
        test_df[col + '_Enc'] = encoder.transform(test_df[col])
    train_df = train_df.drop(columns=categ_feat, axis=1)
    test_df = test_df.drop(columns=categ_feat, axis=1)
    return (train_df, test_df)
code
90129163/cell_66
[ "text_plain_output_1.png" ]
plt.figure(figsize=(10, 7))
feature_importance(cls)
code
90129163/cell_17
[ "text_html_output_1.png", "text_plain_output_1.png" ]
describe_categ(tst_data)
code
90129163/cell_35
[ "text_plain_output_1.png" ]
trn_data = trn_data.merge(trn_relatives, how='left', on=['FamilyName'])
tst_data = tst_data.merge(tst_relatives, how='left', on=['FamilyName'])
code
90129163/cell_77
[ "text_plain_output_1.png" ]
optuna_params = {'n_estimators': 474, 'max_depth': 12, 'learning_rate': 0.17092496820170439, 'subsample': 0.8681931753955343, 'colsample_bytree': 0.6753406152924646, 'reg_lambda': 8.439432864212677, 'reg_alpha': 1.6521594249189673, 'gamma': 9.986385923158347, 'min_child_weight': 11, 'random_state': 69, 'objective': 'binary:logistic', 'tree_method': 'gpu_hist'}
code
90129163/cell_43
[ "text_plain_output_1.png" ]
trn_data = extract_group(trn_data)
tst_data = extract_group(tst_data)
code
90129163/cell_31
[ "text_plain_output_1.png" ]
def name_ext(df):
    """
    Split the Name of the passenger into First and Family...
    """
    df['FirstName'] = df['Name'].str.split(' ', expand=True)[0]
    df['FamilyName'] = df['Name'].str.split(' ', expand=True)[1]
    df.drop(columns=['Name'], inplace=True)
    return df
code
90129163/cell_46
[ "text_plain_output_1.png" ]
numerical_features = ['Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck', 'Total_Billed']
categorical_features = ['FirstName', 'FamilyName', 'CabinNum', 'TravelGroup']
categorical_features_onehot = ['HomePlanet', 'CryoSleep', 'CabinDeck', 'CabinSide', 'Destination', 'VIP']
target_feature = 'Transported'
code
90129163/cell_24
[ "text_html_output_1.png", "text_plain_output_1.png" ]
analyse_categ_target(trn_data)
code
90129163/cell_14
[ "text_plain_output_1.png" ]
trn_data.describe()
code
90129163/cell_53
[ "text_plain_output_1.png" ]
trn_data.columns
code
90129163/cell_10
[ "text_plain_output_1.png" ]
trn_data = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
tst_data = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
sub = pd.read_csv('/kaggle/input/spaceship-titanic/sample_submission.csv')
code
90129163/cell_27
[ "text_html_output_1.png", "text_plain_output_1.png" ]
def fill_missing(df):
    """
    Fill NaN or missing values with the mean (numeric) or most common value (categorical)...
    """
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    numeric_tmp = df.select_dtypes(include=numerics)
    categ_tmp = df.select_dtypes(exclude=numerics)
    for col in numeric_tmp.columns:
        print(col)
        df[col] = df[col].fillna(value=df[col].mean())
    for col in categ_tmp.columns:
        print(col)
        df[col] = df[col].fillna(value=df[col].mode()[0])
    print('...')
    return df
code
90129163/cell_37
[ "text_plain_output_1.png" ]
trn_data = cabin_separation(trn_data)
tst_data = cabin_separation(tst_data)
code
90129163/cell_12
[ "text_plain_output_1.png" ]
trn_data.info()
code
90129163/cell_71
[ "text_plain_output_1.png" ]
X_train, X_valid, y_train, y_valid = train_test_split(trn_data[features], trn_data[target_feature])

def objective(trial):
    n_estimators = trial.suggest_int('n_estimators', 8, 2048)
    max_depth = trial.suggest_int('max_depth', 2, 16)
    learning_rate = trial.suggest_float('learning_rate', 0.01, 0.2)
    subsample = trial.suggest_float('subsample', 0.5, 1)
    colsample_bytree = trial.suggest_float('colsample_bytree', 0.5, 1)
    reg_lambda = trial.suggest_float('reg_lambda', 1, 20)
    reg_alpha = trial.suggest_float('reg_alpha', 0, 20)
    gamma = trial.suggest_float('gamma', 0, 20)
    min_child_weight = trial.suggest_int('min_child_weight', 0, 128)
    clf = XGBClassifier(n_estimators=n_estimators, learning_rate=learning_rate, max_depth=max_depth,
                        subsample=subsample, colsample_bytree=colsample_bytree, reg_lambda=reg_lambda,
                        reg_alpha=reg_alpha, gamma=gamma, min_child_weight=min_child_weight,
                        random_state=69, objective='binary:logistic', tree_method='gpu_hist')
    clf.fit(X_train, y_train)
    valid_pred = clf.predict(X_valid)
    score = accuracy_score(y_valid, valid_pred)
    return score
code
90129163/cell_70
[ "text_plain_output_1.png" ]
import optuna
code
90129163/cell_36
[ "text_plain_output_1.png" ]
def cabin_separation(df):
    """
    Split the Cabin name into Deck, Number and Side
    """
    df['CabinDeck'] = df['Cabin'].str.split('/', expand=True)[0]
    df['CabinNum'] = df['Cabin'].str.split('/', expand=True)[1]
    df['CabinSide'] = df['Cabin'].str.split('/', expand=True)[2]
    df.drop(columns=['Cabin'], inplace=True)
    return df
code
18140916/cell_6
[ "text_plain_output_1.png" ]
from preprocess import DataPreprocessModule

data_preprocess_module = DataPreprocessModule(train_path='../input/hdb-resale-price-prediction/train.csv', test_path='../input/hdb-resale-price-prediction/test.csv')
X_train, X_val, X_test, y_train, y_val = data_preprocess_module.get_preprocessed_data()
print('Shape of X_train:', X_train.shape)
print('Shape of X_val:', X_val.shape)
print('Shape of X_test:', X_test.shape)
print('Shape of y_train:', y_train.shape)
print('Shape of y_val:', y_val.shape)
code
18140916/cell_16
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from preprocess import DataPreprocessModule
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
import numpy as np
import time

data_preprocess_module = DataPreprocessModule(train_path='../input/hdb-resale-price-prediction/train.csv', test_path='../input/hdb-resale-price-prediction/test.csv')
X_train, X_val, X_test, y_train, y_val = data_preprocess_module.get_preprocessed_data()
metric = lambda y1_real, y2_real: np.sqrt(mean_squared_error(y1_real, y2_real))
y_trfm = lambda y: np.expm1(y)

def get_score(model, X, y):
    preds = model.predict(X)
    preds = y_trfm(preds)
    y = y_trfm(y)
    return metric(preds, y)

pipeline_reg = data_preprocess_module.build_pipeline(LinearRegression())
pipeline_reg.fit(X_train, y_train)
get_score(pipeline_reg, X_val, y_val)

pipeline_lasso = data_preprocess_module.build_pipeline(Lasso())
params = {'model__alpha': [10, 1, 0.1, 0.01, 0.001]}
lasso = GridSearchCV(pipeline_lasso, params, cv=5, scoring='neg_mean_squared_error', n_jobs=-1)
time_start = time.time()
lasso.fit(X_train, y_train)
get_score(lasso, X_val, y_val)

pipeline_ridge = data_preprocess_module.build_pipeline(Ridge())
params = {'model__alpha': [10, 1, 0.1, 0.01, 0.001]}
ridge = GridSearchCV(pipeline_ridge, params, cv=5, scoring='neg_mean_squared_error', n_jobs=-1)
time_start = time.time()
ridge.fit(X_train, y_train)
get_score(ridge, X_val, y_val)

pipeline_elast = data_preprocess_module.build_pipeline(ElasticNet())
params = {'model__alpha': [10, 1, 0.1, 0.01, 0.001], 'model__l1_ratio': [0.25, 0.5, 0.75]}
elast = GridSearchCV(pipeline_elast, params, cv=5, scoring='neg_mean_squared_error', n_jobs=-1)
time_start = time.time()
elast.fit(X_train, y_train)
print('Time taken for hyperparameter tuning: {:.2f} min'.format((time.time() - time_start) / 60))
get_score(elast, X_val, y_val)
code
18140916/cell_17
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from preprocess import DataPreprocessModule
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
import numpy as np
import time

data_preprocess_module = DataPreprocessModule(train_path='../input/hdb-resale-price-prediction/train.csv', test_path='../input/hdb-resale-price-prediction/test.csv')
X_train, X_val, X_test, y_train, y_val = data_preprocess_module.get_preprocessed_data()
metric = lambda y1_real, y2_real: np.sqrt(mean_squared_error(y1_real, y2_real))
y_trfm = lambda y: np.expm1(y)

def get_score(model, X, y):
    preds = model.predict(X)
    preds = y_trfm(preds)
    y = y_trfm(y)
    return metric(preds, y)

pipeline_reg = data_preprocess_module.build_pipeline(LinearRegression())
pipeline_reg.fit(X_train, y_train)
get_score(pipeline_reg, X_val, y_val)

pipeline_lasso = data_preprocess_module.build_pipeline(Lasso())
params = {'model__alpha': [10, 1, 0.1, 0.01, 0.001]}
lasso = GridSearchCV(pipeline_lasso, params, cv=5, scoring='neg_mean_squared_error', n_jobs=-1)
time_start = time.time()
lasso.fit(X_train, y_train)
get_score(lasso, X_val, y_val)

pipeline_ridge = data_preprocess_module.build_pipeline(Ridge())
params = {'model__alpha': [10, 1, 0.1, 0.01, 0.001]}
ridge = GridSearchCV(pipeline_ridge, params, cv=5, scoring='neg_mean_squared_error', n_jobs=-1)
time_start = time.time()
ridge.fit(X_train, y_train)
get_score(ridge, X_val, y_val)

pipeline_elast = data_preprocess_module.build_pipeline(ElasticNet())
params = {'model__alpha': [10, 1, 0.1, 0.01, 0.001], 'model__l1_ratio': [0.25, 0.5, 0.75]}
elast = GridSearchCV(pipeline_elast, params, cv=5, scoring='neg_mean_squared_error', n_jobs=-1)
time_start = time.time()
elast.fit(X_train, y_train)
get_score(elast, X_val, y_val)
elast.best_params_
code
18140916/cell_14
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from preprocess import DataPreprocessModule
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
import numpy as np
import time

data_preprocess_module = DataPreprocessModule(train_path='../input/hdb-resale-price-prediction/train.csv', test_path='../input/hdb-resale-price-prediction/test.csv')
X_train, X_val, X_test, y_train, y_val = data_preprocess_module.get_preprocessed_data()
metric = lambda y1_real, y2_real: np.sqrt(mean_squared_error(y1_real, y2_real))
y_trfm = lambda y: np.expm1(y)

def get_score(model, X, y):
    preds = model.predict(X)
    preds = y_trfm(preds)
    y = y_trfm(y)
    return metric(preds, y)

pipeline_reg = data_preprocess_module.build_pipeline(LinearRegression())
pipeline_reg.fit(X_train, y_train)
get_score(pipeline_reg, X_val, y_val)

pipeline_lasso = data_preprocess_module.build_pipeline(Lasso())
params = {'model__alpha': [10, 1, 0.1, 0.01, 0.001]}
lasso = GridSearchCV(pipeline_lasso, params, cv=5, scoring='neg_mean_squared_error', n_jobs=-1)
time_start = time.time()
lasso.fit(X_train, y_train)
get_score(lasso, X_val, y_val)

pipeline_ridge = data_preprocess_module.build_pipeline(Ridge())
params = {'model__alpha': [10, 1, 0.1, 0.01, 0.001]}
ridge = GridSearchCV(pipeline_ridge, params, cv=5, scoring='neg_mean_squared_error', n_jobs=-1)
time_start = time.time()
ridge.fit(X_train, y_train)
print('Time taken for hyperparameter tuning: {:.2f} min'.format((time.time() - time_start) / 60))
get_score(ridge, X_val, y_val)
code
18140916/cell_10
[ "text_plain_output_1.png" ]
from preprocess import DataPreprocessModule
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import numpy as np

data_preprocess_module = DataPreprocessModule(train_path='../input/hdb-resale-price-prediction/train.csv', test_path='../input/hdb-resale-price-prediction/test.csv')
X_train, X_val, X_test, y_train, y_val = data_preprocess_module.get_preprocessed_data()
metric = lambda y1_real, y2_real: np.sqrt(mean_squared_error(y1_real, y2_real))
y_trfm = lambda y: np.expm1(y)

def get_score(model, X, y):
    preds = model.predict(X)
    preds = y_trfm(preds)
    y = y_trfm(y)
    return metric(preds, y)

pipeline_reg = data_preprocess_module.build_pipeline(LinearRegression())
pipeline_reg.fit(X_train, y_train)
get_score(pipeline_reg, X_val, y_val)
code
18140916/cell_12
[ "text_plain_output_1.png" ]
from preprocess import DataPreprocessModule
from sklearn.linear_model import LinearRegression, Lasso, Ridge, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import GridSearchCV
import numpy as np
import time

data_preprocess_module = DataPreprocessModule(train_path='../input/hdb-resale-price-prediction/train.csv', test_path='../input/hdb-resale-price-prediction/test.csv')
X_train, X_val, X_test, y_train, y_val = data_preprocess_module.get_preprocessed_data()
metric = lambda y1_real, y2_real: np.sqrt(mean_squared_error(y1_real, y2_real))
y_trfm = lambda y: np.expm1(y)

def get_score(model, X, y):
    preds = model.predict(X)
    preds = y_trfm(preds)
    y = y_trfm(y)
    return metric(preds, y)

pipeline_reg = data_preprocess_module.build_pipeline(LinearRegression())
pipeline_reg.fit(X_train, y_train)
get_score(pipeline_reg, X_val, y_val)

pipeline_lasso = data_preprocess_module.build_pipeline(Lasso())
params = {'model__alpha': [10, 1, 0.1, 0.01, 0.001]}
lasso = GridSearchCV(pipeline_lasso, params, cv=5, scoring='neg_mean_squared_error', n_jobs=-1)
time_start = time.time()
lasso.fit(X_train, y_train)
print('Time taken for hyperparameter tuning: {:.2f} min'.format((time.time() - time_start) / 60))
get_score(lasso, X_val, y_val)
code
331878/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Salaries.csv')
df.loc[df['BasePay'] == 0.0] = 0.0
print(df['BasePay'])
code
331878/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Salaries.csv')
print(df.dtypes)
code
331878/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Salaries.csv')
code
331878/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Salaries.csv')
df.loc[df['BasePay'] == 0.0] = 0.0
df1 = df.groupby(by=['Year', 'JobTitle'])['BasePay'].sum()
print(df1)
code
331878/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd

print(check_output(['ls', '../input']).decode('utf8'))
code
331878/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Salaries.csv')
df.loc[df['BasePay'] == 0.0] = 0.0
print(df['BasePay'])
code
331878/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Salaries.csv')
df.loc[df['BasePay'] == 0.0] = 0.0
df['BasePay'] = df['BasePay'].astype('float')
code
331878/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Salaries.csv')
print(df.describe())
code
331878/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Salaries.csv')
df.loc[df['BasePay'] == 0.0] = 0.0
df1 = df.groupby(by=['Year', 'JobTitle'])['BasePay'].sum()
code
331878/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/Salaries.csv')
df['BasePay'].fillna(0)  # note: result is not assigned back, so df itself is unchanged
code
128008988/cell_21
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/customer-shopping-dataset/customer_shopping_data.csv')
df.drop(['invoice_no', 'customer_id', 'invoice_date'], axis=1, inplace=True)
df.isnull().sum()
g = df.groupby('gender')[['price']].sum().reset_index()
g
plt.ticklabel_format(style='plain', axis='y')
c = df.groupby('category')[['price']].sum().reset_index()
c
plt.ticklabel_format(style='plain', axis='y')
plt.ticklabel_format(style='plain', axis='y')
p = df.groupby('payment_method')[['price']].sum().reset_index()
p
plt.ticklabel_format(style='plain', axis='y')
plt.ticklabel_format(style='plain', axis='y')
s = df.groupby('shopping_mall')[['price']].sum().reset_index()
s
plt.figure(figsize=(15, 5))
sns.barplot(x='shopping_mall', y='price', data=s)
plt.title('money spent in each mall')
plt.ylabel('amount spent')
plt.ticklabel_format(style='plain', axis='y')
plt.show()
code
128008988/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/customer-shopping-dataset/customer_shopping_data.csv')
df.drop(['invoice_no', 'customer_id', 'invoice_date'], axis=1, inplace=True)
df.isnull().sum()
g = df.groupby('gender')[['price']].sum().reset_index()
g
sns.barplot(x='gender', y='price', data=g)
plt.ylabel('total amount spent')
plt.title('amount spent by m/f on shopping')
plt.ticklabel_format(style='plain', axis='y')
plt.show()
code
128008988/cell_9
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-shopping-dataset/customer_shopping_data.csv')
df.drop(['invoice_no', 'customer_id', 'invoice_date'], axis=1, inplace=True)
df.isnull().sum()
df.describe()
code
128008988/cell_20
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-shopping-dataset/customer_shopping_data.csv')
df.drop(['invoice_no', 'customer_id', 'invoice_date'], axis=1, inplace=True)
df.isnull().sum()
g = df.groupby('gender')[['price']].sum().reset_index()
g
c = df.groupby('category')[['price']].sum().reset_index()
c
p = df.groupby('payment_method')[['price']].sum().reset_index()
p
s = df.groupby('shopping_mall')[['price']].sum().reset_index()
s
code
128008988/cell_2
[ "image_output_1.png" ]
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import warnings

warnings.filterwarnings('ignore')
code
128008988/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-shopping-dataset/customer_shopping_data.csv')
df.drop(['invoice_no', 'customer_id', 'invoice_date'], axis=1, inplace=True)
df.isnull().sum()
for col in df.describe(include='object').columns:
    print(col)
    print(df[col].unique())
    print('--' * 50)
code
128008988/cell_19
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/customer-shopping-dataset/customer_shopping_data.csv')
df.drop(['invoice_no', 'customer_id', 'invoice_date'], axis=1, inplace=True)
df.isnull().sum()
g = df.groupby('gender')[['price']].sum().reset_index()
g
plt.ticklabel_format(style='plain', axis='y')
c = df.groupby('category')[['price']].sum().reset_index()
c
plt.ticklabel_format(style='plain', axis='y')
plt.ticklabel_format(style='plain', axis='y')
p = df.groupby('payment_method')[['price']].sum().reset_index()
p
plt.ticklabel_format(style='plain', axis='y')
sns.barplot(x='payment_method', y='price', data=df, hue='gender')
plt.title('amount paid by which method by m/f')
plt.ticklabel_format(style='plain', axis='y')
plt.ylabel('amount spent')
plt.show()
code
128008988/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-shopping-dataset/customer_shopping_data.csv')
df.drop(['invoice_no', 'customer_id', 'invoice_date'], axis=1, inplace=True)
df
code
128008988/cell_18
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/customer-shopping-dataset/customer_shopping_data.csv')
df.drop(['invoice_no', 'customer_id', 'invoice_date'], axis=1, inplace=True)
df.isnull().sum()
g = df.groupby('gender')[['price']].sum().reset_index()
g
plt.ticklabel_format(style='plain', axis='y')
c = df.groupby('category')[['price']].sum().reset_index()
c
plt.ticklabel_format(style='plain', axis='y')
plt.ticklabel_format(style='plain', axis='y')
p = df.groupby('payment_method')[['price']].sum().reset_index()
p
sns.barplot(x='payment_method', y='price', data=p)
plt.title('amount paid by which method')
plt.ticklabel_format(style='plain', axis='y')
plt.ylabel('amount spent')
plt.show()
code
128008988/cell_8
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-shopping-dataset/customer_shopping_data.csv')
df.drop(['invoice_no', 'customer_id', 'invoice_date'], axis=1, inplace=True)
df.isnull().sum()
code
128008988/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/customer-shopping-dataset/customer_shopping_data.csv')
df.drop(['invoice_no', 'customer_id', 'invoice_date'], axis=1, inplace=True)
df.isnull().sum()
g = df.groupby('gender')[['price']].sum().reset_index()
g
plt.ticklabel_format(style='plain', axis='y')
c = df.groupby('category')[['price']].sum().reset_index()
c
plt.figure(figsize=(10, 5))
sns.barplot(x='category', y='price', data=c)
plt.title('amount spent category wise')
plt.ylabel('amount spent')
plt.ticklabel_format(style='plain', axis='y')
plt.show()
code
128008988/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('/kaggle/input/customer-shopping-dataset/customer_shopping_data.csv')
df.drop(['invoice_no', 'customer_id', 'invoice_date'], axis=1, inplace=True)
df.isnull().sum()
g = df.groupby('gender')[['price']].sum().reset_index()
g
plt.ticklabel_format(style='plain', axis='y')
c = df.groupby('category')[['price']].sum().reset_index()
c
plt.ticklabel_format(style='plain', axis='y')
plt.figure(figsize=(10, 5))
sns.barplot(x='category', y='price', data=df, hue='gender')
plt.title('amount spent category wise by m/f')
plt.ylabel('amount spent')
plt.ticklabel_format(style='plain', axis='y')
plt.show()
code
128008988/cell_17
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-shopping-dataset/customer_shopping_data.csv')
df.drop(['invoice_no', 'customer_id', 'invoice_date'], axis=1, inplace=True)
df.isnull().sum()
g = df.groupby('gender')[['price']].sum().reset_index()
g
c = df.groupby('category')[['price']].sum().reset_index()
c
p = df.groupby('payment_method')[['price']].sum().reset_index()
p
code
128008988/cell_14
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-shopping-dataset/customer_shopping_data.csv')
df.drop(['invoice_no', 'customer_id', 'invoice_date'], axis=1, inplace=True)
df.isnull().sum()
g = df.groupby('gender')[['price']].sum().reset_index()
g
c = df.groupby('category')[['price']].sum().reset_index()
c
code
128008988/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-shopping-dataset/customer_shopping_data.csv')
df.drop(['invoice_no', 'customer_id', 'invoice_date'], axis=1, inplace=True)
df.isnull().sum()
df.info()
code
128008988/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-shopping-dataset/customer_shopping_data.csv')
df.drop(['invoice_no', 'customer_id', 'invoice_date'], axis=1, inplace=True)
df.isnull().sum()
g = df.groupby('gender')[['price']].sum().reset_index()
g
code
128008988/cell_5
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('/kaggle/input/customer-shopping-dataset/customer_shopping_data.csv')
df
code
49123033/cell_42
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.drop(['Calc_Age'], axis=1, inplace=True)
train_data.drop(['Calc_Fare'], axis=1, inplace=True)
train_data.dropna(subset=['Embarked'], inplace=True)
sns.pairplot(train_data[['Survived', 'Pclass', 'Age', 'Fare']], hue='Survived', palette='hls')
code
49123033/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.drop(['Calc_Age'], axis=1, inplace=True)
train_data.head()
code
49123033/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.drop(['Calc_Age'], axis=1, inplace=True)
train_data.head()
code
49123033/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.info()
code
49123033/cell_34
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.drop(['Calc_Age'], axis=1, inplace=True)
train_data.drop(['Calc_Fare'], axis=1, inplace=True)
train_data.info()
code
49123033/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.drop(['Calc_Age'], axis=1, inplace=True)
train_data.head()
code
49123033/cell_33
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.drop(['Calc_Age'], axis=1, inplace=True)
train_data.drop(['Calc_Fare'], axis=1, inplace=True)
train_data.head()
code
49123033/cell_55
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')

def impute_age(cols):
    Age = cols[0]
    Pclass = cols[1]
    if pd.isnull(Age):
        if Pclass == 1:
            return 37
        elif Pclass == 2:
            return 29
        else:
            return 24
    else:
        return Age

train_data.drop(['Calc_Age'], axis=1, inplace=True)
train_data.drop(['Calc_Fare'], axis=1, inplace=True)
train_data.dropna(subset=['Embarked'], inplace=True)
test_data.dropna(subset=['Embarked'], inplace=True)
X = train_data[['Pclass', 'Age', 'Fare', 'Sex', 'Embarked']]
y = train_data['Survived']
logmodel = LogisticRegression()
logmodel.fit(X_train, y_train)
predictions = logmodel.predict(X_test)
logmodel.score(X_train, y_train)
logmodel.score(X_test, y_test)
passengerId_test = test_data['PassengerId']
X_test = test_data[['Pclass', 'Age', 'Fare', 'Sex', 'Embarked']]
logmodel = LogisticRegression()
logmodel.fit(X, y)
predictions = logmodel.predict(X_test)
df_predictions = pd.DataFrame({'PassengerID': passengerId_test, 'Survived': predictions.astype(int)})
df_predictions.to_csv('logistic_regression_submission.csv', index=False)
code
49123033/cell_29
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.drop(['Calc_Age'], axis=1, inplace=True)
train_data.drop(['Calc_Fare'], axis=1, inplace=True)
train_data['Fare'].value_counts()
code
49123033/cell_26
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.drop(['Calc_Age'], axis=1, inplace=True)
sns.jointplot(x='Calc_Fare', y='Survived', data=train_data, color='#4CB391')
code
49123033/cell_41
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.drop(['Calc_Age'], axis=1, inplace=True)
train_data.drop(['Calc_Fare'], axis=1, inplace=True)
train_data.dropna(subset=['Embarked'], inplace=True)
train_data.head()
code
49123033/cell_54
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.drop(['Calc_Age'], axis=1, inplace=True)
train_data.drop(['Calc_Fare'], axis=1, inplace=True)
train_data.dropna(subset=['Embarked'], inplace=True)
test_data.dropna(subset=['Embarked'], inplace=True)
X = train_data[['Pclass', 'Age', 'Fare', 'Sex', 'Embarked']]
y = train_data['Survived']
logmodel = LogisticRegression()
logmodel.fit(X_train, y_train)
predictions = logmodel.predict(X_test)
logmodel.score(X_train, y_train)
logmodel.score(X_test, y_test)
X_test = test_data[['Pclass', 'Age', 'Fare', 'Sex', 'Embarked']]
logmodel = LogisticRegression()
logmodel.fit(X, y)
predictions = logmodel.predict(X_test)
code
49123033/cell_50
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report

logmodel = LogisticRegression()
logmodel.fit(X_train, y_train)
predictions = logmodel.predict(X_test)
print(classification_report(y_test, predictions))
code
49123033/cell_52
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression

logmodel = LogisticRegression()
logmodel.fit(X_train, y_train)
predictions = logmodel.predict(X_test)
logmodel.score(X_train, y_train)
logmodel.score(X_test, y_test)
code
49123033/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
49123033/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')

def impute_age(cols):
    Age = cols[0]
    Pclass = cols[1]
    if pd.isnull(Age):
        if Pclass == 1:
            return 37
        elif Pclass == 2:
            return 29
        else:
            return 24
    else:
        return Age

train_data['Age'].fillna(train_data[['Age', 'Pclass']].apply(impute_age, axis=1))
code
49123033/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.drop(['Calc_Age'], axis=1, inplace=True)
sns.jointplot(x='Fare', y='Survived', data=train_data, color='#4CB391')
code
49123033/cell_51
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression

logmodel = LogisticRegression()
logmodel.fit(X_train, y_train)
predictions = logmodel.predict(X_test)
logmodel.score(X_train, y_train)
code
49123033/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data[train_data['Age'].isna()]
code
49123033/cell_16
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.drop(['Calc_Age'], axis=1, inplace=True)
train_data['Survived'].value_counts()
code
49123033/cell_47
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegression

logmodel = LogisticRegression()
logmodel.fit(X_train, y_train)
code
49123033/cell_17
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.drop(['Calc_Age'], axis=1, inplace=True)
sns.heatmap(train_data.isnull(), yticklabels=False, cbar=False, cmap='viridis')
code
49123033/cell_35
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.drop(['Calc_Age'], axis=1, inplace=True)
train_data.drop(['Calc_Fare'], axis=1, inplace=True)
train_data['Embarked'].value_counts()
code
49123033/cell_31
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.drop(['Calc_Age'], axis=1, inplace=True)
train_data.drop(['Calc_Fare'], axis=1, inplace=True)
train_data.head()
code
49123033/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.drop(['Calc_Age'], axis=1, inplace=True)
train_data.info()
code
49123033/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.head()
code
49123033/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data[train_data['Calc_Age'].isna()]
code
49123033/cell_5
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
plt.figure(figsize=(12, 7))
sns.boxplot(x='Pclass', y='Age', data=train_data, palette='winter')
code
49123033/cell_36
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
train_data.drop(['Calc_Age'], axis=1, inplace=True)
train_data.drop(['Calc_Fare'], axis=1, inplace=True)
train_data[train_data['Embarked'].isna()]
code
34144083/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/melbourne-housing-market/Melbourne_housing_FULL.csv')
df.info()
code
34144083/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
import ppscore as pps
import seaborn as sns

df = pd.read_csv('../input/melbourne-housing-market/Melbourne_housing_FULL.csv')
df.isnull().sum() / len(df) * 100
for col in df.columns:
    print(col, pps.score(df, col, 'Price')['ppscore'])
code
34144083/cell_1
[ "text_plain_output_1.png" ]
pip install ppscore
code
34144083/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/melbourne-housing-market/Melbourne_housing_FULL.csv')
df.isnull().sum() / len(df) * 100
code
34144083/cell_8
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/melbourne-housing-market/Melbourne_housing_FULL.csv')
df.isnull().sum() / len(df) * 100
sns.heatmap(df.corr())
code
34144083/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
import ppscore as pps
import seaborn as sns

df = pd.read_csv('../input/melbourne-housing-market/Melbourne_housing_FULL.csv')
df.isnull().sum() / len(df) * 100
df_cat = df[['Suburb', 'Address', 'Type', 'Method', 'SellerG', 'CouncilArea', 'Regionname', 'Postcode']]
df_num = df[['Rooms', 'Distance', 'Bedroom2', 'Bathroom', 'Car', 'Landsize', 'BuildingArea', 'YearBuilt', 'Lattitude', 'Longtitude', 'Propertycount']]
df_date = pd.to_datetime(df['Date'])
y = df['Price']
for column in df_num.columns:
    if df_num[column].isna().any() == True:
        df_num.loc[:, column] = df_num[column].fillna(df_num[column].mean())
    df_num.loc[:, column] = (df_num[column] - df_num[column].min()) / (df_num[column].max() - df_num[column].min())
df_num.describe().T
code
34144083/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/melbourne-housing-market/Melbourne_housing_FULL.csv')
df[0:10].T
code