Dataset schema:

  path              string        length 13 to 17
  screenshot_names  sequence      length 1 to 873
  code              string        length 0 to 40.4k
  cell_type         string class  1 value ("code")
90118648/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')

# Encode the embarkation port numerically: S -> 1, C -> 2, anything else (Q or NaN) -> 3.
test['Embarked'] = [1 if l == 'S' else 2 if l == 'C' else 3 for l in test['Embarked']]
test['Embarked'].value_counts()
code
90118648/cell_27
[ "text_plain_output_1.png" ]
from category_encoders import TargetEncoder
from sklearn.impute import KNNImputer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')

predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'SibSp', 'Fare', 'Embarked']
X_train = train[predictors]
y_train = train['Survived']
X_test = test[predictors]

# KNN-impute missing values (assumes 'Sex' and 'Embarked' were numerically encoded in earlier cells).
knn_imputer = KNNImputer()
X_train = pd.DataFrame(knn_imputer.fit_transform(X_train), columns=predictors)

# Note: 'Title' is not among the predictors above, so as extracted this line raises a
# KeyError unless a Title feature was engineered in a cell not captured here.
X_train[['Sex', 'Embarked', 'Pclass', 'Title']] = X_train[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')

encoder = TargetEncoder(return_df=True)
X_train_te = encoder.fit_transform(X_train, y_train)
X_train_te.describe()
code
90118648/cell_37
[ "text_html_output_1.png" ]
from category_encoders import TargetEncoder
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.impute import KNNImputer
from sklearn.preprocessing import PowerTransformer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')

predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'SibSp', 'Fare', 'Embarked']
X_train = train[predictors]
y_train = train['Survived']
X_test = test[predictors]

knn_imputer = KNNImputer()
X_train = pd.DataFrame(knn_imputer.fit_transform(X_train), columns=predictors)
X_test = pd.DataFrame(knn_imputer.transform(X_test), columns=predictors)

X_train[['Sex', 'Embarked', 'Pclass', 'Title']] = X_train[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')
X_test[['Sex', 'Embarked', 'Pclass', 'Title']] = X_test[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')

X_train_ohe = pd.get_dummies(X_train)
X_test_ohe = pd.get_dummies(X_test)

encoder = TargetEncoder(return_df=True)
X_train_te = encoder.fit_transform(X_train, y_train)
X_test_te = encoder.transform(X_test)

num = ['Age', 'Parch', 'SibSp', 'Fare']
power = PowerTransformer()
train_power = pd.DataFrame(power.fit_transform(X_train[num]), columns=num)
test_power = pd.DataFrame(power.transform(X_test[num]), columns=num)

X_train_ohe_power = pd.concat([train_power[num], X_train_ohe.drop(num, axis=1)], axis=1)
X_test_ohe_power = pd.concat([test_power[num], X_test_ohe.drop(num, axis=1)], axis=1)
X_train_te_power = pd.concat([train_power[num], X_train_te.drop(num, axis=1)], axis=1)
X_test_te_power = pd.concat([test_power[num], X_test_te.drop(num, axis=1)], axis=1)

rf = RandomForestClassifier(n_estimators=70, max_depth=4, random_state=42)
rf.fit(X_train_ohe_power, y_train)
y_rf = rf.predict(X_test_ohe_power)

results = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': y_rf})
results.to_csv('submission.csv', index=False)
results['Survived'].value_counts(normalize=True)
code
90118648/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
test.info()
code
90118648/cell_36
[ "text_html_output_1.png" ]
from category_encoders import TargetEncoder
from sklearn.impute import KNNImputer
from sklearn.preprocessing import PowerTransformer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')

predictors = ['Pclass', 'Sex', 'Age', 'Parch', 'SibSp', 'Fare', 'Embarked']
X_train = train[predictors]
y_train = train['Survived']
X_test = test[predictors]

knn_imputer = KNNImputer()
X_train = pd.DataFrame(knn_imputer.fit_transform(X_train), columns=predictors)
X_test = pd.DataFrame(knn_imputer.transform(X_test), columns=predictors)

X_train[['Sex', 'Embarked', 'Pclass', 'Title']] = X_train[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')
X_test[['Sex', 'Embarked', 'Pclass', 'Title']] = X_test[['Sex', 'Embarked', 'Pclass', 'Title']].astype('category')

X_train_ohe = pd.get_dummies(X_train)
X_test_ohe = pd.get_dummies(X_test)

encoder = TargetEncoder(return_df=True)
X_train_te = encoder.fit_transform(X_train, y_train)
X_test_te = encoder.transform(X_test)

num = ['Age', 'Parch', 'SibSp', 'Fare']
power = PowerTransformer()
train_power = pd.DataFrame(power.fit_transform(X_train[num]), columns=num)
test_power = pd.DataFrame(power.transform(X_test[num]), columns=num)

X_train_ohe_power = pd.concat([train_power[num], X_train_ohe.drop(num, axis=1)], axis=1)
X_test_ohe_power = pd.concat([test_power[num], X_test_ohe.drop(num, axis=1)], axis=1)
X_train_te_power = pd.concat([train_power[num], X_train_te.drop(num, axis=1)], axis=1)
X_test_te_power = pd.concat([test_power[num], X_test_te.drop(num, axis=1)], axis=1)

X_test_te_power.head()
code
18118296/cell_4
[ "text_html_output_1.png" ]
import os

data_dir = '../input'
os.listdir(f'{data_dir}')
code
18118296/cell_6
[ "text_plain_output_1.png" ]
import os
import pandas as pd

data_dir = '../input'
os.listdir(f'{data_dir}')

train_df_raw = pd.read_csv(f'{data_dir}/train.csv', low_memory=False)
test_df_raw = pd.read_csv(f'{data_dir}/test.csv', low_memory=False)
train_df_raw.sample(10)
code
18118296/cell_29
[ "text_html_output_1.png" ]
from bisect import bisect
from sklearn.ensemble import (RandomForestClassifier, AdaBoostClassifier,
                              ExtraTreesClassifier, GradientBoostingClassifier)
from sklearn.model_selection import KFold
import numpy as np
import os
import pandas as pd

data_dir = '../input'
os.listdir(f'{data_dir}')

train_df_raw = pd.read_csv(f'{data_dir}/train.csv', low_memory=False)
test_df_raw = pd.read_csv(f'{data_dir}/test.csv', low_memory=False)
train_df_raw.sample(10)
train_df_raw.describe(include='all').T
test_df_raw.describe(include='all').T

train_df = train_df_raw
test_df = test_df_raw
all_df = [train_df, test_df]
category_maps = {}

def Categorify(df: pd.DataFrame, cat_names):
    # Replace each categorical value with its index among the column's unique values.
    for cat_name in cat_names:
        uniques = df[cat_name].unique()
        category_maps[cat_name] = {i: uniques[i] for i in range(len(uniques))}
        df[cat_name] = [np.where(uniques == key)[0][0] for key in df[cat_name]]

def Quantile(df: pd.DataFrame, quant_names, quants=[0.25, 0.5, 0.75]):
    # Bin each column into quartile buckets by bisecting on its quantile boundaries.
    for quant_name in quant_names:
        quant_col_name = f'{quant_name}_quantile'
        quant_vals = [np.quantile(df[quant_name], quant) for quant in quants]
        df[quant_col_name] = [bisect(quant_vals, x) for x in df[quant_name]]

SEED = 0
NFOLDS = 5
# Note: newer scikit-learn versions require shuffle=True when random_state is set on KFold.
kf = KFold(n_splits=NFOLDS, random_state=SEED)

class SklearnHelper(object):
    # Thin wrapper giving every scikit-learn classifier a uniform train/predict interface.
    def __init__(self, name, clf, seed=0, params=None):
        params['random_state'] = seed
        self.name = name
        self.clf = clf(**params)

    def train(self, x_train, y_train):
        self.clf.fit(x_train, y_train)

    def predict(self, x):
        return self.clf.predict(x)

    def fit(self, x, y):
        return self.clf.fit(x, y)

    def feature_importances(self, x, y):
        return self.clf.fit(x, y).feature_importances_

def get_oof(clf, x_train, y_train, x_test):
    # Out-of-fold predictions: train on K-1 folds, predict the held-out fold,
    # and average the per-fold predictions on the test set.
    oof_train = np.zeros((x_train.shape[0],))
    oof_test = np.zeros((x_test.shape[0],))
    oof_test_skf = np.empty((NFOLDS, x_test.shape[0]))
    for i, (train_index, test_index) in enumerate(kf.split(x_train, y_train)):
        x_tr = x_train[train_index]
        y_tr = y_train[train_index]
        x_te = x_train[test_index]
        clf.train(x_tr, y_tr)
        oof_train[test_index] = clf.predict(x_te)
        oof_test_skf[i, :] = clf.predict(x_test)
    oof_test[:] = oof_test_skf.mean(axis=0)
    return (oof_train.reshape(-1, 1), oof_test.reshape(-1, 1))

classifier_stack = [
    SklearnHelper('RandomForest', clf=RandomForestClassifier, seed=SEED,
                  params={'n_jobs': -1, 'n_estimators': 500, 'max_depth': 6,
                          'min_samples_leaf': 2, 'max_features': 'sqrt', 'verbose': 0}),
    SklearnHelper('ExtraTrees', clf=ExtraTreesClassifier, seed=SEED,
                  params={'n_jobs': -1, 'n_estimators': 500, 'max_depth': 8,
                          'min_samples_leaf': 2, 'verbose': 0}),
    SklearnHelper('AdaBoost', clf=AdaBoostClassifier, seed=SEED,
                  params={'n_estimators': 500, 'learning_rate': 0.75}),
    SklearnHelper('GradientBoost', clf=GradientBoostingClassifier, seed=SEED,
                  params={'n_estimators': 500, 'max_depth': 5,
                          'min_samples_leaf': 2, 'verbose': 0}),
]

dep_var = 'Survived'
drop_vars = ['PassengerId', 'Name', 'Ticket', 'Cabin']
x_train_df = train_df.drop(drop_vars, axis=1).drop(dep_var, axis=1)
x_train = x_train_df.values
y_train = train_df[dep_var].ravel()
x_test = test_df.drop(drop_vars, axis=1).values

oofs = {clf.name: get_oof(clf, x_train, y_train, x_test) for clf in classifier_stack}
print('Training is complete')
code
18118296/cell_7
[ "text_plain_output_1.png" ]
import os
import pandas as pd

data_dir = '../input'
os.listdir(f'{data_dir}')

train_df_raw = pd.read_csv(f'{data_dir}/train.csv', low_memory=False)
test_df_raw = pd.read_csv(f'{data_dir}/test.csv', low_memory=False)
train_df_raw.sample(10)
train_df_raw.describe(include='all').T
code
18118296/cell_32
[ "text_html_output_1.png" ]
from bisect import bisect
from sklearn.ensemble import (RandomForestClassifier, AdaBoostClassifier,
                              ExtraTreesClassifier, GradientBoostingClassifier)
from sklearn.model_selection import KFold
import numpy as np
import os
import pandas as pd

data_dir = '../input'
os.listdir(f'{data_dir}')

train_df_raw = pd.read_csv(f'{data_dir}/train.csv', low_memory=False)
test_df_raw = pd.read_csv(f'{data_dir}/test.csv', low_memory=False)
train_df_raw.sample(10)
train_df_raw.describe(include='all').T
test_df_raw.describe(include='all').T

train_df = train_df_raw
test_df = test_df_raw
all_df = [train_df, test_df]
category_maps = {}

def Categorify(df: pd.DataFrame, cat_names):
    for cat_name in cat_names:
        uniques = df[cat_name].unique()
        category_maps[cat_name] = {i: uniques[i] for i in range(len(uniques))}
        df[cat_name] = [np.where(uniques == key)[0][0] for key in df[cat_name]]

def Quantile(df: pd.DataFrame, quant_names, quants=[0.25, 0.5, 0.75]):
    for quant_name in quant_names:
        quant_col_name = f'{quant_name}_quantile'
        quant_vals = [np.quantile(df[quant_name], quant) for quant in quants]
        df[quant_col_name] = [bisect(quant_vals, x) for x in df[quant_name]]

SEED = 0
NFOLDS = 5
kf = KFold(n_splits=NFOLDS, random_state=SEED)

class SklearnHelper(object):
    def __init__(self, name, clf, seed=0, params=None):
        params['random_state'] = seed
        self.name = name
        self.clf = clf(**params)

    def train(self, x_train, y_train):
        self.clf.fit(x_train, y_train)

    def predict(self, x):
        return self.clf.predict(x)

    def fit(self, x, y):
        return self.clf.fit(x, y)

    def feature_importances(self, x, y):
        return self.clf.fit(x, y).feature_importances_

def get_oof(clf, x_train, y_train, x_test):
    oof_train = np.zeros((x_train.shape[0],))
    oof_test = np.zeros((x_test.shape[0],))
    oof_test_skf = np.empty((NFOLDS, x_test.shape[0]))
    for i, (train_index, test_index) in enumerate(kf.split(x_train, y_train)):
        x_tr = x_train[train_index]
        y_tr = y_train[train_index]
        x_te = x_train[test_index]
        clf.train(x_tr, y_tr)
        oof_train[test_index] = clf.predict(x_te)
        oof_test_skf[i, :] = clf.predict(x_test)
    oof_test[:] = oof_test_skf.mean(axis=0)
    return (oof_train.reshape(-1, 1), oof_test.reshape(-1, 1))

classifier_stack = [
    SklearnHelper('RandomForest', clf=RandomForestClassifier, seed=SEED,
                  params={'n_jobs': -1, 'n_estimators': 500, 'max_depth': 6,
                          'min_samples_leaf': 2, 'max_features': 'sqrt', 'verbose': 0}),
    SklearnHelper('ExtraTrees', clf=ExtraTreesClassifier, seed=SEED,
                  params={'n_jobs': -1, 'n_estimators': 500, 'max_depth': 8,
                          'min_samples_leaf': 2, 'verbose': 0}),
    SklearnHelper('AdaBoost', clf=AdaBoostClassifier, seed=SEED,
                  params={'n_estimators': 500, 'learning_rate': 0.75}),
    SklearnHelper('GradientBoost', clf=GradientBoostingClassifier, seed=SEED,
                  params={'n_estimators': 500, 'max_depth': 5,
                          'min_samples_leaf': 2, 'verbose': 0}),
]

dep_var = 'Survived'
drop_vars = ['PassengerId', 'Name', 'Ticket', 'Cabin']
x_train_df = train_df.drop(drop_vars, axis=1).drop(dep_var, axis=1)
x_train = x_train_df.values
y_train = train_df[dep_var].ravel()
x_test = test_df.drop(drop_vars, axis=1).values

oofs = {clf.name: get_oof(clf, x_train, y_train, x_test) for clf in classifier_stack}

base_predictions_train = pd.DataFrame({key: oofs[key][0].ravel() for key in oofs})
base_predictions_train.head()
code
18118296/cell_8
[ "text_html_output_1.png" ]
import os
import pandas as pd

data_dir = '../input'
os.listdir(f'{data_dir}')

train_df_raw = pd.read_csv(f'{data_dir}/train.csv', low_memory=False)
test_df_raw = pd.read_csv(f'{data_dir}/test.csv', low_memory=False)
train_df_raw.sample(10)
test_df_raw.describe(include='all').T
code
18118296/cell_15
[ "text_plain_output_1.png" ]
import numpy as np
import os
import pandas as pd

data_dir = '../input'
os.listdir(f'{data_dir}')

train_df_raw = pd.read_csv(f'{data_dir}/train.csv', low_memory=False)
test_df_raw = pd.read_csv(f'{data_dir}/test.csv', low_memory=False)
train_df_raw.sample(10)
train_df_raw.describe(include='all').T
test_df_raw.describe(include='all').T

train_df = train_df_raw
test_df = test_df_raw
all_df = [train_df, test_df]
category_maps = {}

def Categorify(df: pd.DataFrame, cat_names):
    for cat_name in cat_names:
        uniques = df[cat_name].unique()
        category_maps[cat_name] = {i: uniques[i] for i in range(len(uniques))}
        df[cat_name] = [np.where(uniques == key)[0][0] for key in df[cat_name]]

cat_names = ['Sex', 'Embarked']
list(map(lambda df: Categorify(df, cat_names), all_df))
category_maps
code
106211827/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
import tensorflow as tf
from transformers import GPT2Config, TFGPT2LMHeadModel, GPT2Tokenizer
from transformers import WEIGHTS_NAME, CONFIG_NAME
import os
code
106211827/cell_23
[ "text_plain_output_1.png" ]
from transformers import GPT2Config, TFGPT2LMHeadModel, GPT2Tokenizer
import os
import tensorflow as tf
import numpy as np
import pandas as pd

data_location = 'data'
if not os.path.exists(data_location):
    os.makedirs(data_location)

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
configuration = GPT2Config(vocab_size=tokenizer.vocab_size,
                           bos_token_id=tokenizer.bos_token_id,
                           eos_token_id=tokenizer.eos_token_id)
model = TFGPT2LMHeadModel(configuration)

textfile = open('./data/poetry.txt', 'r', encoding='utf-8')
text = textfile.read()
textfile.close()
string_tokenized = tokenizer.encode(text)

# Slice the token stream into fixed-length blocks, then shift by one token
# to build (input, next-token label) pairs for language-model training.
examples = []
block_size = 100
BATCH_SIZE = 12
BUFFER_SIZE = 1000
for i in range(0, len(string_tokenized) - block_size + 1, block_size):
    examples.append(string_tokenized[i:i + block_size])
inputs, labels = ([], [])
for ex in examples:
    inputs.append(ex[:-1])
    labels.append(ex[1:])

dataset = tf.data.Dataset.from_tensor_slices((inputs, labels))
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)

optimizer = tf.keras.optimizers.Adam(learning_rate=3e-05, epsilon=1e-08, clipnorm=1.0)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer=optimizer, loss=loss)
model.fit(dataset, epochs=30)

save_location = './models'
if not os.path.exists(save_location):
    os.makedirs(save_location)
model.save_pretrained(save_location)
tokenizer.save_pretrained(save_location)
code
106211827/cell_11
[ "text_plain_output_1.png" ]
from transformers import GPT2Config, TFGPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
code
106211827/cell_19
[ "application_vnd.jupyter.stderr_output_1.png" ]
from transformers import GPT2Config, TFGPT2LMHeadModel, GPT2Tokenizer
import tensorflow as tf

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
configuration = GPT2Config(vocab_size=tokenizer.vocab_size,
                           bos_token_id=tokenizer.bos_token_id,
                           eos_token_id=tokenizer.eos_token_id)
model = TFGPT2LMHeadModel(configuration)

textfile = open('./data/poetry.txt', 'r', encoding='utf-8')
text = textfile.read()
textfile.close()
string_tokenized = tokenizer.encode(text)

examples = []
block_size = 100
BATCH_SIZE = 12
BUFFER_SIZE = 1000
for i in range(0, len(string_tokenized) - block_size + 1, block_size):
    examples.append(string_tokenized[i:i + block_size])
inputs, labels = ([], [])
for ex in examples:
    inputs.append(ex[:-1])
    labels.append(ex[1:])

dataset = tf.data.Dataset.from_tensor_slices((inputs, labels))
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
print('Done creating dataset')
code
106211827/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
106211827/cell_3
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_4.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
!pip install tokenizer
!pip install transformers
code
106211827/cell_22
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from transformers import GPT2Config, TFGPT2LMHeadModel, GPT2Tokenizer
import tensorflow as tf

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
configuration = GPT2Config(vocab_size=tokenizer.vocab_size,
                           bos_token_id=tokenizer.bos_token_id,
                           eos_token_id=tokenizer.eos_token_id)
model = TFGPT2LMHeadModel(configuration)

textfile = open('./data/poetry.txt', 'r', encoding='utf-8')
text = textfile.read()
textfile.close()
string_tokenized = tokenizer.encode(text)

examples = []
block_size = 100
BATCH_SIZE = 12
BUFFER_SIZE = 1000
for i in range(0, len(string_tokenized) - block_size + 1, block_size):
    examples.append(string_tokenized[i:i + block_size])
inputs, labels = ([], [])
for ex in examples:
    inputs.append(ex[:-1])
    labels.append(ex[1:])

dataset = tf.data.Dataset.from_tensor_slices((inputs, labels))
dataset = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)

optimizer = tf.keras.optimizers.Adam(learning_rate=3e-05, epsilon=1e-08, clipnorm=1.0)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer=optimizer, loss=loss)
model.fit(dataset, epochs=30)
code
106211827/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
from transformers import GPT2Config, TFGPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
configuration = GPT2Config(vocab_size=tokenizer.vocab_size,
                           bos_token_id=tokenizer.bos_token_id,
                           eos_token_id=tokenizer.eos_token_id)
model = TFGPT2LMHeadModel(configuration)
code
128015730/cell_7
[ "text_plain_output_1.png" ]
!python /kaggle/input/iot23bymyself/creatDataset.py
code
128015730/cell_8
[ "text_plain_output_1.png" ]
!zip -r /kaggle/working/dataImage imagesData
code
16147726/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D

print(os.listdir('../input'))
code
16147726/cell_8
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.models import Sequential
import pickle

with open('../input/X.pickle', 'rb') as fp:
    X_feature = pickle.load(fp)
with open('../input/Y.pickle', 'rb') as fp:
    Y_label = pickle.load(fp)

X_feature = X_feature / 255.0  # scale pixel values to [0, 1]

model = Sequential()
model.add(Conv2D(64, (3, 3), input_shape=X_feature.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dense(4))
model.add(Activation('softmax'))

model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
model.fit(x=X_feature, y=Y_label, batch_size=20, epochs=50, validation_split=0.1, shuffle=True)
code
1010749/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd

raw_data = pd.read_csv('../input/train.csv')
columns = raw_data.columns.tolist()
if 'Id' in columns:
    columns.remove('Id')

columns_with_null_values = []
for c in columns:
    f = c
    feature_name = str(f)
    has_null = str(any(raw_data[f].isnull().values))
    if has_null == 'True':
        columns_with_null_values.append(f)
    column_type = raw_data[f].dtype

for c in columns_with_null_values:
    counter = raw_data[c].value_counts(dropna=False)
    try:
        counts = counter[None]
    except:
        counts = counter[float('nan')]
    length = len(raw_data)
    usable = length - counts

column_names = raw_data.columns.drop('Id')
int_columns, float_columns, object_columns = ([], [], [])
for c in column_names:
    column_dtype = str(raw_data.dtypes[c])
    if column_dtype == 'int64':
        int_columns.append(c)
    elif column_dtype == 'object':
        object_columns.append(c)
    elif column_dtype == 'float64':
        float_columns.append(c)

numerical_columns, categorical_columns = ([], [])
temporal_columns = ['GarageYrBlt', 'YearBuilt', 'YearRemodAdd', 'MoSold', 'YrSold']
from_int = ['MSSubClass']
categorical_columns = object_columns + from_int + temporal_columns
numerical_columns = (list(filter(lambda x: x not in temporal_columns + ['MSSubClass'], int_columns))
                     + list(filter(lambda x: x not in temporal_columns, float_columns)))

bounded_columns = ['OverallQual', 'OverallCond']
binary_columns = []
for c in categorical_columns:
    classes = raw_data[c].unique()
    length = len(classes)
    if length == 2:
        binary_columns.append(c)

has_zero_columns = []
for i in int_columns:
    if 0 in raw_data[i].unique():
        has_zero_columns.append(i)

counting_columns = ['BsmtFullBath', 'BsmtHalfBath', 'FullBath', 'HalfBath', 'BedroomAbvGr',
                    'KitchenAbvGr', 'TotRmsAbvGrd', 'Fireplaces', 'GarageCars']
(categorical_columns, numerical_columns)
(binary_columns, temporal_columns)
(bounded_columns, has_zero_columns, counting_columns)
code
1010749/cell_9
[ "image_output_11.png", "image_output_239.png", "image_output_536.png", "image_output_98.png", "image_output_573.png", "image_output_477.png", "image_output_538.png", "image_output_337.png", "image_output_416.png", "image_output_452.png", "image_output_508.png", "image_output_121.png", "image_output_586.png", "image_output_425.png", "image_output_180.png", "image_output_331.png", "image_output_466.png", "image_output_607.png", "image_output_470.png", "image_output_384.png", "image_output_526.png", "image_output_555.png", "image_output_303.png", "image_output_453.png", "image_output_157.png", "image_output_612.png", "image_output_668.png", "image_output_518.png", "image_output_74.png", "image_output_515.png", "image_output_279.png", "image_output_588.png", "image_output_571.png", "image_output_461.png", "image_output_181.png", "image_output_663.png", "image_output_412.png", "image_output_572.png", "image_output_290.png", "image_output_156.png", "image_output_707.png", "image_output_310.png", "image_output_204.png", "image_output_299.png", "image_output_684.png", "image_output_393.png", "image_output_330.png", "image_output_174.png", "image_output_244.png", "image_output_418.png", "image_output_82.png", "image_output_689.png", "image_output_683.png", "image_output_421.png", "image_output_670.png", "image_output_581.png", "image_output_593.png", "image_output_305.png", "image_output_173.png", "image_output_24.png", "image_output_209.png", "image_output_478.png", "image_output_454.png", "image_output_380.png", "image_output_485.png", "image_output_159.png", "image_output_139.png", "image_output_104.png", "image_output_220.png", "image_output_500.png", "image_output_671.png", "image_output_480.png", "image_output_697.png", "image_output_711.png", "image_output_695.png", "image_output_46.png", "image_output_207.png", "image_output_481.png", "image_output_327.png", "image_output_661.png", "image_output_295.png", "image_output_495.png", "image_output_251.png", "image_output_232.png", "image_output_617.png", "image_output_208.png", "image_output_106.png", "image_output_431.png", "image_output_85.png", "image_output_462.png", "image_output_349.png", "image_output_368.png", "image_output_373.png", "image_output_692.png", "image_output_700.png", "image_output_427.png", "image_output_634.png", "image_output_149.png", "image_output_108.png", "image_output_552.png", "image_output_270.png", "image_output_456.png", "image_output_150.png", "image_output_383.png", "image_output_666.png", "image_output_25.png", "image_output_266.png", "image_output_190.png", "image_output_228.png", "image_output_183.png", "image_output_202.png", "image_output_527.png", "image_output_534.png", "image_output_532.png", "image_output_473.png", "image_output_275.png", "image_output_312.png", "image_output_546.png", "image_output_496.png", "image_output_77.png", "image_output_522.png", "image_output_652.png", "image_output_179.png", "image_output_584.png", "image_output_319.png", "image_output_148.png", "image_output_582.png", "image_output_47.png", "image_output_458.png", "image_output_344.png", "image_output_511.png", "image_output_141.png", "image_output_233.png", "image_output_575.png", "image_output_472.png", "image_output_537.png", "image_output_229.png", "image_output_676.png", "image_output_434.png", "image_output_397.png", "image_output_587.png", "image_output_316.png", "image_output_387.png", "image_output_565.png", "image_output_242.png", "image_output_171.png", "image_output_603.png", "image_output_589.png", 
"image_output_457.png", "image_output_78.png", "image_output_219.png", "image_output_227.png", "image_output_170.png", "image_output_17.png", "image_output_503.png", "image_output_404.png", "image_output_709.png", "image_output_30.png", "image_output_257.png", "image_output_524.png", "image_output_488.png", "image_output_486.png", "image_output_669.png", "image_output_73.png", "image_output_445.png", "image_output_309.png", "image_output_221.png", "image_output_547.png", "image_output_355.png", "image_output_465.png", "image_output_72.png", "image_output_356.png", "image_output_336.png", "image_output_710.png", "image_output_14.png", "image_output_590.png", "image_output_59.png", "image_output_351.png", "image_output_39.png", "image_output_562.png", "image_output_464.png", "image_output_97.png", "image_output_429.png", "image_output_378.png", "image_output_247.png", "image_output_479.png", "image_output_717.png", "image_output_357.png", "image_output_361.png", "image_output_28.png", "image_output_408.png", "image_output_502.png", "image_output_86.png", "image_output_529.png", "image_output_137.png", "image_output_687.png", "image_output_626.png", "image_output_682.png", "image_output_640.png", "image_output_160.png", "image_output_234.png", "image_output_459.png", "image_output_84.png", "image_output_125.png", "image_output_679.png", "image_output_474.png", "image_output_81.png", "image_output_300.png", "image_output_720.png", "image_output_165.png", "image_output_194.png", "image_output_568.png", "image_output_342.png", "image_output_273.png", "image_output_558.png", "image_output_23.png", "image_output_678.png", "image_output_633.png", "image_output_531.png", "image_output_136.png", "image_output_367.png", "image_output_34.png", "image_output_308.png", "image_output_64.png", "image_output_282.png", "image_output_119.png", "image_output_360.png", "image_output_595.png", "image_output_599.png", "image_output_635.png", "image_output_433.png", "image_output_237.png", "image_output_506.png", "image_output_225.png", "image_output_131.png", "image_output_388.png", "image_output_134.png", "image_output_178.png", "image_output_177.png", "image_output_377.png", "image_output_188.png", "image_output_592.png", "image_output_144.png", "image_output_680.png", "image_output_415.png", "image_output_335.png", "image_output_688.png", "image_output_696.png", "image_output_252.png", "image_output_468.png", "image_output_509.png", "image_output_13.png", "image_output_597.png", "image_output_616.png", "image_output_564.png", "image_output_410.png", "image_output_128.png", "image_output_667.png", "image_output_184.png", "image_output_155.png", "image_output_630.png", "image_output_651.png", "image_output_40.png", "image_output_224.png", "image_output_5.png", "image_output_620.png", "image_output_48.png", "image_output_405.png", "image_output_583.png", "image_output_639.png", "image_output_114.png", "image_output_146.png", "image_output_543.png", "image_output_68.png", "image_output_195.png", "image_output_641.png", "image_output_591.png", "image_output_142.png", "image_output_280.png", "image_output_109.png", "image_output_440.png", "image_output_75.png", "image_output_578.png", "image_output_610.png", "image_output_698.png", "image_output_392.png", "image_output_18.png", "image_output_127.png", "image_output_513.png", "image_output_638.png", "image_output_143.png", "image_output_554.png", "image_output_324.png", "image_output_647.png", "image_output_494.png", "image_output_435.png", "image_output_314.png", 
"image_output_489.png", "image_output_389.png", "image_output_313.png", "image_output_283.png", "image_output_58.png", "image_output_245.png", "image_output_118.png", "image_output_512.png", "image_output_145.png", "image_output_625.png", "image_output_254.png", "image_output_269.png", "image_output_385.png", "image_output_636.png", "image_output_296.png", "image_output_110.png", "image_output_441.png", "image_output_116.png", "image_output_286.png", "image_output_277.png", "image_output_169.png", "image_output_271.png", "image_output_525.png", "image_output_374.png", "image_output_693.png", "image_output_107.png", "image_output_92.png", "image_output_21.png", "image_output_551.png", "image_output_608.png", "image_output_650.png", "image_output_372.png", "image_output_248.png", "image_output_120.png", "image_output_276.png", "image_output_463.png", "image_output_256.png", "image_output_665.png", "image_output_332.png", "image_output_549.png", "image_output_105.png", "image_output_52.png", "image_output_436.png", "image_output_288.png", "image_output_362.png", "image_output_703.png", "image_output_307.png", "image_output_422.png", "image_output_484.png", "image_output_501.png", "image_output_467.png", "image_output_292.png", "image_output_664.png", "image_output_499.png", "image_output_655.png", "image_output_60.png", "image_output_672.png", "image_output_7.png", "image_output_654.png", "image_output_343.png", "image_output_62.png", "image_output_631.png", "image_output_96.png", "image_output_186.png", "image_output_658.png", "image_output_182.png", "image_output_580.png", "image_output_609.png", "image_output_152.png", "image_output_322.png", "image_output_185.png", "image_output_713.png", "image_output_576.png", "image_output_235.png", "image_output_167.png", "image_output_437.png", "image_output_56.png", "image_output_386.png", "image_output_196.png", "image_output_346.png", "image_output_31.png", "image_output_621.png", "image_output_442.png", "image_output_428.png", "image_output_65.png", "image_output_662.png", "image_output_115.png", "image_output_528.png", "image_output_291.png", "image_output_645.png", "image_output_716.png", "image_output_419.png", "image_output_20.png", "image_output_359.png", "image_output_69.png", "image_output_619.png", "image_output_298.png", "image_output_469.png", "image_output_395.png", "image_output_369.png", "image_output_704.png", "image_output_712.png", "image_output_241.png", "image_output_32.png", "image_output_53.png", "image_output_400.png", "image_output_230.png", "image_output_545.png", "image_output_521.png", "image_output_352.png", "image_output_409.png", "image_output_451.png", "image_output_4.png", "image_output_304.png", "image_output_483.png", "image_output_642.png", "image_output_605.png", "image_output_394.png", "image_output_51.png", "image_output_274.png", "image_output_338.png", "image_output_594.png", "image_output_318.png", "image_output_162.png", "image_output_210.png", "image_output_426.png", "image_output_103.png", "image_output_348.png", "image_output_226.png", "image_output_516.png", "image_output_201.png", "image_output_253.png", "image_output_341.png", "image_output_117.png", "image_output_391.png", "image_output_544.png", "image_output_217.png", "image_output_411.png", "image_output_339.png", "image_output_83.png", "image_output_417.png", "image_output_579.png", "image_output_382.png", "image_output_317.png", "image_output_657.png", "image_output_519.png", "image_output_213.png", "image_output_172.png", 
"image_output_718.png", "image_output_705.png", "image_output_42.png", "image_output_306.png", "image_output_561.png", "image_output_381.png", "image_output_648.png", "image_output_240.png", "image_output_35.png", "image_output_398.png", "image_output_263.png", "image_output_585.png", "image_output_677.png", "image_output_311.png", "image_output_613.png", "image_output_90.png", "image_output_548.png", "image_output_627.png", "image_output_302.png", "image_output_574.png", "image_output_41.png", "image_output_57.png", "image_output_673.png", "image_output_702.png", "image_output_260.png", "image_output_222.png", "image_output_482.png", "image_output_329.png", "image_output_566.png", "image_output_36.png", "image_output_649.png", "image_output_475.png", "image_output_413.png", "image_output_265.png", "image_output_476.png", "image_output_8.png", "image_output_37.png", "image_output_66.png", "image_output_659.png", "image_output_533.png", "image_output_16.png", "image_output_557.png", "image_output_192.png", "image_output_498.png", "image_output_420.png", "image_output_553.png", "image_output_211.png", "image_output_163.png", "image_output_91.png", "image_output_447.png", "image_output_602.png", "image_output_70.png", "image_output_443.png", "image_output_448.png", "image_output_686.png", "image_output_138.png", "image_output_615.png", "image_output_158.png", "image_output_285.png", "image_output_67.png", "image_output_27.png", "image_output_353.png", "image_output_354.png", "image_output_406.png", "image_output_430.png", "image_output_432.png", "image_output_287.png", "image_output_261.png", "image_output_424.png", "image_output_333.png", "image_output_122.png", "image_output_54.png", "image_output_297.png", "image_output_323.png", "image_output_189.png", "image_output_363.png", "image_output_6.png", "image_output_618.png", "image_output_439.png", "image_output_301.png", "image_output_45.png", "image_output_629.png", "image_output_246.png", "image_output_403.png", "image_output_681.png", "image_output_365.png", "image_output_550.png", "image_output_492.png", "image_output_250.png", "image_output_63.png", "image_output_71.png", "image_output_153.png", "image_output_126.png", "image_output_281.png", "image_output_699.png", "image_output_471.png", "image_output_715.png", "image_output_80.png", "image_output_289.png", "image_output_606.png", "image_output_514.png", "image_output_112.png", "image_output_164.png", "image_output_293.png", "image_output_598.png", "image_output_326.png", "image_output_347.png", "image_output_95.png", "image_output_123.png", "image_output_147.png", "image_output_198.png", "image_output_523.png", "image_output_370.png", "image_output_212.png", "image_output_278.png", "image_output_364.png", "image_output_340.png", "image_output_402.png", "image_output_243.png", "image_output_694.png", "image_output_643.png", "image_output_93.png", "image_output_205.png", "image_output_206.png", "image_output_401.png", "image_output_596.png", "image_output_214.png", "image_output_12.png", "image_output_504.png", "image_output_507.png", "image_output_284.png", "image_output_161.png", "image_output_231.png", "image_output_22.png", "image_output_132.png", "image_output_328.png", "image_output_320.png", "image_output_89.png", "image_output_315.png", "image_output_268.png", "image_output_55.png", "image_output_556.png", "image_output_423.png", "image_output_701.png", "image_output_535.png", "image_output_399.png", "image_output_530.png", "image_output_577.png", "image_output_133.png", 
"image_output_560.png", "image_output_216.png", "image_output_487.png", "image_output_218.png", "image_output_637.png", "image_output_191.png", "image_output_541.png", "image_output_151.png", "image_output_200.png", "image_output_660.png", "image_output_438.png", "image_output_685.png", "image_output_294.png", "image_output_94.png", "image_output_3.png", "image_output_111.png", "image_output_101.png", "image_output_366.png", "image_output_249.png", "image_output_623.png", "image_output_135.png", "image_output_29.png", "image_output_238.png", "image_output_325.png", "image_output_559.png", "image_output_708.png", "image_output_193.png", "image_output_539.png", "image_output_614.png", "image_output_611.png", "image_output_187.png", "image_output_44.png", "image_output_517.png", "image_output_199.png", "image_output_130.png", "image_output_628.png", "image_output_542.png", "image_output_601.png", "image_output_43.png", "image_output_2.png", "image_output_375.png", "image_output_262.png", "image_output_1.png", "image_output_350.png", "image_output_520.png", "image_output_10.png", "image_output_570.png", "image_output_632.png", "image_output_510.png", "image_output_259.png", "image_output_604.png", "image_output_168.png", "image_output_258.png", "image_output_407.png", "image_output_719.png", "image_output_646.png", "image_output_236.png", "image_output_497.png", "image_output_154.png", "image_output_102.png", "image_output_656.png", "image_output_644.png", "image_output_653.png", "image_output_176.png", "image_output_321.png", "image_output_706.png", "image_output_175.png", "image_output_567.png", "image_output_124.png", "image_output_505.png", "image_output_88.png", "image_output_272.png", "image_output_33.png", "image_output_140.png", "image_output_449.png", "image_output_490.png", "image_output_569.png", "image_output_450.png", "image_output_714.png", "image_output_345.png", "image_output_358.png", "image_output_690.png", "image_output_87.png", "image_output_255.png", "image_output_540.png", "image_output_50.png", "image_output_455.png", "image_output_675.png", "image_output_15.png", "image_output_267.png", "image_output_99.png", "image_output_49.png", "image_output_197.png", "image_output_624.png", "image_output_100.png", "image_output_129.png", "image_output_493.png", "image_output_491.png", "image_output_691.png", "image_output_444.png", "image_output_166.png", "image_output_76.png", "image_output_223.png", "image_output_9.png", "image_output_19.png", "image_output_371.png", "image_output_79.png", "image_output_215.png", "image_output_61.png", "image_output_622.png", "image_output_396.png", "image_output_203.png", "image_output_563.png", "image_output_390.png", "image_output_414.png", "image_output_38.png", "image_output_334.png", "image_output_113.png", "image_output_26.png", "image_output_460.png", "image_output_446.png", "image_output_600.png", "image_output_376.png", "image_output_674.png", "image_output_264.png" ]
import pandas as pd

raw_data = pd.read_csv('../input/train.csv')
columns = raw_data.columns.tolist()
if 'Id' in columns:
    columns.remove('Id')

columns_with_null_values = []
for c in columns:
    f = c
    feature_name = str(f)
    has_null = str(any(raw_data[f].isnull().values))
    if has_null == 'True':
        columns_with_null_values.append(f)
    column_type = raw_data[f].dtype

for c in columns_with_null_values:
    counter = raw_data[c].value_counts(dropna=False)
    try:
        counts = counter[None]
    except:
        counts = counter[float('nan')]
    print(c)
    length = len(raw_data)
    usable = length - counts
    print('%s/%s, %.1f Percent, %s usable rows' % (counts, length, counts / length * 100, usable))
code
1010749/cell_33
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

raw_data = pd.read_csv('../input/train.csv')
columns = raw_data.columns.tolist()
if 'Id' in columns:
    columns.remove('Id')

columns_with_null_values = []
for c in columns:
    f = c
    feature_name = str(f)
    has_null = str(any(raw_data[f].isnull().values))
    if has_null == 'True':
        columns_with_null_values.append(f)
    column_type = raw_data[f].dtype

for c in columns_with_null_values:
    counter = raw_data[c].value_counts(dropna=False)
    try:
        counts = counter[None]
    except:
        counts = counter[float('nan')]
    length = len(raw_data)
    usable = length - counts

column_names = raw_data.columns.drop('Id')
int_columns, float_columns, object_columns = ([], [], [])
for c in column_names:
    column_dtype = str(raw_data.dtypes[c])
    if column_dtype == 'int64':
        int_columns.append(c)
    elif column_dtype == 'object':
        object_columns.append(c)
    elif column_dtype == 'float64':
        float_columns.append(c)

numerical_columns, categorical_columns = ([], [])
temporal_columns = ['GarageYrBlt', 'YearBuilt', 'YearRemodAdd', 'MoSold', 'YrSold']
from_int = ['MSSubClass']
categorical_columns = object_columns + from_int + temporal_columns
numerical_columns = (list(filter(lambda x: x not in temporal_columns + ['MSSubClass'], int_columns))
                     + list(filter(lambda x: x not in temporal_columns, float_columns)))

bounded_columns = ['OverallQual', 'OverallCond']
binary_columns = []
for c in categorical_columns:
    classes = raw_data[c].unique()
    length = len(classes)
    if length == 2:
        binary_columns.append(c)

counting_columns = ['BsmtFullBath', 'BsmtHalfBath', 'FullBath', 'HalfBath', 'BedroomAbvGr',
                    'KitchenAbvGr', 'TotRmsAbvGrd', 'Fireplaces', 'GarageCars']

month_code = raw_data['MoSold'].unique()

def get_season(month_code):
    m = month_code
    if m in [3, 4, 5]:
        return 'Spring'
    elif m in [6, 7, 8]:
        return 'Summer'
    elif m in [9, 10, 11]:
        return 'Autumn'
    elif m in [12, 1, 2]:
        return 'Winter'

seasons_series = raw_data['MoSold'].apply(get_season)
seasons_series.name = 'season'

year_sold = raw_data['YrSold']
year_built = raw_data['YearBuilt']
house_age_series = year_sold.subtract(year_built)
house_age_series.name = 'HouseAge'

garage_year_built = raw_data['GarageYrBlt']
year_built = raw_data['YearBuilt']
garage_age_series = year_sold.subtract(garage_year_built)
garage_age_series.name = 'GarageAge'

year_remod_add = raw_data['YearRemodAdd']
year_built = raw_data['YearBuilt']
remod_recency_series = year_sold.subtract(year_remod_add)
remod_recency_series.name = 'RemodRecency'

recent_df = raw_data
recent_df = recent_df.assign(garage_age=garage_age_series)
recent_df = recent_df.assign(remod_recency=remod_recency_series)
recent_df = recent_df.assign(house_age=house_age_series)
recent_df = recent_df.assign(season=seasons_series)
categorical_columns = categorical_columns + ['season']
numerical_columns = numerical_columns + ['garage_age', 'remod_recency', 'house_age']
recent_df[categorical_columns] = recent_df[categorical_columns].fillna('None')

import matplotlib.pyplot as plt
plt.style.use('ggplot')
for c in categorical_columns + bounded_columns + counting_columns:
    if c in temporal_columns and c != 'MoSold':
        continue
    means = recent_df[['SalePrice', c]].groupby(c).mean().sort_values('SalePrice', ascending=False)
    errors = recent_df[['SalePrice', c]].groupby(c).std()
    barplot = means.plot.bar(yerr=errors)
    plt.title(c)
    plt.show()
code
1010749/cell_40
[ "image_output_11.png", "image_output_24.png", "image_output_46.png", "image_output_25.png", "image_output_47.png", "image_output_17.png", "image_output_30.png", "image_output_14.png", "image_output_39.png", "image_output_28.png", "image_output_23.png", "image_output_34.png", "image_output_13.png", "image_output_40.png", "image_output_5.png", "image_output_48.png", "image_output_18.png", "image_output_21.png", "image_output_52.png", "image_output_7.png", "image_output_56.png", "image_output_31.png", "image_output_20.png", "image_output_32.png", "image_output_53.png", "image_output_4.png", "image_output_51.png", "image_output_42.png", "image_output_35.png", "image_output_41.png", "image_output_57.png", "image_output_36.png", "image_output_8.png", "image_output_37.png", "image_output_16.png", "image_output_27.png", "image_output_54.png", "image_output_6.png", "image_output_45.png", "image_output_12.png", "image_output_22.png", "image_output_55.png", "image_output_3.png", "image_output_29.png", "image_output_44.png", "image_output_43.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_33.png", "image_output_50.png", "image_output_15.png", "image_output_49.png", "image_output_9.png", "image_output_19.png", "image_output_38.png", "image_output_26.png" ]
from statsmodels.graphics.factorplots import interaction_plot
from statsmodels.stats.weightstats import ztest
import matplotlib.pyplot as plt
import pandas as pd

raw_data = pd.read_csv('../input/train.csv')
columns = raw_data.columns.tolist()
if 'Id' in columns:
    columns.remove('Id')

columns_with_null_values = []
for c in columns:
    f = c
    feature_name = str(f)
    has_null = str(any(raw_data[f].isnull().values))
    if has_null == 'True':
        columns_with_null_values.append(f)
    column_type = raw_data[f].dtype

for c in columns_with_null_values:
    counter = raw_data[c].value_counts(dropna=False)
    try:
        counts = counter[None]
    except:
        counts = counter[float('nan')]
    length = len(raw_data)
    usable = length - counts

column_names = raw_data.columns.drop('Id')
int_columns, float_columns, object_columns = ([], [], [])
for c in column_names:
    column_dtype = str(raw_data.dtypes[c])
    if column_dtype == 'int64':
        int_columns.append(c)
    elif column_dtype == 'object':
        object_columns.append(c)
    elif column_dtype == 'float64':
        float_columns.append(c)

numerical_columns, categorical_columns = ([], [])
temporal_columns = ['GarageYrBlt', 'YearBuilt', 'YearRemodAdd', 'MoSold', 'YrSold']
from_int = ['MSSubClass']
categorical_columns = object_columns + from_int + temporal_columns
numerical_columns = (list(filter(lambda x: x not in temporal_columns + ['MSSubClass'], int_columns))
                     + list(filter(lambda x: x not in temporal_columns, float_columns)))

bounded_columns = ['OverallQual', 'OverallCond']
binary_columns = []
for c in categorical_columns:
    classes = raw_data[c].unique()
    length = len(classes)
    if length == 2:
        binary_columns.append(c)

counting_columns = ['BsmtFullBath', 'BsmtHalfBath', 'FullBath', 'HalfBath', 'BedroomAbvGr',
                    'KitchenAbvGr', 'TotRmsAbvGrd', 'Fireplaces', 'GarageCars']

month_code = raw_data['MoSold'].unique()

def get_season(month_code):
    m = month_code
    if m in [3, 4, 5]:
        return 'Spring'
    elif m in [6, 7, 8]:
        return 'Summer'
    elif m in [9, 10, 11]:
        return 'Autumn'
    elif m in [12, 1, 2]:
        return 'Winter'

seasons_series = raw_data['MoSold'].apply(get_season)
seasons_series.name = 'season'

year_sold = raw_data['YrSold']
year_built = raw_data['YearBuilt']
house_age_series = year_sold.subtract(year_built)
house_age_series.name = 'HouseAge'

garage_year_built = raw_data['GarageYrBlt']
year_built = raw_data['YearBuilt']
garage_age_series = year_sold.subtract(garage_year_built)
garage_age_series.name = 'GarageAge'

year_remod_add = raw_data['YearRemodAdd']
year_built = raw_data['YearBuilt']
remod_recency_series = year_sold.subtract(year_remod_add)
remod_recency_series.name = 'RemodRecency'

recent_df = raw_data
recent_df = recent_df.assign(garage_age=garage_age_series)
recent_df = recent_df.assign(remod_recency=remod_recency_series)
recent_df = recent_df.assign(house_age=house_age_series)
recent_df = recent_df.assign(season=seasons_series)
categorical_columns = categorical_columns + ['season']
numerical_columns = numerical_columns + ['garage_age', 'remod_recency', 'house_age']
recent_df[categorical_columns] = recent_df[categorical_columns].fillna('None')

import matplotlib.pyplot as plt
plt.style.use("ggplot")
for c in (categorical_columns + bounded_columns + counting_columns):
    if c in temporal_columns and c != "MoSold":
        continue
    means = recent_df[["SalePrice",c]].groupby(c).mean().sort_values("SalePrice",ascending=False)
    errors = recent_df[["SalePrice",c]].groupby(c).std()
    barplot = means.plot.bar(yerr=errors)
    plt.title(c)
    plt.show()

data = recent_df
iv = 'CentralAir'
dv = 'SalePrice'
for c in categorical_columns + bounded_columns + counting_columns:
    if c in temporal_columns:
        continue

from statsmodels.stats.weightstats import ztest
ztest_dictionary = {}
for c in categorical_columns + bounded_columns + counting_columns:
    if c in temporal_columns:
        continue
    subclasses = recent_df[c].unique()
    delete = []
    ttests, pvalues, combinations = ([], [], [])
    for sc in subclasses:
        for scsc in subclasses:
            if scsc in delete or scsc == sc:
                continue
            sample_one = recent_df[recent_df[c] == sc]['SalePrice']
            sample_two = recent_df[recent_df[c] == scsc]['SalePrice']
            ttest = ztest(sample_one, sample_two)[0]
            pvalue = ztest(sample_one, sample_two)[1]
            combination = '%s * %s' % (sc, scsc)
            ttests.append(ttest)
            pvalues.append(pvalue)
            combinations.append(combination)
    d = {'t-test': ttests, 'p-value': pvalues}
    ztest_dictionary[c] = pd.DataFrame(index=combinations, data=d)

from statsmodels.graphics.factorplots import interaction_plot
categorical_columnss = categorical_columns + counting_columns + bounded_columns
for c in categorical_columnss:
    if c in temporal_columns:
        continue
    num = recent_df['SalePrice']
    c1 = recent_df[c]
    delete = []
    for cc in categorical_columnss:
        if cc in temporal_columns or cc == c or cc in delete:
            continue
        c2 = recent_df[cc]
        c1_classes = len(recent_df[c])
        c2_classes = len(recent_df[cc])
        if c2_classes < c1_classes:
            temp = c1
            c1 = c2
            c2 = temp
        plt.style.use('ggplot')
        fig = interaction_plot(c2, c1, num, ms=12)
        plt.show()
        delete.append(cc)
code
1010749/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

raw_data = pd.read_csv('../input/train.csv')
columns = raw_data.columns.tolist()
if 'Id' in columns:
    columns.remove('Id')

columns_with_null_values = []
for c in columns:
    f = c
    feature_name = str(f)
    has_null = str(any(raw_data[f].isnull().values))
    if has_null == 'True':
        columns_with_null_values.append(f)
    column_type = raw_data[f].dtype

column_names = raw_data.columns.drop('Id')
int_columns, float_columns, object_columns = ([], [], [])
for c in column_names:
    column_dtype = str(raw_data.dtypes[c])
    if column_dtype == 'int64':
        int_columns.append(c)
    elif column_dtype == 'object':
        object_columns.append(c)
    elif column_dtype == 'float64':
        float_columns.append(c)

print('Number of int64 columns: %s' % len(int_columns))
print('Number of float64 columns: %s' % len(float_columns))
print('Number of object columns: %s' % len(object_columns))
code
1010749/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

raw_data = pd.read_csv('../input/train.csv')
columns = raw_data.columns.tolist()
if 'Id' in columns:
    columns.remove('Id')

columns_with_null_values = []
for c in columns:
    f = c
    feature_name = str(f)
    has_null = str(any(raw_data[f].isnull().values))
    if has_null == 'True':
        columns_with_null_values.append(f)
    column_type = raw_data[f].dtype
    print('%s : %s, %s' % (feature_name, has_null, column_type))

print('\n %s/%s' % (len(columns_with_null_values), len(raw_data.columns)))
code
1010749/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

raw_data = pd.read_csv('../input/train.csv')
columns = raw_data.columns.tolist()
if 'Id' in columns:
    columns.remove('Id')

columns_with_null_values = []
for c in columns:
    f = c
    feature_name = str(f)
    has_null = str(any(raw_data[f].isnull().values))
    if has_null == 'True':
        columns_with_null_values.append(f)
    column_type = raw_data[f].dtype

column_names = raw_data.columns.drop('Id')
int_columns, float_columns, object_columns = ([], [], [])
for c in column_names:
    column_dtype = str(raw_data.dtypes[c])
    if column_dtype == 'int64':
        int_columns.append(c)
    elif column_dtype == 'object':
        object_columns.append(c)
    elif column_dtype == 'float64':
        float_columns.append(c)

has_zero_columns = []
for i in int_columns:
    if 0 in raw_data[i].unique():
        has_zero_columns.append(i)
print(has_zero_columns)
code
1010749/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd

raw_data = pd.read_csv('../input/train.csv')
columns = raw_data.columns.tolist()
if 'Id' in columns:
    columns.remove('Id')

columns_with_null_values = []
for c in columns:
    f = c
    feature_name = str(f)
    has_null = str(any(raw_data[f].isnull().values))
    if has_null == 'True':
        columns_with_null_values.append(f)
    column_type = raw_data[f].dtype

for c in columns_with_null_values:
    counter = raw_data[c].value_counts(dropna=False)
    try:
        counts = counter[None]
    except:
        counts = counter[float('nan')]
    length = len(raw_data)
    usable = length - counts

column_names = raw_data.columns.drop('Id')
int_columns, float_columns, object_columns = ([], [], [])
for c in column_names:
    column_dtype = str(raw_data.dtypes[c])
    if column_dtype == 'int64':
        int_columns.append(c)
    elif column_dtype == 'object':
        object_columns.append(c)
    elif column_dtype == 'float64':
        float_columns.append(c)

numerical_columns, categorical_columns = ([], [])
temporal_columns = ['GarageYrBlt', 'YearBuilt', 'YearRemodAdd', 'MoSold', 'YrSold']
from_int = ['MSSubClass']
categorical_columns = object_columns + from_int + temporal_columns
numerical_columns = (list(filter(lambda x: x not in temporal_columns + ['MSSubClass'], int_columns))
                     + list(filter(lambda x: x not in temporal_columns, float_columns)))

bounded_columns = ['OverallQual', 'OverallCond']
binary_columns = []
for c in categorical_columns:
    classes = raw_data[c].unique()
    length = len(classes)
    if length == 2:
        binary_columns.append(c)
binary_columns
code
1010749/cell_35
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
raw_data = pd.read_csv('../input/train.csv')
columns = raw_data.columns.tolist()
if 'Id' in columns:
    columns.remove('Id')
columns_with_null_values = []
for c in columns:
    f = c
    feature_name = str(f)
    has_null = str(any(raw_data[f].isnull().values))
    if has_null == 'True':
        columns_with_null_values.append(f)
    column_type = raw_data[f].dtype
for c in columns_with_null_values:
    counter = raw_data[c].value_counts(dropna=False)
    try:
        counts = counter[None]
    except:
        counts = counter[float('nan')]
    length = len(raw_data)
    usable = length - counts
column_names = raw_data.columns.drop('Id')
int_columns, float_columns, object_columns = ([], [], [])
for c in column_names:
    column_dtype = str(raw_data.dtypes[c])
    if column_dtype == 'int64':
        int_columns.append(c)
    elif column_dtype == 'object':
        object_columns.append(c)
    elif column_dtype == 'float64':
        float_columns.append(c)
numerical_columns, categorical_columns = ([], [])
temporal_columns = ['GarageYrBlt', 'YearBuilt', 'YearRemodAdd', 'MoSold', 'YrSold']
from_int = ['MSSubClass']
categorical_columns = object_columns + from_int + temporal_columns
numerical_columns = list(filter(lambda x: x not in temporal_columns + ['MSSubClass'], int_columns)) + list(filter(lambda x: x not in temporal_columns, float_columns))
bounded_columns = ['OverallQual', 'OverallCond']
binary_columns = []
for c in categorical_columns:
    classes = raw_data[c].unique()
    length = len(classes)
    if length == 2:
        binary_columns.append(c)
counting_columns = ['BsmtFullBath', 'BsmtHalfBath', 'FullBath', 'HalfBath', 'BedroomAbvGr', 'KitchenAbvGr', 'TotRmsAbvGrd', 'Fireplaces', 'GarageCars']
month_code = raw_data['MoSold'].unique()

def get_season(month_code):
    m = month_code
    if m in [3, 4, 5]:
        return 'Spring'
    elif m in [6, 7, 8]:
        return 'Summer'
    elif m in [9, 10, 11]:
        return 'Autumn'
    elif m in [12, 1, 2]:
        return 'Winter'

seasons_series = raw_data['MoSold'].apply(get_season)
seasons_series.name = 'season'
year_sold = raw_data['YrSold']
year_built = raw_data['YearBuilt']
house_age_series = year_sold.subtract(year_built)
house_age_series.name = 'HouseAge'
garage_year_built = raw_data['GarageYrBlt']
year_built = raw_data['YearBuilt']
garage_age_series = year_sold.subtract(garage_year_built)
garage_age_series.name = 'GarageAge'
year_remod_add = raw_data['YearRemodAdd']
year_built = raw_data['YearBuilt']
remod_recency_series = year_sold.subtract(year_remod_add)
remod_recency_series.name = 'RemodRecency'
recent_df = raw_data
recent_df = recent_df.assign(garage_age=garage_age_series)
recent_df = recent_df.assign(remod_recency=remod_recency_series)
recent_df = recent_df.assign(house_age=house_age_series)
recent_df = recent_df.assign(season=seasons_series)
categorical_columns = categorical_columns + ['season']
numerical_columns = numerical_columns + ['garage_age', 'remod_recency', 'house_age']
recent_df[categorical_columns] = recent_df[categorical_columns].fillna('None')
plt.style.use("ggplot")
for c in (categorical_columns + bounded_columns + counting_columns):
    if c in temporal_columns and c != "MoSold":
        continue
    means = recent_df[["SalePrice", c]].groupby(c).mean().sort_values("SalePrice", ascending=False)
    errors = recent_df[["SalePrice", c]].groupby(c).std()
    barplot = means.plot.bar(yerr=errors)
    plt.title(c)
    plt.show()
data = recent_df
iv = 'CentralAir'
dv = 'SalePrice'
for c in categorical_columns + bounded_columns + counting_columns:
    if c in temporal_columns:
        continue
    data.boxplot(column=dv, by=c, vert=True, grid=True)
    plt.xlabel('Sale Price')
    plt.ylabel(c)
    plt.title('')
    plt.suptitle('')
    plt.show()
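# Added sketch (not part of the original cell): one concrete instance of the
# grouped means that the bar-plot loop above iterates over.
print(recent_df[['SalePrice', 'CentralAir']].groupby('CentralAir').mean())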
code
1010749/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
raw_data = pd.read_csv('../input/train.csv')
columns = raw_data.columns.tolist()
if 'Id' in columns:
    columns.remove('Id')
columns_with_null_values = []
for c in columns:
    f = c
    feature_name = str(f)
    has_null = str(any(raw_data[f].isnull().values))
    if has_null == 'True':
        columns_with_null_values.append(f)
    column_type = raw_data[f].dtype
column_names = raw_data.columns.drop('Id')
int_columns, float_columns, object_columns = ([], [], [])
for c in column_names:
    column_dtype = str(raw_data.dtypes[c])
    if column_dtype == 'int64':
        int_columns.append(c)
    elif column_dtype == 'object':
        object_columns.append(c)
    elif column_dtype == 'float64':
        float_columns.append(c)
numerical_columns, categorical_columns = ([], [])
temporal_columns = ['GarageYrBlt', 'YearBuilt', 'YearRemodAdd', 'MoSold', 'YrSold']
from_int = ['MSSubClass']
categorical_columns = object_columns + from_int + temporal_columns
numerical_columns = list(filter(lambda x: x not in temporal_columns + ['MSSubClass'], int_columns)) + list(filter(lambda x: x not in temporal_columns, float_columns))
print(numerical_columns)
code
1010749/cell_37
[ "text_plain_output_1.png" ]
from statsmodels.stats.weightstats import ztest
import pandas as pd
raw_data = pd.read_csv('../input/train.csv')
columns = raw_data.columns.tolist()
if 'Id' in columns:
    columns.remove('Id')
columns_with_null_values = []
for c in columns:
    f = c
    feature_name = str(f)
    has_null = str(any(raw_data[f].isnull().values))
    if has_null == 'True':
        columns_with_null_values.append(f)
    column_type = raw_data[f].dtype
for c in columns_with_null_values:
    counter = raw_data[c].value_counts(dropna=False)
    try:
        counts = counter[None]
    except:
        counts = counter[float('nan')]
    length = len(raw_data)
    usable = length - counts
column_names = raw_data.columns.drop('Id')
int_columns, float_columns, object_columns = ([], [], [])
for c in column_names:
    column_dtype = str(raw_data.dtypes[c])
    if column_dtype == 'int64':
        int_columns.append(c)
    elif column_dtype == 'object':
        object_columns.append(c)
    elif column_dtype == 'float64':
        float_columns.append(c)
numerical_columns, categorical_columns = ([], [])
temporal_columns = ['GarageYrBlt', 'YearBuilt', 'YearRemodAdd', 'MoSold', 'YrSold']
from_int = ['MSSubClass']
categorical_columns = object_columns + from_int + temporal_columns
numerical_columns = list(filter(lambda x: x not in temporal_columns + ['MSSubClass'], int_columns)) + list(filter(lambda x: x not in temporal_columns, float_columns))
bounded_columns = ['OverallQual', 'OverallCond']
binary_columns = []
for c in categorical_columns:
    classes = raw_data[c].unique()
    length = len(classes)
    if length == 2:
        binary_columns.append(c)
counting_columns = ['BsmtFullBath', 'BsmtHalfBath', 'FullBath', 'HalfBath', 'BedroomAbvGr', 'KitchenAbvGr', 'TotRmsAbvGrd', 'Fireplaces', 'GarageCars']
month_code = raw_data['MoSold'].unique()

def get_season(month_code):
    m = month_code
    if m in [3, 4, 5]:
        return 'Spring'
    elif m in [6, 7, 8]:
        return 'Summer'
    elif m in [9, 10, 11]:
        return 'Autumn'
    elif m in [12, 1, 2]:
        return 'Winter'

seasons_series = raw_data['MoSold'].apply(get_season)
seasons_series.name = 'season'
year_sold = raw_data['YrSold']
year_built = raw_data['YearBuilt']
house_age_series = year_sold.subtract(year_built)
house_age_series.name = 'HouseAge'
garage_year_built = raw_data['GarageYrBlt']
year_built = raw_data['YearBuilt']
garage_age_series = year_sold.subtract(garage_year_built)
garage_age_series.name = 'GarageAge'
year_remod_add = raw_data['YearRemodAdd']
year_built = raw_data['YearBuilt']
remod_recency_series = year_sold.subtract(year_remod_add)
remod_recency_series.name = 'RemodRecency'
recent_df = raw_data
recent_df = recent_df.assign(garage_age=garage_age_series)
recent_df = recent_df.assign(remod_recency=remod_recency_series)
recent_df = recent_df.assign(house_age=house_age_series)
recent_df = recent_df.assign(season=seasons_series)
categorical_columns = categorical_columns + ['season']
numerical_columns = numerical_columns + ['garage_age', 'remod_recency', 'house_age']
recent_df[categorical_columns] = recent_df[categorical_columns].fillna('None')
ztest_dictionary = {}
for c in categorical_columns + bounded_columns + counting_columns:
    if c in temporal_columns:
        continue
    subclasses = recent_df[c].unique()
    delete = []
    ttests, pvalues, combinations = ([], [], [])
    for sc in subclasses:
        for scsc in subclasses:
            if scsc in delete or scsc == sc:
                continue
            sample_one = recent_df[recent_df[c] == sc]['SalePrice']
            sample_two = recent_df[recent_df[c] == scsc]['SalePrice']
            ttest = ztest(sample_one, sample_two)[0]
            pvalue = ztest(sample_one, sample_two)[1]
            combination = '%s * %s' % (sc, scsc)
            ttests.append(ttest)
            pvalues.append(pvalue)
            combinations.append(combination)
    d = {'t-test': ttests, 'p-value': pvalues}
    ztest_dictionary[c] = pd.DataFrame(index=combinations, data=d)
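# Added sketch (assumes the frame built above; in the Ames data 'CentralAir'
# takes the values 'Y'/'N'): the same two-sample z-test for one explicit pair.
sample_y = recent_df[recent_df['CentralAir'] == 'Y']['SalePrice']
sample_n = recent_df[recent_df['CentralAir'] == 'N']['SalePrice']
stat, pval = ztest(sample_y, sample_n)
print('CentralAir Y vs N: z = %.2f, p = %.4f' % (stat, pval))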
code
1010749/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
raw_data = pd.read_csv('../input/train.csv')
column_full_names = {'MSSubClass': 'The Building Class', 'MSZoning': 'The General Zoning Classification', 'SalePrice': 'Sale Price', 'LotFrontage': 'Linear feet of street connected to property', 'LotArea': 'Lot size in square feet', 'Street': 'Type of Road Access', 'Alley': 'Type of Alley Access', 'LotShape': 'General shape of property', 'LandContour': 'Flatness of The Property', 'Utilities': 'Type of utilities available', 'LotConfig': 'Lot configuration', 'LandSlope': 'Slope of property', 'Neighborhood': 'Physical locations within Ames city limits', 'Condition1': 'Proximity to main road or railroad', 'Condition2': 'Proximity to main road or railroad (if a second is present)', 'BldgType': 'Type of dwelling', 'HouseStyle': 'Style of dwelling', 'OverallQual': 'Overall material and finish quality (1 - 10 Likert scale)', 'OverallCond': 'Overall condition rating (1 - 9 Likert scale)', 'RoofStyle': 'Type of roof', 'RoofMatl': 'Roof material', 'Exterior1st': 'Exterior covering on house', 'Exterior2nd': 'Exterior covering on house (if more than one material)', 'MasVnrType': 'Masonry veneer type', 'MasVnrArea': 'Masonry veneer area in square feet', 'ExterQual': 'Exterior material quality', 'ExterCond': 'Present condition of the material on the exterior', 'Foundation': 'Type of foundation', 'BsmtQual': 'Height of the basement', 'BsmtCond': 'General condition of the basement', 'BsmtExposure': 'Walkout or garden level basement walls', 'BsmtFinType1': 'Quality of basement finished area', 'BsmtFinSF1': 'Type 1 finished square feet', 'BsmtFinType2': 'Quality of second finished area (if present)', 'BsmtFinSF2': 'Type 2 finished square feet', 'BsmtUnfSF': 'Unfinished square feet of basement area', 'TotalBsmtSF': 'Total square feet of basement area', 'Heating': 'Type of heating', 'HeatingQC': 'Heating quality and condition', 'CentralAir': 'Central air conditioning', 'Electrical': 'Electrical system', '1stFlrSF': 'First Floor square feet', '2ndFlrSF': 'Second floor square feet', 'LowQualFinSF': 'Low quality finished square feet (all floors)', 'GrLivArea': 'Above grade (ground) living area square feet', 'BsmtFullBath': 'Basement full bathrooms', 'BsmtHalfBath': 'Basement half bathrooms', 'FullBath': 'Full bathrooms above grade', 'HalfBath': 'Half baths above grade', 'Bedroom': 'Number of bedrooms above basement level', 'Kitchen': 'Number of kitchens', 'KitchenQual': 'Kitchen quality', 'TotRmsAbvGrd': 'Total rooms above grade (does not include bathrooms)', 'Functional': 'Home functionality rating', 'Fireplaces': 'Number of fireplaces', 'FireplaceQu': 'Fireplace quality', 'GarageType': 'Garage location', 'GarageYrBlt': 'Year garage was built', 'GarageFinish': 'Interior finish of the garage', 'GarageCars': 'Size of garage in car capacity', 'GarageArea': 'Size of garage in square feet', 'GarageQual': 'Garage quality', 'GarageCond': 'Garage condition', 'PavedDrive': 'Paved driveway', 'WoodDeckSF': 'Wood deck area in square feet', 'OpenPorchSF': 'Open porch area in square feet', 'EnclosedPorch': 'Enclosed porch area in square feet', '3SsnPorch': 'Three season porch area in square feet', 'ScreenPorch': 'Screen porch area in square feet', 'PoolArea': 'Pool area in square feet', 'PoolQC': 'Pool quality', 'Fence': 'Fence quality', 'MiscFeature': 'Miscellaneous feature not covered in other categories', 'MiscVal': 'Value of miscellaneous feature ($)', 'MoSold': 'Month Sold', 'YrSold': 'Year Sold', 'YearBuilt': 'Original construction date', 'YearRemodAdd': 'Remodel date', 'SaleType': 'Type of sale', 'SaleCondition': 'Condition of sale'}
columns = raw_data.columns.tolist()
if 'Id' in columns:
    columns.remove('Id')
columns_with_null_values = []
for c in columns:
    f = c
    feature_name = str(f)
    has_null = str(any(raw_data[f].isnull().values))
    if has_null == 'True':
        columns_with_null_values.append(f)
    column_type = raw_data[f].dtype
column_names = raw_data.columns.drop('Id')
int_columns, float_columns, object_columns = ([], [], [])
for c in column_names:
    column_dtype = str(raw_data.dtypes[c])
    if column_dtype == 'int64':
        int_columns.append(c)
    elif column_dtype == 'object':
        object_columns.append(c)
    elif column_dtype == 'float64':
        float_columns.append(c)
for f in float_columns:
    print(f + ' ' + column_full_names[f])
code
1010749/cell_5
[ "image_output_11.png", "image_output_24.png", "image_output_46.png", "image_output_25.png", "image_output_47.png", "image_output_17.png", "image_output_30.png", "image_output_14.png", "image_output_39.png", "image_output_28.png", "image_output_23.png", "image_output_34.png", "image_output_13.png", "image_output_40.png", "image_output_5.png", "image_output_48.png", "image_output_18.png", "image_output_21.png", "image_output_52.png", "image_output_7.png", "image_output_56.png", "image_output_31.png", "image_output_20.png", "image_output_32.png", "image_output_53.png", "image_output_4.png", "image_output_51.png", "image_output_42.png", "image_output_35.png", "image_output_41.png", "image_output_36.png", "image_output_8.png", "image_output_37.png", "image_output_16.png", "image_output_27.png", "image_output_54.png", "image_output_6.png", "image_output_45.png", "image_output_12.png", "image_output_22.png", "image_output_55.png", "image_output_3.png", "image_output_29.png", "image_output_44.png", "image_output_43.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_33.png", "image_output_50.png", "image_output_15.png", "image_output_49.png", "image_output_9.png", "image_output_19.png", "image_output_38.png", "image_output_26.png" ]
import pandas as pd
raw_data = pd.read_csv('../input/train.csv')
columns = raw_data.columns.tolist()
if 'Id' in columns:
    columns.remove('Id')
print(columns)
print('Number of Features: %s' % len(columns))
code
50220357/cell_9
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
plt.figure(figsize=(10, 10))
sn.heatmap(train.isnull(), yticklabels=False, cbar=False)
code
50220357/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
pd.isnull(train).sum()
train['Age'] = train['Age'].fillna(-0.5)
test['Age'] = test['Age'].fillna(-0.5)
bins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf]
labels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
train['AgeGroup'] = pd.cut(train['Age'], bins, labels=labels)
test['AgeGroup'] = pd.cut(test['Age'], bins, labels=labels)
plt.figure(figsize=(10, 5))
sn.barplot(x='AgeGroup', y='Survived', data=train)
code
50220357/cell_33
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
test.describe(include='all')
code
50220357/cell_44
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
pd.isnull(train).sum()
train['Age'] = train['Age'].fillna(-0.5)
test['Age'] = test['Age'].fillna(-0.5)
bins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf]
labels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
train['AgeGroup'] = pd.cut(train['Age'], bins, labels=labels)
test['AgeGroup'] = pd.cut(test['Age'], bins, labels=labels)
train.drop(['Cabin'], axis=1, inplace=True)
test.drop(['Cabin'], axis=1, inplace=True)
train.drop(['Ticket'], axis=1, inplace=True)
test.drop(['Ticket'], axis=1, inplace=True)
train.fillna({'Embarked': 'S'}, inplace=True)
combine = [train, test]
for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(train['Title'], train['Sex'])
for dataset in combine:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace(['Countess', 'Sir'], 'Royal')
    dataset['Title'] = dataset['Title'].replace(['Mlle', 'Ms'], 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
train[['Title', 'Survived']].groupby(['Title'], as_index=False).mean()
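# Quick added check (illustrative name string, not from the dataset): the
# title-extraction regex above pulls the honorific that precedes the period.
import re
print(re.search(' ([A-Za-z]+)\\.', 'Braund, Mr. Owen Harris').group(1))  # -> Mr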
code
50220357/cell_20
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sn.barplot(x='SibSp', y='Survived', data=train)
print('Percentage of SibSp = 0 who survived:', train['Survived'][train['SibSp'] == 0].value_counts(normalize=True)[1] * 100)
print('Percentage of SibSp = 1 who survived:', train['Survived'][train['SibSp'] == 1].value_counts(normalize=True)[1] * 100)
print('Percentage of SibSp = 2 who survived:', train['Survived'][train['SibSp'] == 2].value_counts(normalize=True)[1] * 100)
print('Percentage of SibSp = 3 who survived:', train['Survived'][train['SibSp'] == 3].value_counts(normalize=True)[1] * 100)
code
50220357/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
pd.isnull(train).sum()
train['Age'] = train['Age'].fillna(-0.5)
test['Age'] = test['Age'].fillna(-0.5)
bins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf]
labels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
train['AgeGroup'] = pd.cut(train['Age'], bins, labels=labels)
test['AgeGroup'] = pd.cut(test['Age'], bins, labels=labels)
sn.barplot(x='Parch', y='Survived', data=train)
plt.show()
code
50220357/cell_26
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
pd.isnull(train).sum()
train['Age'] = train['Age'].fillna(-0.5)
test['Age'] = test['Age'].fillna(-0.5)
bins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf]
labels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
train['AgeGroup'] = pd.cut(train['Age'], bins, labels=labels)
test['AgeGroup'] = pd.cut(test['Age'], bins, labels=labels)
sn.barplot(x='Pclass', y='Survived', data=train)
print('Percentage of Pclass = 1 who survived:', train['Survived'][train['Pclass'] == 1].value_counts(normalize=True)[1] * 100)
print('Percentage of Pclass = 2 who survived:', train['Survived'][train['Pclass'] == 2].value_counts(normalize=True)[1] * 100)
print('Percentage of Pclass = 3 who survived:', train['Survived'][train['Pclass'] == 3].value_counts(normalize=True)[1] * 100)
code
50220357/cell_48
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
pd.isnull(train).sum()
train['Age'] = train['Age'].fillna(-0.5)
test['Age'] = test['Age'].fillna(-0.5)
bins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf]
labels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
train['AgeGroup'] = pd.cut(train['Age'], bins, labels=labels)
test['AgeGroup'] = pd.cut(test['Age'], bins, labels=labels)
train.drop(['Cabin'], axis=1, inplace=True)
test.drop(['Cabin'], axis=1, inplace=True)
train.drop(['Ticket'], axis=1, inplace=True)
test.drop(['Ticket'], axis=1, inplace=True)
train.fillna({'Embarked': 'S'}, inplace=True)
combine = [train, test]
for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(train['Title'], train['Sex'])
title_mapping = {'Mr': 1, 'Miss': 2, 'Mrs': 3, 'Master': 4, 'Royal': 5, 'Rare': 6}
for dataset in combine:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)
age_mapping = {'Baby': 1, 'Child': 2, 'Teenager': 3, 'Student': 4, 'Young Adult': 5, 'Adult': 6, 'Senior': 7}
train['AgeGroup'] = train['AgeGroup'].map(age_mapping)
test['AgeGroup'] = test['AgeGroup'].map(age_mapping)
train.head()
code
50220357/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
pd.isnull(train).sum()
code
50220357/cell_7
[ "text_html_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
print(train.columns.values)
code
50220357/cell_45
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
pd.isnull(train).sum()
train['Age'] = train['Age'].fillna(-0.5)
test['Age'] = test['Age'].fillna(-0.5)
bins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf]
labels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
train['AgeGroup'] = pd.cut(train['Age'], bins, labels=labels)
test['AgeGroup'] = pd.cut(test['Age'], bins, labels=labels)
train.drop(['Cabin'], axis=1, inplace=True)
test.drop(['Cabin'], axis=1, inplace=True)
train.drop(['Ticket'], axis=1, inplace=True)
test.drop(['Ticket'], axis=1, inplace=True)
train.fillna({'Embarked': 'S'}, inplace=True)
combine = [train, test]
for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(train['Title'], train['Sex'])
title_mapping = {'Mr': 1, 'Miss': 2, 'Mrs': 3, 'Master': 4, 'Royal': 5, 'Rare': 6}
for dataset in combine:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)
train.head()
code
50220357/cell_8
[ "text_html_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.head(10)
code
50220357/cell_38
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.drop(['Cabin'], axis=1, inplace=True)
test.drop(['Cabin'], axis=1, inplace=True)
train.drop(['Ticket'], axis=1, inplace=True)
test.drop(['Ticket'], axis=1, inplace=True)
print('Number of people embarking in Southampton (S):')
southampton = train[train['Embarked'] == 'S'].shape[0]
print(southampton)
print('Number of people embarking in Cherbourg (C):')
cherbourg = train[train['Embarked'] == 'C'].shape[0]
print(cherbourg)
print('Number of people embarking in Queenstown (Q):')
queenstown = train[train['Embarked'] == 'Q'].shape[0]
print(queenstown)
code
50220357/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
sn.barplot(x='Sex', y='Survived', data=train)
print('Percentage of females who survived:', train['Survived'][train['Sex'] == 'female'].value_counts(normalize=True)[1] * 100)
print('Percentage of males who survived:', train['Survived'][train['Sex'] == 'male'].value_counts(normalize=True)[1] * 100)
code
50220357/cell_43
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
pd.isnull(train).sum()
train['Age'] = train['Age'].fillna(-0.5)
test['Age'] = test['Age'].fillna(-0.5)
bins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf]
labels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
train['AgeGroup'] = pd.cut(train['Age'], bins, labels=labels)
test['AgeGroup'] = pd.cut(test['Age'], bins, labels=labels)
train.drop(['Cabin'], axis=1, inplace=True)
test.drop(['Cabin'], axis=1, inplace=True)
train.drop(['Ticket'], axis=1, inplace=True)
test.drop(['Ticket'], axis=1, inplace=True)
train.fillna({'Embarked': 'S'}, inplace=True)
combine = [train, test]
for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(train['Title'], train['Sex'])
code
50220357/cell_53
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
pd.isnull(train).sum()
train['Age'] = train['Age'].fillna(-0.5)
test['Age'] = test['Age'].fillna(-0.5)
bins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf]
labels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
train['AgeGroup'] = pd.cut(train['Age'], bins, labels=labels)
test['AgeGroup'] = pd.cut(test['Age'], bins, labels=labels)
train.drop(['Cabin'], axis=1, inplace=True)
test.drop(['Cabin'], axis=1, inplace=True)
train.drop(['Ticket'], axis=1, inplace=True)
test.drop(['Ticket'], axis=1, inplace=True)
train.fillna({'Embarked': 'S'}, inplace=True)
combine = [train, test]
for dataset in combine:
    dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
pd.crosstab(train['Title'], train['Sex'])
title_mapping = {'Mr': 1, 'Miss': 2, 'Mrs': 3, 'Master': 4, 'Royal': 5, 'Rare': 6}
for dataset in combine:
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)
age_mapping = {'Baby': 1, 'Child': 2, 'Teenager': 3, 'Student': 4, 'Young Adult': 5, 'Adult': 6, 'Senior': 7}
train['AgeGroup'] = train['AgeGroup'].map(age_mapping)
test['AgeGroup'] = test['AgeGroup'].map(age_mapping)
train = train.drop(['Age'], axis=1)
test = test.drop(['Age'], axis=1)
train.drop(['Name'], axis=1, inplace=True)
test.drop(['Name'], axis=1, inplace=True)
sex_mapping = {'male': 0, 'female': 1}
train['Sex'] = train['Sex'].map(sex_mapping)
test['Sex'] = test['Sex'].map(sex_mapping)
train.head()
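# Added sketch using the mapping defined above: decoding the numeric Sex column
# back to its labels (purely illustrative).
inv_sex = {v: k for k, v in sex_mapping.items()}
print(inv_sex[0], inv_sex[1])  # male female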
code
50220357/cell_5
[ "text_html_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.describe(include='all')
code
74052542/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
train_original = train.copy()
test_original = test.copy()
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
train.columns.values
features = list(train.columns)
list(enumerate(features))
code
74052542/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
train_original = train.copy()
test_original = test.copy()
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
train.columns.values
features = list(train.columns)
list(enumerate(features))
fig, ax = plt.subplots(figsize=(6, 6))
bars = ax.bar(train["claim"].value_counts().index, train["claim"].value_counts().values, color='darkorange', edgecolor="black", width=0.4)
ax.set_title("Claim (target) values distribution", fontsize=20, pad=15)
ax.set_ylabel("Amount of values", fontsize=14, labelpad=15)
ax.set_xlabel("Claim (target) value", fontsize=14, labelpad=10)
ax.set_xticks(train["claim"].value_counts().index)
ax.tick_params(axis="both", labelsize=12)
fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(9, 8), sharex=True)
ax[0].scatter(train['f10'], train['f15'], ec='k', color='skyblue')
ax[0].set_ylabel('f15')
ax[0].set_title('Relation b/w f10 & f15')
ax[1].scatter(train['f3'], train['f4'], ec='k', color='skyblue')
ax[1].set_xlabel('f3')
ax[1].set_ylabel('f4')
ax[1].set_title('Relation b/w f3 & f4')
plt.tight_layout()
plt.show()
code
74052542/cell_6
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
display(train.head())
display(test.head())
display(sub.head())
code
74052542/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
74052542/cell_7
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
print('size of train: ', train.shape)
print('size of test: ', test.shape)
print('size of submission: ', sub.shape)
code
74052542/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
train_original = train.copy()
test_original = test.copy()
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
train.columns.values
features = list(train.columns)
list(enumerate(features))
fig, ax = plt.subplots(figsize=(6, 6))
bars = ax.bar(train["claim"].value_counts().index, train["claim"].value_counts().values, color='darkorange', edgecolor="black", width=0.4)
ax.set_title("Claim (target) values distribution", fontsize=20, pad=15)
ax.set_ylabel("Amount of values", fontsize=14, labelpad=15)
ax.set_xlabel("Claim (target) value", fontsize=14, labelpad=10)
ax.set_xticks(train["claim"].value_counts().index)
ax.tick_params(axis="both", labelsize=12)
plt.figure(figsize=(7, 6))
sns.distplot(train['claim'])
code
74052542/cell_8
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
print(train.info())
print(test.info())
code
74052542/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
train_original = train.copy()
test_original = test.copy()
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
train.columns.values
features = list(train.columns)
list(enumerate(features))
train['claim'].value_counts()
code
74052542/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
train_original = train.copy()
test_original = test.copy()
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
train.columns.values
features = list(train.columns)
list(enumerate(features))
fig, ax = plt.subplots(figsize=(6, 6))
bars = ax.bar(train['claim'].value_counts().index, train['claim'].value_counts().values, color='darkorange', edgecolor='black', width=0.4)
ax.set_title('Claim (target) values distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Claim (target) value', fontsize=14, labelpad=10)
ax.set_xticks(train['claim'].value_counts().index)
ax.tick_params(axis='both', labelsize=12)
code
74052542/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
train_original = train.copy()
test_original = test.copy()
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
train.columns.values
features = list(train.columns)
list(enumerate(features))
train.describe()
code
74052542/cell_12
[ "text_html_output_2.png", "text_html_output_1.png", "text_html_output_3.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/test.csv')
sub = pd.read_csv('/kaggle/input/tabular-playground-series-sep-2021/sample_solution.csv')
train_original = train.copy()
test_original = test.copy()
train.drop('id', axis=1, inplace=True)
test.drop('id', axis=1, inplace=True)
train.columns.values
code
34122127/cell_2
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
cols_list = []
for j in range(8):
    for i in range(8):
        cols_list.append(f'S{i}R{j}')
cols_list.append('target')
df = pd.read_csv('/kaggle/input/emg-4/0.csv', header=None)
df.columns = cols_list
df
code
34122127/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34122127/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
cols_list = []
for j in range(8):
    for i in range(8):
        cols_list.append(f'S{i}R{j}')
cols_list.append('target')
df = pd.read_csv('/kaggle/input/emg-4/0.csv', header=None)
df.columns = cols_list
df
pd.wide_to_long(df.reset_index(), ['S1', 'S2', 'S3', 'S4', 'S5', 'S6', 'S7', 'S0'], i=['index', 'target'], j='R', sep='R')
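# A minimal toy sketch (column names are illustrative, not from the dataset)
# showing how pd.wide_to_long splits 'S<sensor>R<rep>' columns into a long
# sensor-by-repetition layout, mirroring the reshape above.
toy = pd.DataFrame({'S0R0': [1], 'S0R1': [2], 'S1R0': [3], 'S1R1': [4], 'target': [0]})
toy_long = pd.wide_to_long(toy.reset_index(), ['S0', 'S1'], i=['index', 'target'], j='R', sep='R')
print(toy_long)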
code
106209373/cell_42
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
hotel = pd.read_csv('../input/hotel-booking-demand/hotel_bookings.csv')
hotel = hotel.drop(['agent', 'company'], axis=1)
confirmed_bookings = hotel[hotel.is_canceled == '0']
plt.figure(figsize=(13, 8))
sns.countplot(x='arrival_date_month', data=hotel, hue='is_canceled', palette='ocean')
code
106209373/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
hotel = pd.read_csv('../input/hotel-booking-demand/hotel_bookings.csv')
hotel = hotel.drop(['agent', 'company'], axis=1)
confirmed_bookings = hotel[hotel.is_canceled == '0']
hotel['is_canceled'].value_counts()
code
106209373/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
hotel = pd.read_csv('../input/hotel-booking-demand/hotel_bookings.csv')
hotel = hotel.drop(['agent', 'company'], axis=1)
hotel['arrival_date_year'].unique()
code
106209373/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd
hotel = pd.read_csv('../input/hotel-booking-demand/hotel_bookings.csv')
hotel = hotel.drop(['agent', 'company'], axis=1)
confirmed_bookings = hotel[hotel.is_canceled == '0']
hotel['country'].value_counts(normalize=True)
code
106209373/cell_40
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
hotel = pd.read_csv('../input/hotel-booking-demand/hotel_bookings.csv')
hotel = hotel.drop(['agent', 'company'], axis=1)
confirmed_bookings = hotel[hotel.is_canceled == '0']
plt.figure(figsize=(13, 8))
sns.countplot(x='market_segment', data=hotel, hue='is_canceled', palette='prism_r')
code
106209373/cell_48
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
hotel = pd.read_csv('../input/hotel-booking-demand/hotel_bookings.csv')
hotel = hotel.drop(['agent', 'company'], axis=1)
confirmed_bookings = hotel[hotel.is_canceled == '0']
hotel['arrival_date_year'].unique()
code
106209373/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd
hotel = pd.read_csv('../input/hotel-booking-demand/hotel_bookings.csv')
hotel.head()
code
106209373/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
hotel = pd.read_csv('../input/hotel-booking-demand/hotel_bookings.csv')
hotel = hotel.drop(['agent', 'company'], axis=1)
confirmed_bookings = hotel[hotel.is_canceled == 0]  # is_canceled is an integer flag, so compare with 0 (the string '0' matches nothing)
confirmed_bookings['arrival_date_year'] = hotel['arrival_date_year']
Last = confirmed_bookings['arrival_date_year'].value_counts().sort_index()
Last
code
106209373/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd
hotel = pd.read_csv('../input/hotel-booking-demand/hotel_bookings.csv')
hotel = hotel.drop(['agent', 'company'], axis=1)
confirmed_bookings = hotel[hotel.is_canceled == '0']
hotel['arrival_date_month'].value_counts()
code
106209373/cell_45
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
hotel = pd.read_csv('../input/hotel-booking-demand/hotel_bookings.csv')
hotel = hotel.drop(['agent', 'company'], axis=1)
confirmed_bookings = hotel[hotel.is_canceled == '0']
plt.figure(figsize=(10, 8))
sns.countplot(x='deposit_type', data=hotel, hue='is_canceled', palette='gist_rainbow_r')
code
106209373/cell_49
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
hotel = pd.read_csv('../input/hotel-booking-demand/hotel_bookings.csv')
hotel = hotel.drop(['agent', 'company'], axis=1)
confirmed_bookings = hotel[hotel.is_canceled == '0']
plt.figure(figsize=(18, 9))
sns.lineplot(data=hotel, x='arrival_date_month', y='arrival_date_year')
code
106209373/cell_28
[ "text_plain_output_1.png" ]
import pandas as pd
hotel = pd.read_csv('../input/hotel-booking-demand/hotel_bookings.csv')
hotel = hotel.drop(['agent', 'company'], axis=1)
confirmed_bookings = hotel[hotel.is_canceled == '0']
hotel['market_segment'].value_counts(normalize=True)
code
106209373/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
hotel = pd.read_csv('../input/hotel-booking-demand/hotel_bookings.csv')
hotel = hotel.drop(['agent', 'company'], axis=1)
confirmed_bookings = hotel[hotel.is_canceled == '0']
hotel['customer_type'].value_counts()
code
106209373/cell_38
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
hotel = pd.read_csv('../input/hotel-booking-demand/hotel_bookings.csv')
hotel = hotel.drop(['agent', 'company'], axis=1)
confirmed_bookings = hotel[hotel.is_canceled == '0']
plt.figure(figsize=(10, 8))
sns.countplot(data=hotel, x='total_of_special_requests', hue='is_canceled', palette='cool_r')
code
106209373/cell_47
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
hotel = pd.read_csv('../input/hotel-booking-demand/hotel_bookings.csv')
hotel = hotel.drop(['agent', 'company'], axis=1)
confirmed_bookings = hotel[hotel.is_canceled == '0']
plt.figure(figsize=(7, 8))
sns.countplot(x='reservation_status', data=hotel, hue='is_canceled', palette='afmhot')
code
106209373/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
hotel = pd.read_csv('../input/hotel-booking-demand/hotel_bookings.csv')
hotel.info()
code
106209373/cell_31
[ "text_plain_output_1.png" ]
import pandas as pd
hotel = pd.read_csv('../input/hotel-booking-demand/hotel_bookings.csv')
hotel = hotel.drop(['agent', 'company'], axis=1)
confirmed_bookings = hotel[hotel.is_canceled == '0']
hotel['reservation_status'].value_counts(normalize=True)
code
106209373/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd
hotel = pd.read_csv('../input/hotel-booking-demand/hotel_bookings.csv')
hotel = hotel.drop(['agent', 'company'], axis=1)
confirmed_bookings = hotel[hotel.is_canceled == '0']
hotel['meal'].value_counts().unique
code
106209373/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
hotel = pd.read_csv('../input/hotel-booking-demand/hotel_bookings.csv')
print(round(100 * (hotel.isnull().sum() / len(hotel.index)), 2))
code
106209373/cell_36
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
hotel = pd.read_csv('../input/hotel-booking-demand/hotel_bookings.csv')
hotel = hotel.drop(['agent', 'company'], axis=1)
confirmed_bookings = hotel[hotel.is_canceled == '0']
plt.figure(figsize=(8, 8))
sns.countplot(data=hotel, x='hotel', hue='is_canceled', palette='Set1_r')
code
2032996/cell_13
[ "text_plain_output_1.png" ]
from scipy.sparse import csr_matrix, hstack
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.linear_model import Ridge
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler, LabelBinarizer
import numpy as np
import pandas as pd
import time
Time_0 = time.time()
train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')
y_train = train['log_price'] = np.log(train['price'] + 1)

def handle_missing(dataset):
    dataset['category_name'].fillna(value='NA/NA/NA', inplace=True)
    dataset['brand_name'].fillna(value='missing', inplace=True)
    dataset['item_description'].fillna(value='missing', inplace=True)
    return dataset

def split_cat(dataset):
    dataset['cat1'], dataset['cat2'], dataset['cat3'] = zip(*dataset['category_name'].str.split('/', 2))
    return dataset

def label_maker(dataset):
    lb = LabelBinarizer(sparse_output=True)
    cat1 = lb.fit_transform(dataset['cat1'])
    cat2 = lb.fit_transform(dataset['cat2'])
    cat3 = lb.fit_transform(dataset['cat3'])
    brand_name = lb.fit_transform(dataset['brand_name'])
    del lb
    return (cat1, cat2, cat3, brand_name)

def get_dums(dataset):
    X_dummies = csr_matrix(pd.get_dummies(dataset[['item_condition_id', 'shipping']], sparse=True).values)
    return X_dummies

def text_processing(dataset):
    MIN_DF_COUNT = 10
    MAX_DF_COUNT = 10000
    cv = CountVectorizer(min_df=MIN_DF_COUNT, max_df=MAX_DF_COUNT)
    name = cv.fit_transform(dataset['name'])
    MIN_DF_TF = 10
    MAX_DF_TF = 51000
    MAX_FEATURES_TF = 51000
    tv = TfidfVectorizer(max_features=MAX_FEATURES_TF, min_df=MIN_DF_TF, max_df=MAX_DF_TF, ngram_range=(1, 3), stop_words='english')
    description = tv.fit_transform(dataset['item_description'])
    del cv, tv
    return (name, description)

nrow_train = train.shape[0]
merge: pd.DataFrame = pd.concat([train, test])
submission: pd.DataFrame = test[['test_id']]
del train
del test
start_time = time.time()
merge = handle_missing(merge)
merge = split_cat(merge)
cat1, cat2, cat3, brand_name = label_maker(merge)
X_dummies = get_dums(merge)
name, description = text_processing(merge)
# stack each category level once alongside brand, dummies and text features
sparse_merge = hstack((cat1, cat2, cat3, brand_name, X_dummies, name, description)).tocsr()
X_train = sparse_merge[:nrow_train]
X_test = sparse_merge[nrow_train:]

def model_testing(model, X_test, y_test):
    y_pred = model.predict(X_test)
    error = rmsle(y_test, y_pred)

def rmsle(y, y0):
    assert len(y) == len(y0)
    return np.sqrt(np.mean(np.power(np.log1p(y) - np.log1p(y0), 2)))

ridge_model_1 = Ridge(alpha=5.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=None, tol=0.001, solver='auto', random_state=None)
ridge_model_2 = Ridge(alpha=5.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=None, tol=0.001, solver='sag', random_state=None)
ridge_model_3 = Ridge(alpha=5.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=None, tol=0.001, solver='lsqr', random_state=None)
gbrt = GradientBoostingRegressor(max_depth=2, n_estimators=5, learning_rate=0.9, subsample=0.9)
start_time = time.time()
X_t, X_v, y_t, y_v = train_test_split(X_train, y_train, test_size=0.2)
ridge_model_1.fit(X_train, y_train)
model_testing(ridge_model_1, X_test=X_v, y_test=y_v)

def create_submission(model, test=X_test, submission=submission, path='./predictions.csv'):
    predictions = model.predict(test)
    predictions = pd.Series(np.exp(predictions) - 1)
    submission['price'] = predictions
    submission.to_csv(path, index=False)

start_time = time.time()
create_submission(ridge_model_1)
print('TIME:', time.time() - start_time)
print('TOTAL TIME:', time.time() - Time_0)
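# Added sanity check (illustrative values only): the rmsle metric defined above
# should return 0.0 for a perfect prediction.
print(rmsle(np.array([10.0, 20.0]), np.array([10.0, 20.0])))  # -> 0.0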
code
2032996/cell_2
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.sparse import csr_matrix, hstack
import time
import re
import math
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler, LabelBinarizer
from sklearn.cross_validation import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics import mean_squared_log_error
from sklearn.linear_model import Ridge
from sklearn.ensemble import GradientBoostingRegressor
import xgboost as xgb
seed = 90
code
2032996/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.sparse import csr_matrix, hstack
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.linear_model import Ridge
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler, LabelBinarizer
import numpy as np
import pandas as pd
import time
Time_0 = time.time()
train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')
y_train = train['log_price'] = np.log(train['price'] + 1)

def handle_missing(dataset):
    dataset['category_name'].fillna(value='NA/NA/NA', inplace=True)
    dataset['brand_name'].fillna(value='missing', inplace=True)
    dataset['item_description'].fillna(value='missing', inplace=True)
    return dataset

def split_cat(dataset):
    dataset['cat1'], dataset['cat2'], dataset['cat3'] = zip(*dataset['category_name'].str.split('/', 2))
    return dataset

def label_maker(dataset):
    lb = LabelBinarizer(sparse_output=True)
    cat1 = lb.fit_transform(dataset['cat1'])
    cat2 = lb.fit_transform(dataset['cat2'])
    cat3 = lb.fit_transform(dataset['cat3'])
    brand_name = lb.fit_transform(dataset['brand_name'])
    del lb
    return (cat1, cat2, cat3, brand_name)

def get_dums(dataset):
    X_dummies = csr_matrix(pd.get_dummies(dataset[['item_condition_id', 'shipping']], sparse=True).values)
    return X_dummies

def text_processing(dataset):
    MIN_DF_COUNT = 10
    MAX_DF_COUNT = 10000
    cv = CountVectorizer(min_df=MIN_DF_COUNT, max_df=MAX_DF_COUNT)
    name = cv.fit_transform(dataset['name'])
    MIN_DF_TF = 10
    MAX_DF_TF = 51000
    MAX_FEATURES_TF = 51000
    tv = TfidfVectorizer(max_features=MAX_FEATURES_TF, min_df=MIN_DF_TF, max_df=MAX_DF_TF, ngram_range=(1, 3), stop_words='english')
    description = tv.fit_transform(dataset['item_description'])
    del cv, tv
    return (name, description)

nrow_train = train.shape[0]
merge: pd.DataFrame = pd.concat([train, test])
submission: pd.DataFrame = test[['test_id']]
del train
del test
start_time = time.time()
merge = handle_missing(merge)
merge = split_cat(merge)
cat1, cat2, cat3, brand_name = label_maker(merge)
X_dummies = get_dums(merge)
name, description = text_processing(merge)
# stack each category level once alongside brand, dummies and text features
sparse_merge = hstack((cat1, cat2, cat3, brand_name, X_dummies, name, description)).tocsr()
X_train = sparse_merge[:nrow_train]
X_test = sparse_merge[nrow_train:]

def model_testing(model, X_test, y_test):
    y_pred = model.predict(X_test)
    error = rmsle(y_test, y_pred)

def rmsle(y, y0):
    assert len(y) == len(y0)
    return np.sqrt(np.mean(np.power(np.log1p(y) - np.log1p(y0), 2)))

ridge_model_1 = Ridge(alpha=5.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=None, tol=0.001, solver='auto', random_state=None)
ridge_model_2 = Ridge(alpha=5.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=None, tol=0.001, solver='sag', random_state=None)
ridge_model_3 = Ridge(alpha=5.0, fit_intercept=True, normalize=False, copy_X=True, max_iter=None, tol=0.001, solver='lsqr', random_state=None)
gbrt = GradientBoostingRegressor(max_depth=2, n_estimators=5, learning_rate=0.9, subsample=0.9)
start_time = time.time()
print('train test splitting...')
X_t, X_v, y_t, y_v = train_test_split(X_train, y_train, test_size=0.2)
print('training model...')
print('1')
ridge_model_1.fit(X_train, y_train)
model_testing(ridge_model_1, X_test=X_v, y_test=y_v)
print('training model...')
print('2')
print('training model...')
print('3')
print('TIME:', time.time() - start_time)
code
2032996/cell_7
[ "text_plain_output_1.png" ]
from scipy.sparse import csr_matrix, hstack
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler, LabelBinarizer
import pandas as pd
import time
Time_0 = time.time()
train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')

def handle_missing(dataset):
    dataset['category_name'].fillna(value='NA/NA/NA', inplace=True)
    dataset['brand_name'].fillna(value='missing', inplace=True)
    dataset['item_description'].fillna(value='missing', inplace=True)
    return dataset

def split_cat(dataset):
    dataset['cat1'], dataset['cat2'], dataset['cat3'] = zip(*dataset['category_name'].str.split('/', 2))
    return dataset

def label_maker(dataset):
    lb = LabelBinarizer(sparse_output=True)
    cat1 = lb.fit_transform(dataset['cat1'])
    cat2 = lb.fit_transform(dataset['cat2'])
    cat3 = lb.fit_transform(dataset['cat3'])
    brand_name = lb.fit_transform(dataset['brand_name'])
    del lb
    return (cat1, cat2, cat3, brand_name)

def get_dums(dataset):
    X_dummies = csr_matrix(pd.get_dummies(dataset[['item_condition_id', 'shipping']], sparse=True).values)
    return X_dummies

def text_processing(dataset):
    MIN_DF_COUNT = 10
    MAX_DF_COUNT = 10000
    cv = CountVectorizer(min_df=MIN_DF_COUNT, max_df=MAX_DF_COUNT)
    name = cv.fit_transform(dataset['name'])
    MIN_DF_TF = 10
    MAX_DF_TF = 51000
    MAX_FEATURES_TF = 51000
    tv = TfidfVectorizer(max_features=MAX_FEATURES_TF, min_df=MIN_DF_TF, max_df=MAX_DF_TF, ngram_range=(1, 3), stop_words='english')
    description = tv.fit_transform(dataset['item_description'])
    del cv, tv
    return (name, description)

# merge is assumed to be the concatenated train/test frame built in an earlier cell
start_time = time.time()
print('Handle Missing...')
merge = handle_missing(merge)
print('splitting cat...')
merge = split_cat(merge)
print('making labels...')
cat1, cat2, cat3, brand_name = label_maker(merge)
print('getting dummies...')
X_dummies = get_dums(merge)
print('processing text...')
name, description = text_processing(merge)
print('stacking train...')
sparse_merge = hstack((cat1, cat2, cat3, brand_name, X_dummies, name, description)).tocsr()
print('TIME:', time.time() - start_time)
code
129023624/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train_df.shape
nan_count = train_df.isna().sum()
nan_count
train_df.drop(['location'], axis=1)
train_df.duplicated(['text', 'target']).sum()
train_df = train_df.drop_duplicates(['text', 'target'])
train_df.shape
# 'contractions_converted' is created in a later preprocessing cell of this notebook
train_df['contractions_converted'][9]
code
129023624/cell_56
[ "text_html_output_1.png" ]
from keras.layers import Dropout, Activation, Flatten, Dense, Embedding, GlobalMaxPool1D
from keras.layers import LSTM, GRU, SimpleRNN
from keras.models import Sequential, Model, load_model
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict, KFold, GridSearchCV
from transformers import XLMRobertaTokenizer, TFAutoModel, TFAutoModelForSequenceClassification, TFXLMRobertaModel, XLMRobertaModel
import contractions
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import re
train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train_df.shape
nan_count = train_df.isna().sum()
nan_count
train_df.drop(['location'], axis=1)
train_df.duplicated(['text', 'target']).sum()
train_df = train_df.drop_duplicates(['text', 'target'])
train_df.shape

def remove_mentions(data_df):
    mentions_removed = re.sub('@[A-Za-z0-9_]+', '', data_df)
    return mentions_removed

def remove_hashtags(data_df):
    hashtags_removed = re.sub('#[A-Za-z0-9_]+', '', data_df)
    return hashtags_removed

def remove_urls(data_df):
    hashtags_removed = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[\n]|[$-_@.&+\\]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', data_df)
    return hashtags_removed

def convert_contractions(data_df):
    contractions_converted = contractions.fix(data_df)
    return contractions_converted

train_df['text'] = train_df['text'].astype(str)
train_df['mentions_removed'] = train_df['text'].apply(remove_mentions).tolist()
train_df['hashtags_removed'] = train_df['mentions_removed'].apply(remove_hashtags).tolist()
train_df['url_removed'] = train_df['hashtags_removed'].apply(remove_urls).tolist()
train_df['lower_cased'] = train_df['url_removed'].apply(lambda x: x.lower())
train_df['contractions_converted'] = train_df['lower_cased'].apply(convert_contractions).tolist()
MODEL_TYPE = 'xlm-roberta-large'
xlm_tokenizer = XLMRobertaTokenizer.from_pretrained(MODEL_TYPE)
xlm_model = TFAutoModel.from_pretrained(MODEL_TYPE)
embedding_matrix = xlm_model.get_input_embeddings().weight
tokenized_feature_raw = xlm_tokenizer.batch_encode_plus(train_df['contractions_converted'], add_special_tokens=True)
token_sentence_length = [len(x) for x in tokenized_feature_raw['input_ids']]
avg_length = sum(token_sentence_length) / train_df.shape[0]
MAX_LEN = max(token_sentence_length)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
max_len = round(max(token_sentence_length))
max_len
tokenized_feature = xlm_tokenizer.batch_encode_plus(train_df['contractions_converted'], add_special_tokens=True, padding='max_length', truncation=True, max_length=max_len, return_attention_mask=True, return_tensors='tf')
padded_inputs = tokenized_feature['input_ids']
train_padded_docs = np.array(padded_inputs)
labels = np.array(train_df['target'])

def create_rnn_model(input_shape):
    model_xlm = Sequential()
    model_xlm.add(Embedding(250002, 1024, trainable=False, weights=[embedding_matrix.numpy()]))
    model_xlm.add(GRU(512, return_sequences=True))
    model_xlm.add(GRU(512, return_sequences=True))
    model_xlm.add(GlobalMaxPool1D())
    model_xlm.add(Dense(30, activation='relu'))
    model_xlm.add(Dropout(0.4))
    model_xlm.add(Dense(1, activation='sigmoid'))
    return model_xlm

kfold = KFold(n_splits=10, shuffle=True, random_state=42)
# X_train_xlm / y_train_xlm / X_test_xlm are not defined in this cell as extracted;
# the split below is an assumed reconstruction from the padded inputs built above.
X_train_xlm, X_test_xlm, y_train_xlm, y_test_xlm = train_test_split(train_padded_docs, labels, test_size=0.2, random_state=42)
model = create_rnn_model(X_train_xlm.shape[1:])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
batch_size = 64
epochs = 10
history_model_xlm = model.fit(X_train_xlm, y_train_xlm, validation_split=0.2, batch_size=batch_size, epochs=epochs)
y_pred = (model.predict(X_test_xlm) > 0.5).astype(int)
for train_index, test_index in kfold.split(train_padded_docs):
    X_train, X_test = (train_padded_docs[train_index], train_padded_docs[test_index])
    y_train, y_test = (labels[train_index], labels[test_index])
    model = create_rnn_model(X_train.shape[1:])
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(X_train, y_train, epochs=10, batch_size=64, verbose=0)
    loss, accuracy = model.evaluate(X_test, y_test, verbose=0)
# test_df is assumed to have received the same text preprocessing in an earlier cell
tokenized_feature_test_data = xlm_tokenizer.batch_encode_plus(test_df['contractions_converted'], add_special_tokens=True, padding='max_length', truncation=True, max_length=max_len, return_attention_mask=True, return_tensors='tf')
padded_inputs_test = tokenized_feature_test_data['input_ids']
predictions = (model.predict(padded_inputs_test) > 0.5).astype(int)
sample_submission = pd.read_csv('/kaggle/input/nlp-getting-started/sample_submission.csv')
sample_submission['target'] = predictions
sample_submission
code
129023624/cell_34
[ "text_plain_output_1.png" ]
from transformers import XLMRobertaTokenizer, TFAutoModel, TFAutoModelForSequenceClassification, TFXLMRobertaModel, XLMRobertaModel
import contractions
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import re
train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train_df.shape
nan_count = train_df.isna().sum()
nan_count
train_df.drop(['location'], axis=1)
train_df.duplicated(['text', 'target']).sum()
train_df = train_df.drop_duplicates(['text', 'target'])
train_df.shape

def remove_mentions(data_df):
    mentions_removed = re.sub('@[A-Za-z0-9_]+', '', data_df)
    return mentions_removed

def remove_hashtags(data_df):
    hashtags_removed = re.sub('#[A-Za-z0-9_]+', '', data_df)
    return hashtags_removed

def remove_urls(data_df):
    hashtags_removed = re.sub('http[s]?://(?:[a-zA-Z]|[0-9]|[\n]|[$-_@.&+\\]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', data_df)
    return hashtags_removed

def convert_contractions(data_df):
    contractions_converted = contractions.fix(data_df)
    return contractions_converted

train_df['text'] = train_df['text'].astype(str)
train_df['mentions_removed'] = train_df['text'].apply(remove_mentions).tolist()
train_df['hashtags_removed'] = train_df['mentions_removed'].apply(remove_hashtags).tolist()
train_df['url_removed'] = train_df['hashtags_removed'].apply(remove_urls).tolist()
train_df['lower_cased'] = train_df['url_removed'].apply(lambda x: x.lower())
train_df['contractions_converted'] = train_df['lower_cased'].apply(convert_contractions).tolist()
MODEL_TYPE = 'xlm-roberta-large'
xlm_tokenizer = XLMRobertaTokenizer.from_pretrained(MODEL_TYPE)
xlm_model = TFAutoModel.from_pretrained(MODEL_TYPE)
tokenized_feature_raw = xlm_tokenizer.batch_encode_plus(train_df['contractions_converted'], add_special_tokens=True)
token_sentence_length = [len(x) for x in tokenized_feature_raw['input_ids']]
print('max: ', max(token_sentence_length))
print('min: ', min(token_sentence_length))
avg_length = sum(token_sentence_length) / train_df.shape[0]
MAX_LEN = max(token_sentence_length)
plt.figure(figsize=(20, 8))
plt.hist(token_sentence_length, rwidth=0.9)
plt.xlabel('Sequence Length', fontsize=18)
plt.ylabel('No of Samples', fontsize=18)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
code
129023624/cell_44
[ "text_plain_output_1.png" ]
from keras.layers import Dropout, Activation, Flatten, Dense, Embedding, GlobalMaxPool1D
from keras.layers import LSTM, GRU, SimpleRNN
from keras.models import Sequential, Model, load_model
from transformers import XLMRobertaTokenizer, TFAutoModel, TFAutoModelForSequenceClassification, TFXLMRobertaModel, XLMRobertaModel

MODEL_TYPE = 'xlm-roberta-large'
xlm_tokenizer = XLMRobertaTokenizer.from_pretrained(MODEL_TYPE)
xlm_model = TFAutoModel.from_pretrained(MODEL_TYPE)
# reuse the pretrained XLM-R token embeddings as a frozen embedding matrix
embedding_matrix = xlm_model.get_input_embeddings().weight

def create_rnn_model(input_shape):
    # input_shape is accepted but unused; the Embedding layer infers the sequence length
    model_xlm = Sequential()
    model_xlm.add(Embedding(250002, 1024, trainable=False, weights=[embedding_matrix.numpy()]))
    model_xlm.add(GRU(512, return_sequences=True))
    model_xlm.add(GRU(512, return_sequences=True))
    model_xlm.add(GlobalMaxPool1D())
    model_xlm.add(Dense(30, activation='relu'))
    model_xlm.add(Dropout(0.4))
    model_xlm.add(Dense(1, activation='sigmoid'))
    return model_xlm

# note: X_train_xlm / y_train_xlm / X_test_xlm come from cells not captured in this
# record (tokenize and pad the tweets, then a train/test split, as in the full
# pipeline record below)
model = create_rnn_model(X_train_xlm.shape[1:])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
batch_size = 64
epochs = 10
history_model_xlm = model.fit(X_train_xlm, y_train_xlm, validation_split=0.2, batch_size=batch_size, epochs=epochs)
y_pred = (model.predict(X_test_xlm) > 0.5).astype(int)
code
129023624/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train_df.head()
code
129023624/cell_29
[ "image_output_1.png" ]
!pip install transformers
code
129023624/cell_2
[ "text_plain_output_1.png" ]
!pip install contractions
code
129023624/cell_54
[ "text_plain_output_1.png" ]
from keras.layers import Dropout, Activation, Flatten, Dense, Embedding, GlobalMaxPool1D
from keras.layers import LSTM, GRU, SimpleRNN
from keras.models import Sequential, Model, load_model
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict, KFold, GridSearchCV
from transformers import XLMRobertaTokenizer, TFAutoModel, TFAutoModelForSequenceClassification, TFXLMRobertaModel, XLMRobertaModel
import contractions
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re

train_df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train_df.shape
nan_count = train_df.isna().sum()
nan_count
train_df = train_df.drop(['location'], axis=1)  # assign the result; a bare drop() does not modify the frame
train_df.duplicated(['text', 'target']).sum()
train_df = train_df.drop_duplicates(['text', 'target'])
train_df.shape

def remove_mentions(data_df):
    # strip @user mentions
    mentions_removed = re.sub('@[A-Za-z0-9_]+', '', data_df)
    return mentions_removed

def remove_hashtags(data_df):
    # strip #hashtag tokens
    hashtags_removed = re.sub('#[A-Za-z0-9_]+', '', data_df)
    return hashtags_removed

def remove_urls(data_df):
    # strip http/https URLs
    hashtags_removed = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', data_df)
    return hashtags_removed

def convert_contractions(data_df):
    # expand contractions, e.g. "can't" -> "cannot"
    contractions_converted = contractions.fix(data_df)
    return contractions_converted

train_df['text'] = train_df['text'].astype(str)
train_df['mentions_removed'] = train_df['text'].apply(remove_mentions).tolist()
train_df['hashtags_removed'] = train_df['mentions_removed'].apply(remove_hashtags).tolist()
train_df['url_removed'] = train_df['hashtags_removed'].apply(remove_urls).tolist()
train_df['lower_cased'] = train_df['url_removed'].apply(lambda x: x.lower())
train_df['contractions_converted'] = train_df['lower_cased'].apply(convert_contractions).tolist()

MODEL_TYPE = 'xlm-roberta-large'
xlm_tokenizer = XLMRobertaTokenizer.from_pretrained(MODEL_TYPE)
xlm_model = TFAutoModel.from_pretrained(MODEL_TYPE)
# reuse the pretrained XLM-R token embeddings as a frozen embedding matrix
embedding_matrix = xlm_model.get_input_embeddings().weight
tokenized_feature_raw = xlm_tokenizer.batch_encode_plus(train_df['contractions_converted'], add_special_tokens=True)
token_sentence_length = [len(x) for x in tokenized_feature_raw['input_ids']]
avg_length = sum(token_sentence_length) / train_df.shape[0]
MAX_LEN = max(token_sentence_length)
import matplotlib.pyplot as plt
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
max_len = round(max(token_sentence_length))
max_len
tokenized_feature = xlm_tokenizer.batch_encode_plus(train_df['contractions_converted'], add_special_tokens=True, padding='max_length', truncation=True, max_length=max_len, return_attention_mask=True, return_tensors='tf')
padded_inputs = tokenized_feature['input_ids']
train_padded_docs = np.array(padded_inputs)
labels = np.array(train_df['target'])

def create_rnn_model(input_shape):
    # input_shape is accepted but unused; the Embedding layer infers the sequence length
    model_xlm = Sequential()
    model_xlm.add(Embedding(250002, 1024, trainable=False, weights=[embedding_matrix.numpy()]))
    model_xlm.add(GRU(512, return_sequences=True))
    model_xlm.add(GRU(512, return_sequences=True))
    model_xlm.add(GlobalMaxPool1D())
    model_xlm.add(Dense(30, activation='relu'))
    model_xlm.add(Dropout(0.4))
    model_xlm.add(Dense(1, activation='sigmoid'))
    return model_xlm

kfold = KFold(n_splits=10, shuffle=True, random_state=42)
# assumption: the hold-out split cell was not captured in this record; an 80/20
# split of the padded documents is assumed to define X_train_xlm etc.
X_train_xlm, X_test_xlm, y_train_xlm, y_test_xlm = train_test_split(train_padded_docs, labels, test_size=0.2, random_state=42)
model = create_rnn_model(X_train_xlm.shape[1:])
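# Hypothetical reconstruction (not in the captured record): the inference step
# below reads test_df['contractions_converted'], so the train-side cleaning
# pipeline is mirrored onto test_df here; column names follow the train side.
test_df['text'] = test_df['text'].astype(str)
test_df['mentions_removed'] = test_df['text'].apply(remove_mentions).tolist()
test_df['hashtags_removed'] = test_df['mentions_removed'].apply(remove_hashtags).tolist()
test_df['url_removed'] = test_df['hashtags_removed'].apply(remove_urls).tolist()
test_df['lower_cased'] = test_df['url_removed'].apply(lambda x: x.lower())
test_df['contractions_converted'] = test_df['lower_cased'].apply(convert_contractions).tolist()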
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
batch_size = 64
epochs = 10
history_model_xlm = model.fit(X_train_xlm, y_train_xlm, validation_split=0.2, batch_size=batch_size, epochs=epochs)
y_pred = (model.predict(X_test_xlm) > 0.5).astype(int)
for train_index, test_index in kfold.split(train_padded_docs):
    X_train, X_test = (train_padded_docs[train_index], train_padded_docs[test_index])
    y_train, y_test = (labels[train_index], labels[test_index])
    model = create_rnn_model(X_train.shape[1:])
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(X_train, y_train, epochs=10, batch_size=64, verbose=0)
    loss, accuracy = model.evaluate(X_test, y_test, verbose=0)
    print(f'fold loss: {loss:.4f}, accuracy: {accuracy:.4f}')  # report each fold's hold-out score
tokenized_feature_test_data = xlm_tokenizer.batch_encode_plus(test_df['contractions_converted'], add_special_tokens=True, padding='max_length', truncation=True, max_length=max_len, return_attention_mask=True, return_tensors='tf')
padded_inputs_test = tokenized_feature_test_data['input_ids']
predictions = (model.predict(padded_inputs_test) > 0.5).astype(int)
predictions
code