kernel_id: int64 (24.2k to 23.3M)
prompt: string (lengths 8 to 1.85M)
completetion: string (lengths 1 to 182k)
comp_name: string (lengths 5 to 57)
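The four columns above follow the Hugging Face dataset-viewer convention (column name, dtype, min/max value or length). A minimal sketch of loading and inspecting rows like the ones below with the datasets library; the dataset id is a hypothetical placeholder, not something recoverable from this dump:

from datasets import load_dataset  # pip install datasets

ds = load_dataset("some-org/kaggle-code-cells", split="train")  # hypothetical id
row = ds[0]
print(row["kernel_id"], row["comp_name"])
print(row["prompt"][:200])        # code context; ends with a semantic tag such as <split>
print(row["completetion"][:200])  # target cell; column name spelled as in the schema above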
9,877,165
def fillna_using_knn_imputer(df): imputer = KNNImputer(n_neighbors=5, weights='uniform', metric='nan_euclidean') df['Sex'] = df['Sex'].factorize()[0] df['Embarked'] = df['Embarked'].factorize()[0] df['Kind'] = df['Kind'].factorize()[0] X_train = df[~df['Survived'].isnull()].drop('Survived', axis=1) X_test = df[df['Survived'].isnull()].drop('Survived', axis=1) X_train_trans = pd.DataFrame(imputer.fit_transform(X_train), columns=X_train.columns, index=X_train.index) X_test_trans = pd.DataFrame(imputer.transform(X_test), columns=X_test.columns, index=X_test.index) dff = pd.concat([X_train_trans, X_test_trans], axis=0) dff['Survived'] = df['Survived'] dff = dff.sort_index() return dff df = fillna_using_knn_imputer(df) df.info()<normalization>
x_train = np.concatenate((x_train, x_train_mnist)) y_train = np.concatenate((y_train, y_train_mnist))
Digit Recognizer
9,877,165
def encode_cols(df, cols=['Pclass', 'Embarked']): for col in cols: dumm = pd.get_dummies(data=df[col], prefix=col) df = pd.concat([df, dumm], axis=1) df = df.drop(col, axis=1) return df def scale_all_features(df): X = df.drop('Survived', axis=1) scaler = MinMaxScaler() X = pd.DataFrame(scaler.fit_transform(X), columns=X.columns, index=X.index) df = pd.concat([X, df['Survived']], axis=1) return df, scaler df = encode_cols(df) df, _ = scale_all_features(df) df.info()<split>
datagen = ImageDataGenerator(rotation_range=10, zoom_range=0.10, width_shift_range=0.1, height_shift_range=0.1)
Digit Recognizer
9,877,165
def split_to_train_test_X_y(df): X = df[~df['Survived'].isna()].drop('Survived', axis=1) y = df[~df['Survived'].isna()]['Survived'] X_test = df[df['Survived'].isna()].drop('Survived', axis=1) return X, y, X_test X, y, X_test = split_to_train_test_X_y(df)<init_hyperparams>
np.random.seed(0)
Digit Recognizer
9,877,165
class ML_Classifier_Switcher(object): def pick_model(self, model_name): self.param_grid = None method_name = str(model_name) method = getattr(self, method_name, lambda: "Invalid ML Model") return method() def SVM(self): self.param_grid = {'kernel': ['rbf', 'sigmoid', 'linear'], 'C': np.logspace(-2, 2, 10), 'gamma': np.logspace(-5, 1, 14)} return SVC() def XGR(self): self.param_grid = {'gamma': np.logspace(-5, 1, 7), 'subsample': [0.5, 0.75, 1.0], 'colsample_bytree': [0.5, 0.75, 1.0], 'eta': [0.1, 0.5, 0.9], 'max_depth': [3, 5]} return XGBClassifier(random_state=42, nthread=7, use_label_encoder=False, eval_metric='error', tree_method="hist") def RF(self): self.param_grid = {'n_estimators': [50, 100, 200, 300], 'max_features': ['auto'], 'criterion': ['entropy'], 'max_depth': [5, 10], 'min_samples_split': [5], 'min_samples_leaf': [1]} return RandomForestClassifier(random_state=42, n_jobs=-1) def LR(self): self.param_grid = {'solver': ['newton-cg', 'lbfgs', 'liblinear'], 'penalty': ['l2'], 'C': [100, 10, 1.0, 0.1, 0.01]} return LogisticRegression(n_jobs=None, random_state=42) def KNN(self): self.param_grid = {'n_neighbors': list(range(1, 5)), 'weights': ['uniform', 'distance'], 'algorithm': ['auto', 'ball_tree', 'kd_tree', 'brute'], 'leaf_size': list(range(1, 10)), 'p': [1, 2]} return KNeighborsClassifier()<compute_train_metric>
input_shape = (28, 28, 1) model = Sequential() model.add(Conv2D(filters=96, kernel_size=(3, 3), padding='same', activation='relu', input_shape=input_shape)) model.add(BatchNormalization()) model.add(Conv2D(filters=96, kernel_size=(3, 3), padding='same', activation='relu')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(filters=192, kernel_size=(3, 3), activation='relu', padding='same')) model.add(BatchNormalization()) model.add(Conv2D(filters=256, kernel_size=(3, 3), activation='relu', padding='same')) model.add(BatchNormalization()) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(filters=256, kernel_size=(3, 3), activation='relu', padding='same')) model.add(BatchNormalization()) model.add(Conv2D(filters=256, kernel_size=(3, 3), activation='relu', padding='same')) model.add(Flatten()) model.add(Dense(256, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.25)) model.add(Dense(units=10, activation='softmax')) model.compile(optimizer=OPTIMIZER, loss="categorical_crossentropy", metrics=["accuracy"])
Digit Recognizer
9,877,165
def cross_validate(X, y, model_name='RF', cv=5, scoring='accuracy', gridsearch=True): switcher = ML_Classifier_Switcher() model = switcher.pick_model(model_name) if gridsearch: gr = GridSearchCV(model, switcher.param_grid, scoring=scoring, cv=cv, n_jobs=-1) gr.fit(X, y) model = gr.best_estimator_ cvr = cross_val_score(model, X, y, cv=cv, scoring=scoring) cvr = pd.DataFrame(cvr).T cvr.index = [model_name] cvr.columns = ['fold_{}'.format(x+1) for x in cvr.columns] cvr['mean'] = cvr.mean(axis=1) cvr['std'] = cvr.std(axis=1) return cvr, model<compute_train_metric>
learning_rate = ReduceLROnPlateau(monitor='val_acc', patience=2, verbose=1, factor=0.5, min_lr=1.0e-5)
Digit Recognizer
9,877,165
def cross_validate_models(X, y, cv=5, scoring='accuracy'): models = ['LR', 'KNN', 'SVM', 'RF'] cvrs = [] bests = [] for model_name in models: print('Optimizing {} model'.format(model_name)) cvr, best = cross_validate(X, y, model_name=model_name, cv=cv, scoring=scoring, gridsearch=True) cvrs.append(cvr) bests.append(best) cvr = pd.concat(cvrs, axis=0) return cvr, bests<compute_train_metric>
EarlyStopping_cb = keras.callbacks.EarlyStopping(monitor='val_acc', patience=5, verbose=1, mode='auto')
Digit Recognizer
9,877,165
cvr, bests = cross_validate_models(X, y) print(cvr)<save_to_csv>
csv_logger = CSVLogger('model.log')
Digit Recognizer
9,877,165
def predict_on_test(X_test, model, submission_df, target='Survived'): preds_test = model.predict(X_test) submission_df.loc[:, target] = [int(x) for x in preds_test] submission_df.to_csv('submission.csv', index=False) return predict_on_test(X_test, bests[-1], submission_df)<load_from_csv>
x_train2, x_val2, y_train2, y_val2 = train_test_split(x_train, y_train, test_size=VALIDATION_SPLIT, random_state=0)
Digit Recognizer
9,877,165
train_data = pd.read_csv('/kaggle/input/titanic/train.csv') test_data = pd.read_csv('/kaggle/input/titanic/test.csv')<filter>
def fit_model(): model_file = './log_dir/model_file' + '.hdf5' checkpointer = ModelCheckpoint(filepath=model_file, verbose=1, monitor='val_acc', save_best_only=True) history = model.fit_generator(datagen.flow(x_train2, y_train2, batch_size=BATCH_SIZE), validation_data=(x_val2, y_val2), epochs=NB_EPOCH, verbose=2, steps_per_epoch=x_train2.shape[0] // BATCH_SIZE, callbacks=[learning_rate, checkpointer, csv_logger, EarlyStopping_cb]) return history
Digit Recognizer
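fit_model above relies on Model.fit_generator, which is deprecated in TF 2.x; Model.fit accepts generators directly. A sketch of the equivalent call, assuming the same objects from the surrounding cells are in scope:

history = model.fit(datagen.flow(x_train2, y_train2, batch_size=BATCH_SIZE),
                    validation_data=(x_val2, y_val2),
                    epochs=NB_EPOCH,
                    verbose=2,
                    steps_per_epoch=x_train2.shape[0] // BATCH_SIZE,
                    callbacks=[learning_rate, checkpointer, csv_logger, EarlyStopping_cb])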
9,877,165
women = train_data.loc[train_data.Sex == 'female']['Survived'] print('Women survived',sum(women)/len(women)) men = train_data.loc[train_data.Sex == 'male']['Survived'] print('Men survived',sum(men)/len(men))<categorify>
history = fit_model()
Digit Recognizer
9,877,165
train_data['female'] = pd.get_dummies(train_data['Sex'])['female'] test_data['female'] = pd.get_dummies(test_data['Sex'])['female']<feature_engineering>
results = np.zeros((x_test.shape[0], NB_CLASSES)) model_file = './log_dir/model_file' + '.hdf5' model.load_weights(model_file) results = results + model.predict(x_test) results = np.argmax(results, axis=1) results = pd.Series(results, name="Label") submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1) submission.to_csv("submission.csv", index=False)
Digit Recognizer
9,877,165
<filter><EOS>
!wc -l ./submission.csv
Digit Recognizer
9,556,788
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<filter>
for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
Digit Recognizer
9,556,788
pclass1 = train_data.loc[train_data.Pclass == 1]['Survived'] print('Class1',sum(pclass1)/len(pclass1)) pclass2 = train_data.loc[train_data.Pclass == 2]['Survived'] print('Class2',sum(pclass2)/len(pclass2)) pclass3 = train_data.loc[train_data.Pclass == 3]['Survived'] print('Class3',sum(pclass3)/len(pclass3))<count_missing_values>
dataset = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') dataset
Digit Recognizer
9,556,788
sum(test_data.Pclass.isna())<categorify>
np.any(dataset.isnull().sum())
Digit Recognizer
9,556,788
train_data['class1'] = pd.get_dummies(train_data.Pclass)[1] test_data['class1'] = pd.get_dummies(test_data.Pclass)[1] train_data['class2'] = pd.get_dummies(train_data.Pclass)[2] test_data['class2'] = pd.get_dummies(test_data.Pclass)[2]<count_missing_values>
scaler = StandardScaler() X = scaler.fit_transform(X) X
Digit Recognizer
9,556,788
sum(test_data.SibSp.isna())<feature_engineering>
test_data = pd.read_csv('/kaggle/input/digit-recognizer/test.csv') X_test = test_data.values X_test.shape
Digit Recognizer
9,556,788
sibs = train_data.loc[train_data.SibSp <= 1]['Survived'] print(sum(sibs)/len(sibs)) train_data['many_sibs'] = (train_data.SibSp > 1)*1 test_data['many_sibs'] = (test_data.SibSp > 1)*1<filter>
X_test_scaled = scaler.fit_transform(X_test) X_test_scaled
Digit Recognizer
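The completion above calls scaler.fit_transform on the test images, which re-estimates the scaling statistics on test data instead of reusing the ones fitted on the training set two rows earlier. The conventional pattern is a plain transform; a sketch of that correction (an editorial note, not the kernel's code):

X_test_scaled = scaler.transform(X_test)  # reuse the train-set fit; do not re-fit on test data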
9,556,788
young = train_data.loc[train_data.Age <= 15]['Survived'] print(sum(young)/len(young)) old = train_data.loc[train_data.Age >=40]['Survived'] print(sum(old)/len(old))<feature_engineering>
datagen = ImageDataGenerator(rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1)
Digit Recognizer
9,556,788
bins = [0.42, 15, 30, 50, 80] train_data['bin_age'] = pd.cut(x=train_data.Age, bins=bins) test_data['bin_age'] = pd.cut(x=test_data.Age, bins=bins)<categorify>
model_list = []
Digit Recognizer
9,556,788
train_data['young'] = pd.get_dummies(train_data.bin_age).iloc[:,0] test_data['young'] = pd.get_dummies(test_data.bin_age).iloc[:,0] train_data['senior'] = pd.get_dummies(train_data.bin_age).iloc[:,3] test_data['senior'] = pd.get_dummies(test_data.bin_age).iloc[:,3]<import_modules>
reduce_lr = ReduceLROnPlateau(monitor='val_accuracy', patience=3, verbose=1, factor=0.2, min_lr=1e-6)
Digit Recognizer
9,556,788
from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score, confusion_matrix<split>
for i in range(7): model = keras.models.Sequential([ keras.layers.Conv2D(32, kernel_size=3, activation='relu', padding='same', input_shape=[28, 28, 1]), keras.layers.BatchNormalization(), keras.layers.Conv2D(64, kernel_size=3, activation='relu', padding='same'), keras.layers.MaxPool2D(), keras.layers.Conv2D(32, kernel_size=3, activation='relu', padding='same'), keras.layers.BatchNormalization(), keras.layers.Conv2D(64, kernel_size=3, activation='relu', padding='same'), keras.layers.MaxPool2D(), keras.layers.Flatten(), keras.layers.Dropout(0.25), keras.layers.Dense(256, activation='relu'), keras.layers.Dropout(0.5), keras.layers.Dense(10, activation='softmax')]) model.compile(loss='sparse_categorical_crossentropy', optimizer='nadam', metrics=['accuracy']) model_list.append(model)
Digit Recognizer
9,556,788
X = train_data[features] y = train_data.Survived X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)<choose_model_class>
history = [0] * 7 for i in range(7): X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.08) history[i] = model_list[i].fit_generator(datagen.flow(X, y, batch_size=64), epochs=20, validation_data=(X_valid, y_valid), callbacks=[reduce_lr]) print("CNN : {} Maximum Train Accuracy : {} Maximum Validation Accuracy : {}".format(i+1, max(history[i].history['accuracy']), max(history[i].history['val_accuracy'])))
Digit Recognizer
9,556,788
log_reg = LogisticRegression() log_reg.fit(X_train, y_train) y_pred = log_reg.predict(X_test) y_pred<compute_test_metric>
X_test_scaled = X_test_scaled.reshape((28000, 28, 28, 1)) X_test_scaled.shape
Digit Recognizer
9,556,788
accuracy_score(y_pred, y_test)<compute_test_metric>
for i in range(7): ensemble_cnn_pred = ensemble_cnn_pred + model_list[i].predict(X_test_scaled)
Digit Recognizer
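The loop above accumulates into ensemble_cnn_pred without a visible initialization, so an uncaptured earlier cell presumably zero-initialized it. A minimal sketch of that missing step (an assumption about the original notebook, matching the 10-class softmax models built above):

import numpy as np

ensemble_cnn_pred = np.zeros((X_test_scaled.shape[0], 10))  # one row of class probabilities per test image
for i in range(7):
    ensemble_cnn_pred = ensemble_cnn_pred + model_list[i].predict(X_test_scaled)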
9,556,788
confusion_matrix(y_pred, y_test)<train_model>
np.sum(ensemble_cnn_pred[0])
Digit Recognizer
9,556,788
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1) model.fit(X_train, y_train) y_pred = model.predict(X_test)<compute_test_metric>
ensemble_cnn_pred = np.argmax(ensemble_cnn_pred, axis = 1) ensemble_cnn_pred[0]
Digit Recognizer
9,556,788
accuracy_score(y_pred, y_test)<compute_test_metric>
pred_df_ensemble_cnn = pd.DataFrame(columns = ['ImageId', 'Label']) pred_df_ensemble_cnn['ImageId'] = np.arange(1, 28001) pred_df_ensemble_cnn['Label'] = ensemble_cnn_pred pred_df_ensemble_cnn
Digit Recognizer
9,556,788
<feature_engineering><EOS>
pred_df_ensemble_cnn.to_csv('ens_cnn_with_aug_sub.csv', index=False)
Digit Recognizer
9,468,425
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<define_search_space>
import numpy as np import pandas as pd import matplotlib.pyplot as plt import tensorflow as tf
Digit Recognizer
9,468,425
param_grid = { 'n_estimators': [200, 500], 'max_features': ['auto', 'sqrt', 'log2'], 'max_depth' : [4,5,6,7,8], 'criterion' :['gini', 'entropy'] } <train_model>
df=pd.read_csv(".. /input/digit-recognizer/train.csv") df.head()
Digit Recognizer
9,468,425
rfc1 = RandomForestClassifier(random_state=42, max_features='log2', n_estimators=200, max_depth=6, criterion='entropy') rfc1.fit(X_train, y_train)<save_to_csv>
df_test=pd.read_csv(".. /input/digit-recognizer/test.csv" ).values df_test[0:5]
Digit Recognizer
9,468,425
predictions = rfc1.predict(test_data[features]) output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions}) output.to_csv('submission.csv', index=False) print("Your submission was successfully saved!")<import_modules>
X = df.drop(["label"], axis="columns").values Y = df["label"].values X = X/255.0
Digit Recognizer
9,468,425
from typing import Tuple, Dict, Any import pandas as pd import numpy as np import matplotlib.pyplot as plt import tensorflow as tf import tensorflow_addons as tfa from tensorflow.keras.metrics import Mean import tensorflow.keras.backend as K import tensorflow.keras.layers<feature_engineering>
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
Digit Recognizer
9,468,425
def extract_titanic_features(source: pd.DataFrame): features = {name: np.array(value, np.float32) for name, value in source[["Age", "Fare", "SibSp", "Parch", "Pclass"]].items()} for age in [4, 8, 12, 16, 22, 26, 30, 35, 40, 45, 50, 60]: features['at_least_' + str(age)] = np.array(source['Age'] >= age, np.float32) features['name_len'] = np.array(source["Name"].str.len(), np.float32) for title in ['Miss.', 'Master', 'Mr.', 'Mrs.']: features['is_' + title] = np.array(source['Name'].str.contains(title), np.float32) features["Sex"] = np.array(source["Sex"], np.str) features["Embarked"] = np.array(source["Embarked"], np.str) return features train_labels = titanic_train_df["Survived"] train_features = extract_titanic_features(titanic_train_df) test_features = extract_titanic_features(titanic_test_df) inputs = {k: tf.keras.Input(shape=(1,), name=k, dtype=v.dtype) for k, v in train_features.items()} preprocessed_inputs = [] numeric_inputs = {k: v for k, v in inputs.items() if v.dtype == tf.float32} def nan_mask(layer: tf.keras.layers.Layer) -> Tuple[tf.keras.layers.Layer, tf.keras.layers.Layer]: is_nan = tf.math.is_nan(layer) finite_input = tf.where(is_nan, tf.zeros_like(layer), layer) norm_input = tf.keras.layers.BatchNormalization()(finite_input) mask = tf.cast(tf.math.logical_not(is_nan), tf.float32) return mask, norm_input finite_numeric_inputs = {} numeric_masks = {} for k, v in numeric_inputs.items(): mask_v, finite_v = nan_mask(v) preprocessed_inputs.append(mask_v) preprocessed_inputs.append(finite_v) finite_numeric_inputs[k] = finite_v numeric_masks[k] = mask_v accomp_count = tf.math.add(finite_numeric_inputs["Parch"], finite_numeric_inputs["SibSp"]) preprocessed_inputs.append(accomp_count) gender_vocab = np.unique(train_features["Sex"]) gender_lookup = tf.keras.layers.experimental.preprocessing.StringLookup(vocabulary=gender_vocab) gender_onehot = tf.keras.layers.experimental.preprocessing.CategoryEncoding(max_tokens=gender_lookup.vocab_size()) gender_input = gender_onehot(gender_lookup(inputs["Sex"])) preprocessed_inputs.append(gender_input) embarked_vocab = np.array(("C", "Q", "S")) embarked_lookup = tf.keras.layers.experimental.preprocessing.StringLookup(vocabulary=embarked_vocab) embarked_onehot = tf.keras.layers.experimental.preprocessing.CategoryEncoding(max_tokens=embarked_lookup.vocab_size()) embarked_input = embarked_onehot(embarked_lookup(inputs["Embarked"])) preprocessed_inputs.append(embarked_input) x = tf.keras.layers.Concatenate()(preprocessed_inputs) preprocess_model = tf.keras.Model(inputs, x) tf.keras.utils.plot_model(preprocess_model, to_file="preprocess_model.png", rankdir="LR", show_shapes=True, show_layer_names=True)<choose_model_class>
X = X.reshape(len(X), 28, 28, -1) x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
Digit Recognizer
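np.str in the feature extractor above is a deprecated alias for the builtin str and was removed in NumPy 1.24; on newer NumPy the same two lines would read:

features["Sex"] = np.array(source["Sex"], str)            # np.str was only ever an alias for builtin str
features["Embarked"] = np.array(source["Embarked"], str)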
9,468,425
head = preprocess_model(inputs) columns = [] activation = "relu" for i in range(4): c = head c = tf.keras.layers.Dense(64, activation=activation)(c) c = tf.keras.layers.Dropout(0.5)(c) c = tf.keras.layers.Dense(64, activation=activation)(c) c = tf.keras.layers.BatchNormalization()(c) c = tf.keras.layers.Dropout(0.2)(c) c = tf.keras.layers.Dense(32, activation=activation)(c) columns.append(c) if len(columns) == 1: tail = columns[0] else: tail = tf.keras.layers.Add()(columns) tail = tf.keras.layers.Dense(16, activation="relu")(tail) tail = tf.keras.layers.Dense(1, activation="sigmoid")(tail) model = tf.keras.Model(inputs, tail) model.compile(loss=tf.losses.BinaryCrossentropy(), optimizer=tfa.optimizers.Lookahead(tf.keras.optimizers.Nadam()), metrics=["accuracy"]) if False: model.summary() tf.keras.utils.plot_model(model, to_file="final_model.png", rankdir="LR", show_shapes=True) callbacks = [tf.keras.callbacks.ProgbarLogger(), tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)] fit_params = dict(batch_size=128, validation_freq=1, validation_split=0.1, epochs=300, verbose=0, callbacks=[callbacks]) submit = True if submit: fit_params.update(dict(validation_freq=0, validation_split=0, epochs=125, verbose=0)) history = model.fit(train_features, train_labels.to_numpy(), **fit_params) print("accuracy:", history.history['accuracy'][-1]) if not submit: print("val_accuracy:", history.history['val_accuracy'][-1]) plt.plot(history.history['accuracy'][10:]) plt.plot(history.history['loss'][10:]) if not submit: plt.plot(history.history['val_accuracy'][10:]) plt.plot(history.history['val_loss'][10:]) plt.xlabel('epoch') plt.show()<save_to_csv>
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(x_train)
Digit Recognizer
9,468,425
test_predictions = model(test_features) test_ids = titanic_test_df["PassengerId"].to_numpy() test_hard_predictions = np.floor(np.array(test_predictions) + 0.5).astype("int").reshape(-1) pred_df = pd.Series(data=test_hard_predictions, name="Survived", index=test_ids) pred_df.to_csv("submission.csv", index_label="PassengerId", header=["Survived"])<load_from_csv>
reduce_lr = tf.keras.callbacks.LearningRateScheduler(lambda x: 1e-3 * 0.9 ** x)
Digit Recognizer
9,468,425
train_data = pd.read_csv('../input/titanic/train.csv') test_data = pd.read_csv('../input/titanic/test.csv') train_data.info()<sort_values>
model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(16, (3,3), padding='same', activation='relu', input_shape=(28,28,1)), tf.keras.layers.BatchNormalization(), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Dropout(0.2), tf.keras.layers.Conv2D(32, (3,3), padding='same', activation='relu'), tf.keras.layers.BatchNormalization(), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Dropout(0.2), tf.keras.layers.Conv2D(64, (3,3), padding='same', activation='relu'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dropout(0.2), tf.keras.layers.Conv2D(64, (3,3), padding='same', activation='relu'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Flatten(), tf.keras.layers.Dense(1024, activation="relu"), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(512, activation="relu"), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dense(10, activation="softmax") ]) optimizer = tf.keras.optimizers.Adam(lr=0.001) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.summary()
Digit Recognizer
9,468,425
missing_train_total = train_data.isnull().sum().sort_values(ascending=False) missing_train_percentage = (train_data.isnull().sum()/train_data.count()).sort_values(ascending=False) missing_train_data = pd.concat([missing_train_total, missing_train_percentage], axis=1, keys=['Total', 'Percent']) missing_train_data.head(10)<sort_values>
batch_size = 64 epochs = 20 history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size), epochs=epochs, validation_data=(x_test, y_test), verbose=2, steps_per_epoch=x_train.shape[0] // batch_size, callbacks=[reduce_lr])
Digit Recognizer
9,468,425
missing_test_total = test_data.isnull().sum().sort_values(ascending=False) missing_test_percentage = (test_data.isnull().sum()/test_data.count()).sort_values(ascending=False) missing_test_data = pd.concat([missing_test_total, missing_test_percentage], axis=1, keys=['Total', 'Percent']) missing_test_data.head(10)<groupby>
model.evaluate(x_test, y_test)
Digit Recognizer
9,468,425
train_data.groupby('Pclass')['Age'].mean()<groupby>
y_pred = model.predict(x_test) y_preds = [np.argmax(pred) for pred in y_pred] accuracy_score(y_preds, y_test)
Digit Recognizer
9,468,425
train_data.groupby(['Pclass','Sex'])['Sex'].count()<groupby>
df_test = df_test.reshape(len(df_test), 28, 28, -1) a_pred = model.predict((df_test)/255.0) a_preds = [np.argmax(pred) for pred in a_pred] samp = pd.read_csv("../input/digit-recognizer/sample_submission.csv") samp["Label"] = a_preds samp.head()
Digit Recognizer
9,468,425
train_data.groupby('Sex')['Sex'].count()<groupby>
samp.to_csv("outputCNN.csv", index=False)
Digit Recognizer
9,468,425
mean = train_data.groupby(['Pclass','Sex'])['Age'].mean() median = train_data.groupby(['Pclass','Sex'])['Age'].median() age_sex_Pclass = pd.concat([mean, median], axis=1, keys=['Age mean', 'Age median']) age_sex_Pclass.head(6) <count_values>
import numpy as np import pandas as pd import matplotlib.pyplot as plt import tensorflow as tf
Digit Recognizer
9,468,425
train_data.Embarked.value_counts()<prepare_x_and_y>
df=pd.read_csv(".. /input/digit-recognizer/train.csv") df.head()
Digit Recognizer
9,468,425
features = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked'] X = train_data[features] y = train_data.Survived Pclass_Sex_Age_median = X.groupby(['Pclass','Sex']).Age.transform('median') X.Age.fillna(Pclass_Sex_Age_median, inplace=True) Pclass_Fare_median = X.groupby('Pclass').Fare.transform('median') X.Fare.fillna(Pclass_Fare_median, inplace=True) missing_test_total = X.isnull().sum().sort_values(ascending=False) X.Embarked.fillna('S', inplace=True) X['Embarked'].replace({'S': 0, 'Q': 1, 'C': 2}, inplace=True) X['Sex'].replace({'female': 0, 'male': 1}, inplace=True) X.info() print(X.Sex)<split>
df_test=pd.read_csv(".. /input/digit-recognizer/test.csv" ).values df_test[0:5]
Digit Recognizer
9,468,425
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size = 0.25, random_state=1) print(type(X_train)) <predict_on_test>
X=df.drop(["label"],axis="columns" ).values Y=df["label"].values X=X/255.0
Digit Recognizer
9,468,425
clf_mae = [] for md in range(8): clf = RandomForestClassifier(max_depth=(md+1), random_state=1) clf.fit(X_train, y_train) predictions = clf.predict(X_val) mae = (1. - mean_absolute_error(predictions, y_val)) * 100. clf_mae.append(mae) print(clf_mae)<predict_on_test>
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
Digit Recognizer
9,468,425
clf = RandomForestClassifier(max_depth=4, random_state=1) clf.fit(X_train, y_train) predictions = clf.predict(X_val) mae = (1. - mean_absolute_error(predictions, y_val)) * 100. print(mae)<compute_train_metric>
X = X.reshape(len(X), 28, 28, -1) x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)
Digit Recognizer
9,468,425
logreg = LogisticRegression(solver='liblinear') logreg.fit(X_train, y_train) predictions = logreg.predict(X_val) mae = mean_absolute_error(predictions, y_val) print((1-mae)*100, "%")<sort_values>
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(x_train)
Digit Recognizer
9,468,425
missing_test_total = test_data.isnull().sum().sort_values(ascending=False) missing_test_total.head()<categorify>
reduce_lr = tf.keras.callbacks.LearningRateScheduler(lambda x: 1e-3 * 0.9 ** x)
Digit Recognizer
9,468,425
Pclass_Sex_Age_median = test_data.groupby(['Pclass','Sex']).Age.transform('median') test_data.Age.fillna(Pclass_Sex_Age_median, inplace=True) Pclass_Fare_median = test_data.groupby('Pclass').Fare.transform('median') test_data.Fare.fillna(Pclass_Fare_median, inplace=True) test_data['Embarked'].replace({'S': 0, 'Q': 1, 'C': 2}, inplace=True) test_data['Sex'].replace({'female': 0, 'male': 1}, inplace=True)<sort_values>
model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(16, (3,3), padding='same', activation='relu', input_shape=(28,28,1)), tf.keras.layers.BatchNormalization(), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Dropout(0.2), tf.keras.layers.Conv2D(32, (3,3), padding='same', activation='relu'), tf.keras.layers.BatchNormalization(), tf.keras.layers.MaxPooling2D(), tf.keras.layers.Dropout(0.2), tf.keras.layers.Conv2D(64, (3,3), padding='same', activation='relu'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dropout(0.2), tf.keras.layers.Conv2D(64, (3,3), padding='same', activation='relu'), tf.keras.layers.BatchNormalization(), tf.keras.layers.Flatten(), tf.keras.layers.Dense(1024, activation="relu"), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(512, activation="relu"), tf.keras.layers.BatchNormalization(), tf.keras.layers.Dense(10, activation="softmax") ]) optimizer = tf.keras.optimizers.Adam(lr=0.001) model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) model.summary()
Digit Recognizer
9,468,425
missing_test_total = test_data.isnull().sum().sort_values(ascending=False) missing_test_total.head()<save_to_csv>
batch_size = 64 epochs = 20 history = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size), epochs=epochs, validation_data=(x_test, y_test), verbose=2, steps_per_epoch=x_train.shape[0] // batch_size, callbacks=[reduce_lr])
Digit Recognizer
9,468,425
<data_type_conversions>
model.evaluate(x_test, y_test)
Digit Recognizer
9,468,425
name_list_train = train_data.Name.to_list() print(name_list_train)<count_values>
y_pred = model.predict(x_test) y_preds = [np.argmax(pred) for pred in y_pred] accuracy_score(y_preds, y_test)
Digit Recognizer
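title_retriever, called in the next rows, is never defined anywhere in this dump; a plausible minimal sketch of what it does, guessed from how its output is used (not the author's code):

import re

def title_retriever(name_list):
    # Titanic names look like "Braund, Mr. Owen Harris"; the honorific sits between the comma and the period.
    titles = []
    for name in name_list:
        match = re.search(r',\s*([^.]+)\.', name)
        titles.append(match.group(1).strip() + '.' if match else 'Unknown')
    return titles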
9,468,425
list_of_titles = title_retriever(name_list_train) print(len(list_of_titles)) train_data['Title'] = list_of_titles train_data.Title.value_counts()<categorify>
df_test = df_test.reshape(len(df_test), 28, 28, -1) a_pred = model.predict((df_test)/255.0) a_preds = [np.argmax(pred) for pred in a_pred] samp = pd.read_csv("../input/digit-recognizer/sample_submission.csv") samp["Label"] = a_preds samp.head()
Digit Recognizer
9,468,425
Title_Age_median = train_data.groupby(['Title']).Age.transform('median') train_data.Age.fillna(Title_Age_median, inplace=True)<data_type_conversions>
samp.to_csv("outputCNN.csv", index=False)
Digit Recognizer
9,039,809
name_list_test = test_data.Name.to_list() print(name_list_test)<count_values>
import matplotlib.pyplot as plt import seaborn as sns import keras from keras.models import Sequential from keras.layers import Dense, Conv2D , MaxPool2D , Flatten , Dropout from keras.preprocessing.image import ImageDataGenerator from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report,confusion_matrix
Digit Recognizer
9,039,809
list_of_titles_test = title_retriever(name_list_test) print(len(list_of_titles_test)) test_data['Title'] = list_of_titles_test test_data.Title.value_counts()<categorify>
train_df = pd.read_csv("../input/digit-recognizer/train.csv") test_df = pd.read_csv("../input/digit-recognizer/test.csv") submission = pd.read_csv("../input/digit-recognizer/sample_submission.csv")
Digit Recognizer
9,039,809
Title_Age_median_test = test_data.groupby(['Title']).Age.transform('median') test_data.Age.fillna(Title_Age_median_test, inplace=True)<categorify>
y_train = train_df['label'] y = train_df['label'] del train_df['label']
Digit Recognizer
9,039,809
X = train_data[features] y = train_data.Survived Pclass_Fare_median = X.groupby('Pclass').Fare.transform('median') X.Fare.fillna(Pclass_Fare_median, inplace=True) X.Embarked.fillna('S', inplace=True) X['Embarked'].replace({'S': 0, 'Q': 1, 'C': 2}, inplace=True) X['Sex'].replace({'female': 0, 'male': 1}, inplace=True) X.info() print(X.Sex)<save_to_csv>
label_binarizer = LabelBinarizer() y_train = label_binarizer.fit_transform(y_train) print(train_df.shape,y_train.shape)
Digit Recognizer
9,039,809
X_test = test_data[features] clf.fit(X, y) predictions = clf.predict(X_test) output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions}) output.to_csv('submission.csv', index=False) print("Your submission was successfully saved!")<load_from_csv>
x_train = x_train / 255 x_test = x_test / 255
Digit Recognizer
9,039,809
train_data = pd.read_csv('/kaggle/input/titanic/train.csv') test_data = pd.read_csv('/kaggle/input/titanic/test.csv')<filter>
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(x_train)
Digit Recognizer
9,039,809
women = train_data.loc[train_data.Sex == 'female']['Survived'] print('Women survived',sum(women)/len(women)) men = train_data.loc[train_data.Sex == 'male']['Survived'] print('Men survived',sum(men)/len(men))<categorify>
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.1) print(x_train.shape, x_val.shape, y_train.shape, y_val.shape)
Digit Recognizer
9,039,809
train_data['female'] = pd.get_dummies(train_data['Sex'])['female'] test_data['female'] = pd.get_dummies(test_data['Sex'])['female']<feature_engineering>
model = Sequential() model.add(Conv2D(150, (3,3), strides=1, padding='same', activation='relu', input_shape=(28,28,1))) model.add(MaxPool2D((2,2), strides=2, padding='same')) model.add(Conv2D(100, (3,3), strides=1, padding='same', activation='relu')) model.add(Dropout(0.1)) model.add(MaxPool2D((2,2), strides=2, padding='same')) model.add(Conv2D(75, (5,5), strides=1, padding='same', activation='relu')) model.add(Dropout(0.2)) model.add(MaxPool2D((2,2), strides=2, padding='same')) model.add(Conv2D(50, (4,4), strides=1, padding='same', activation='relu')) model.add(Dropout(0.15)) model.add(MaxPool2D((2,2), strides=2, padding='same')) model.add(Conv2D(25, (4,4), strides=1, padding='same', activation='relu')) model.add(MaxPool2D((2,2), strides=2, padding='same')) model.add(Flatten()) model.add(Dense(units=512, activation='relu')) model.add(Dropout(0.25)) model.add(Dense(units=10, activation='softmax')) model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) model.summary()
Digit Recognizer
9,039,809
sum(train_data['Age'].isnull()) train_data['Age'] = train_data['Age'].fillna(train_data['Age'].mean()) test_data['Age'] = test_data['Age'].fillna(test_data['Age'].mean())<filter>
history = model.fit(datagen.flow(x_train, y_train, batch_size=256), epochs=30, validation_data=(x_val, y_val))
Digit Recognizer
9,039,809
<filter><EOS>
y_pred=model.predict_classes(x_test) submission.head() submission=pd.DataFrame({'ImageId': submission.ImageId,'Label':y_pred}) submission.to_csv('/kaggle/working/submission.csv',index=False) check=pd.read_csv('/kaggle/working/submission.csv') check
Digit Recognizer
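Sequential.predict_classes, used in the completion above, was removed in TensorFlow 2.6; the drop-in replacement is an argmax over the predicted probabilities (a sketch assuming the same model and x_test):

import numpy as np

y_pred = np.argmax(model.predict(x_test), axis=1)  # replacement for the removed predict_classes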
8,885,855
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<count_missing_values>
np.random.seed(2) %matplotlib inline for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
Digit Recognizer
8,885,855
sum(test_data.Pclass.isna())<categorify>
train = pd.read_csv('../input/digit-recognizer/train.csv') test = pd.read_csv('../input/digit-recognizer/test.csv') print(train.shape) print(test.shape)
Digit Recognizer
8,885,855
train_data['class1'] = pd.get_dummies(train_data.Pclass)[1] test_data['class1'] = pd.get_dummies(test_data.Pclass)[1] train_data['class2'] = pd.get_dummies(train_data.Pclass)[2] test_data['class2'] = pd.get_dummies(test_data.Pclass)[2]<count_missing_values>
Y_train = train["label"] X_train = train.drop(labels = ["label"],axis = 1) del train print(X_train.shape) print(Y_train.shape) X_train = X_train / 255.0 test = test / 255.0 X_train = X_train.values.reshape(-1,28,28,1) test = test.values.reshape(-1,28,28,1) print(X_train.shape) print(test.shape )
Digit Recognizer
8,885,855
sum(test_data.SibSp.isna())<feature_engineering>
Y_train = to_categorical(Y_train, num_classes=10)
Digit Recognizer
8,885,855
sibs = train_data.loc[train_data.SibSp <= 1]['Survived'] print(sum(sibs)/len(sibs)) train_data['many_sibs'] = (train_data.SibSp > 1)*1 test_data['many_sibs'] = (test_data.SibSp > 1)*1<filter>
class LeNet: @staticmethod def build(input_shape, classes): model = Sequential() model.add(Conv2D(20, kernel_size=5, padding="same", input_shape=input_shape)) model.add(Activation("relu")) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2))) model.add(Conv2D(50, kernel_size=5, padding="same")) model.add(Activation("relu")) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2))) model.add(Flatten()) model.add(Dense(500)) model.add(Dropout(0.3)) model.add(Activation("relu")) model.add(Dense(classes)) model.add(Activation("softmax")) return model
Digit Recognizer
8,885,855
young = train_data.loc[train_data.Age <= 15]['Survived'] print(sum(young)/len(young)) old = train_data.loc[train_data.Age >=40]['Survived'] print(sum(old)/len(old))<feature_engineering>
NB_EPOCH = 20 BATCH_SIZE = 128 VERBOSE = 1 OPTIMIZER = Adam() VALIDATION_SPLIT = 0.2 IMG_ROWS, IMG_COLS = 28, 28 NB_CLASSES = 10 INPUT_SHAPE = (IMG_ROWS, IMG_COLS, 1)
Digit Recognizer
8,885,855
bins = [0.42, 15, 30, 50, 80] train_data['bin_age'] = pd.cut(x=train_data.Age, bins=bins) test_data['bin_age'] = pd.cut(x=test_data.Age, bins=bins)<categorify>
model = LeNet.build(input_shape=INPUT_SHAPE, classes=NB_CLASSES) model.compile(loss="categorical_crossentropy", optimizer=OPTIMIZER, metrics=["accuracy"])
Digit Recognizer
8,885,855
train_data['young'] = pd.get_dummies(train_data.bin_age).iloc[:,0] test_data['young'] = pd.get_dummies(test_data.bin_age).iloc[:,0] train_data['senior'] = pd.get_dummies(train_data.bin_age).iloc[:,3] test_data['senior'] = pd.get_dummies(test_data.bin_age).iloc[:,3]<import_modules>
history = model.fit(X_train, Y_train, batch_size=BATCH_SIZE, epochs=NB_EPOCH, verbose=VERBOSE, validation_split=VALIDATION_SPLIT)
Digit Recognizer
8,885,855
from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score, confusion_matrix<split>
lr_reduction = ReduceLROnPlateau(monitor='val_accuracy', patience=3, verbose=1, factor=0.5, min_lr=0.00001) X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.1, random_state=2) datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train)
Digit Recognizer
8,885,855
X = train_data[features] y = train_data.Survived X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)<choose_model_class>
NB_EPOCH = 30 BATCH_SIZE = 128 VERBOSE = 1 model.compile(loss="categorical_crossentropy", optimizer=OPTIMIZER, metrics=["accuracy"]) history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=BATCH_SIZE), epochs=NB_EPOCH, validation_data=(X_val, Y_val), verbose=VERBOSE, steps_per_epoch=X_train.shape[0] // BATCH_SIZE, callbacks=[lr_reduction])
Digit Recognizer
8,885,855
log_reg = LogisticRegression() log_reg.fit(X_train, y_train) y_pred = log_reg.predict(X_test) y_pred<compute_test_metric>
results = model.predict(test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label")
Digit Recognizer
8,885,855
<compute_test_metric><EOS>
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("LeNet_mnist_datagen.csv",index=False)
Digit Recognizer
10,762,457
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<train_model>
%matplotlib inline np.random.seed(2) dense_regularizer = L1L2(l2=0.0001) os.chdir('/kaggle/working') sns.set(style='white', context='notebook', palette='deep')
Digit Recognizer
10,762,457
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1) model.fit(X_train, y_train) y_pred = model.predict(X_test)<compute_test_metric>
train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
Digit Recognizer
10,762,457
accuracy_score(y_pred, y_test)<compute_test_metric>
Y = train["label"] X = train.drop(labels = ["label"],axis = 1 )
Digit Recognizer
10,762,457
confusion_matrix(y_pred, y_test)<feature_engineering>
X = X / 255.0 test = test / 255.0
Digit Recognizer
10,762,457
test_data.Fare = test_data.Fare.fillna(test_data.Fare.mean())<define_search_space>
Y = to_categorical(Y, num_classes=10)
Digit Recognizer
10,762,457
param_grid = { 'n_estimators': [200, 500], 'max_features': ['auto', 'sqrt', 'log2'], 'max_depth' : [4,5,6,7,8], 'criterion' :['gini', 'entropy'] } <train_model>
random_seed = 2 X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.1, random_state=random_seed)
Digit Recognizer
10,762,457
rfc1 = RandomForestClassifier(random_state=42, max_features='log2', n_estimators=200, max_depth=6, criterion='entropy') rfc1.fit(X_train, y_train)<save_to_csv>
model1 = Sequential() model1.add(Conv2D(filters=32, kernel_size=(5,5), padding='Same', activation='relu', input_shape=(28,28,1))) model1.add(Conv2D(filters=32, kernel_size=(5,5), padding='Same', activation='relu')) model1.add(MaxPool2D(pool_size=(2,2))) model1.add(Dropout(0.25)) model1.add(Conv2D(filters=64, kernel_size=(3,3), padding='Same', activation='relu')) model1.add(Conv2D(filters=64, kernel_size=(3,3), padding='Same', activation='relu')) model1.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model1.add(Dropout(0.25)) model1.add(Flatten()) model1.add(Dense(256, activation="relu")) model1.add(Dropout(0.5)) model1.add(Dense(10, activation="softmax")) model1.compile(optimizer='adam', loss="categorical_crossentropy", metrics=["accuracy"])
Digit Recognizer
10,762,457
predictions = rfc1.predict(test_data[features]) output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions}) output.to_csv('my_submission3.csv', index=False) print("Your submission was successfully saved!")<import_modules>
epochs = 5 batch_size = 86 model1.fit(X_train, Y_train, batch_size=batch_size, epochs=epochs, validation_data=(X_val, Y_val), verbose=2)
Digit Recognizer
10,762,457
import numpy as np import pandas as pd from sklearn.model_selection import RepeatedStratifiedKFold from sklearn.model_selection import train_test_split from sklearn.model_selection import GridSearchCV from sklearn.metrics import accuracy_score from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import GaussianNB from sklearn.naive_bayes import MultinomialNB from sklearn.naive_bayes import ComplementNB from sklearn.naive_bayes import BernoulliNB from sklearn.naive_bayes import CategoricalNB from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import Perceptron from sklearn.svm import SVC from sklearn.linear_model import SGDClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.ensemble import RandomForestClassifier from xgboost import XGBClassifier from lightgbm import LGBMClassifier<load_from_csv>
results = model1.predict(test) results = np.argmax(results, axis=1) results = pd.Series(results, name="Label") submission1 = pd.concat([pd.Series(range(1,28001), name="ImageId"), results], axis=1) submission1.to_csv("sub1.csv", index=False)
Digit Recognizer
10,762,457
train_data = pd.read_csv('../input/titanic/train.csv') test_data = pd.read_csv('../input/titanic/test.csv')<count_missing_values>
model1.compile(optimizer='RMSprop', loss="categorical_crossentropy", metrics=["accuracy"])
Digit Recognizer
10,762,457
missing_values = train_data.isna().any() print('Columns which have missing values: {0}'.format(missing_values[missing_values == True].index.tolist()))<count_missing_values>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss', patience=3, verbose=1, factor=0.5, min_lr=0.00001)
Digit Recognizer
10,762,457
print("Percentage of missing values in `Age` column: {0:.2f}".format(100.*(train_data.Age.isna().sum() /len(train_data)))) print("Percentage of missing values in `Cabin` column: {0:.2f}".format(100.*(train_data.Cabin.isna().sum() /len(train_data)))) print("Percentage of missing values in `Embarked` column: {0:.2f}".format(100.*(train_data.Embarked.isna().sum() /len(train_data))))<count_duplicates>
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range=0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train)
Digit Recognizer
10,762,457
duplicates = train_data.duplicated().sum() print('Duplicates in train data: {0}'.format(duplicates))<count_unique_values>
epochs = 5 batch_size = 86 history = model1.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size), epochs=epochs, validation_data=(X_val, Y_val), verbose=2, steps_per_epoch=X_train.shape[0] // batch_size, callbacks=[learning_rate_reduction])
Digit Recognizer
10,762,457
categorical = train_data.nunique().sort_values(ascending=True) print('Categorical variables in train data: {0}'.format(categorical))<drop_column>
results = model1.predict(test) results = np.argmax(results, axis=1) results = pd.Series(results, name="Label") submission2 = pd.concat([pd.Series(range(1,28001), name="ImageId"), results], axis=1) submission2.to_csv("sub2.csv", index=False)
Digit Recognizer
10,762,457
def clean_data(data): data.drop(['Cabin'], axis=1, inplace=True) data.drop(['Name', 'Ticket', 'Fare', 'Embarked'], axis=1, inplace=True) return data train_data = clean_data(train_data) test_data = clean_data(test_data)<data_type_conversions>
def Model_2(x=None): model = Sequential() model.add(Conv2D(64,(5, 5), input_shape=(28,28,1), padding='same', kernel_regularizer=dense_regularizer,kernel_initializer="he_normal")) model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,(5, 5), padding='same', kernel_regularizer=dense_regularizer,kernel_initializer="he_normal")) model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(64,(5, 5), padding='same', kernel_regularizer=dense_regularizer,kernel_initializer="he_normal")) model.add(BatchNormalization()) model.add(Activation('elu')) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2))) model.add(Dropout(0.3)) model.add(Conv2D(128,(3, 3), padding='same', kernel_regularizer=dense_regularizer,kernel_initializer="he_normal")) model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(128,(3, 3), padding='same', kernel_regularizer=dense_regularizer,kernel_initializer="he_normal")) model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(128,(3, 3), padding='same', kernel_regularizer=dense_regularizer,kernel_initializer="he_normal")) model.add(BatchNormalization()) model.add(Activation('elu')) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2))) model.add(Dropout(0.2)) model.add(Conv2D(256,(3, 3), padding='same', kernel_regularizer=dense_regularizer,kernel_initializer="he_normal")) model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(256,(3, 3), padding='same', kernel_regularizer=dense_regularizer,kernel_initializer="he_normal")) model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(256,(3, 3), padding='same', kernel_regularizer=dense_regularizer,kernel_initializer="he_normal")) model.add(BatchNormalization()) model.add(Activation('elu')) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2))) model.add(Dropout(0.2)) model.add(Conv2D(512,(3, 3), padding='same', kernel_regularizer=dense_regularizer,kernel_initializer="he_normal")) model.add(BatchNormalization()) model.add(Activation('elu')) model.add(Conv2D(512,(3, 3), padding='same', kernel_regularizer=dense_regularizer,kernel_initializer="he_normal")) model.add(BatchNormalization()) model.add(Activation('elu')) model.add(MaxPooling2D(pool_size=(3, 3), strides=(3, 3))) model.add(Flatten()) model.add(Dense(10, activation='softmax', kernel_regularizer=dense_regularizer,kernel_initializer="he_normal")) return model model2 = Model_2()
Digit Recognizer
10,762,457
train_data['Sex'].replace({'male':0, 'female':1}, inplace=True) test_data['Sex'].replace({'male':0, 'female':1}, inplace=True) all_data = pd.concat([train_data, test_data]) average = all_data.Age.median() print("Average Age: {0}".format(average)) train_data.fillna(value={'Age': average}, inplace=True) test_data.fillna(value={'Age': average}, inplace=True)<prepare_x_and_y>
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0) model2.compile(optimizer=optimizer, loss="sparse_categorical_crossentropy", metrics=["accuracy"])
Digit Recognizer
10,762,457
X = train_data.drop(['Survived', 'PassengerId'], axis=1) y = train_data['Survived'] test_X = test_data.drop(['PassengerId'], axis=1)<train_on_grid>
Y = train["label"] Y = np.array(Y )
Digit Recognizer
10,762,457
best_models = {} train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1) def print_best_parameters(hyperparameters, best_parameters): value = "Best parameters: " for key in hyperparameters: value += str(key) + ": " + str(best_parameters[key]) + ", " if hyperparameters: print(value[:-2]) def get_best_model(estimator, hyperparameters, fit_params={}): cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1) grid_search = GridSearchCV(estimator=estimator, param_grid=hyperparameters, n_jobs=-1, cv=cv, scoring="accuracy") best_model = grid_search.fit(train_X, train_y, **fit_params) best_parameters = best_model.best_estimator_.get_params() print_best_parameters(hyperparameters, best_parameters) return best_model def evaluate_model(model, name): print("Accuracy score:", accuracy_score(train_y, model.predict(train_X))) best_models[name] = model<choose_model_class>
k_fold = StratifiedKFold(n_splits=10, random_state=12, shuffle=True) batch_size = 86 epochs = 10 for k_train_index, k_test_index in k_fold.split(X, Y): model2.fit_generator(datagen.flow(X[k_train_index,:], Y[k_train_index], batch_size=batch_size), epochs=epochs, validation_data=(X[k_test_index,:], Y[k_test_index]), verbose=2, steps_per_epoch=X[k_train_index,:].shape[0] // batch_size, callbacks=[learning_rate_reduction])
Digit Recognizer
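The k-fold loop above keeps fitting the same model2 instance, so weights carry over from fold to fold and the per-fold validation scores are not independent estimates. A common fix is to rebuild and recompile the model inside the loop; a sketch assuming Model_2 and the optimizer settings from the surrounding cells:

for k_train_index, k_test_index in k_fold.split(X, Y):
    model2 = Model_2()  # fresh weights for every fold
    model2.compile(optimizer=RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0),
                   loss="sparse_categorical_crossentropy", metrics=["accuracy"])
    model2.fit(datagen.flow(X[k_train_index], Y[k_train_index], batch_size=batch_size),
               epochs=epochs, validation_data=(X[k_test_index], Y[k_test_index]),
               steps_per_epoch=len(k_train_index) // batch_size,
               callbacks=[learning_rate_reduction], verbose=2)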
10,762,457
hyperparameters = {'solver': ['newton-cg', 'lbfgs', 'liblinear'], 'penalty': ['l2'], 'C': [100, 10, 1.0, 0.1, 0.01]} estimator = LogisticRegression(random_state=1) best_model_logistic = get_best_model(estimator, hyperparameters)<find_best_params>
val_loss, val_acc = model2.evaluate(X, Y) val_acc
Digit Recognizer
10,762,457
evaluate_model(best_model_logistic.best_estimator_, 'logistic')<choose_model_class>
results = model2.predict(test) results = np.argmax(results, axis=1) results = pd.Series(results, name="Label") submission3 = pd.concat([pd.Series(range(1,28001), name="ImageId"), results], axis=1) submission3.to_csv("sub3.csv", index=False)
Digit Recognizer
10,286,233
hyperparameters = {'var_smoothing': np.logspace(0, -9, num=100)} estimator = GaussianNB() best_model_gaussian_nb = get_best_model(estimator, hyperparameters)<find_best_params>
train = pd.read_csv('../input/digit-recognizer/train.csv') test_data = pd.read_csv('../input/digit-recognizer/test.csv') print(train.info())
Digit Recognizer