kernel_id int64 24.2k–23.3M | prompt stringlengths 8–1.85M | completion stringlengths 1–182k | comp_name stringlengths 5–57
---|---|---|---
10,286,233 | evaluate_model(best_model_gaussian_nb.best_estimator_, 'gaussian_nb')<choose_model_class> | X_train = train.iloc[:,1:]
y_train = train.iloc[:,0] | Digit Recognizer |
10,286,233 | hyperparameters = {
'alpha' : [0.5, 1.0, 1.5, 2.0, 5],
'fit_prior' : [True, False],
}
estimator = MultinomialNB()
best_model_multinominal_nb = get_best_model(estimator, hyperparameters)<find_best_params> | X_train = X_train/255.0
test_data = test_data/255.0
X_train = X_train.values.reshape(-1,28,28,1)
test_data = test_data.values.reshape(-1,28,28,1) | Digit Recognizer |
10,286,233 | evaluate_model(best_model_multinominal_nb.best_estimator_, 'multinominal_nb')<choose_model_class> | import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Lambda, Flatten, Dense
from keras.utils.np_utils import to_categorical | Digit Recognizer |
10,286,233 | hyperparameters = {
'alpha' : [0.5, 1.0, 1.5, 2.0, 5],
'fit_prior' : [True, False],
'norm' : [True, False]
}
estimator = ComplementNB()
best_model_complement_nb = get_best_model(estimator, hyperparameters)<find_best_params> | y_train = to_categorical(y_train, num_classes=10) | Digit Recognizer |
10,286,233 | evaluate_model(best_model_complement_nb.best_estimator_, 'complement_nb')<choose_model_class> | X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.1, random_state=42) | Digit Recognizer |
10,286,233 | hyperparameters = {
'alpha' : [0.5, 1.0, 1.5, 2.0, 5],
'fit_prior' : [True, False],
}
estimator = BernoulliNB()
best_model_bernoulli_nb = get_best_model(estimator, hyperparameters)<find_best_params> | model = Sequential()
model.add(Conv2D(32,(5,5),padding='Same', activation='relu', kernel_initializer='glorot_uniform', input_shape=(28, 28, 1)))
model.add(Conv2D(32,(5,5),padding='Same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64,(3,3), padding='Same', activation='relu'))
model.add(Conv2D(64,(3,3), padding='Same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax')) | Digit Recognizer |
10,286,233 | evaluate_model(best_model_bernoulli_nb.best_estimator_, 'bernoulli_nb')<choose_model_class> | opt = RMSprop(lr=0.001)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy']) | Digit Recognizer |
10,286,233 | hyperparameters = {
'n_neighbors' : list(range(1,5)),
'weights' : ['uniform', 'distance'],
'algorithm' : ['auto', 'ball_tree', 'kd_tree', 'brute'],
'leaf_size' : list(range(1,10)),
'p' : [1,2]
}
estimator = KNeighborsClassifier()
best_model_kneighbors = get_best_model(estimator, hyperparameters)<find_best_params> | from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
import itertools | Digit Recognizer |
10,286,233 | evaluate_model(best_model_kneighbors.best_estimator_, 'kneighbors')<choose_model_class> | datagen = ImageDataGenerator(rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1)
datagen.fit(X_train)
history = model.fit_generator(datagen.flow(X_train,y_train, batch_size=86),
epochs = 30, validation_data =(X_test,y_test),verbose = 2,
steps_per_epoch=X_train.shape[0]//86) | Digit Recognizer |
10,286,233 | hyperparameters = {
'penalty' : ['l1', 'l2', 'elasticnet'],
'eta0' : [0.0001, 0.001, 0.01, 0.1, 1.0],
'max_iter' : list(range(50, 200, 50))
}
estimator = Perceptron(random_state=1)
best_model_perceptron = get_best_model(estimator, hyperparameters)<find_best_params> | pred = model.predict(test_data)
pred | Digit Recognizer |
10,286,233 | evaluate_model(best_model_perceptron.best_estimator_, 'perceptron')<find_best_params> | prediction = np.argmax(pred, axis = 1)
prediction | Digit Recognizer |
10,286,233 | hyperparameters = {
'C' : [0.1, 1, 10, 100],
'gamma' : [0.0001, 0.001, 0.01, 0.1, 1],
'kernel' : ['rbf']
}
estimator = SVC(random_state=1)
best_model_svc = get_best_model(estimator, hyperparameters)<find_best_params> | submission = pd.read_csv('../input/digit-recognizer/sample_submission.csv')
submission['Label'] = prediction
submission.head(10) | Digit Recognizer |
10,286,233 | evaluate_model(best_model_svc.best_estimator_, 'svc')<choose_model_class> | submission.to_csv("submission.csv", index=False, header=True) | Digit Recognizer |
9,566,226 | hyperparameters = {
'loss' : ['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron'],
'penalty' : ['l1', 'l2', 'elasticnet'],
'alpha' : [0.01, 0.1, 1, 10]
}
estimator = SGDClassifier(random_state=1, early_stopping=True)
best_model_sgd = get_best_model(estimator, hyperparameters)<find_best_params> | %matplotlib inline
np.random.seed(2)
| Digit Recognizer |
9,566,226 | evaluate_model(best_model_sgd.best_estimator_, 'sgd')<choose_model_class> | from keras.layers import Dense
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import Activation | Digit Recognizer |
9,566,226 | hyperparameters = {
'loss' : ['deviance', 'exponential'],
'learning_rate' : [0.01, 0.1, 0.2, 0.3],
'n_estimators' : [50, 100, 200],
'subsample' : [0.1, 0.2, 0.5, 1.0],
'max_depth' : [2, 3, 4, 5]
}
estimator = GradientBoostingClassifier(random_state=1)
best_model_gbc = get_best_model(estimator, hyperparameters)<find_best_params> | train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
train_df.head() | Digit Recognizer |
9,566,226 | evaluate_model(best_model_gbc.best_estimator_, 'gbc')<choose_model_class> | test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
test_df.head() | Digit Recognizer |
9,566,226 | hyperparameters = {
'n_estimators' : [10, 50, 100, 500],
'learning_rate' : [0.001, 0.01, 0.1, 1.0]
}
estimator = AdaBoostClassifier(random_state=1)
best_model_adaboost = get_best_model(estimator, hyperparameters)<find_best_params> | columns = train_df.columns
X = train_df[columns[columns != 'label']]
y = train_df['label'] | Digit Recognizer |
9,566,226 | evaluate_model(best_model_adaboost.best_estimator_, 'adaboost')<choose_model_class> | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1, random_state = 4) | Digit Recognizer |
9,566,226 | hyperparameters = {
'criterion' : ['gini', 'entropy'],
'splitter' : ['best', 'random'],
'max_depth' : [None, 1, 2, 3, 4, 5],
'min_samples_split' : list(range(2,5)),
'min_samples_leaf' : list(range(1,5))
}
estimator = DecisionTreeClassifier(random_state=1)
best_model_decision_tree = get_best_model(estimator, hyperparameters)<find_best_params> | X_train = X_train.values.reshape(X_train.shape[0], 28, 28, 1).astype('float32')
X_test = X_test.values.reshape(X_test.shape[0], 28, 28, 1).astype('float32') | Digit Recognizer |
9,566,226 | evaluate_model(best_model_decision_tree.best_estimator_, 'decision_tree')<define_search_space> | X_train = X_train / 255
X_test = X_test / 255 | Digit Recognizer |
9,566,226 | hyperparameters = {
'n_estimators' : list(range(10, 50, 10)),
'max_features' : ['auto', 'sqrt', 'log2'],
'criterion' : ['gini', 'entropy'],
'max_depth' : [None, 1, 2, 3, 4, 5],
'min_samples_split' : list(range(2,5)),
'min_samples_leaf' : list(range(1,5))
}
estimator = RandomForestClassifier(random_state=1)
best_model_random_forest = get_best_model(estimator, hyperparameters)<find_best_params> | y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
num_classes = y_test.shape[1] | Digit Recognizer |
9,566,226 | evaluate_model(best_model_random_forest.best_estimator_, 'random_forest')<init_hyperparams> | def convolutional_model():
ADAMAX = optimizers.Adamax(lr = 0.002, beta_1 = 0.9, beta_2 = 0.999)
model = Sequential()
model.add(Conv2D(32,(4, 4), activation = 'relu', input_shape =(28, 28, 1)))
model.add(Conv2D(64,(3, 3), activation = 'relu'))
model.add(MaxPooling2D(pool_size =(2, 2), strides =(2, 2)))
model.add(BatchNormalization(axis = -1))
model.add(Dropout(0.2))
model.add(Conv2D(128,(3, 3), activation = 'relu'))
model.add(MaxPooling2D(pool_size =(2, 2), strides =(2, 2)))
model.add(BatchNormalization(axis = -1))
model.add(Dropout(0.2))
model.add(Conv2D(128,(2, 2), activation = 'relu'))
model.add(Conv2D(256,(2, 2), activation = 'relu'))
model.add(MaxPooling2D(pool_size =(2, 2), strides =(2, 2)))
model.add(BatchNormalization(axis = -1))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(256, activation = 'relu'))
model.add(BatchNormalization())
model.add(Dense(128, activation = 'relu'))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation = 'softmax'))
model.compile(optimizer = ADAMAX, loss = 'categorical_crossentropy', metrics = ['accuracy'])
return model | Digit Recognizer |
9,566,226 | hyperparameters = {
'learning_rate' : [0.3, 0.4, 0.5],
'gamma' : [0, 0.4, 0.8],
'max_depth' : [2, 3, 4],
'reg_lambda' : [0, 0.1, 1],
'reg_alpha' : [0.1, 1]
}
fit_params = {
'verbose' : False,
'early_stopping_rounds' : 40,
'eval_metric' : 'logloss',
'eval_set' : [(val_X, val_y)]
}
estimator = XGBClassifier(seed=1, tree_method='gpu_hist', predictor='gpu_predictor', use_label_encoder=False)
best_model_xgb = get_best_model(estimator, hyperparameters, fit_params)<find_best_params> | gen = ImageDataGenerator(rotation_range = 12, width_shift_range = 0.1, shear_range = 0.1,
height_shift_range = 0.1, zoom_range = 0.1, fill_mode = 'nearest', horizontal_flip = False,
vertical_flip = False, featurewise_center = False,
samplewise_center = False, featurewise_std_normalization = False,
samplewise_std_normalization = False)
test_gen = ImageDataGenerator()
train_generator = gen.flow(X_train, y_train, batch_size = 32)
test_generator = test_gen.flow(X_test, y_test, batch_size = 32) | Digit Recognizer |
9,566,226 | evaluate_model(best_model_xgb.best_estimator_, 'xgb')<choose_model_class> | reduce_lr = ReduceLROnPlateau(monitor = 'val_accuracy', patience = 3, verbose = 1, factor = 0.4, min_lr = 0.00002,
mode = 'auto', cooldown = 0) | Digit Recognizer |
9,566,226 | hyperparameters = {
'boosting_type' : ['gbdt', 'dart', 'goss'],
'num_leaves' : [4, 8, 16, 32],
'learning_rate' : [0.01, 0.1, 1],
'n_estimators' : [25, 50, 100],
'reg_alpha' : [0, 0.1, 1],
'reg_lambda' : [0, 0.1, 1],
}
estimator = LGBMClassifier(random_state=1, device='gpu')
best_model_lgbm = get_best_model(estimator, hyperparameters)<find_best_params> | model = convolutional_model()
epochs = 80
history = model.fit_generator(train_generator, steps_per_epoch = 40000//16, epochs = epochs,
validation_data = test_generator, validation_steps = 10000//8, verbose = 1,
callbacks=[reduce_lr])
scores = model.evaluate(X_test, y_test, verbose = 0)
print("Accuracy: {}\nError: {}".format(scores[1], 100 - scores[1]*100))
plot_loss_accuracy(history) | Digit Recognizer |
9,566,226 | evaluate_model(best_model_lgbm.best_estimator_, 'lgbm')<save_to_csv> | test_data = test_df.values.reshape(test_df.shape[0], 28, 28, 1).astype('float32')
test_data = test_data / 255
Y_pred = model.predict(test_data) | Digit Recognizer |
9,566,226 | for model in best_models:
predictions = best_models[model].predict(test_X)
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
output.to_csv('submission_' + model + '.csv', index=False)<set_options> | Y_pred = np.argmax(Y_pred, axis = 1)
Y_pred = pd.Series(Y_pred, name = "Label") | Digit Recognizer |
9,566,226 | !pip install -q -U keras-tuner
clear_output()
<define_variables> | submission_df = pd.DataFrame({
"ImageId": pd.Series(range(1, len(Y_pred)+1)),
"Label": pd.Series(Y_pred)}) | Digit Recognizer |
9,566,226 | <load_from_csv><EOS> | submission_df.to_csv('/kaggle/working/Submission.csv', index = False) | Digit Recognizer |
9,303,840 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<categorify> | import numpy as np
import pandas as pd
from time import time
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Flatten, Dense, Dropout
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import ImageDataGenerator | Digit Recognizer |
9,303,840 | drop_elements = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'SibSp','Parch']
train = train.drop(drop_elements, axis = 1)
test = test.drop(drop_elements, axis = 1)
def checkNull_fillData(df):
for col in df.columns:
if len(df.loc[df[col].isnull()]) != 0:
if df[col].dtype == "float64" or df[col].dtype == "int64":
df.loc[df[col].isnull(), col] = df[col].mean()
else:
df.loc[df[col].isnull(), col] = df[col].mode()[0]
checkNull_fillData(train)
checkNull_fillData(test)
str_list = []
num_list = []
for colname, colvalue in train.iteritems():
if type(colvalue[1]) == str:
str_list.append(colname)
else:
num_list.append(colname)
train = pd.get_dummies(train, columns=str_list)
test = pd.get_dummies(test, columns=str_list)<prepare_x_and_y> | data_train = pd.read_csv('../input/digit-recognizer/train.csv')
data_test = pd.read_csv('../input/digit-recognizer/test.csv') | Digit Recognizer |
9,303,840 | y = train[TARGET]
X = train.drop([TARGET],axis=1)
X_test = test
gc.collect()<split> | target_train = data_train.iloc[:, 0].values
image_train = data_train.iloc[:, 1:].values
image_test = data_test.values | Digit Recognizer |
9,303,840 | X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=TEST_SIZE, random_state=RANDOM_SEED)<train_on_grid> | X = image_train.reshape(-1,28,28,1).astype('float32')
X_test = image_test.reshape(-1,28,28,1).astype('float32')
y = to_categorical(target_train.reshape(-1,1), num_classes=10)
X /= 255
X_test /= 255
plt.imshow(image_train[0].reshape(28,28)) | Digit Recognizer |
9,303,840 | def build_random_forest(hp):
model = ensemble.RandomForestClassifier(
n_estimators=hp.Int('n_estimators', 10, 50, step=10),
max_depth=hp.Int('max_depth', 3, 10))
return model
tuner = kt.tuners.Sklearn(
oracle=kt.oracles.BayesianOptimization(
objective=kt.Objective('score', 'max'),
max_trials=10),
hypermodel= build_random_forest,
directory='.',
project_name='random_forest')
tuner.search(X_train.values, y_train.values.ravel())
best_hp = tuner.get_best_hyperparameters(num_trials=1)[0]<train_model> | datagen = ImageDataGenerator(rotation_range=10, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.1)
datagen.fit(X) | Digit Recognizer |
9,303,840 | model = tuner.hypermodel.build(best_hp)
model.fit(X_train, y_train.values)<predict_on_test> | num_models = 15
model = [None] * num_models
for i in range(num_models):
model[i] = Sequential()
model[i].add(Conv2D(32,(3, 3), activation='relu', input_shape=(28,28,1)))
model[i].add(BatchNormalization())
model[i].add(Conv2D(32,(3, 3), activation='relu'))
model[i].add(BatchNormalization())
model[i].add(MaxPooling2D((2, 2), strides=2))
model[i].add(BatchNormalization())
model[i].add(Conv2D(64,(3, 3), activation='relu'))
model[i].add(BatchNormalization())
model[i].add(Conv2D(64,(3, 3), activation='relu'))
model[i].add(BatchNormalization())
model[i].add(MaxPooling2D((2, 2), strides=2))
model[i].add(Flatten())
model[i].add(BatchNormalization())
model[i].add(Dense(256, activation='relu'))
model[i].add(BatchNormalization())
model[i].add(Dropout(0.5))
model[i].add(Dense(10, activation='softmax'))
model[i].compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
for i in range(num_models):
t = time()
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.1, random_state=i)
history = model[i].fit_generator(datagen.flow(X_train, y_train, batch_size=512), epochs=50, verbose=0,
validation_data=(X_val, y_val))
print(history.history['val_accuracy'][-1], time() - t) | Digit Recognizer |
9,303,840 | pred_val = model.predict(X_val)
print(accuracy_score(y_val, pred_val))<predict_on_test> | y_pred = np.zeros((X_test.shape[0], 10))
for i in range(num_models):
y_pred += model[i].predict(X_test, batch_size=512, verbose=1)
y_pred = np.argmax(y_pred, axis=1)
submissions = pd.DataFrame({'ImageId': list(range(1, len(y_pred)+1)),
'Label': y_pred})
submissions.to_csv('submission.csv', index=False, header=True) | Digit Recognizer |
9,303,840 | <save_to_csv><EOS> | y_pred_val = np.zeros((X_val.shape[0], 10))
for i in range(num_models):
y_pred_val += model[i].predict(X_val, batch_size=512, verbose=1)
y_pred_val = np.argmax(y_pred_val, axis=1)
y_true_val = np.argmax(y_val, axis=1)
print(classification_report(y_true_val, y_pred_val, digits=4))
| Digit Recognizer |
9,241,530 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<set_options> | %matplotlib inline
| Digit Recognizer |
9,241,530 | warnings.filterwarnings('ignore')<load_from_csv> | data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
print(data.shape) | Digit Recognizer |
9,241,530 | train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
sub = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')<sort_values> | test_data = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
print(test_data.shape) | Digit Recognizer |
9,241,530 | train.isnull().sum().sort_values(ascending = False)<sort_values> | sample_submission = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
print(sample_submission.shape) | Digit Recognizer |
9,241,530 | test.isnull().sum().sort_values(ascending = False)<feature_engineering> | encoder = OneHotEncoder(sparse=False, categories='auto')
yy = [[0],[1],[2],[3],[4],[5],[6],[7],[8],[9]]
encoder.fit(yy)
train_label = train_label.reshape(-1,1)
val_label = val_label.reshape(-1,1)
train_label = encoder.transform(train_label)
val_label = encoder.transform(val_label)
print('train_label shape: %s'%str(train_label.shape))
print('val_label shape: %s'%str(val_label.shape)) | Digit Recognizer |
9,241,530 | train.loc[train.Cabin.notnull(), 'Cabin'] = 1
train.loc[train.Cabin.isnull(), 'Cabin'] = 0<feature_engineering> | import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import SGD
from keras.layers.normalization import BatchNormalization
from keras.layers import LeakyReLU
| Digit Recognizer |
9,241,530 | test.loc[test.Cabin.notnull(), 'Cabin'] = 1
test.loc[test.Cabin.isnull(), 'Cabin'] = 0<count_missing_values> | model = Sequential()
model.add(Conv2D(32,(3, 3), activation='relu', input_shape=(28, 28, 1),padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(Conv2D(32,(3, 3), activation='relu',padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64,(3, 3), activation='relu',padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(Conv2D(64,(3, 3), activation='relu',padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=5, activation='relu',padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(Conv2D(128, kernel_size=5, activation='relu',padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(256, kernel_size=5, activation='relu',padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(Conv2D(256, kernel_size=5, activation='relu',padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu', name='my_dense'))
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.summary() | Digit Recognizer |
9,241,530 | train.Cabin.isnull().sum()<count_missing_values> | datagen = ImageDataGenerator(
rotation_range=15,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range = 15,
horizontal_flip = False,
zoom_range = 0.20) | Digit Recognizer |
9,241,530 | test.Cabin.isnull().sum()<define_variables> | model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])
datagen.fit(train_image)
history = model.fit_generator(datagen.flow(train_image,train_label, batch_size=32),
epochs = 75,
shuffle=True,
validation_data =(val_image,val_label),
verbose = 1,
steps_per_epoch=train_image.shape[0] // 32) | Digit Recognizer |
9,241,530 | def detect_outliers(df, n, features):
outlier_indices = []
for col in features:
Q1 = np.percentile(df[col], 25)
Q3 = np.percentile(df[col], 75)
IQR = Q3 - Q1
outlier_step = 1.5 * IQR
outlier_list_col = df[(df[col] < Q1 - outlier_step)|(df[col] > Q3 + outlier_step)].index
outlier_indices.extend(outlier_list_col)
outlier_indices = Counter(outlier_indices)
multiple_outliers = list(key for key, value in outlier_indices.items() if value > n)
return multiple_outliers<define_variables> | intermediate_output = intermediate_layer_model.predict(train_image)
intermediate_output = pd.DataFrame(data=intermediate_output) | Digit Recognizer |
9,241,530 | outliers_to_drop = detect_outliers(train, 2, ['Age', 'SibSp', 'Parch', 'Fare'])
print("The {} indices for the outliers to drop are: ".format(len(outliers_to_drop)), outliers_to_drop)<filter> | val_data = intermediate_output[40000:] | Digit Recognizer |
9,241,530 | train.loc[outliers_to_drop, :]<drop_column> | submission_cnn = model.predict(test_image) | Digit Recognizer |
9,241,530 | print("Before: {} rows".format(len(train)))
train = train.drop(outliers_to_drop, axis = 0).reset_index(drop = True)
print("After: {} rows".format(len(train)))<filter> | intermediate_test_output = intermediate_layer_model.predict(test_image)
intermediate_test_output = pd.DataFrame(data=intermediate_test_output) | Digit Recognizer |
9,241,530 | outliers_to_drop_to_test = detect_outliers(test, 2, ['Age', 'SibSp', 'Parch', 'Fare'])<filter> | xgbmodel = XGBClassifier(objective='multi:softprob',
num_class=10)
xgbmodel.fit(intermediate_output, train_label1)
xgbmodel.score(val_data, val_label1) | Digit Recognizer |
9,241,530 | test.loc[outliers_to_drop_to_test, :]<count_values> | submission_xgb = xgbmodel.predict(intermediate_test_output) | Digit Recognizer |
9,241,530 | train['SibSp'].value_counts(dropna = False)<sort_values> | submission_cnn = submission_cnn.astype(int)
submission_xgb = submission_xgb.astype(int)
| Digit Recognizer |
9,241,530 | train[['SibSp', 'Survived']].groupby('SibSp', as_index = False).mean().sort_values(by = 'Survived', ascending = False)<count_values> | submission_cnn
label = np.argmax(submission_cnn,1)
id_ = np.arange(0,label.shape[0])
label | Digit Recognizer |
9,241,530 | train['Parch'].value_counts(dropna = False)<sort_values> | final_sub = submission_xgb | Digit Recognizer |
9,241,530 | train[['Parch', 'Survived']].groupby('Parch', as_index = False).mean().sort_values(by = 'Survived', ascending = False)<count_missing_values> | save = pd.DataFrame({'ImageId': sample_submission.ImageId, 'label': final_sub})
print(save.head(10))
save.to_csv('submission.csv', index=False) | Digit Recognizer |
9,241,530 | train['Age'].isnull().sum()<count_missing_values> | %matplotlib inline
| Digit Recognizer |
9,241,530 | train['Fare'].isnull().sum()<count_values> | data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
print(data.shape) | Digit Recognizer |
9,241,530 | train['Pclass'].value_counts(dropna = False)<sort_values> | test_data = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
print(test_data.shape) | Digit Recognizer |
9,241,530 | train[['Pclass', 'Survived']].groupby('Pclass', as_index = False).mean().sort_values(by = 'Survived', ascending = False)<count_values> | sample_submission = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
print(sample_submission.shape) | Digit Recognizer |
9,241,530 | train['Sex'].value_counts(dropna = False)<sort_values> | encoder = OneHotEncoder(sparse=False, categories='auto')
yy = [[0],[1],[2],[3],[4],[5],[6],[7],[8],[9]]
encoder.fit(yy)
train_label = train_label.reshape(-1,1)
val_label = val_label.reshape(-1,1)
train_label = encoder.transform(train_label)
val_label = encoder.transform(val_label)
print('train_label shape: %s'%str(train_label.shape))
print('val_label shape: %s'%str(val_label.shape)) | Digit Recognizer |
9,241,530 | train[['Sex', 'Survived']].groupby('Sex', as_index = False).mean().sort_values(by = 'Survived', ascending = False)<count_values> | import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import SGD
from keras.layers.normalization import BatchNormalization
from keras.layers import LeakyReLU
| Digit Recognizer |
9,241,530 | train['Embarked'].value_counts(dropna = False)<sort_values> | model = Sequential()
model.add(Conv2D(32,(3, 3), activation='relu', input_shape=(28, 28, 1),padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(Conv2D(32,(3, 3), activation='relu',padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64,(3, 3), activation='relu',padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(Conv2D(64,(3, 3), activation='relu',padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, kernel_size=5, activation='relu',padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(Conv2D(128, kernel_size=5, activation='relu',padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(256, kernel_size=5, activation='relu',padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(Conv2D(256, kernel_size=5, activation='relu',padding='same'))
model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, gamma_initializer="uniform"))
model.add(LeakyReLU(alpha=0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu', name='my_dense'))
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.summary() | Digit Recognizer |
9,241,530 | train[['Embarked', 'Survived']].groupby(['Embarked'], as_index = False).mean().sort_values(by = 'Survived', ascending = False)<drop_column> | datagen = ImageDataGenerator(
rotation_range=15,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range = 15,
horizontal_flip = False,
zoom_range = 0.20) | Digit Recognizer |
9,241,530 | train = train.drop(['Ticket'], axis = 1)
test = test.drop(['Ticket'], axis = 1)<sort_values> | model.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])
datagen.fit(train_image)
history = model.fit_generator(datagen.flow(train_image,train_label, batch_size=32),
epochs = 75,
shuffle=True,
validation_data =(val_image,val_label),
verbose = 1,
steps_per_epoch=train_image.shape[0] // 32) | Digit Recognizer |
9,241,530 | train.isnull().sum().sort_values(ascending = False)<set_options> | intermediate_output = intermediate_layer_model.predict(train_image)
intermediate_output = pd.DataFrame(data=intermediate_output) | Digit Recognizer |
9,241,530 | mode = train['Embarked'].dropna().mode()[0]
mode<set_options> | val_data = intermediate_output[40000:] | Digit Recognizer |
9,241,530 | train['Embarked'].fillna(mode, inplace = True)<sort_values> | submission_cnn = model.predict(test_image) | Digit Recognizer |
9,241,530 | test.isnull().sum().sort_values(ascending = False)<correct_missing_values> | intermediate_test_output = intermediate_layer_model.predict(test_image)
intermediate_test_output = pd.DataFrame(data=intermediate_test_output) | Digit Recognizer |
9,241,530 | median = test['Fare'].dropna().median()
test['Fare'].fillna(median, inplace = True)<concatenate> | xgbmodel = XGBClassifier(objective='multi:softprob',
num_class= 10)
xgbmodel.fit(intermediate_output, train_label1)
xgbmodel.score(val_data, val_label1) | Digit Recognizer |
9,241,530 | combine = pd.concat([train, test], axis = 0).reset_index(drop = True)
combine.head()<sort_values> | submission_xgb = xgbmodel.predict(intermediate_test_output) | Digit Recognizer |
9,241,530 | combine.isnull().sum().sort_values(ascending = False)<categorify> | submission_cnn = submission_cnn.astype(int)
submission_xgb = submission_xgb.astype(int)
| Digit Recognizer |
9,241,530 | combine['Sex'] = combine['Sex'].map({'male': 0, 'female': 1})<filter> | submission_cnn
label = np.argmax(submission_cnn,1)
id_ = np.arange(0,label.shape[0])
label | Digit Recognizer |
9,241,530 | age_nan_indices = list(combine[combine['Age'].isnull()].index)
len(age_nan_indices)<define_variables> | final_sub = submission_xgb | Digit Recognizer |
9,241,530 | <count_missing_values><EOS> | save = pd.DataFrame({'ImageId': sample_submission.ImageId, 'label': final_sub})
print(save.head(10))
save.to_csv('submission.csv', index=False) | Digit Recognizer |
8,946,188 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<feature_engineering> | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf | Digit Recognizer |
8,946,188 | train['Fare'] = train['Fare'].map(lambda x: np.log(x) if x > 0 else 0)<feature_engineering> | train_data = pd.read_csv('../input/train.csv')
train_y = train_data["label"]
train_data.drop(["label"], axis=1, inplace=True)
train_X = train_data
train_X = train_X.values.reshape(-1, 28, 28, 1)
train_y = train_y.values
train_y = tf.keras.utils.to_categorical(train_y)
train_X = train_X/255.00
test_X = pd.read_csv('../input/test.csv')
test_X = test_X.values.reshape(-1,28,28,1)
test_X = test_X / 255.0 | Digit Recognizer |
8,946,188 | combine['Title'] = [name.split(',')[1].split('.')[0].strip() for name in combine['Name']]
combine[['Name', 'Title']].head()<count_values> | model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, kernel_size=(3,3), padding='same', activation='relu', input_shape=(28,28,1)),
tf.keras.layers.Conv2D(32, kernel_size=(3,3), padding='same', activation='relu'),
tf.keras.layers.Dropout(0.7),
tf.keras.layers.Conv2D(32, kernel_size=(3,3), padding='same', activation='relu'),
tf.keras.layers.Conv2D(32, kernel_size=(3,3), padding='same', activation='relu'),
tf.keras.layers.MaxPool2D(pool_size=(2,2), strides=2),
tf.keras.layers.Conv2D(32, kernel_size=(7,7), padding='same', activation='relu'),
tf.keras.layers.Conv2D(32, kernel_size=(7,7), padding='same', activation='relu'),
tf.keras.layers.MaxPool2D(pool_size=(2,2), strides=2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(1024, activation="relu"),
tf.keras.layers.Dense(256, activation="relu"),
tf.keras.layers.Dense(10, activation="softmax")
]) | Digit Recognizer |
8,946,188 | combine['Title'].value_counts()<count_unique_values> | datagen = tf.keras.preprocessing.image.ImageDataGenerator(rotation_range=43, zoom_range=0.24)
datagen.fit(train_X)
ln_fc = lambda x: 1e-3 * 0.99 ** x
lrng_rt = tf.keras.callbacks.LearningRateScheduler(ln_fc)
digitizer = model.fit_generator(datagen.flow(train_X, train_y, batch_size=1024), epochs=80, callbacks=[lrng_rt]) | Digit Recognizer |
8,946,188 | <categorify><EOS> | predictions = model.predict(test_X)
predictions[354]
pred = np.argmax(predictions, axis=1)
plt.imshow(test_X[354][:,:,0],cmap='gray')
plt.show()
pred[354]
pred_digits = pd.DataFrame({'ImageId': range(1, len(test_X)+1), 'Label': pred})
pred_digits.to_csv("pre_digits.csv", index=False) | Digit Recognizer |
8,655,915 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<sort_values> | %reload_ext autoreload
%autoreload 2
%matplotlib inline | Digit Recognizer |
8,655,915 | combine[['Title', 'Survived']].groupby(['Title'], as_index = False).mean().sort_values(by = 'Survived', ascending = False)<drop_column> | print(torch.cuda.is_available(), torch.backends.cudnn.enabled) | Digit Recognizer |
8,655,915 | combine = combine.drop('Name', axis = 1)
combine.head()<feature_engineering> | train_df = pd.read_csv(path/"train.csv")
train_df.head() | Digit Recognizer |
8,655,915 | combine['Family_Size'] = combine['SibSp'] + combine['Parch'] + 1
combine[['SibSp', 'Parch', 'Family_Size']].head(10)<sort_values> | test_df = pd.read_csv(path/"test.csv")
test_df.head() | Digit Recognizer |
8,655,915 | combine[['Family_Size', 'Survived']].groupby('Family_Size', as_index = False).mean().sort_values(by = 'Survived', ascending = False)<feature_engineering> | TRAIN = Path("../train")
TEST = Path("../test") | Digit Recognizer |
8,655,915 | combine['Alone'] = 0
combine.loc[combine['Family_Size'] == 1, 'Alone'] = 1<sort_values> | if os.path.isdir(TRAIN):
print('Train directory has been created')
else:
print('Train directory creation failed.')
if os.path.isdir(TEST):
print('Test directory has been created')
else:
print('Test directory creation failed.') | Digit Recognizer |
8,655,915 | combine[['Alone', 'Survived']].groupby('Alone', as_index = False).mean().sort_values(by = 'Survived', ascending = False)<drop_column> | from PIL import Image | Digit Recognizer |
8,655,915 | combine = combine.drop(['SibSp', 'Parch', 'Family_Size'], axis = 1)
combine.head()<feature_engineering> | def pix2img(pix_data, filepath):
img_mat = pix_data.reshape(28,28)
img_mat = img_mat.astype(np.uint8)
img_dat = Image.fromarray(img_mat)
img_dat.save(filepath) | Digit Recognizer |
8,655,915 | combine['Minor'] = combine['Age'] <= 17
combine['Major'] = 1 - combine['Minor']<sort_values> | tfms = get_transforms(do_flip = False) | Digit Recognizer |
8,655,915 | combine[['Major', 'Survived']].groupby('Major', as_index = False).mean().sort_values(by = 'Survived', ascending = False)<feature_engineering> | print('test : ', TEST)
print('train: ', TRAIN)
print(type(TEST)) | Digit Recognizer |
8,655,915 | combine.loc[(combine['Age'] <= 17), 'Major'] = 0
combine.loc[(combine['Age'] > 17), 'Major'] = 1<drop_column> | path = ("../train")
| Digit Recognizer |
8,655,915 | combine = combine.drop(['Age', 'Minor'], axis = 1)
combine.head()<categorify> | np.random.seed(42)
data = ImageDataBunch.from_folder(path, train=".", test=("../test"), valid_pct=0.2,
ds_tfms=get_transforms(), size=28, num_workers=0).normalize(imagenet_stats) | Digit Recognizer |
8,655,915 | combine = pd.get_dummies(combine, columns = ['Title'])
combine = pd.get_dummies(combine, columns = ['Embarked'], prefix = 'Em')
combine.head()<feature_engineering> | learn = cnn_learner(data, base_arch = models.resnet34, metrics = accuracy, model_dir="/tmp/models", callback_fns=ShowGraph) | Digit Recognizer |
8,655,915 | combine.loc[combine['Fare'] <= 1.56, 'Fare'] = 0
combine.loc[(combine['Fare'] > 1.56)&(combine['Fare'] <= 3.119), 'Fare'] = 1
combine.loc[(combine['Fare'] > 3.119)&(combine['Fare'] <= 4.679), 'Fare'] = 2
combine.loc[combine['Fare'] > 4.679, 'Fare'] = 3<data_type_conversions> | learn.fit_one_cycle(5, 1e-03) | Digit Recognizer |
8,655,915 | combine['Fare'] = combine['Fare'].astype('int')<drop_column> | learn.save('model1') | Digit Recognizer |
8,655,915 | combine = combine.drop('Fare_Band', axis = 1)<split> | learn.fit_one_cycle(5, 1e-04) | Digit Recognizer |
8,655,915 | train = combine[:len(train)]
test = combine[len(train):]<drop_column> | learn.fit_one_cycle(5, slice(1e-05, 1e-04)) | Digit Recognizer |
8,655,915 | train = train.drop('PassengerId', axis = 1)
train.head()<data_type_conversions> | learn.fit_one_cycle(5, slice(1e-06, 1e-05)) | Digit Recognizer |
8,655,915 | train['Survived'] = train['Survived'].astype('int')
train.head()<drop_column> | np.random.seed(42)
data = ImageDataBunch.from_folder(path, train=".", test=("../test"), valid_pct=0.2,
ds_tfms=get_transforms(), size=69, num_workers=0).normalize(imagenet_stats)
learn.data = data
data.train_ds[0][0].shape | Digit Recognizer |
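Note: the 9,241,530 rows stack a CNN with XGBoost through an `intermediate_layer_model` that is used but never defined in this excerpt. Below is a minimal sketch of how it is presumably constructed, assuming the Keras functional API and the CNN above, whose 256-unit dense layer is named 'my_dense'.

```python
# Hypothetical sketch: expose the 'my_dense' activations as 256-dim
# features for the XGBoost stage. Assumes `model` is the compiled CNN
# defined above, with its Dense(256) layer named 'my_dense'.
from keras.models import Model

intermediate_layer_model = Model(inputs=model.input,
                                 outputs=model.get_layer('my_dense').output)
features = intermediate_layer_model.predict(train_image)  # train_image assumed in scope
```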