kernel_id (int64, 24.2k–23.3M) | prompt (string, 8–1.85M chars) | completion (string, 1–182k chars) | comp_name (string, 5–57 chars)
---|---|---|---
13,441,242 | <create_dataframe><EOS> | submissions = pd.read_csv("../input/digit-recognizer/sample_submission.csv")
submissions['Label'] = results
submissions.to_csv('submission.csv', index=False) | Digit Recognizer |
13,088,098 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<find_best_params> | def set_seeds(offset):
    np.random.seed(100 + offset)
    return 100 + offset
tempseed = set_seeds(0)
image_width = 28
image_height = 28
batch_size = 256
no_epochs = 40
no_classes = 10
ensemble_size = 1
validation_split = 0.2
verbosity = 1
base_filters = 256
| Digit Recognizer |
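A note on the `set_seeds` helper above: it only seeds NumPy. For fuller reproducibility in a Keras run one would usually seed Python's `random` and TensorFlow as well; a minimal sketch under that assumption (TF 2.x, helper name is illustrative):

```python
import random
import numpy as np
import tensorflow as tf

def set_all_seeds(offset):
    # hypothetical extension of set_seeds: seed every RNG the pipeline touches
    seed = 100 + offset
    random.seed(seed)         # Python-level shuffling
    np.random.seed(seed)      # NumPy ops (splits, augmentation)
    tf.random.set_seed(seed)  # TF weight init and dropout masks
    return seed
```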
13,088,098 | for fold in range(1, n_folds + 1):
    print(f"Running on fold {fold}")
    model = create_model()
    model.load_state_dict(torch.load(f"fold{fold}Best.pt"))
    model.to(device)
    model.eval()
    for i, image in enumerate(tqdm.tqdm(TestSet)):
        image = torch.unsqueeze(image, 0).to(device)
        outputs = model(image)
        model_outputs[i] += outputs.detach()[0]<load_from_csv> | train = pd.read_csv("../input/digit-recognizer/train.csv")
finaltest = pd.read_csv("../input/digit-recognizer/test.csv")
train['label'].value_counts(normalize=False) | Digit Recognizer |
13,088,098 | submission_df=pd.read_csv("../input/digit-recognizer/sample_submission.csv")<feature_engineering> | datagen = ImageDataGenerator(
rotation_range=35,
width_shift_range=0.3,
height_shift_range=0.2,
shear_range=0.3,
zoom_range=0.2,
validation_split=validation_split,
horizontal_flip=False,
)
| Digit Recognizer |
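To sanity-check augmentation settings like the ones above, it helps to plot a few transformed digits (note `horizontal_flip` stays `False`, since mirrored digits are no longer valid digits). A sketch, assuming `trainimages` and `trainlabels` as prepared in the later cells and matplotlib imported as `plt`:

```python
# draw one augmented batch and show the first nine digits
batch, labels = next(datagen.flow(trainimages, trainlabels.values, batch_size=9, subset='training'))
fig, axes = plt.subplots(3, 3, figsize=(6, 6))
for ax, img, lab in zip(axes.ravel(), batch, labels):
    ax.imshow(img.squeeze(), cmap='gray')
    ax.set_title(int(lab))
    ax.axis('off')
plt.show()
```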
13,088,098 | submission_df["Label"]=np.argmax(model_outputs.cpu().numpy() ,1 )<save_to_csv> | trainlabels = train['label']
trainimages = train.drop(['label'], axis = 1)
trainimages = trainimages.values.reshape(-1, image_width, image_height, 1)
trainimages = trainimages.astype('float32')
trainimages = trainimages/255
testset = finaltest.values.reshape(-1, image_width, image_height, 1)
testset = testset.astype('float32')
testset = testset/255
| Digit Recognizer |
13,088,098 | submission_df.to_csv("submission.csv", index=False)<load_from_csv> | x_train, x_test, y_train, y_test = train_test_split(trainimages, trainlabels, test_size=validation_split) | Digit Recognizer |
13,088,098 | pd.read_csv("submission.csv")<import_modules> | train_generator = datagen.flow(x_train, y_train, batch_size=batch_size, shuffle=True, subset='training')
val_generator = datagen.flow(x_test, y_test, batch_size=batch_size, subset='validation')
| Digit Recognizer |
13,088,098 | for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
<load_from_csv> | models = list()
for i in range(ensemble_size):
    model = Sequential()
    model.add(Conv2D(filters=base_filters // 8, kernel_size=(7,7), padding='Same', activation='relu', input_shape=(image_width, image_height, 1)))
    model.add(Conv2D(filters=base_filters // 8, kernel_size=(7,7), padding='Same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
    model.add(Dropout(0.25, seed=tempseed))
    model.add(Conv2D(filters=base_filters // 4, kernel_size=(5,5), padding='Same', activation='relu'))
    model.add(Conv2D(filters=base_filters // 4, kernel_size=(5,5), padding='Same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
    model.add(Dropout(0.25, seed=tempseed))
    model.add(Conv2D(filters=base_filters // 2, kernel_size=(3,3), padding='Same', activation='relu'))
    model.add(Conv2D(filters=base_filters // 2, kernel_size=(3,3), padding='Same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
    model.add(Dropout(0.25, seed=tempseed))
    model.add(Conv2D(filters=base_filters, kernel_size=(3,3), padding='Same', activation='relu'))
    model.add(Conv2D(filters=base_filters, kernel_size=(3,3), padding='Same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
    model.add(Dropout(0.25, seed=tempseed))
    model.add(Flatten())
    model.add(Dense(base_filters, activation="relu"))
    model.add(Dense(base_filters // 2, activation="relu"))
    model.add(Dense(no_classes, activation="softmax"))
    models.insert(i, model)
models[0].summary() | Digit Recognizer |
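The four Conv-Conv-BatchNorm-Pool-Dropout blocks above differ only in filter count and kernel size, so the builder could be factored into a helper; a sketch of the same architecture in that style (the helper name is illustrative, not from the kernel):

```python
def add_conv_block(model, filters, kernel, first=False):
    # Conv-Conv-BN-Pool-Dropout, the block repeated four times above
    if first:
        model.add(Conv2D(filters, kernel, padding='Same', activation='relu',
                         input_shape=(image_width, image_height, 1)))
    else:
        model.add(Conv2D(filters, kernel, padding='Same', activation='relu'))
    model.add(Conv2D(filters, kernel, padding='Same', activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
    model.add(Dropout(0.25, seed=tempseed))

model = Sequential()
add_conv_block(model, base_filters // 8, (7, 7), first=True)
add_conv_block(model, base_filters // 4, (5, 5))
add_conv_block(model, base_filters // 2, (3, 3))
add_conv_block(model, base_filters, (3, 3))
model.add(Flatten())
model.add(Dense(base_filters, activation="relu"))
model.add(Dense(base_filters // 2, activation="relu"))
model.add(Dense(no_classes, activation="softmax"))
```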
13,088,098 | np.random.seed(1)
df_train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
df_train = df_train.iloc[np.random.permutation(len(df_train))]<prepare_x_and_y> | optimizer = Adam(lr=0.0005, decay=1e-9) | Digit Recognizer |
13,088,098 | sample_size = df_train.shape[0]
validation_size = int(df_train.shape[0] * 0.1)
train_x = np.asarray(df_train.iloc[:sample_size - validation_size, 1:]).reshape([sample_size - validation_size, 28, 28, 1])
train_y = np.asarray(df_train.iloc[:sample_size - validation_size, 0]).reshape([sample_size - validation_size, 1])
val_x = np.asarray(df_train.iloc[sample_size - validation_size:, 1:]).reshape([validation_size, 28, 28, 1])
val_y = np.asarray(df_train.iloc[sample_size - validation_size:, 0]).reshape([validation_size, 1])<load_from_csv> | for i in range(len(models)):
    models[i].compile(optimizer=optimizer, loss="sparse_categorical_crossentropy", metrics=["accuracy"]) | Digit Recognizer |
13,088,098 | df_test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
test_x = np.asarray(df_test.iloc[:, :] ).reshape([-1, 28, 28, 1] )<feature_engineering> | learning_rate_reduction = ReduceLROnPlateau(monitor = 'val_accuracy', patience = 3, verbose = 1, factor = 0.25, min_lr = 0.0000001 ) | Digit Recognizer |
13,088,098 | train_x = train_x/255
val_x = val_x/255
test_x = test_x/255<choose_model_class> | callbacks = [learning_rate_reduction] | Digit Recognizer |
13,088,098 | model = models.Sequential()<choose_model_class> | histories = list()
for i in range(len(models)):
    tempseed = set_seeds(i)
    histories.insert(i, models[i].fit_generator(
        train_generator,
        epochs=no_epochs,
        verbose=1,
        validation_data=val_generator,
        callbacks=callbacks
    ))
| Digit Recognizer |
13,088,098 | <choose_model_class><EOS> | predictions = np.zeros((testset.shape[0], 10))
for i in range(ensemble_size):
    predictions = predictions + models[i].predict(testset)
finalpredictions = np.argmax(predictions, axis=1)
submissions = {'ImageId': list(range(1, len(finalpredictions) + 1)), 'Label': finalpredictions}
submission_df = pd.DataFrame(submissions).astype('int')
submission_df.head()
submission_df.to_csv('submission.csv', index=False) | Digit Recognizer |
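Summing raw softmax outputs across the ensemble, as above, produces the same argmax as averaging them; a sketch of the same idea as a reusable helper (assumes `models` and `testset` from the earlier cells):

```python
def ensemble_predict(models, x):
    # average class probabilities over the ensemble, then pick the top class
    probs = np.mean([m.predict(x) for m in models], axis=0)
    return np.argmax(probs, axis=1)

finalpredictions = ensemble_predict(models, testset)
```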
14,272,089 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<train_model> | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf | Digit Recognizer |
14,272,089 | epochs = 20
batch_size = 256
history_1 = model.fit(train_x, train_y, batch_size = batch_size, epochs = epochs, validation_data =(val_x, val_y))<compute_train_metric> | mnist = pd.read_csv("/kaggle/input/digit-recognizer/train.csv" ) | Digit Recognizer |
14,272,089 | val_p = np.argmax(model.predict(val_x), axis = 1)
error = 0
confusion_matrix = np.zeros([10, 10])
for i in range(val_x.shape[0]):
    confusion_matrix[val_y[i], val_p[i]] += 1
    if val_y[i] != val_p[i]:
        error += 1
print("Confusion Matrix:\n", confusion_matrix)
print("\nErrors in validation set: ", error)
print("\nError Percentage: ", (error * 100) / val_p.shape[0])
print("\nAccuracy: ", 100 - (error * 100) / val_p.shape[0])
print("\nValidation set Shape: ", val_p.shape[0])<train_model> | y = mnist.iloc[:, 0].values
X = mnist.iloc[:, 1:].values | Digit Recognizer |
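The hand-rolled confusion matrix above can also be obtained from scikit-learn, which replaces the manual loop; a sketch assuming `val_y` and `val_p` as computed in that cell:

```python
from sklearn.metrics import confusion_matrix, accuracy_score

cm = confusion_matrix(val_y.ravel(), val_p)  # rows: true digit, columns: predicted digit
print(cm)
print("Validation accuracy: %.4f" % accuracy_score(val_y.ravel(), val_p))
```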
14,272,089 | datagen = ImageDataGenerator(
featurewise_center = False,
samplewise_center = False,
featurewise_std_normalization = False,
samplewise_std_normalization = False,
zca_whitening = False,
rotation_range = 10,
zoom_range = 0.1,
width_shift_range = 0.1,
height_shift_range = 0.1,
horizontal_flip = False,
vertical_flip = False)
datagen.fit(train_x)<choose_model_class> | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0) | Digit Recognizer |
14,272,089 | lrr = ReduceLROnPlateau(monitor = 'val_accuracy', patience = 2, verbose = 1, factor = 0.5, min_lr = 0.00001 )<train_model> | X_train = X_train.reshape(-1, 28, 28, 1 ).astype('float32')/ 255.0
X_test = X_test.reshape(-1, 28, 28, 1 ).astype('float32')/ 255.0 | Digit Recognizer |
14,272,089 | epochs = 30
history_2 = model.fit_generator(datagen.flow(train_x, train_y, batch_size = batch_size), steps_per_epoch = int(train_x.shape[0]/batch_size)+ 1, epochs = epochs, validation_data =(val_x, val_y), callbacks = [lrr])
<compute_train_metric> | y_train = tf.keras.utils.to_categorical(y_train)
y_test = tf.keras.utils.to_categorical(y_test ) | Digit Recognizer |
14,272,089 | val_p = np.argmax(model.predict(val_x), axis = 1)
error = 0
confusion_matrix = np.zeros([10, 10])
for i in range(val_x.shape[0]):
    confusion_matrix[val_y[i], val_p[i]] += 1
    if val_y[i] != val_p[i]:
        error += 1
print("Confusion Matrix:\n", confusion_matrix)
print("\nErrors in validation set: ", error)
print("\nError Percentage: ", (error * 100) / val_p.shape[0])
print("\nAccuracy: ", 100 - (error * 100) / val_p.shape[0])
print("\nValidation set Shape: ", val_p.shape[0])<predict_on_test> | model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32,(3, 3), activation = 'relu', padding = 'same', input_shape =(28, 28, 1)) ,
tf.keras.layers.Conv2D(32,(3, 3), activation = 'relu', padding = 'same'),
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Conv2D(64,(3, 3), activation = 'relu', padding = 'same'),
tf.keras.layers.Conv2D(64,(3, 3), activation = 'relu', padding = 'same'),
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Conv2D(128,(3, 3), activation = 'relu', padding = 'same'),
tf.keras.layers.Conv2D(128,(3, 3), activation = 'relu', padding = 'same'),
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Flatten() ,
tf.keras.layers.Dense(512, activation = 'relu'),
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(256, activation = 'relu'),
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.Dropout(0.35),
tf.keras.layers.Dense(10, activation = 'softmax')
] ) | Digit Recognizer |
14,272,089 | test_y = np.argmax(model.predict(test_x), axis = 1 )<save_to_csv> | model.compile(optimizer = Adam(lr = 1e-3),
loss = 'categorical_crossentropy',
metrics = ['accuracy'] ) | Digit Recognizer |
14,272,089 | df_submission = pd.DataFrame([df_test.index + 1, test_y], ["ImageId", "Label"] ).transpose()
df_submission.to_csv("MySubmission.csv", index = False )<choose_model_class> | datagen = ImageDataGenerator(
rotation_range= 10,
zoom_range = 0.1,
width_shift_range = 0.1,
height_shift_range = 0.1
)
datagen.fit(X_train ) | Digit Recognizer |
14,272,089 | tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect()
tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu)
<load_from_csv> | train_generator = datagen.flow(X_train, y_train, batch_size = 64)
validation_generator = datagen.flow(X_test, y_test, batch_size = 64 ) | Digit Recognizer |
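`TPUClusterResolver.connect()` (used in the row above) raises when no TPU is attached, so notebooks often wrap it with a fallback to the default strategy; a minimal sketch for TF 2.x:

```python
try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver.connect()
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
except ValueError:
    # no TPU available: fall back to the default CPU/GPU strategy
    strategy = tf.distribute.get_strategy()
print("Replicas in sync:", strategy.num_replicas_in_sync)
```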
14,272,089 | train_dataframe=pd.read_csv("../input/digit-recognizer/train.csv")
test_dataframe=pd.read_csv("../input/digit-recognizer/test.csv")<count_values> | learning_rate_reduction = ReduceLROnPlateau(monitor = 'val_loss', patience = 3, verbose = 1, factor = 0.5, min_lr = 1e-6)
model_checkpoint = ModelCheckpoint('./best_model.hdf5', monitor='val_loss', mode="min", verbose=1, save_best_only=True) | Digit Recognizer |
14,272,089 | train_dataframe['label'].value_counts()<data_type_conversions> | history = model.fit_generator(
train_generator,
steps_per_epoch = X_train.shape[0] // 64,
epochs = 50,
validation_data = validation_generator,
validation_steps = X_test.shape[0] // 64,
callbacks = [learning_rate_reduction, model_checkpoint]
) | Digit Recognizer |
14,272,089 | train_label = train_dataframe.label.to_numpy()
train_image = train_dataframe.to_numpy()[0:, 1:].reshape(42000, 28, 28, 1)
test_image = test_dataframe.to_numpy().reshape(28000, 28, 28, 1)<data_type_conversions> | model = load_model("./best_model.hdf5") | Digit Recognizer |
14,272,089 | train_image = train_image.astype(float)/ 255.0
test_image = test_image.astype(float)/ 255.0<choose_model_class> | model.evaluate(X_test, y_test, verbose = 1 ) | Digit Recognizer |
14,272,089 | with tpu_strategy.scope():
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(64, (3,3), activation='relu', padding='Same', input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Dropout(0.25),
        tf.keras.layers.Conv2D(128, (3,3), activation='relu', padding='Same'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Dropout(0.25),
        tf.keras.layers.Conv2D(256, (3,3), activation='relu', padding='Same'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Dropout(0.25),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(1024, activation='relu'),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(10, activation='softmax')
    ])
    optimizer = Adam(learning_rate=0.001)
    # the final layer already applies softmax, so the loss must take probabilities, not logits
    model.compile(loss=SparseCategoricalCrossentropy(from_logits=False),
                  optimizer=optimizer,
                  metrics=['accuracy'])
epochs = 50
batch_size = 16<split> | test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv").values
test = test.reshape(-1, 28, 28, 1).astype('float32') / 255.0
y_pred = model.predict(test).argmax(axis=1)
y_pred.shape | Digit Recognizer |
14,272,089 | x_train,x_val,y_train,y_val=train_test_split(train_image,train_label,test_size=0.2,random_state=42 )<train_model> | submission = pd.DataFrame({'ImageId': np.arange(1, 28001), 'Label': y_pred})
submission.to_csv("submission.csv", index = False)
print("Your submission was successfully saved!" ) | Digit Recognizer |
14,189,071 | history = model.fit(x_train,y_train,batch_size=64,epochs=15,validation_data=(x_val,y_val),shuffle=True )<predict_on_test> | import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Flatten,Dropout,Conv2D,MaxPooling2D, BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping, Callback
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
import sklearn.metrics as metrics
import random
import matplotlib.pyplot as plt
import seaborn as sns | Digit Recognizer |
14,189,071 | val_pred = model.predict(x_val )<prepare_output> | print(tf.config.list_physical_devices('GPU'),'//',tf.test.is_built_with_cuda() ) | Digit Recognizer |
14,189,071 | val_pred1 = np.argmax(val_pred, axis=1)<predict_on_test> | train = pd.read_csv('../input/digit-recognizer/train.csv')
test = pd.read_csv('../input/digit-recognizer/test.csv')
train.head() | Digit Recognizer |
14,189,071 | predictions = model.predict(test_image )<prepare_output> | print('Nº of missing values in train set: ', train.isnull().any().sum())
print()
print('Nº of missing values in test set: ', test.isnull().any().sum() ) | Digit Recognizer |
14,189,071 | subs = np.argmax(predictions, axis=1)
submission = pd.DataFrame({'ImageId' : range(1,28001), 'Label' : list(subs)})
submission.head(10)
submission.shape<save_to_csv> | X = np.array(train.drop('label',axis=1)) / 255.
X = X.reshape((-1,28,28,1))
y = np.array(train['label']) | Digit Recognizer |
14,189,071 | submission.to_csv("submission1.csv", index = False )<set_options> | train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.2)
print('X_train: ' + str(train_X.shape))
print('Y_train: ' + str(train_y.shape))
print('X_test: ' + str(test_X.shape))
print('Y_test: ' + str(test_y.shape)) | Digit Recognizer |
14,189,071 | !nvidia-smi<set_options> | batch_size = 128
epochs = 110
epochs_to_wait_to_improve = 10
num_classes = max(pd.unique(train['label'])) +1
seed = 7
random.seed(seed ) | Digit Recognizer |
14,189,071 | %matplotlib inline
sns.set(style='white', context='notebook', palette='deep')
np.random.seed(2 )<load_from_csv> | datagen = ImageDataGenerator(
rotation_range=12,
width_shift_range=0.11,
height_shift_range=0.11,
shear_range=0.15,
zoom_range = 0.09,
validation_split=0.3,
horizontal_flip=False,
vertical_flip=False
) | Digit Recognizer |
14,189,071 | train = pd.read_csv('../input/digit-recognizer/train.csv')
test = pd.read_csv('../input/digit-recognizer/test.csv')<prepare_x_and_y> | train_generator = datagen.flow(train_X,
train_y,
batch_size=batch_size,
shuffle=True,
subset='training')
val_generator = datagen.flow(test_X,
test_y,
batch_size=batch_size,
subset='validation' ) | Digit Recognizer |
14,189,071 | y_train = train["label"]
X_train = train.drop(labels=["label"], axis = 1 )<train_model> | model = Sequential()
model.add(Conv2D(32, kernel_size =(3,3), input_shape=(28, 28, 1), padding = 'Same', activation='relu'))
model.add(Conv2D(64, kernel_size =(3,3), padding = 'Same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=1, padding='valid'))
model.add(Dropout(0.25))
model.add(Conv2D(64, kernel_size =(3,3), padding = 'Same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=1, padding='valid'))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.20))
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.35))
model.add(Dense(num_classes, activation='softmax'))
| Digit Recognizer |
14,189,071 | ( X_train1, y_train1),(X_test1, y_test1)= mnist.load_data()
X_train1 = np.concatenate([X_train1, X_test1], axis=0)
y_train1 = np.concatenate([y_train1, y_test1], axis=0)
X_train1 = X_train1.reshape(-1, 28*28 )<feature_engineering> | class myCallback(Callback):
def on_epoch_end(self, epoch, logs={}):
if(logs.get('accuracy')>0.999):
print("
Reached 99.9% accuracy so cancelling training!")
self.model.stop_training = True
mycallback = myCallback()
early_stopping_callback = EarlyStopping(monitor='val_loss',
patience=epochs_to_wait_to_improve,
verbose = 2,
restore_best_weights=True)
optimizer = Adam(lr=0.001, beta_1=0.9)
model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy'] ) | Digit Recognizer |
X_train = X_train/255.
X_train1 = X_train1/255.
test = test/255.<concatenate> | history = model.fit(train_generator,
epochs=epochs,
validation_data=val_generator,
callbacks=[mycallback,early_stopping_callback] ) | Digit Recognizer |
14,189,071 | X_train = np.concatenate((X_train.values, X_train1))
y_train = np.concatenate((y_train, y_train1))<categorify> | test_loss, test_acc = model.evaluate(test_X, test_y, verbose=5)
print('\nTest accuracy:', test_acc) | Digit Recognizer |
14,189,071 | y_train = to_categorical(y_train, num_classes = 10 )<split> | metrics.classification_report(val_trues, val_preds ) | Digit Recognizer |
14,189,071 | X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size = 0.1, random_state = 2 )<train_model> | test_pred = np.array(test/255.)
test_pred = test_pred.reshape(( -1,28,28,1))
test_predictions = model.predict_classes(test_pred ) | Digit Recognizer |
14,189,071 | print(f"Training shape {X_train.shape}
Validation shape {X_val.shape}" )<choose_model_class> | sub_df = {'ImageId':list(range(1, len(test_predictions)+ 1)) ,'Label': test_predictions}
submission = pd.DataFrame(sub_df ).astype('int')
submission.head()
submission.to_csv('submission.csv', index=False ) | Digit Recognizer |
14,208,528 | model = Sequential()
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(BatchNormalization())
model.add(Conv2D(128,(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(256, kernel_size=(3, 3),
activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512,(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(BatchNormalization())
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Dense(128))
model.add(BatchNormalization())
model.add(Dense(10, activation='softmax'))<save_to_csv> | import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.callbacks import LearningRateScheduler | Digit Recognizer |
14,208,528 | plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True)
Image('model.png')<choose_model_class> | train = pd.read_csv('../input/digit-recognizer/train.csv')
test = pd.read_csv('../input/digit-recognizer/test.csv') | Digit Recognizer |
14,208,528 | optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"] )<choose_model_class> | X_train=X_train/255.0
test=test/255.0 | Digit Recognizer |
14,208,528 | learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.2,
min_lr=0.00001)
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=15)
checkpoint = ModelCheckpoint(filepath='model.h5', monitor='val_loss', mode='min', save_best_only=True, save_weights_only=True )<choose_model_class> | X_train = X_train.values.reshape(-1, 28, 28, 1)
test = test.values.reshape(-1, 28, 28, 1 ) | Digit Recognizer |
14,208,528 | datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range=0.1,
width_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_train )<define_variables> | x_train,x_test,y_train,y_test = train_test_split(X_train,Y_train,test_size=0.2 ) | Digit Recognizer |
14,208,528 | epochs = 50
batch_size = 128<train_model> | data_gen = ImageDataGenerator(
rotation_range=12,
width_shift_range=0.12,
height_shift_range=0.12,
shear_range=0.12,
validation_split=0.2, ) | Digit Recognizer |
14,208,528 | history = model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size),
epochs=epochs,
validation_data=(X_val, y_val),
verbose=2,
steps_per_epoch=X_train.shape[0]//batch_size,
callbacks=[learning_rate_reduction, es, checkpoint] )<predict_on_test> | data_gen.fit(x_train)
data_gen.fit(x_test ) | Digit Recognizer |
14,208,528 | results = model.predict(test)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label" )<save_to_csv> | y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10 ) | Digit Recognizer |
14,208,528 | submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("submission.csv",index=False )<import_modules> | model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation="relu", input_shape=(28,28,1)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax')) | Digit Recognizer |
14,208,528 | from matplotlib import pyplot as plt
import os
import scipy
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
import seaborn as sns
from sklearn.model_selection import train_test_split
import cv2<load_from_csv> | model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] ) | Digit Recognizer |
14,208,528 | main_path = r".. /input/digit-recognizer"
train_df = pd.read_csv(os.path.join(main_path, "train.csv"))
test_df = pd.read_csv(os.path.join(main_path, "test.csv"))<prepare_x_and_y> | batch_size = 64
epochs = 15
history = model.fit_generator(data_gen.flow(x_train, y_train, batch_size = batch_size), epochs = epochs,
validation_data =(x_test, y_test), verbose=1,
steps_per_epoch=x_train.shape[0] // batch_size,
)
| Digit Recognizer |
14,208,528 | x_train = train_df.drop(labels=["label"], axis=1)
y_train = train_df["label"]
y_train.head()<categorify> | pred = np.argmax(model.predict(test), axis=1)
sub_df = {'ImageId':list(range(1, len(test)+ 1)) ,'Label':pred}
submission = pd.DataFrame(sub_df ).astype('int')
submission.head() | Digit Recognizer |
14,208,528 | x_train = x_train.to_numpy() / 255.0
x_test = test_df.to_numpy() / 255.0
x_train = x_train.reshape(-1, 28, 28, 1)
x_test = x_test.reshape(-1, 28, 28, 1)
y_train = to_categorical(y_train)
plt.imshow(x_train[125, :, :, :] )<split> | submission.to_csv('submission.csv', index=False ) | Digit Recognizer |
14,208,528 | x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.15, random_state=2)
datagen = ImageDataGenerator(
rotation_range=27,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.3,
zoom_range=0.2
)<choose_model_class> | submission.to_csv('submission.csv', index=False ) | Digit Recognizer |
14,085,919 | model = Sequential()
model.add(Conv2D(filters=64, kernel_size=(5,5), padding='same', activation='relu', input_shape=(28,28,1)))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(5,5), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3,3), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=64, kernel_size=(3,3), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3,3), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Dense(10, activation='softmax'))
model.summary()<choose_model_class> | train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
train.head() | Digit Recognizer |
class myCallback(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if logs.get('val_accuracy') > 0.9955:
            print("Stop training!")
            self.model.stop_training = True
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"])
reduce_lr = ReduceLROnPlateau(
monitor='val_accuracy',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001
)
epoch_end = myCallback()
<train_model> | from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical | Digit Recognizer |
14,085,919 | history = model.fit(datagen.flow(x_train, y_train, batch_size=256),
epochs=200, validation_data=(x_val, y_val),
verbose=1, steps_per_epoch=x_train.shape[0] // 256,
callbacks=[reduce_lr, epoch_end] )<save_to_csv> | Y_train = to_categorical(train['label'].values, 10)
X_train =(train.loc[:, 'pixel0':] / 255 ).values
X_train.shape, Y_train.shape | Digit Recognizer |
14,085,919 | results = model.predict(x_test)
results = np.argmax(results, axis=1)
submission = pd.read_csv(os.path.join(main_path, "sample_submission.csv"))
image_id = range(1, x_test.shape[0]+1)
submission = pd.DataFrame({'ImageId':image_id, 'Label':results})
submission.to_csv('cnn2_submission.csv', index=False )<set_options> | X_test =(test / 255 ).values | Digit Recognizer |
14,085,919 | %matplotlib inline
<load_from_csv> | datagener = ImageDataGenerator(
rotation_range=15,
zoom_range=0.1,
width_shift_range=0.1,
height_shift_range=0.1,
) | Digit Recognizer |
14,085,919 | train = import_data('../input/digit-recognizer/train.csv')
test = import_data('../input/digit-recognizer/test.csv')
y_lab = train['label']
y = tf.keras.utils.to_categorical(y_lab)
train.drop('label', axis=1, inplace=True )<prepare_x_and_y> | example = X_train[6].reshape(( 1, 28, 28, 1))
label = Y_train[6] | Digit Recognizer |
14,085,919 | train_df = np.array(train ).reshape(-1, 28, 28, 1)
test_df = np.array(test ).reshape(-1, 28, 28, 1)
del train
del test
del y_lab<data_type_conversions> | def lr_scheduler(epoch, lr):
    return lr * 0.99 | Digit Recognizer |
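`lr_scheduler` multiplies the current rate by 0.99 each epoch, i.e. an exponential schedule lr_e = lr_0 * 0.99^e; the training cell further down attaches it with `LearningRateScheduler(lr_scheduler)`. An equivalent closed-form sketch:

```python
from keras.callbacks import LearningRateScheduler

initial_lr = 1e-2  # matches the Adam(lr=1e-2) used further down
# same schedule written in closed form: lr_e = initial_lr * 0.99 ** e
closed_form = LearningRateScheduler(lambda epoch: initial_lr * 0.99 ** epoch)
```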
14,085,919 | def change_size(image):
    img = array_to_img(image, scale=False)
    img = img.resize((75, 75))
    img = img.convert(mode='RGB')
    arr = img_to_array(img)
    return arr.astype(np.float32)<drop_column> | X_train, X_valid, Y_train, Y_valid = train_test_split(X_train, Y_train, test_size=0.2) | Digit Recognizer |
14,085,919 | train_array = [change_size(img) for img in train_df]
train = np.array(train_array)
del train_array
test_array = [change_size(img) for img in test_df]
test = np.array(test_array)
del test_array<randomize_order> | np.random.seed(42)
tf.random.set_seed(42)
model = models.Sequential()
model.add(Conv2D(96, 3, activation='relu', padding='same', input_shape=(28, 28, 1)))
model.add(BatchNormalization())
model.add(SpatialDropout2D(0.4))
model.add(MaxPooling2D(( 2, 2)))
model.add(Conv2D(160, 3, activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(SpatialDropout2D(0.4))
model.add(MaxPooling2D(( 2, 2)))
model.add(Conv2D(256, 3, activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(SpatialDropout2D(0.4))
model.add(MaxPooling2D(( 2, 2)))
model.add(Conv2D(64, 3, activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(SpatialDropout2D(0.4))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(96, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer=optimizers.Adam(lr=1e-2),
loss='categorical_crossentropy', metrics=['categorical_accuracy'])
checkpoint_path = 'bestmodel.hdf5'
checkpoint = ModelCheckpoint(checkpoint_path, monitor='val_categorical_accuracy',
verbose=0, save_best_only=True, mode='max')
scheduler = LearningRateScheduler(lr_scheduler, verbose=0)
early_stop = EarlyStopping(monitor='val_loss', min_delta=0, patience=15, mode='min', verbose=0)
tqdm_callback = tfa.callbacks.TQDMProgressBar(leave_epoch_progress=False,
leave_overall_progress=True,
show_epoch_progress=False,
show_overall_progress=True)
callbacks_list = [
checkpoint,
scheduler,
tqdm_callback,
]
history = model.fit_generator(datagener.flow(X_train, Y_train, batch_size=150), epochs=225, steps_per_epoch=X_train.shape[0] // 150,
callbacks=callbacks_list,
verbose=1, validation_data=(X_valid, Y_valid)) | Digit Recognizer |
14,085,919 | def get_random_eraser(p=0.5, s_l=0.02, s_h=0.4, r_1=0.3, r_2=1/0.3, v_l=0, v_h=255, pixel_level=False):
    def eraser(input_img):
        if input_img.ndim == 3:
            img_h, img_w, img_c = input_img.shape
        elif input_img.ndim == 2:
            img_h, img_w = input_img.shape
        p_1 = np.random.rand()
        if p_1 > p:
            return input_img
        while True:
            s = np.random.uniform(s_l, s_h) * img_h * img_w
            r = np.random.uniform(r_1, r_2)
            w = int(np.sqrt(s / r))
            h = int(np.sqrt(s * r))
            left = np.random.randint(0, img_w)
            top = np.random.randint(0, img_h)
            if left + w <= img_w and top + h <= img_h:
                break
        if pixel_level:
            if input_img.ndim == 3:
                c = np.random.uniform(v_l, v_h, (h, w, img_c))
            if input_img.ndim == 2:
                c = np.random.uniform(v_l, v_h, (h, w))
        else:
            c = np.random.uniform(v_l, v_h)
        input_img[top:top + h, left:left + w] = c
        return input_img
    return eraser<choose_model_class> | model.load_weights(checkpoint_path)
print(model.evaluate(X_valid, Y_valid)) | Digit Recognizer |
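The eraser returned by `get_random_eraser` mutates the array it receives; the next row plugs it into `ImageDataGenerator` via `preprocessing_function`, but applied to a single [0,1]-scaled 28x28x1 image (call it `img`, an assumed variable) it behaves like this sketch:

```python
eraser = get_random_eraser(p=1.0, v_l=0, v_h=1)  # p=1.0 forces an erase for the demo
erased = eraser(img.copy())                      # copy first: the eraser writes in place
plt.imshow(erased.squeeze(), cmap='gray')
plt.show()
```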
14,085,919 | image_gen = ImageDataGenerator(rescale=1./255,
featurewise_center=False,
preprocessing_function=get_random_eraser(v_l=0, v_h=1),
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zoom_range=0.1,
rotation_range=10,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.3,
validation_split=0.2)
train_generator = image_gen.flow(train,
y,
batch_size=32,
shuffle=True,
subset='training',
seed=42)
valid_generator = image_gen.flow(train,
y,
batch_size=16,
shuffle=True,
subset='validation')
del train_df
del test_df
del train<choose_model_class> | submit = pd.DataFrame(np.argmax(model.predict(X_test), axis=1), columns=['Label'],
                      index=pd.read_csv('../input/digit-recognizer/sample_submission.csv')['ImageId'])
submit.index.name = 'ImageId'
submit.to_csv('submission.csv') | Digit Recognizer |
14,051,573 | model = Sequential()
model.add(tf.keras.applications.resnet50.ResNet50(input_shape =(75, 75, 3),
pooling = 'avg',
include_top = False,
weights = 'imagenet'))
model.add(L.Flatten())
model.add(L.Dense(128, activation='relu'))
model.add(L.Dense(10, activation='softmax'))
model.compile(optimizer=RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0), loss='categorical_crossentropy', metrics=['accuracy'])
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001)
<load_pretrained> | seed = 42
np.random.seed(seed ) | Digit Recognizer |
14,051,573 | for layer in model.layers[0].layers:
    if layer.name == 'conv5_block1_0_conv':
        break
    layer.trainable = False<train_model> | train = pd.read_csv('../input/digit-recognizer/train.csv')
test = pd.read_csv('../input/digit-recognizer/test.csv') | Digit Recognizer |
14,051,573 | history = model.fit(train_generator, validation_data=valid_generator, epochs=20,
steps_per_epoch=train_generator.n//train_generator.batch_size,
validation_steps=valid_generator.n//valid_generator.batch_size,
callbacks=[learning_rate_reduction] )<feature_engineering> | train.isnull().any().sum() | Digit Recognizer |
14,051,573 | test = test/255<save_to_csv> | test.isnull().any().sum() | Digit Recognizer |
14,051,573 | res = model.predict(test[:])
output = pd.DataFrame({'ImageId':[ i+1 for i in range(len(res)) ],
'Label': [ xi.argmax() for xi in res]})
output.to_csv('submission_grid.csv', index=False )<set_options> | X = train.iloc[:, 1:]
y = train.iloc[:, 0] | Digit Recognizer |
14,051,573 | warnings.filterwarnings("ignore")
%matplotlib inline
np.random.seed(2)
sns.set(style='white', context='notebook', palette='deep' )<load_from_csv> | X = X.values.reshape(-1, 28, 28, 1)
test = test.values.reshape(-1, 28, 28, 1 ) | Digit Recognizer |
14,051,573 | train = pd.read_csv('../input/digit-recognizer/train.csv')
test = pd.read_csv('../input/digit-recognizer/test.csv')
sub = pd.read_csv('../input/digit-recognizer/sample_submission.csv')
print("Data are Ready!!" )<train_model> | def normalize(arr):
return(arr - np.mean(arr)) / np.std(arr ) | Digit Recognizer |
14,051,573 | print(f"Training data size is {train.shape}
Testing data size is {test.shape}" )<prepare_x_and_y> | X = normalize(X)
test = normalize(test) | Digit Recognizer |
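Unlike the `/255` min-max scaling used by the other kernels, `normalize` is z-score standardization, so afterwards the pixels have mean ≈ 0 and std ≈ 1 (which the next row's printout confirms). A tiny numeric illustration:

```python
arr = np.array([0., 127.5, 255.])
z = (arr - np.mean(arr)) / np.std(arr)
print(z)                  # approximately [-1.22, 0.0, 1.22]
print(z.mean(), z.std())  # ~0.0 and ~1.0
```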
14,051,573 | Y_train = train["label"]
X_train = train.drop(labels = ["label"], axis = 1 )<concatenate> | mean, std = np.mean(X), np.std(X)
print('Mean: %.3f, Standard Deviation: %.3f' %(mean, std)) | Digit Recognizer |
14,051,573 | ( x_train1, y_train1),(x_test1, y_test1)= mnist.load_data()
train1 = np.concatenate([x_train1, x_test1], axis=0)
y_train1 = np.concatenate([y_train1, y_test1], axis=0)
Y_train1 = y_train1
X_train1 = train1.reshape(-1, 28*28 )<feature_engineering> | datagen = ImageDataGenerator(
rotation_range=20,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False ) | Digit Recognizer |
14,051,573 | X_train = X_train / 255.0
test = test / 255.0
X_train1 = X_train1 / 255.0<concatenate> | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15 ) | Digit Recognizer |
14,051,573 | X_train = np.concatenate(( X_train.values, X_train1))
Y_train = np.concatenate(( Y_train, Y_train1))<categorify> | datagen.fit(X_train ) | Digit Recognizer |
14,051,573 | Y_train = to_categorical(Y_train, num_classes = 10 )<split> | y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10 ) | Digit Recognizer |
14,051,573 | X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=2 )<choose_model_class> | model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', strides=1, padding='same', data_format='channels_last',
input_shape=(28,28,1)))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=1, padding='valid'))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', strides=1, padding='same', data_format='channels_last'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=1, padding='valid'))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', strides=1, padding='same', data_format='channels_last',
input_shape=(28,28,1)))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=1, padding='valid'))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', strides=1, padding='same', data_format='channels_last'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2), strides=1, padding='valid'))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(BatchNormalization())
model.add(Dense(512, activation='relu'))
model.add(BatchNormalization())
model.add(Dense(10, activation='softmax')) | Digit Recognizer |
14,051,573 |
model = Sequential()
model.add(Conv2D(filters = 64, kernel_size =(5,5),padding = 'Same', activation ='relu', input_shape =(28,28,1)))
model.add(BatchNormalization())
model.add(Conv2D(filters = 64, kernel_size =(5,5),padding = 'Same', activation ='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size =(3,3), padding = 'Same', activation ='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Dense(10, activation = "softmax"))<choose_model_class> | optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999 ) | Digit Recognizer |
14,051,573 | optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0 )<choose_model_class> | model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['categorical_accuracy'] ) | Digit Recognizer |
14,051,573 | model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"] )<choose_model_class> | reduce_lr = LearningRateScheduler(lambda x: 1e-3 * 0.9 ** x ) | Digit Recognizer |
14,051,573 | learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001 )<define_variables> | batch_size = 64
epochs = 50 | Digit Recognizer |
14,051,573 | epochs = 50
batch_size = 128<define_variables> | history = model.fit_generator(datagen.flow(X_train, y_train, batch_size = batch_size), epochs = epochs,
validation_data =(X_test, y_test), verbose=1,
steps_per_epoch=X_train.shape[0] // batch_size,
callbacks = [reduce_lr] ) | Digit Recognizer |
14,051,573 | datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
train_gen = datagen.flow(X_train,Y_train, batch_size=batch_size )<train_model> | preds = np.argmax(model.predict(test), axis=1)
sub_df = {'ImageId':list(range(1, len(test)+ 1)) ,'Label':preds}
submission = pd.DataFrame(sub_df ).astype('int')
submission.head() | Digit Recognizer |
14,051,573 | history = model.fit(train_gen,
epochs = epochs,validation_data =(X_val,Y_val),
verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size
, callbacks=[learning_rate_reduction],
validation_steps = X_val.shape[0] // batch_size )<compute_test_metric> | submission.to_csv('submission.csv', index=False ) | Digit Recognizer |
14,051,573 | <compute_train_metric><EOS> | submission.to_csv('submission.csv', index=False ) | Digit Recognizer |
11,978,455 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<predict_on_test> | train = pd.read_csv("../input/digit-recognizer/train.csv")
test = pd.read_csv("../input/digit-recognizer/test.csv") | Digit Recognizer |
11,978,455 | results = model.predict(test)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label" )<save_to_csv> | train.isnull().sum().sum() | Digit Recognizer |
11,978,455 | submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("cnn_mnist_submission.csv",index=False )<train_model> | labels = train["label"]
pureimg_train = train.drop(labels = ["label"], axis = 1)
del train | Digit Recognizer |
11,978,455 | ( x_train1, y_train1),(x_test1, y_test1)= mnist.load_data()
Y_train1 = y_train1
X_train1 = x_train1.reshape(-1, 28*28 )<load_from_csv> | norm_train = pureimg_train/255
norm_test = test/255 | Digit Recognizer |
11,978,455 | train_data = pd.read_csv('../input/digit-recognizer/train.csv')
test_data = pd.read_csv('../input/digit-recognizer/test.csv')<prepare_x_and_y> | feature_train, feature_validate, target_train, target_validate = train_test_split(norm_train, labels, test_size = 0.1, random_state = 0) | Digit Recognizer |
11,978,455 | train_images = train_data.copy()
train_images = train_images.values
X_train = train_images[:,1:]
y_train = train_images[:,0]
X_test = test_data.values<define_variables> | Test = torch.from_numpy(norm_test.values.reshape(( -1,1,28,28)))
featuresTrain = torch.from_numpy(feature_train.values.reshape(( -1,1,28,28)))
targetsTrain = torch.from_numpy(target_train.values)
featuresValidation = torch.from_numpy(feature_validate.values.reshape(( -1,1,28,28)))
targetsValidation = torch.from_numpy(target_validate.values ) | Digit Recognizer |
11,978,455 | predictions = np.zeros(( X_train.shape[0]))<find_best_params> | batch_size = 88 | Digit Recognizer |
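With the tensors built above, a typical next step is to wrap them in PyTorch datasets and loaders using this `batch_size`; a minimal sketch under that assumption:

```python
from torch.utils.data import TensorDataset, DataLoader

train_ds = TensorDataset(featuresTrain.float(), targetsTrain)
val_ds = TensorDataset(featuresValidation.float(), targetsValidation)
train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_ds, batch_size=batch_size, shuffle=False)
```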