kernel_id (int64, 24.2k–23.3M) | prompt (stringlengths 8–1.85M) | completion (stringlengths 1–182k) | comp_name (stringlengths 5–57) |
---|---|---|---|
10,345,485 | imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
for col in continious_features:
imp_mean.fit(train_set[col].values.reshape(-1,1))
train_set[col] = imp_mean.transform(train_set[col].values.reshape(-1,1))
test_set[col] = imp_mean.transform(test_set[col].values.reshape(-1,1))
train_set[col] = np.log(train_set[col] + 1)
test_set[col] = np.log(test_set[col] + 1 )<count_missing_values> | pred = model.predict_classes(test_x, verbose=1 ) | Digit Recognizer |
10,345,485 | train_set[norminal_features].isnull().sum()<count_missing_values> | sub['Label'] = pred
sub.to_csv("sub.csv", index=False)
sub.head() | Digit Recognizer |
12,686,477 | test_set[norminal_features].isnull().sum()<data_type_conversions> | import time
import torch
import matplotlib.pyplot as plt
from torch import nn, optim
from torchvision import transforms
from torch.utils.data import TensorDataset, DataLoader | Digit Recognizer |
12,686,477 | for col in norminal_features:
train_set[col] = train_set[col].astype(str)
test_set[col] = test_set[col].astype(str)
train_set[col] = train_set[col].fillna('Unknown')
test_set[col] = test_set[col].fillna('Unknown' )<categorify> | epochs = 50
batch_size = 32
lr = 1e-3
momentum = 0.9
weight_decay = 1e-4
train_ratio = 0.8
device = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) | Digit Recognizer |
12,686,477 | ohe = OneHotEncoder(handle_unknown = "ignore", sparse=False)
ohe_train_set = pd.DataFrame(ohe.fit_transform(train_set[norminal_features]))
ohe_test_set= pd.DataFrame(ohe.transform(test_set[norminal_features]))
ohe_train_set.index = train_set.index
ohe_test_set.index = test_set.index<drop_column> | train_df = np.array(pd.read_csv('/kaggle/input/digit-recognizer/train.csv'))
imgs = train_df[:,1:].reshape(-1, 28, 28)
labels = train_df[:,0]
imgs = np.expand_dims(imgs, axis=1)/ 255.0
num_train = int(imgs.shape[0] * train_ratio)
train_imgs = imgs[:num_train]
train_labels = labels[:num_train]
val_imgs = imgs[num_train:]
val_labels = labels[num_train:]
train_dataset = TensorDataset(torch.tensor(train_imgs, dtype=torch.float), torch.tensor(train_labels))
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, pin_memory=True)
val_dataset = TensorDataset(torch.tensor(val_imgs, dtype=torch.float), torch.tensor(val_labels))
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, pin_memory=True)
| Digit Recognizer |
12,686,477 | train_set = train_set.drop(norminal_features, axis = 1)
test_set = test_set.drop(norminal_features, axis = 1 )<concatenate> | def conv_cell(cin, cout, k, s, p=None):
if not p:
p =(k-1)// 2
cell = nn.Sequential()
cell.add_module('conv', nn.Conv2d(in_channels=cin, out_channels=cout, kernel_size=k, stride=s, padding=p, bias=False))
cell.add_module('bn', nn.BatchNorm2d(cout))
cell.add_module('act', nn.ReLU())
return cell
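# Six Conv-BN-ReLU blocks (two with stride 2 for downsampling), then global average pooling into a 10-way linear head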
net = nn.Sequential(
conv_cell(1, 32, 3, 2),
conv_cell(32, 32, 3, 1),
conv_cell(32, 32, 3, 1),
conv_cell(32, 64, 3, 2),
conv_cell(64, 64, 3, 1),
conv_cell(64, 64, 3, 1),
nn.AdaptiveAvgPool2d(1),
nn.Flatten() ,
nn.Linear(64, 10)
)
print(net)
net = net.to(device ) | Digit Recognizer |
12,686,477 | train_base = train_set.copy()
test_base = test_set.copy()
train_set = pd.concat([train_set, ohe_train_set], axis=1)
test_set = pd.concat([test_set, ohe_test_set], axis =1 )<count_missing_values> | loss_fn = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters() , lr=lr, momentum=momentum, weight_decay=weight_decay ) | Digit Recognizer |
12,686,477 | test_set[discrete_features].isnull().sum()<count_missing_values> | def accuracy(dataloader, net):
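# Fraction of correctly classified samples over the whole dataloader, computed without tracking gradients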
total = 0
correct = 0
with torch.no_grad() :
for data in dataloader:
inputs, labels = data[0].to(device), data[1].to(device)
outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct +=(predicted == labels ).sum().item()
return correct/total | Digit Recognizer |
12,686,477 | train_set[discrete_features].isnull().sum()
test_set[discrete_features].isnull().sum()
for col in discrete_features:
test_set[col] = test_set[col].fillna(0 )<drop_column> | best = 0
train_losses = []
train_accuracies = []
val_accuracies = []
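# Training loop: log running loss every 100 batches, checkpoint the best validation accuracy and the latest weights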
for epoch in range(epochs):
tic = time.time()
epoch_loss = 0
for i, data in enumerate(train_loader):
inputs, labels = data[0].to(device), data[1].to(device)
optimizer.zero_grad()
outputs = net(inputs)
loss = loss_fn(outputs, labels)
loss.backward()
optimizer.step()
epoch_loss += loss.item()
if i % 100 == 99:
print("[Epoch {}/{} Batch {}] lr: {}, loss: {:.4f}".format(
epoch+1, epochs, i+1, optimizer.param_groups[0]['lr'], epoch_loss/(i+1)))
toc = time.time()
train_acc = accuracy(train_loader, net)
val_acc = accuracy(val_loader, net)
print("Epoch {}, time: {:.2f}s, train accuracy: {:.4f}, val accuracy: {:.4f}".format(
epoch+1, toc-tic, train_acc, val_acc))
print('-'*30)
train_losses.append(epoch_loss/(i+1))
train_accuracies.append(train_acc)
val_accuracies.append(val_acc)
if val_acc > best:
best = val_acc
torch.save(net.state_dict() , 'mnist_best.pt')
torch.save(net.state_dict() , 'mnist_last.pt')
print("Training complete.Best validation accuracy:", best ) | Digit Recognizer |
12,686,477 | correlation_features = corr_cols.append(corr_cols_small ).drop('SalePrice' )<drop_column> | test_df = np.array(pd.read_csv('/kaggle/input/digit-recognizer/test.csv'))
test_imgs = test_df.reshape(-1, 28, 28)
test_imgs = np.expand_dims(test_imgs, axis=1)/ 255.0
test_dataset = TensorDataset(torch.tensor(test_imgs, dtype=torch.float))
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
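# Reload the best checkpoint and predict labels for the unlabelled test images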
net.load_state_dict(torch.load('mnist_best.pt'))
net.eval()
test_results = []
for data in test_loader:
inputs = data[0].to(device)
outputs = net(inputs)
_, predicted = torch.max(outputs.data, axis=1)
test_results.extend(predicted.tolist() ) | Digit Recognizer |
12,686,477 | <define_variables><EOS> | num_test = len(test_results)
test_df = pd.DataFrame(np.vstack(( np.arange(1, num_test+1), np.array(test_results)) ).T, columns=['ImageId', 'Label'])
test_df.to_csv('submission.csv', index=False ) | Digit Recognizer |
12,354,348 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<split> | def load_train_data(split=.0):
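# Parse the CSV by hand: skip the header row, shuffle, one-hot the label column, and optionally hold out a validation split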
with open('../input/digit-recognizer/train.csv') as file:
data = np.asarray([x.replace('\n', '').split(',') for x in file.readlines()[1:]])
train_data = data.astype('float32')[:, 1:].reshape(( len(data), 28, 28, 1))
train_labels = to_categorical(data[:, 0])
indices = np.arange(len(data))
np.random.shuffle(indices)
train_data = train_data[indices]
train_labels = train_labels[indices]
if split >.0:
stop = int(len(data)* split)
val_data = train_data[:stop]
val_labels = train_labels[:stop]
train_data = train_data[stop:]
train_labels = train_labels[stop:]
return(train_data, train_labels),(val_data, val_labels)
else:
return(train_data, train_labels)
def load_test_data() :
with open('../input/digit-recognizer/test.csv') as file:
data = np.asarray([x.replace('\n', '').split(',') for x in file.readlines()[1:]])
test_data = data.astype('float32').reshape((data.shape[0], 28, 28, 1)) / 255
return test_data
def get_model() :
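# VGG-style stacks of Conv-BN-ReLU with max pooling and 40% dropout, then a 512-unit dense layer and 10-way softmax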
model = models.Sequential()
model.add(layers.Conv2D(32,(3, 3), use_bias=False, padding='Same', input_shape=(28, 28, 1)))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu'))
model.add(layers.Conv2D(32,(3, 3), use_bias=False, padding='Same'))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu'))
model.add(layers.MaxPooling2D(( 2, 2)))
model.add(layers.Dropout (.4))
model.add(layers.Conv2D(64,(3, 3), use_bias=False, padding='Same'))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu'))
model.add(layers.Conv2D(64,(3, 3), use_bias=False, padding='Same'))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu'))
model.add(layers.MaxPooling2D(( 2, 2)))
model.add(layers.Dropout (.4))
model.add(layers.Conv2D(128,(3, 3), use_bias=False, padding='Same'))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu'))
model.add(layers.Conv2D(128,(3, 3), use_bias=False, padding='Same'))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu'))
model.add(layers.Conv2D(128,(3, 3), use_bias=False, padding='Same'))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu'))
model.add(layers.MaxPooling2D(( 2, 2)))
model.add(layers.Dropout (.4))
model.add(layers.Flatten())
model.add(layers.Dense(512, use_bias=False))
model.add(layers.BatchNormalization())
model.add(layers.Activation('relu'))
model.add(layers.Dropout (.4))
model.add(layers.Dense(10, activation='softmax'))
model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
def plot_losses(history):
train_loss = history.history.get('loss')
val_loss = history.history.get('val_loss', None)
train_acc = history.history.get('accuracy')
val_acc = history.history.get('val_accuracy', None)
epochs = np.arange(len(train_loss)) + 1
plt.figure(figsize=(10, 4))
plt.subplot(121)
plt.plot(epochs, train_loss, color='blue', label='Training loss')
if val_loss:
plt.plot(epochs, val_loss, color='red', label='Validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.grid()
plt.subplot(122)
plt.plot(epochs, train_acc, color='blue', label='Training acc')
if val_acc:
plt.plot(epochs, val_acc, color='red', label='Validation acc')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.grid()
plt.show()
def confirm_submission(predict):
data = [f'{n + 1},{x}\n' for n, x in enumerate(predict)]
with open('submission.csv', 'w') as file:
file.write('ImageId,Label\n')
for line in data:
file.write(line)
| Digit Recognizer |
12,354,348 | train_x, val_x, train_y, val_y = train_test_split(train, target, train_size = 0.8, random_state = RANDOM_STATE )<normalization> | ( train_data, train_labels),(val_data, val_labels)= load_train_data(split=.2)
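# Real-time augmentation for training (small rotations, zooms, shifts) with 1/255 rescaling; the validation generator only rescales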
batch_size = 64
train_gen = ImageDataGenerator(
rescale=1./255,
rotation_range=10,
zoom_range=.1,
width_shift_range=.1,
height_shift_range=.1,
fill_mode='nearest' ).flow(
train_data, train_labels,
batch_size=batch_size)
val_gen = ImageDataGenerator(
rescale=1./255 ).flow(
val_data, val_labels,
batch_size=batch_size)
model = get_model()
history = model.fit(train_gen,
steps_per_epoch=len(train_data)// batch_size,
validation_data=val_gen,
validation_steps=len(val_data)// batch_size,
epochs=100)
plot_losses(history)
| Digit Recognizer |
12,354,348 | std_scaler = StandardScaler()
std_scaler.fit(train_x)
train_x = std_scaler.transform(train_x)
val_x = std_scaler.transform(val_x)
test_set = std_scaler.transform(test_set)
<define_variables> | train_data, train_labels = load_train_data()
batch_size = 64
train_gen = ImageDataGenerator(
rescale=1./255,
rotation_range=10,
zoom_range=.1,
width_shift_range=.1,
height_shift_range=.1,
fill_mode='nearest' ).flow(
train_data, train_labels,
batch_size=batch_size)
model = get_model()
history = model.fit(train_gen,
steps_per_epoch=len(train_data)// batch_size,
epochs=100)
model.save('mnist.h5')
plot_losses(history ) | Digit Recognizer |
12,354,348 | <split><EOS> | test_data = load_test_data()
model = models.load_model('mnist.h5')
predict = np.argmax(model.predict(test_data), axis=1)
confirm_submission(predict ) | Digit Recognizer |
13,111,649 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<normalization> | import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os | Digit Recognizer |
13,111,649 | std_scaler_base = StandardScaler()
std_scaler_base.fit(train_x_base)
train_x_base = std_scaler_base.transform(train_x_base)
val_x_base = std_scaler_base.transform(val_x_base )<train_model> | train_input = "../input/digit-recognizer/train.csv"
test_input = "../input/digit-recognizer/test.csv"
train_dataset = pd.read_csv(train_input)
test_dataset = pd.read_csv(test_input ) | Digit Recognizer |
13,111,649 | logreg = LinearRegression()
start_time = time.time()
logreg.fit(train_x_base, train_y_base)
time_lapse = time.time() - start_time
model_type.append('LogReg Baseline')
model_train_time.append(time_lapse)
time_lapse
<predict_on_test> | train_labels = tf.keras.utils.to_categorical(train_dataset.pop("label")) | Digit Recognizer |
13,111,649 | logreg_pred_train = logreg.predict(train_x_base)
logreg_train_rmse = evaluation_metric(logreg_pred_train, train_y_base)
model_rmse_train.append(logreg_train_rmse)
logreg_train_rmse<predict_on_test> | train_dataset = np.array(train_dataset.values.reshape(-1, 28, 28, 1))
test_dataset = np.array(test_dataset.values.reshape(-1, 28, 28, 1)) | Digit Recognizer |
13,111,649 | start_time = time.time()
logreg_pred_val = logreg.predict(val_x_base)
time_lapse = time.time() - start_time
model_pred_time.append(time_lapse )<compute_test_metric> | train_dataset = train_dataset / 255.0
test_dataset = test_dataset / 255.0 | Digit Recognizer |
13,111,649 | %time logreg_val_rmse = evaluation_metric(logreg_pred_val, val_y_base )<concatenate> | checkpoint_path = "logs/checkpoints/" | Digit Recognizer |
13,111,649 | model_rmse_val.append(logreg_val_rmse)
logreg_val_rmse<create_dataframe> | model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(64,(3, 3), input_shape=(28, 28, 1), activation=tf.nn.relu, padding="SAME"),
tf.keras.layers.MaxPooling2D() ,
tf.keras.layers.Conv2D(64,(3, 3), activation=tf.nn.relu, padding="SAME"),
tf.keras.layers.MaxPooling2D() ,
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Conv2D(128,(3, 3), activation=tf.nn.relu, padding="SAME"),
tf.keras.layers.MaxPooling2D() ,
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.Conv2D(128,(3, 3), activation=tf.nn.relu, padding="SAME"),
tf.keras.layers.MaxPooling2D() ,
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.Flatten() ,
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10, activation="softmax")
])
epochs=50
callbacks = [
tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
monitor="accuracy",
save_best_only=True,
save_weights_only=True),
tf.keras.callbacks.EarlyStopping(monitor="accuracy", patience=15)
]
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
history = model.fit(train_dataset, train_labels, epochs=epochs, callbacks=callbacks, batch_size=64 ) | Digit Recognizer |
13,111,649 | result = pd.DataFrame(
{
'feature': model_type,
'train time': model_train_time,
'validation prediction time':model_pred_time,
'rmse train':model_rmse_train,
'rmse validation':model_rmse_val
}
)
result.sort_values(by='rmse validation' )<split> | model.load_weights(checkpoint_path ) | Digit Recognizer |
13,111,649 | train_x_corr, val_x_corr, train_y_corr, val_y_corr = train_test_split(train[correlation_features], target, train_size = 0.8, random_state = RANDOM_STATE)
logreg_corr = LinearRegression()
start_time = time.time()
logreg_corr.fit(train_x_corr, train_y_corr)
time_lapse = time.time() - start_time
model_type.append('LogReg - Correlation Features')
model_train_time.append(time_lapse)
time_lapse<predict_on_test> | labels = [np.argmax(predict)for predict in model.predict(test_dataset)]
df = pd.DataFrame({
"ImageId": list(range(1, len(test_dataset)+1)) ,
"Label": labels,
} ) | Digit Recognizer |
13,111,649 | logreg_corr_pred_train = logreg_corr.predict(train_x_corr)
logreg_corr_train_rmse = evaluation_metric(logreg_corr_pred_train, train_y_corr)
model_rmse_train.append(logreg_corr_train_rmse)
logreg_corr_train_rmse<predict_on_test> | df.to_csv("submission.csv", index=False ) | Digit Recognizer |
13,111,649 | <compute_train_metric><EOS> | model.save("model.h5" ) | Digit Recognizer |
14,439,479 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<create_dataframe> | %matplotlib inline
| Digit Recognizer |
14,439,479 | result = pd.DataFrame(
{
'feature': model_type,
'train time': model_train_time,
'validation prediction time':model_pred_time,
'rmse train':model_rmse_train,
'rmse validation':model_rmse_val
}
)
result.sort_values(by='rmse validation' )<compute_train_metric> | tf.__version__ | Digit Recognizer |
14,439,479 | def make_mi_scores(train_data, target_data):
mi_scores = mutual_info_regression(train_data, target_data)
mi_scores = pd.Series(mi_scores, name="MI Scores", index=train.columns)
mi_scores = mi_scores.sort_values(ascending=False)
return mi_scores
mi_scores = make_mi_scores(train_x, train_y)
mi_scores<filter> | train = pd.read_csv('../input/digit-recognizer/train.csv')
test = pd.read_csv('../input/digit-recognizer/test.csv')
sub = pd.read_csv('../input/digit-recognizer/sample_submission.csv' ) | Digit Recognizer |
14,439,479 | mi_features = mi_scores[mi_scores > 0.1].index.values<split> | X = train.drop(['label'], axis=1 ).values
y = train['label'].values | Digit Recognizer |
14,439,479 | train_x_mi, val_x_mi, train_y_mi, val_y_mi = train_test_split(train[mi_features], target, train_size = 0.8, random_state = RANDOM_STATE)
logreg_mi = LinearRegression()
start_time = time.time()
logreg_mi.fit(train_x_mi, train_y_mi)
time_lapse = time.time() - start_time
model_type.append('LogReg - Mutual Information Features')
model_train_time.append(time_lapse)
time_lapse<predict_on_test> | train_X, val_X, train_y, val_y = train_test_split(X, y, test_size = 0.2 ) | Digit Recognizer |
14,439,479 | logreg_mi_pred_train = logreg_mi.predict(train_x_mi)
logreg_mi_train_rmse = evaluation_metric(logreg_mi_pred_train, train_y_mi)
model_rmse_train.append(logreg_mi_train_rmse)
logreg_mi_train_rmse<predict_on_test> | train_X = train_X.reshape(-1, 28, 28, 1 ).astype("float32")/255.0
val_X = val_X.reshape(-1, 28, 28, 1 ).astype("float32")/255.0
test = test.values.reshape(-1, 28, 28, 1 ).astype("float32")/255.0
train_y = tf.keras.utils.to_categorical(train_y)
val_y = tf.keras.utils.to_categorical(val_y ) | Digit Recognizer |
14,439,479 | start_time = time.time()
logreg_mi_pred_val = logreg_mi.predict(val_x_mi)
time_lapse = time.time() - start_time
model_pred_time.append(time_lapse )<compute_train_metric> | ! git clone https://github.com/bckenstler/CLR.git | Digit Recognizer |
14,439,479 | logreg_mi_val_rmse = evaluation_metric(logreg_mi_pred_val, val_y_mi)
model_rmse_val.append(logreg_mi_val_rmse)
logreg_mi_val_rmse<create_dataframe> | clr_triangular = CyclicLR(mode='triangular' ) | Digit Recognizer |
14,439,479 | result = pd.DataFrame(
{
'feature': model_type,
'train time': model_train_time,
'validation prediction time':model_pred_time,
'rmse train':model_rmse_train,
'rmse validation':model_rmse_val
}
)
result.sort_values(by='rmse validation' )<train_model> | class myCallback(tf.keras.callbacks.Callback):
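# Custom callback: stop training once training accuracy exceeds 99.99%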
def on_epoch_end(self, epoch, logs={}):
if(logs.get('accuracy')>0.9999):
print("
Reached 99.99% accuracy so cancelling training!")
self.model.stop_training = True
callbacks = myCallback()
learning_rate_reduction = tf.keras.callbacks.ReduceLROnPlateau(monitor = 'val_loss', patience = 3, verbose = 1, factor = 0.5, min_lr = 1e-6 ) | Digit Recognizer |
14,439,479 | lreg = Lasso(random_state = RANDOM_STATE)
start_time = time.time()
lreg.fit(train_x, train_y)
time_lapse = time.time() - start_time
model_type.append('Lasso')
model_train_time.append(time_lapse)
time_lapse<predict_on_test> | IMG_SIZE = 28
BATCH_SIZE = 64
AUTOTUNE = tf.data.experimental.AUTOTUNE
num_classes = 10
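# Stateless, seeded augmentation: pad to 34x34, random-crop back to 28x28, jitter brightness, clip to [0, 1]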
def augment(image_label, seed):
image, label = image_label
image = tf.image.resize_with_crop_or_pad(
image,
IMG_SIZE + 6,
IMG_SIZE + 6
)
new_seed = tf.random.experimental.stateless_split(seed, num=1)[0,:]
image = tf.image.stateless_random_crop(
image,
size=(BATCH_SIZE, IMG_SIZE, IMG_SIZE, 1),
seed=seed
)
image = tf.image.stateless_random_brightness(
image,
max_delta=0.5,
seed=new_seed
)
image = tf.clip_by_value(
image,
0,
1)
return image, label
def create_train_val(train_ds, val_ds):
train_ds =(
train_ds
.shuffle(1000)
.map(f, num_parallel_calls=AUTOTUNE)
.prefetch(AUTOTUNE)
)
val_ds =(
val_ds
.prefetch(AUTOTUNE)
)
return train_ds, val_ds
def create_model(train_ds, val_ds, epochs):
model = tf.keras.Sequential([
layers.Conv2D(32,(3, 3), activation = 'relu', padding = 'same', input_shape=(28,28,1)) ,
layers.Conv2D(32,(3, 3), activation = 'relu', padding = 'same'),
layers.BatchNormalization() ,
layers.MaxPooling2D(2, 2),
layers.Dropout(0.2),
layers.Conv2D(64,(3, 3), activation = 'relu', padding = 'same'),
layers.Conv2D(64,(3, 3), activation = 'relu', padding = 'same'),
layers.BatchNormalization() ,
layers.MaxPooling2D(2, 2),
layers.Dropout(0.2),
layers.Conv2D(128,(3, 3), activation = 'relu', padding = 'same'),
layers.Conv2D(128,(3, 3), activation = 'relu', padding = 'same'),
layers.BatchNormalization() ,
layers.MaxPooling2D(2, 2),
layers.Dropout(0.2),
layers.Flatten() ,
layers.Dense(512, activation = 'relu'),
layers.BatchNormalization() ,
layers.Dropout(0.2),
layers.Dense(256, activation = 'relu'),
layers.BatchNormalization() ,
layers.Dropout(0.35),
layers.Dense(10, activation = 'softmax')
])
model.compile(optimizer=Adam(0.1),
loss=tf.keras.losses.CategoricalCrossentropy() ,
metrics=['accuracy'])
print(model.summary())
return model
rng = tf.random.Generator.from_seed(123, alg='philox')
def f(x, y):
seed = rng.make_seeds(2)[0]
image, label = augment(( x, y), seed)
return image, label
train_ds = tf.data.Dataset.from_tensor_slices(( train_X, train_y)).batch(BATCH_SIZE)
val_ds = tf.data.Dataset.from_tensor_slices(( val_X, val_y)).batch(BATCH_SIZE)
test_ds = tf.data.Dataset.from_tensor_slices(( test)).batch(BATCH_SIZE)
train_ds, val_ds = create_train_val(train_ds, val_ds)
cnn_model = create_model(train_ds, val_ds, 100 ) | Digit Recognizer |
14,439,479 | lreg_pred_train = lreg.predict(train_x)
lreg_pred_train_rmse = evaluation_metric(lreg_pred_train, train_y)
model_rmse_train.append(lreg_pred_train_rmse)
lreg_pred_train_rmse<predict_on_test> | startTime = timeit.default_timer()
history = cnn_model.fit(
train_ds,
steps_per_epoch = train_X.shape[0] // BATCH_SIZE,
epochs = 50,
validation_data = val_ds,
validation_steps = val_X.shape[0] // BATCH_SIZE,
callbacks = [callbacks, learning_rate_reduction]
)
elapsedTime = timeit.default_timer() - startTime
print("Time taken for the Network to train : ",elapsedTime ) | Digit Recognizer |
14,439,479 | start_time = time.time()
lreg_pred = lreg.predict(val_x)
time_lapse = time.time() - start_time
model_pred_time.append(time_lapse )<compute_test_metric> | startTime = timeit.default_timer()
history = cnn_model.fit(
train_ds,
steps_per_epoch = train_X.shape[0] // BATCH_SIZE,
epochs = 50,
validation_data = val_ds,
validation_steps = val_X.shape[0] // BATCH_SIZE,
callbacks = [clr_triangular]
)
elapsedTime = timeit.default_timer() - startTime
print("Time taken for the Network to train : ",elapsedTime ) | Digit Recognizer |
14,439,479 | lreg_rmse = evaluation_metric(lreg_pred, val_y)
model_rmse_val.append(lreg_rmse)
lreg_rmse<create_dataframe> | prediction = cnn_model.predict(test ).argmax(axis=1)
sub['Label'] = prediction
sub.to_csv("MNIST_sub_cnn1.csv", index=False)
sub.head() | Digit Recognizer |
14,184,820 | result = pd.DataFrame(
{
'feature': model_type,
'train time': model_train_time,
'validation prediction time':model_pred_time,
'rmse train':model_rmse_train,
'rmse validation':model_rmse_val
}
)
result.sort_values(by='rmse validation' )<train_model> | np.random.seed(42)
torch.manual_seed(42)
14,184,820 | rfr=RandomForestRegressor(max_features=0.4, n_estimators=1000, random_state=RANDOM_STATE)
start_time = time.time()
rfr.fit(train_x, train_y)
time_lapse = time.time() - start_time
model_type.append('Random Forest')
model_train_time.append(time_lapse)
time_lapse<predict_on_test> | train_df = pd.read_csv(dirname+'/train.csv')
test_df = pd.read_csv(dirname + '/test.csv')
train_df.head() | Digit Recognizer |
14,184,820 | rfr_pred_train = rfr.predict(train_x)
rfr_pred_train_rmse = evaluation_metric(rfr_pred_train, train_y)
model_rmse_train.append(rfr_pred_train_rmse)
rfr_pred_train_rmse<predict_on_test> | train = train_df.to_numpy()
test = test_df.to_numpy() | Digit Recognizer |
14,184,820 | start_time = time.time()
rfr_pred_val = rfr.predict(val_x)
time_lapse = time.time() - start_time
model_pred_time.append(time_lapse)
<compute_test_metric> | @Transform
def get_label(a):
return a[0]
@Transform
def get_x(a):
return a[-28*28:]
@Transform
def nums_to_tensor(a):
return TensorImageBW(torch.from_numpy(a ).view(1,28,28))
| Digit Recognizer |
14,184,820 | rfr_rmse = evaluation_metric(rfr_pred_val, val_y)
model_rmse_val.append(rfr_rmse)
rfr_rmse<create_dataframe> | x_tfms = [get_x,nums_to_tensor, ]
y_tfms = [get_label, Categorize]
cut = int(len(train)*0.8)
splits = [list(range(cut)) , list(range(cut,len(train)-1)) ]
bs = 128 | Digit Recognizer |
14,184,820 | result = pd.DataFrame(
{
'feature': model_type,
'train time': model_train_time,
'validation prediction time':model_pred_time,
'rmse train':model_rmse_train,
'rmse validation':model_rmse_val
}
)
result.sort_values(by='rmse validation' )<train_model> | dsets = Datasets(train,[x_tfms, y_tfms], splits = splits)
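# fastai Datasets over the raw numpy rows; the DataLoaders add float conversion, non-flip augmentation, and MNIST normalization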
dls = dsets.dataloaders(bs=bs, after_batch=[IntToFloatTensor() ,*aug_transforms(do_flip=False,batch=True,), Normalize.from_stats(*mnist_stats)] ) | Digit Recognizer |
14,184,820 | mnet = MLPRegressor(hidden_layer_sizes=(200,200,100, 100, 50, 25), activation = 'logistic', alpha = 0.001, max_iter = 1000, early_stopping = True, random_state = RANDOM_STATE)
start_time = time.time()
mnet.fit(train_x, train_y)
time_lapse = time.time() - start_time
model_type.append('Neural Net')
model_train_time.append(time_lapse)
time_lapse
<predict_on_test> | test_dl =dls.test_dl(test ) | Digit Recognizer |
14,184,820 | mnet_pred_train = mnet.predict(train_x)
mnet_pred_train_rmse = evaluation_metric(mnet_pred_train, train_y)
model_rmse_train.append(mnet_pred_train_rmse)
mnet_pred_train_rmse<predict_on_test> | model = xresnet18(c_in=1,n_out=10, sa=True, act_cls=Mish ) | Digit Recognizer |
14,184,820 | start_time = time.time()
mnet_pred_val = mnet.predict(val_x)
time_lapse = time.time() - start_time
model_pred_time.append(time_lapse )<compute_test_metric> | learn = Learner(dls,model, loss_func=CrossEntropyLossFlat() , metrics=accuracy, ) | Digit Recognizer |
14,184,820 | mnet_rmse = evaluation_metric(mnet_pred_val, val_y)
model_rmse_val.append(mnet_rmse)
mnet_rmse<create_dataframe> | lr_min, lr_steep = learn.lr_find() ;lr_min, lr_steep | Digit Recognizer |
14,184,820 | result = pd.DataFrame(
{
'feature': model_type,
'train time': model_train_time,
'validation prediction time':model_pred_time,
'rmse train':model_rmse_train,
'rmse validation':model_rmse_val
}
)
result.sort_values(by='rmse validation' )<train_model> | learn.fit_one_cycle(20,lr_min ) | Digit Recognizer |
14,184,820 | params = {'n_estimators': 1000,
'max_depth': 5,
'min_samples_split': 6,
'learning_rate': 0.01,
'loss': 'ls',
'max_features':0.25,
'random_state':RANDOM_STATE}
gbm = GradientBoostingRegressor(**params)
start_time = time.time()
gbm.fit(train_x, train_y)
time_lapse = time.time() - start_time
model_type.append('Gradient Boosting')
model_train_time.append(time_lapse)
time_lapse<predict_on_test> | learn.save('20epoch_Xresnet18-sa' ) | Digit Recognizer |
14,184,820 | gbm_pred_train = gbm.predict(train_x)
gbm_pred_train_rmse = evaluation_metric(gbm_pred_train, train_y)
model_rmse_train.append(gbm_pred_train_rmse)
gbm_pred_train_rmse<predict_on_test> | learn = learn.load('20epoch_Xresnet18-sa' ) | Digit Recognizer |
14,184,820 | start_time = time.time()
gbm_pred_val = gbm.predict(val_x)
time_lapse = time.time() - start_time
model_pred_time.append(time_lapse )<compute_test_metric> | learn.fit_one_cycle(2,lr_min/50 ) | Digit Recognizer |
14,184,820 | gbm_rmse = evaluation_metric(gbm_pred_val, val_y)
model_rmse_val.append(gbm_rmse)
gbm_rmse<create_dataframe> | learn.save('20+2epoch_Xresnet18-sa' ) | Digit Recognizer |
14,184,820 | result = pd.DataFrame(
{
'feature': model_type,
'train time': model_train_time,
'validation prediction time':model_pred_time,
'rmse train':model_rmse_train,
'rmse validation':model_rmse_val
}
)
result.sort_values(by='rmse validation' )<train_model> | preds,targets = learn.tta()
accuracy(preds,targets ) | Digit Recognizer |
14,184,820 | param = {'n_estimators': 1000,
'max_depth': 5,
'learning_rate': 0.01,
'random_state':RANDOM_STATE}
xgb = XGBRegressor(**param)
start_time = time.time()
xgb.fit(train_x, train_y, eval_set=[(train_x, train_y),(val_x, val_y)], early_stopping_rounds = 5, eval_metric = 'rmse')
time_lapse = time.time() - start_time
model_type.append('XGBoost')
model_train_time.append(time_lapse)
time_lapse<predict_on_test> | preds,_ = learn.tta(dl=test_dl ) | Digit Recognizer |
14,184,820 | xgb_pred_train = xgb.predict(train_x)
xgb_pred_train_rmse = evaluation_metric(xgb_pred_train, train_y)
model_rmse_train.append(xgb_pred_train_rmse)
xgb_pred_train_rmse<predict_on_test> | class_preds =np.argmax(preds,axis=1)
pred_submission = [dls.vocab[i] for i in class_preds] | Digit Recognizer |
14,184,820 | <compute_test_metric><EOS> | submission = pd.read_csv(dirname+'/sample_submission.csv')
submission['Label']=pred_submission
submission.to_csv('submission.csv',index=False ) | Digit Recognizer |
13,630,920 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<create_dataframe> | %matplotlib inline
| Digit Recognizer |
13,630,920 | result = pd.DataFrame(
{
'feature': model_type,
'train time': model_train_time,
'validation prediction time':model_pred_time,
'rmse train':model_rmse_train,
'rmse validation':model_rmse_val
}
)
result.sort_values(by='rmse validation' )<predict_on_test> | train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('Training on CPU...')
else:
print('Training on GPU...')
| Digit Recognizer |
13,630,920 | test_pred = gbm.predict(test_set)
test_result = pd.DataFrame(
{
'Id': test_set_Id,
'SalePrice': np.exp(test_pred),
} )<save_to_csv> | device = torch.device('cuda:0' ) | Digit Recognizer |
13,630,920 | test_result.to_csv('submission_gbm.csv', index=False )<load_from_csv> | df_train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
df_test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' ) | Digit Recognizer |
13,630,920 | p_train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
p_test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv' )<feature_engineering> | df_train.isna().any().any() | Digit Recognizer |
13,630,920 | def create_new_columns(dataset):
dataset['total_square_footage'] = dataset['TotalBsmtSF'] + dataset['GrLivArea']
dataset['remodelling_done'] = dataset.apply(lambda x: verify_remodelling(x['YearRemodAdd'], x['YearBuilt']), axis=1)
dataset['selling_season'] = dataset.apply(lambda x: selling_period(x['MoSold']), axis=1)
dataset['total_area_of_house'] = dataset['TotalBsmtSF'] + dataset['1stFlrSF'] + dataset['2ndFlrSF']
dataset['age_of_building'] = dataset['YearBuilt'].apply(lambda x: pd.datetime.now().year - x)
return dataset
def map_ordinal_features(dataset):
for col in ordinal_cat_features:
dataset[col] = dataset[col].map(ordinal_ranking)
dataset[col] = dataset[col].fillna(0)
return dataset
def log_continious(dataset):
imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
for col in continious_features:
imp_mean.fit(dataset[col].values.reshape(-1,1))
dataset[col] = imp_mean.transform(dataset[col].values.reshape(-1,1))
dataset[col] = np.log(dataset[col] + 1)
return dataset
def nominal_one_hot(dataset):
for col in norminal_features:
dataset[col] = dataset[col].astype(str)
dataset[col] = dataset[col].fillna('Unknown')
return dataset
def discrete_fill(dataset):
for col in discrete_features:
dataset[col] = dataset[col].fillna(0)
return dataset
def target_feature(train):
target = np.log(train['SalePrice'] + 1)
return target<feature_engineering> | train_data = df_train.drop('label',axis=1 ).values | Digit Recognizer |
13,630,920 | target = target_feature(p_train )<categorify> | train_data = train_data/255.0
df_test = df_test/255.0 | Digit Recognizer |
13,630,920 | pipes = Pipeline(steps = [('new_columns', FunctionTransformer(create_new_columns)) ,
('continious_cleanup', FunctionTransformer(log_continious)) ,
('nominal_filling', FunctionTransformer(nominal_one_hot)) ,
('discrete_filling',FunctionTransformer(discrete_fill)) ,
('ordinal_cleanup', FunctionTransformer(map_ordinal_features)) ,
])
pipes.fit(p_train )<categorify> | mask = np.random.rand(len(df_train)) < 0.8
df_val = df_train[~mask]
df_train = df_train[mask]
print('Train size: ', df_train.shape)
print('Val size: ', df_val.shape)
print('Test size: ', df_test.shape)
df_train.head() | Digit Recognizer |
13,630,920 | p_train = pipes.transform(p_train )<categorify> | class DatasetMNIST(torch.utils.data.Dataset):
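# Wraps a DataFrame whose first column is the label and remaining 784 columns are pixels; applies the given transform per image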
def __init__(self, data, transform=None):
self.data = data
self.transform = transform
def __len__(self):
return len(self.data)
def __getitem__(self, index):
item = self.data.iloc[index]
image = item[1:].values.astype(np.uint8 ).reshape(( 28, 28))
label = item[0]
if self.transform is not None:
image = self.transform(image)
return image, label | Digit Recognizer |
13,630,920 | p_test = pipes.transform(p_test )<split> | train_transform = transforms.Compose(
[
transforms.ToPILImage() ,
transforms.ToTensor() ,
transforms.Normalize(mean=train_data.mean() , std=train_data.std()),
])
val_transform = transforms.Compose(
[
transforms.ToPILImage() ,
transforms.ToTensor() ,
transforms.Normalize(mean=train_data.mean() , std=train_data.std()),
])
test_transform = val_transform | Digit Recognizer |
13,630,920 | p_test_Id = p_test['Id']
p_train = p_train.drop(columns=['SalePrice', 'Id'])
p_test = p_test.drop(columns = 'Id')
p_train_x, p_val_x, p_train_y, p_val_y = train_test_split(p_train, target, train_size=0.8, test_size=0.2, random_state = RANDOM_STATE )<categorify> | train_dataset = DatasetMNIST(df_train, transform = train_transform)
validation_dataset = DatasetMNIST(df_val, transform = val_transform)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=16)
validation_loader = torch.utils.data.DataLoader(dataset=validation_dataset, batch_size=16)
| Digit Recognizer |
13,630,920 | categorical_transformer = OneHotEncoder(handle_unknown = "ignore", sparse=False)
numeric_transformer = Pipeline(steps=[
('scaler', StandardScaler())])
preprocessor = ColumnTransformer(remainder='passthrough',
transformers=[
('num', numeric_transformer, selector(dtype_exclude="object")) ,
('cat', categorical_transformer, selector(dtype_include="object"))
] )<choose_model_class> | class CNN(nn.Module):
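# Three conv stages (32, 64, 128 channels) with batch norm, pooling, and dropout; the 128-dim feature vector feeds a 10-way linear layer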
def __init__(self):
super(CNN,self ).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(1, 32, 3, padding=1),
nn.ReLU() ,
nn.BatchNorm2d(32),
nn.Conv2d(32, 32, 3, stride=2, padding=1),
nn.ReLU() ,
nn.BatchNorm2d(32),
nn.MaxPool2d(2, 2),
nn.Dropout(0.25)
)
self.conv2 = nn.Sequential(
nn.Conv2d(32, 64, 3, padding=1),
nn.ReLU() ,
nn.BatchNorm2d(64),
nn.Conv2d(64, 64, 3, stride=2, padding=1),
nn.ReLU() ,
nn.BatchNorm2d(64),
nn.MaxPool2d(2, 2),
nn.Dropout(0.25)
)
self.conv3 = nn.Sequential(
nn.Conv2d(64, 128, 3, padding=1),
nn.ReLU() ,
nn.BatchNorm2d(128),
nn.MaxPool2d(2, 2),
nn.Dropout(0.25)
)
self.fc = nn.Sequential(
nn.Linear(128, 10),
)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = x.view(x.size(0), -1)
return self.fc(x)
model = CNN().to(device)
| Digit Recognizer |
13,630,920 | params = {'n_estimators': 1000,
'max_depth': 5,
'min_samples_split': 6,
'learning_rate': 0.01,
'loss': 'ls',
'max_features':0.25,
'random_state':RANDOM_STATE}
model_gbm = GradientBoostingRegressor(**params)
gbm_pipeline = Pipeline(steps=[
('preprocess', preprocessor),
('model', model_gbm)
])
gbm_pipeline.fit(p_train_x, p_train_y)
gbm_preds = gbm_pipeline.predict(p_val_x)
score = evaluation_metric(gbm_preds, p_val_y)
print('RMSE:', score )<save_to_csv> | LEARNING_RATE = 0.001680
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters() , lr=LEARNING_RATE ) | Digit Recognizer |
13,630,920 | p_test_gbm = gbm_pipeline.predict(p_test)
test_result = pd.DataFrame(
{
'Id': p_test_Id,
'SalePrice': np.exp(p_test_gbm),
})
test_result.to_csv('pipeline_gbm.csv', index=False)
<install_modules> | def train_model(model,train_loader, validation_loader, optimizer, n_epochs=100):
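# Per epoch: one pass of optimizer updates over train_loader, then a full accuracy sweep over validation_loader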
N_test=len(validation_dataset)
accuracy_list=[]
loss_list=[]
for epoch in range(n_epochs):
for x, y in train_loader:
x, y = x.to(device), y.to(device)
model.train()
optimizer.zero_grad()
z = model(x)
loss = criterion(z, y)
loss.backward()
optimizer.step()
loss_list.append(loss.data)
correct=0
for x_test, y_test in validation_loader:
x_test,y_test = x_test.to(device), y_test.to(device)
model.eval()
z = model(x_test)
_, yhat = torch.max(z.data, 1)
correct +=(yhat == y_test ).sum().item()
accuracy = correct / N_test
accuracy_list.append(accuracy)
return accuracy_list, loss_list | Digit Recognizer |
13,630,920 | !pip install ../input/python-datatable/datatable-0.11.0-cp37-cp37m-manylinux2010_x86_64.whl > /dev/null 2>&1<set_options> | accuracy_list, loss_list = train_model(model=model, n_epochs=100, train_loader=train_loader, validation_loader=validation_loader, optimizer=optimizer) | Digit Recognizer |
13,630,920 | _ = np.seterr(divide='ignore', invalid='ignore')
<define_variables> | print(accuracy_list, loss_list ) | Digit Recognizer |
13,630,920 | data_types_dict = {
'timestamp': 'int64',
'user_id': 'int32',
'content_id': 'int16',
'content_type_id':'int8',
'task_container_id': 'int16',
'answered_correctly': 'int8',
'prior_question_elapsed_time': 'float32',
'prior_question_had_explanation': 'bool'
}
target = 'answered_correctly'
<load_from_csv> | test_dataset = DatasetMNIST(df_test, transform = val_transform)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=16 ) | Digit Recognizer |
13,630,920 | print('start read train data...')
train_df = dt.fread('../input/riiid-test-answer-prediction/train.csv', columns=set(data_types_dict.keys())).to_pandas()
<load_from_csv> | device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
x_test = np.expand_dims(x_test, axis=1)
x_test = torch.from_numpy(x_test ).float().to(device)
x_test.type() | Digit Recognizer |
13,630,920 | lectures_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/lectures.csv')
<categorify> | model.eval()
with torch.no_grad() :
ps = model(x_test)
prediction = torch.argmax(ps, 1)
print('Prediction',prediction ) | Digit Recognizer |
13,630,920 | lectures_df['type_of'] = lectures_df['type_of'].replace('solving question', 'solving_question')
lectures_df = pd.get_dummies(lectures_df, columns=['part', 'type_of'])
part_lectures_columns = [column for column in lectures_df.columns if column.startswith('part')]
types_of_lectures_columns = [column for column in lectures_df.columns if column.startswith('type_of_')]
<merge> | df_export = pd.DataFrame(prediction.cpu().tolist() , columns = ['Label'])
df_export['ImageId'] = df_export.index +1
df_export = df_export[['ImageId', 'Label']]
df_export.head() | Digit Recognizer |
13,630,920 | train_lectures = train_df[train_df.content_type_id == True].merge(lectures_df, left_on='content_id', right_on='lecture_id', how='left')
user_lecture_stats_part = train_lectures.groupby('user_id',as_index = False)[part_lectures_columns + types_of_lectures_columns].sum()
<data_type_conversions> | df_export.to_csv('output.csv', index=False ) | Digit Recognizer |
14,296,580 | lecturedata_types_dict = {
'user_id': 'int32',
'part_1': 'int8',
'part_2': 'int8',
'part_3': 'int8',
'part_4': 'int8',
'part_5': 'int8',
'part_6': 'int8',
'part_7': 'int8',
'type_of_concept': 'int8',
'type_of_intention': 'int8',
'type_of_solving_question': 'int8',
'type_of_starter': 'int8'
}
user_lecture_stats_part = user_lecture_stats_part.astype(lecturedata_types_dict)
<data_type_conversions> | %matplotlib inline | Digit Recognizer |
14,296,580 | for column in user_lecture_stats_part.columns:
if(column !='user_id'):
user_lecture_stats_part[column] =(user_lecture_stats_part[column] > 0 ).astype('int8')
user_lecture_stats_part.dtypes
<drop_column> | train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' ) | Digit Recognizer |
14,296,580 | del(train_lectures)
gc.collect()
<data_type_conversions> | train_set = train.iloc[:,1:]
label = train["label"] | Digit Recognizer |
14,296,580 | user_lecture_agg = train_df.groupby('user_id')['content_type_id'].agg(['sum', 'count'])
user_lecture_agg=user_lecture_agg.astype('int16')
<data_type_conversions> | norm_train_set = train_set / 255
norm_test_set = test / 255 | Digit Recognizer |
14,296,580 | cum = train_df.groupby('user_id')['content_type_id'].agg(['cumsum', 'cumcount'])
cum['cumcount']=cum['cumcount']+1
train_df['user_interaction_count'] = cum['cumcount']
train_df['user_interaction_timestamp_mean'] = train_df['timestamp']/cum['cumcount']
train_df['user_lecture_sum'] = cum['cumsum']
train_df['user_lecture_lv'] = cum['cumsum'] / cum['cumcount']
train_df.user_lecture_lv=train_df.user_lecture_lv.astype('float16')
train_df.user_lecture_sum=train_df.user_lecture_sum.astype('int16')
train_df.user_interaction_count=train_df.user_interaction_count.astype('int16')
train_df['user_interaction_timestamp_mean']=train_df['user_interaction_timestamp_mean']/(1000*3600)
train_df.user_interaction_timestamp_mean=train_df.user_interaction_timestamp_mean.astype('float32')
<set_options> | X_train, X_validate, y_train, y_validate = train_test_split(norm_train_set, label, test_size = 0.1 ) | Digit Recognizer |
14,296,580 | del cum
gc.collect()<data_type_conversions> | X_train = torch.from_numpy(X_train.values.reshape(-1,1,28,28))
X_validate = torch.from_numpy(X_validate.values.reshape(-1,1,28,28))
testing_set = torch.from_numpy(norm_test_set.values.reshape(-1,1,28,28))
y_train = torch.from_numpy(y_train.values)
y_validate = torch.from_numpy(y_validate.values ) | Digit Recognizer |
14,296,580 | train_df['prior_question_had_explanation'].fillna(False, inplace=True)
train_df = train_df.astype(data_types_dict)
train_df = train_df[train_df[target] != -1].reset_index(drop=True)
<groupby> | training_set = torch.utils.data.TensorDataset(X_train.float() , y_train)
validating_set = torch.utils.data.TensorDataset(X_validate.float() , y_validate)
testing_set = torch.utils.data.TensorDataset(testing_set.float() ) | Digit Recognizer |
14,296,580 | content_explation_agg=train_df[["content_id","prior_question_had_explanation",target]].groupby(["content_id","prior_question_had_explanation"])[target].agg(['mean'])
<data_type_conversions> | train_loader = DataLoader(training_set, shuffle=True, batch_size = 88)
validate_loader = DataLoader(validating_set, shuffle=False, batch_size = 88)
test_set = DataLoader(testing_set, shuffle=False, batch_size = 88 ) | Digit Recognizer |
14,296,580 | content_explation_agg=content_explation_agg.unstack()
content_explation_agg=content_explation_agg.reset_index()
content_explation_agg.columns = ['content_id', 'content_explation_false_mean','content_explation_true_mean']
content_explation_agg.content_id=content_explation_agg.content_id.astype('int16')
content_explation_agg.content_explation_false_mean=content_explation_agg.content_explation_false_mean.astype('float16')
content_explation_agg.content_explation_true_mean=content_explation_agg.content_explation_true_mean.astype('float16')
<data_type_conversions> | class CNN_DigitClassifier(nn.Module):
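# Feature extractor: two 5x5 and two 3x3 conv blocks with pooling and dropout, then a 576->256->10 classifier head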
def __init__(self):
super(CNN_DigitClassifier, self ).__init__()
self.features = nn.Sequential(
nn.Conv2d(1, 32, 5),
nn.ReLU(inplace=True),
nn.Conv2d(32, 32, 5),
nn.ReLU(inplace=True),
nn.MaxPool2d(2,2),
nn.Dropout(0.25),
nn.Conv2d(32, 64, 3),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, 3),
nn.ReLU(inplace=True),
nn.MaxPool2d(2),
nn.Dropout(0.25))
self.classification = nn.Sequential(
nn.Linear(576, 256),
nn.Dropout(0.25),
nn.Linear(256, 10))
def forward(self, images):
images = self.features(images)
images = images.view(images.shape[0], -1)
output = self.classification(images)
return output | Digit Recognizer |
14,296,580 | train_df["attempt_no"] = 1
train_df.attempt_no=train_df.attempt_no.astype('int8')
attempt_no_agg=train_df.groupby(["user_id","content_id"])["attempt_no"].agg(['sum'] ).astype('int8')
train_df["attempt_no"] = train_df[["user_id","content_id",'attempt_no']].groupby(["user_id","content_id"])["attempt_no"].cumsum()
attempt_no_agg=attempt_no_agg[attempt_no_agg['sum'] >1]
<data_type_conversions> | model = CNN_DigitClassifier()
optimizer = optim.RMSprop(model.parameters() , lr=0.001, alpha=0.9)
criterion = nn.CrossEntropyLoss()
lr_reduction = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=3, threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0.00001)
if torch.cuda.is_available() :
model = model.cuda()
criterion = criterion.cuda() | Digit Recognizer |
14,296,580 | print('start handle timestamp...')
prior_question_elapsed_time_mean=train_df['prior_question_elapsed_time'].mean()
train_df['prior_question_elapsed_time'].fillna(prior_question_elapsed_time_mean, inplace=True)
<data_type_conversions> | count = 0
losses = []
iteration_list = []
training_accuracy = []
validation_accuracy = []
training_loss = []
validation_loss = [] | Digit Recognizer |
14,296,580 | max_timestamp_u = train_df[['user_id','timestamp']].groupby(['user_id'] ).agg(['max'] ).reset_index()
max_timestamp_u.columns = ['user_id', 'max_time_stamp']
max_timestamp_u.user_id=max_timestamp_u.user_id.astype('int32')
train_df['lagtime'] = train_df.groupby('user_id')['timestamp'].shift()
max_timestamp_u2 = train_df[['user_id','lagtime']].groupby(['user_id'] ).agg(['max'] ).reset_index()
max_timestamp_u2.columns = ['user_id', 'max_time_stamp2']
max_timestamp_u2.user_id=max_timestamp_u2.user_id.astype('int32')
train_df['lagtime']=train_df['timestamp']-train_df['lagtime']
lagtime_mean=train_df['lagtime'].mean()
train_df['lagtime'].fillna(lagtime_mean, inplace=True)
train_df['lagtime']=train_df['lagtime']/(1000*3600)
train_df.lagtime=train_df.lagtime.astype('float32')
<data_type_conversions> | def train(epoch):
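# One training epoch: forward pass, cross-entropy loss, backprop, and an optimizer step per batch; loss recorded every 100 batches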
global count
model.train()
for batch_idx,(data, target)in enumerate(train_loader):
data, target = Variable(data), Variable(target)
if torch.cuda.is_available() :
data = data.cuda()
target = target.cuda()
optimizer.zero_grad()
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
if(batch_idx + 1)% 100 == 0:
losses.append(loss.item())
iteration_list.append(count)
count += 1 | Digit Recognizer |
14,296,580 | train_df['lagtime2'] = train_df.groupby('user_id')['timestamp'].shift(2)
max_timestamp_u3 = train_df[['user_id','lagtime2']].groupby(['user_id'] ).agg(['max'] ).reset_index()
max_timestamp_u3.columns = ['user_id', 'max_time_stamp3']
max_timestamp_u3.user_id=max_timestamp_u3.user_id.astype('int32')
train_df['lagtime2']=train_df['timestamp']-train_df['lagtime2']
lagtime_mean2=train_df['lagtime2'].mean()
train_df['lagtime2'].fillna(lagtime_mean2, inplace=True)
train_df['lagtime2']=train_df['lagtime2']/(1000*3600)
train_df.lagtime2=train_df.lagtime2.astype('float32')
train_df['lagtime3'] = train_df.groupby('user_id')['timestamp'].shift(3)
train_df['lagtime3']=train_df['timestamp']-train_df['lagtime3']
lagtime_mean3=train_df['lagtime3'].mean()
train_df['lagtime3'].fillna(lagtime_mean3, inplace=True)
train_df['lagtime3']=train_df['lagtime3']/(1000*3600)
train_df.lagtime3=train_df.lagtime3.astype('float32')
<feature_engineering> | def evaluate(data_loader, validate=False):
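# Average cross-entropy and accuracy over a loader; on training data it also drives the ReduceLROnPlateau scheduler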
model.eval()
loss = 0
correct = 0
for data, target in data_loader:
data, target = Variable(data), Variable(target)
if torch.cuda.is_available() :
data = data.cuda()
target = target.cuda()
output = model(data)
loss += F.cross_entropy(output, target, size_average=False ).item()
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
loss /= len(data_loader.dataset)
accuracy = 100.* correct / len(data_loader.dataset)
if not validate:
lr_reduction.step(loss)
training_accuracy.append(accuracy)
training_loss.append(loss)
else:
validation_accuracy.append(accuracy)
validation_loss.append(loss ) | Digit Recognizer |
14,296,580 | train_df['timestamp']=train_df['timestamp']/(1000*3600)
train_df.timestamp=train_df.timestamp.astype('float16')
user_prior_question_elapsed_time = train_df[['user_id','prior_question_elapsed_time']].groupby(['user_id'] ).tail(1)
user_prior_question_elapsed_time.columns = ['user_id', 'prior_question_elapsed_time']
train_df['delta_prior_question_elapsed_time'] = train_df.groupby('user_id')['prior_question_elapsed_time'].shift()
train_df['delta_prior_question_elapsed_time']=train_df['prior_question_elapsed_time']-train_df['delta_prior_question_elapsed_time']
delta_prior_question_elapsed_time_mean=train_df['delta_prior_question_elapsed_time'].mean()
train_df['delta_prior_question_elapsed_time'].fillna(delta_prior_question_elapsed_time_mean, inplace=True)
train_df.delta_prior_question_elapsed_time=train_df.delta_prior_question_elapsed_time.astype('int32')
train_df['lag'] = train_df.groupby('user_id')[target].shift()
cum = train_df.groupby('user_id')['lag'].agg(['cumsum', 'cumcount'])
user_agg = train_df.groupby('user_id')['lag'].agg(['sum', 'count'] ).astype('int16')
cum['cumsum'].fillna(0, inplace=True)
train_df['user_correctness'] = cum['cumsum'] / cum['cumcount']
train_df['user_correct_count'] = cum['cumsum']
train_df['user_uncorrect_count'] = cum['cumcount']-cum['cumsum']
train_df.drop(columns=['lag'], inplace=True)
train_df['user_correctness'].fillna(0.67, inplace=True)
train_df.user_correctness=train_df.user_correctness.astype('float16')
train_df.user_correct_count=train_df.user_correct_count.astype('int16')
train_df.user_uncorrect_count=train_df.user_uncorrect_count.astype('int16')
del cum
gc.collect()
<data_type_conversions> | def prediction(data_loader):
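# Collect argmax predictions batch by batch into one LongTensor for the submission file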
model.eval()
test_pred = torch.LongTensor()
for batch_idx, data in enumerate(data_loader):
data = Variable(data[0])
if torch.cuda.is_available() :
data = data.cuda()
output = model(data)
pred = output.cpu().data.max(1, keepdim=True)[1]
test_pred = torch.cat(( test_pred, pred), dim=0)
return test_pred
| Digit Recognizer |
14,296,580 | train_df.prior_question_had_explanation=train_df.prior_question_had_explanation.astype('int8')
explanation_agg = train_df.groupby('user_id')['prior_question_had_explanation'].agg(['sum', 'count'])
explanation_agg=explanation_agg.astype('int16' )<data_type_conversions> | test_prediction = prediction(test_set ) | Digit Recognizer |
14,296,580 | cum = train_df.groupby('user_id')['prior_question_had_explanation'].agg(['cumsum', 'cumcount'])
cum['cumcount']=cum['cumcount']+1
train_df['explanation_mean'] = cum['cumsum'] / cum['cumcount']
train_df['explanation_true_count'] = cum['cumsum']
train_df['explanation_false_count'] = cum['cumcount']-cum['cumsum']
train_df.explanation_mean=train_df.explanation_mean.astype('float16')
train_df.explanation_true_count=train_df.explanation_true_count.astype('int16')
train_df.explanation_false_count=train_df.explanation_false_count.astype('int16')
<set_options> | submission.to_csv("CNN_model_TPU_submission.csv", index = False ) | Digit Recognizer |
14,584,011 | del cum
gc.collect()<categorify> | train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
train.head() | Digit Recognizer |
14,584,011 | content_agg = train_df.groupby('content_id')[target].agg(['sum', 'count','var'])
task_container_agg = train_df.groupby('task_container_id')[target].agg(['sum', 'count','var'])
content_agg=content_agg.astype('float32')
task_container_agg=task_container_agg.astype('float32' )<data_type_conversions> | Y_train = to_categorical(train['label'].values, 10)
X_train =(train.loc[:, 'pixel0':] / 255 ).values
X_train.shape, Y_train.shape | Digit Recognizer |
14,584,011 | train_df['task_container_uncor_count'] = train_df['task_container_id'].map(task_container_agg['count']-task_container_agg['sum'] ).astype('int32')
train_df['task_container_cor_count'] = train_df['task_container_id'].map(task_container_agg['sum'] ).astype('int32')
train_df['task_container_std'] = train_df['task_container_id'].map(task_container_agg['var'] ).astype('float16')
train_df['task_container_correctness'] = train_df['task_container_id'].map(task_container_agg['sum'] / task_container_agg['count'])
train_df.task_container_correctness=train_df.task_container_correctness.astype('float16')
content_elapsed_time_agg=train_df.groupby('content_id')['prior_question_elapsed_time'].agg(['mean'])
content_had_explanation_agg=train_df.groupby('content_id')['prior_question_had_explanation'].agg(['mean'])
train_df.dtypes
<load_from_csv> | X_test =(test / 255 ).values | Digit Recognizer |
14,584,011 | questions_df = pd.read_csv(
'../input/riiid-test-answer-prediction/questions.csv',
usecols=[0, 1,3,4],
dtype={'question_id': 'int16','bundle_id': 'int16', 'part': 'int8','tags': 'str'}
)
bundle_agg = questions_df.groupby('bundle_id')['question_id'].agg(['count'])
questions_df['content_sub_bundle'] = questions_df['bundle_id'].map(bundle_agg['count'] ).astype('int8')
questions_df['tags'].fillna('188', inplace=True)
<string_transform> | datagener = ImageDataGenerator(
rotation_range=15,
zoom_range=0.1,
width_shift_range=0.1,
height_shift_range=0.1,
) | Digit Recognizer |
14,584,011 | def gettags(tags,num):
tags_splits=tags.split(" ")
result=''
for t in tags_splits:
x=int(t)
if(x<32*(num+1)and x>=32*num):
result=result+' '+t
return result
<categorify> | example = X_train[6].reshape(( 1, 28, 28, 1))
label = Y_train[6] | Digit Recognizer |