kernel_id int64 (24.2k–23.3M) | prompt stringlengths (8–1.85M) | completion stringlengths (1–182k) | comp_name stringlengths (5–57) |
---|---|---|---|
8,959,201 | best_clf.score(train_X, train_y )<save_to_csv> | learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001 ) | Digit Recognizer |
8,959,201 | test_y = best_clf.predict(test_X)
test_ID= titanic_data_test.PassengerId
submission = pd.DataFrame({ "PassengerId": test_ID, "Survived": test_y})
submission.to_csv('./gender_submission.csv', index=False )<import_modules> | batch_size = 100
epochs = 30
hist = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size),
epochs = epochs, validation_data =(X_val,Y_val),
verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size
, callbacks=[learning_rate_reduction] ) | Digit Recognizer |
8,959,201 | import numpy as np
import pandas as pd
from catboost import CatBoostClassifier
from sklearn.model_selection import train_test_split
import re
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_absolute_error
from sklearn.ensemble import RandomForestClassifier
from catboost import CatBoostClassifier
from sklearn.preprocessing import OneHotEncoder
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.linear_model import LogisticRegression
<load_from_csv> | Y_pred_classes = np.argmax(pred,axis = 1)
Y_true = np.argmax(Y_val,axis = 1 ) | Digit Recognizer |
8,959,201 | train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.info()<load_from_csv> | results = model.predict(test)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label" ) | Digit Recognizer |
8,959,201 | <feature_engineering><EOS> | submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("cnn_mnist_datagen.csv",index=False ) | Digit Recognizer |
9,246,679 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<prepare_x_and_y> | %matplotlib inline
| Digit Recognizer |
9,246,679 | train_names = train_data[['Name','Age']].dropna().reset_index()
train_names['Name'] = train_names['Name'].str.lower()
new_date = []
for i in train_names['Name']:
d = [" ".join(re.sub(r"[^a-z ]", ' ', str(i)).split())]
new_date.append(d)
df = pd.DataFrame(new_date, columns = ['Name'])
df['Age'] = train_names['Age']
features_train = df['Name']
target_train = df['Age']<feature_engineering> | train = pd.read_csv("../input/digit-recognizer/train.csv")
test = pd.read_csv("../input/digit-recognizer/test.csv")
| Digit Recognizer |
9,246,679 | count_tf_idf = TfidfVectorizer()
tf_idf = count_tf_idf.fit_transform(features_train)
count_tf_idf_ts = TfidfVectorizer()
tf_id_ts = count_tf_idf.transform(features_train )<categorify> | X_train =(train.iloc[:,1:].values ).astype('float32')
y_train = train.iloc[:,0].values.astype('int32')
X_test = test.values.astype('float32')
| Digit Recognizer |
9,246,679 | clf_t = RandomForestRegressor(random_state=42,
criterion='mae' ).fit(count_tf_idf.transform(features_train), target_train )<feature_engineering> | img_rows, img_cols = 28, 28
X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols,1)
X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols,1 ) | Digit Recognizer |
9,246,679 | ind = train_data.loc[train_data['Age'].isnull() ==True].index
ddf = train_data.loc[ind, ['Name']]
ddf['Name'] = ddf['Name'].str.lower()
new_date = []
for i in ddf['Name']:
d = [" ".join(re.sub(r"[^a-z ]", ' ', str(i)).split())]
new_date.append(d)
df = pd.DataFrame(new_date, columns = ['Name'])
df.index=ind
features_train = df['Name']<predict_on_test> | mean_px = X_train.mean().astype(np.float32)
std_px = X_train.std().astype(np.float32)
def standardize(x):
return(x-mean_px)/std_px
X_train = standardize(X_train)
X_test = standardize(X_test ) | Digit Recognizer |
9,246,679 | d = round(pd.DataFrame(clf_t.predict(count_tf_idf.transform(features_train)) ,columns=['new_adge']))
d.index=ind
d<data_type_conversions> | y_train = to_categorical(y_train)
num_classes = y_train.shape[1] | Digit Recognizer |
9,246,679 | for i in d.index:
for a in train_data.index:
if i == a:
train_data.loc[i,['Age']] = int(d.loc[i,['new_adge']])
train_data['Age'] = train_data['Age'].astype('int')
train_data.info()<feature_engineering> | gen = ImageDataGenerator(rotation_range = 10,
zoom_range = 0.1,
width_shift_range = 0.1,
height_shift_range = 0.1,
shear_range = 0.1 ) | Digit Recognizer |
9,246,679 | ind = test_data.loc[test_data['Age'].isnull() ==True].index
ddf = test_data.loc[ind, ['Name']]
ddf['Name'] = ddf['Name'].str.lower()
new_date = []
for i in ddf['Name']:
d = [" ".join(re.sub(r"[^a-z ]", ' ', str(i)).split())]
new_date.append(d)
df = pd.DataFrame(new_date, columns = ['Name'])
df.index=ind
features_test = df['Name']<predict_on_test> | X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.10, random_state=4)
batches = gen.flow(X_train, y_train, batch_size=64)
val_batches=gen.flow(X_val, y_val, batch_size=64 ) | Digit Recognizer |
9,246,679 | d = round(pd.DataFrame(clf_t.predict(count_tf_idf.transform(features_test)) ,columns=['new_adge']))
d.index=ind
for i in d.index:
for a in test_data.index:
if i == a:
test_data.loc[i,['Age']] = int(d.loc[i,['new_adge']])
test_data['Age'] = test_data['Age'].astype('int')
test_data.info()<filter> | model = Sequential()
model.add(Conv2D(64, kernel_size=(3, 3),
activation='relu',
input_shape=(img_rows, img_cols, 1)))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=(3, 3), strides=1, activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, kernel_size=(3, 3), strides=1, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=(3, 3), strides=1, activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))
| Digit Recognizer |
9,246,679 | train_data.loc[5,['Age']]<count_values> | model.compile(loss=keras.losses.categorical_crossentropy,
optimizer='adam',
metrics=['accuracy'] ) | Digit Recognizer |
9,246,679 | train_data['Embarked'].value_counts()<feature_engineering> | learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001)
history=model.fit_generator(generator=batches,
steps_per_epoch=batches.n,
epochs=3,
validation_data=val_batches,
validation_steps=val_batches.n,
callbacks=[learning_rate_reduction])
| Digit Recognizer |
9,246,679 | train_data['Embarked'] = train_data['Embarked'].fillna('S')
train_data.info()<categorify> | predictions = model.predict_classes(X_test, verbose=0)
submissions=pd.DataFrame({"ImageId": list(range(1,len(predictions)+1)) ,
"Label": predictions})
submissions.to_csv("submission.csv", index=False, header=True ) | Digit Recognizer |
6,958,423 | train_data['Sex'] = pd.get_dummies(train_data['Sex'])
train_data<data_type_conversions> | train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
x_test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
| Digit Recognizer |
6,958,423 | train_data['Sex'] = train_data['Sex'].astype('int')
train_data.info()<categorify> | import matplotlib.pyplot as plt
import tensorflow as tf
import cv2
from collections import deque
import numpy as np | Digit Recognizer |
6,958,423 | test_data['Sex'] = pd.get_dummies(test_data['Sex'])
test_data['Sex'] = test_data['Sex'].astype('int')
test_data<categorify> | y_train = train['label']
x_train = train.drop(labels = ['label'], axis=1 ) | Digit Recognizer |
6,958,423 | one_hot = pd.get_dummies(train_data['Embarked'],drop_first=True)
train_data = train_data.drop('Embarked',axis = 1)
train_data = train_data.join(one_hot)
train_data<categorify> | x_train = x_train.values.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.values.reshape(x_test.shape[0], 28, 28, 1 ) | Digit Recognizer |
6,958,423 | one_hot = pd.get_dummies(test_data['Embarked'],drop_first=True)
test_data = test_data.drop('Embarked',axis = 1)
test_data = test_data.join(one_hot)
test_data<count_duplicates> | input_shape =(28, 28, 1)
| Digit Recognizer |
6,958,423 | len(train_data.loc[train_data['Ticket'].duplicated() ==True][['Ticket','Cabin']].sort_values('Ticket'))<feature_engineering> | x_train = x_train.astype('float32')
x_test = x_test.astype('float32' ) | Digit Recognizer |
6,958,423 | train_data['Name'] = train_data['Name'].str.lower()
<feature_engineering> | x_train /= 255
x_test /= 255
| Digit Recognizer |
6,958,423 |
<categorify> | from keras.models import Sequential
from keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D | Digit Recognizer |
6,958,423 |
<drop_column> | model = Sequential()
model.add(Conv2D(256, kernel_size=(5, 5), activation='relu', input_shape=input_shape))
model.add(Conv2D(128, kernel_size=(5, 5), activation='relu'))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(100, activation=tf.nn.relu))
model.add(Dropout(0.2))
model.add(Dense(10, activation=tf.nn.softmax)) | Digit Recognizer |
6,958,423 | test_data['Name'] = test_data['Name'].str.lower()
<filter> | model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'] ) | Digit Recognizer |
6,958,423 | train_data.loc[(train_data['Ticket']=='S.O.C.14879')]<filter> | model.fit(x=x_train,y=y_train,epochs=150, verbose = 1 ) | Digit Recognizer |
6,958,423 | train_data.loc[train_data['Cabin'].isnull() ==False]<create_dataframe> | pred = model.predict(x_test)
results = np.argmax(pred,axis = 1)
results = pd.Series(results,name = 'Label' ) | Digit Recognizer |
6,958,423 | <feature_engineering><EOS> | sub = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
sub.to_csv("cnn_best_model.csv",index=False ) | Digit Recognizer |
1,639,629 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<create_dataframe> | final_models = {"keras1":"", "keras2":""} | Digit Recognizer |
1,639,629 |
<feature_engineering> | train_data = pd.read_csv("../input/train.csv")
x_train_all = np.array(train_data.drop(["label"], axis=1))
print(x_train_all[0][x_train_all[0] > 0])
x_train_all = x_train_all/255
x_train_all = stretch_image(x_train_all)
y_train_all = np.array(train_data["label"])
idx_all = range(x_train_all.shape[0])
if 1==2:
bad_data_idx = [28290, 16301,14101,15065, 6389,7764,28611,20954,2316, 37056, 37887, 36569, 40257]
plot_bad_data(x_train_all[bad_data_idx], y_train_all[bad_data_idx])
x_train_all = np.delete(x_train_all, bad_data_idx, axis=0)
y_train_all = np.delete(y_train_all, bad_data_idx)
idx_all = np.delete(idx_all, bad_data_idx)
y_train_all = keras.utils.to_categorical(y_train_all, num_classes=10)
x_train, x_valid, y_train, y_valid, idx_train, idx_valid = skm.train_test_split(x_train_all, y_train_all,idx_all, test_size=0.2)
test_data = pd.read_csv("../input/test.csv")
x_test = np.array(test_data)
x_test = x_test/255
x_test = stretch_image(x_test)
| Digit Recognizer |
1,639,629 |
<define_search_space> | datagen = keras.preprocessing.image.ImageDataGenerator(featurewise_center=False,
featurewise_std_normalization=False,
rotation_range=10,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.1,
horizontal_flip=False, shear_range=0.2)
datagen.fit(x_train.reshape(-1,28,28,1))
| Digit Recognizer |
1,639,629 | train_data.loc[[128,699,715,75]]<drop_column> | learning_rate_reduction = keras.callbacks.ReduceLROnPlateau(monitor='acc',
patience=1,
verbose=1,
factor=0.5,
min_lr=0.000001 ) | Digit Recognizer |
1,639,629 | train_data = train_data.drop('PassengerId',axis=1)
dt = train_data
<filter> | def tensorflow_keras_model(x_train, y_train, x_valid, y_valid, num_classes,\
num_epochs, learning_rate):
keras_model = keras.models.Sequential()
keras_model.add(keras.layers.Conv2D(filters=32,kernel_size=(6,6),strides=(1,1), \
padding="same",input_shape=(28,28,1)))
keras_model.add(keras.layers.BatchNormalization())
keras_model.add(keras.layers.Activation('relu'))
keras_model.add(keras.layers.MaxPool2D(strides=(2,2), padding="same"))
keras_model.add(keras.layers.Dropout(rate=0.05))
keras_model.add(keras.layers.Conv2D(filters=64,kernel_size=(6,6),strides=(1,1), \
padding="same"))
keras_model.add(keras.layers.BatchNormalization())
keras_model.add(keras.layers.Activation('relu'))
keras_model.add(keras.layers.MaxPool2D(strides=(2,2), padding="same"))
keras_model.add(keras.layers.Dropout(rate=0.05))
keras_model.add(keras.layers.Flatten())
keras_model.add(keras.layers.Dense(units=1024))
keras_model.add(keras.layers.BatchNormalization())
keras_model.add(keras.layers.Activation('relu'))
keras_model.add(keras.layers.Dropout(rate=0.05))
keras_model.add(keras.layers.Dense(units=num_classes))
keras_model.add(keras.layers.BatchNormalization())
keras_model.add(keras.layers.Activation('softmax'))
opt = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
keras_model.compile(optimizer=opt,loss="categorical_crossentropy", metrics=["accuracy"])
keras_model.fit(x_train.reshape(-1,28,28,1), y_train, epochs=num_epochs, callbacks=[learning_rate_reduction])
final_models["keras1"] = keras_model
keras_model.save("Model_MNIST_Keras_Lenet.h5")
cur_y_pred = keras_model.predict(x_valid.reshape(-1,28,28,1))
y_valid_argmax = np.argmax(y_valid, 1)
y_pred_argmax = np.argmax(cur_y_pred, 1)
y_correct = np.equal(y_valid_argmax, y_pred_argmax)
acc = y_correct.sum() /y_pred_argmax.shape[0]
return cur_y_pred, acc, y_valid_argmax,y_pred_argmax | Digit Recognizer |
1,639,629 |
<drop_column> | def tensorflow_keras_model_2(x_train, y_train, x_valid, y_valid, num_classes,\
num_epochs, learning_rate):
keras_model = keras.models.Sequential()
keras_model.add(keras.layers.Conv2D(filters=32,kernel_size=(3,3),strides=(1,1), \
padding="same",activation='relu', input_shape=(28,28,1)))
keras_model.add(keras.layers.Conv2D(filters=32,kernel_size=(3,3),strides=(1,1), \
padding="same"))
keras_model.add(keras.layers.BatchNormalization())
keras_model.add(keras.layers.Activation('relu'))
keras_model.add(keras.layers.Conv2D(filters=32,kernel_size=(5,5),strides=(2,2), \
padding="same"))
keras_model.add(keras.layers.Dropout(rate=0.05))
keras_model.add(keras.layers.Conv2D(filters=64,kernel_size=(3,3),strides=(1,1), \
padding="same",activation='relu',input_shape=(28,28,1)))
keras_model.add(keras.layers.Conv2D(filters=64,kernel_size=(3,3),strides=(1,1), \
padding="same"))
keras_model.add(keras.layers.BatchNormalization())
keras_model.add(keras.layers.Activation('relu'))
keras_model.add(keras.layers.Conv2D(filters=64,kernel_size=(5,5),strides=(2,2), \
padding="same"))
keras_model.add(keras.layers.Dropout(rate=0.050))
keras_model.add(keras.layers.Conv2D(filters=128,kernel_size=(3,3),strides=(1,1), \
padding="same",activation='relu'))
keras_model.add(keras.layers.Conv2D(filters=128,kernel_size=(3,3),strides=(1,1), \
padding="same"))
keras_model.add(keras.layers.BatchNormalization())
keras_model.add(keras.layers.Activation('relu'))
keras_model.add(keras.layers.Conv2D(filters=128,kernel_size=(5,5),strides=(2,2), \
padding="same"))
keras_model.add(keras.layers.Dropout(rate=0.075))
keras_model.add(keras.layers.Flatten())
keras_model.add(keras.layers.Dense(units=1024))
keras_model.add(keras.layers.BatchNormalization())
keras_model.add(keras.layers.Activation('relu'))
keras_model.add(keras.layers.Dropout(rate=0.100))
keras_model.add(keras.layers.Dense(units=128))
keras_model.add(keras.layers.BatchNormalization())
keras_model.add(keras.layers.Activation('relu'))
keras_model.add(keras.layers.Dropout(rate=0.100))
keras_model.add(keras.layers.Dense(units=num_classes))
keras_model.add(keras.layers.BatchNormalization())
keras_model.add(keras.layers.Activation('softmax'))
opt = keras.optimizers.RMSprop(lr=0.0001, rho=0.9, epsilon=1e-08, decay=0.0)
keras_model.compile(optimizer=opt,loss="categorical_crossentropy", metrics=["accuracy"])
keras_model.fit_generator(datagen.flow(x_train.reshape(-1,28,28,1), y_train, batch_size=32),steps_per_epoch=len(x_train)/ 32, \
epochs=num_epochs, callbacks=[learning_rate_reduction])
final_models["keras2"] = keras_model
keras_model.save("Model_MNIST_Keras_Resnet.h5")
cur_y_pred = keras_model.predict(x_valid.reshape(-1,28,28,1))
y_valid_argmax = np.argmax(y_valid, 1)
y_pred_argmax = np.argmax(cur_y_pred, 1)
y_correct = np.equal(y_valid_argmax, y_pred_argmax)
acc = y_correct.sum() /y_pred_argmax.shape[0]
return cur_y_pred, acc, y_valid_argmax,y_pred_argmax , keras_model
| Digit Recognizer |
1,639,629 | train_data = train_data.drop('Cabin',axis=1)
test_data = test_data.drop('Cabin',axis=1 )<split> | model2_list = []
num_model = 12
def exec_tensorflow_keras_model_2() :
num_epochs = 30
learning_rate=0.00001
num_rows, num_features, num_classes = x_train.shape[0], x_train.shape[1], 10
for imodel in range(num_model):
final_pred_base_model, acc,y_valid_argmax,y_pred_argmax, keras_model = \
tensorflow_keras_model_2(x_train, y_train, x_valid, y_valid, num_classes, num_epochs, learning_rate)
model2_list.append(keras_model)
print("Num Epoch:", num_epochs, " Accuracy:", acc)
plot_y_true_vs_y_pred(x_valid, y_valid_argmax.reshape(len(y_valid)) , y_pred_argmax.reshape(len(y_valid)) , idx_valid ) | Digit Recognizer |
1,639,629 | train, test = train_test_split(train_data, test_size=0.33, random_state=42)
features_train = train.drop(['Survived'],axis=1)
target_train = train['Survived']
features_test = test.drop(['Survived'],axis=1)
target_test = test['Survived']<train_model> | def tensorflow_keras_model_3(x_train, y_train, x_valid, y_valid, num_classes,\
num_epochs, learning_rate):
keras_model = keras.models.Sequential()
keras_model.add(keras.layers.Conv2D(filters=6,kernel_size=(6,6),strides=(1,1), \
padding="same",input_shape=(28,28,1)))
keras_model.add(keras.layers.BatchNormalization())
keras_model.add(keras.layers.Activation('relu'))
keras_model.add(keras.layers.MaxPool2D(strides=(2,2), padding="same"))
keras_model.add(keras.layers.Dropout(rate=0.05))
keras_model.add(keras.layers.Conv2D(filters=16,kernel_size=(6,6),strides=(1,1), \
padding="same"))
keras_model.add(keras.layers.BatchNormalization())
keras_model.add(keras.layers.Activation('relu'))
keras_model.add(keras.layers.MaxPool2D(strides=(2,2), padding="same"))
keras_model.add(keras.layers.Dropout(rate=0.05))
keras_model.add(keras.layers.Conv2D(filters=120,kernel_size=(6,6),strides=(1,1), \
padding="same"))
keras_model.add(keras.layers.BatchNormalization())
keras_model.add(keras.layers.Activation('relu'))
keras_model.add(keras.layers.MaxPool2D(strides=(2,2), padding="same"))
keras_model.add(keras.layers.Dropout(rate=0.05))
keras_model.add(keras.layers.Flatten())
keras_model.add(keras.layers.Dense(units=120))
keras_model.add(keras.layers.BatchNormalization())
keras_model.add(keras.layers.Activation('relu'))
keras_model.add(keras.layers.Dropout(rate=0.05))
keras_model.add(keras.layers.Dense(units=120))
keras_model.add(keras.layers.BatchNormalization())
keras_model.add(keras.layers.Activation('relu'))
keras_model.add(keras.layers.Dropout(rate=0.05))
keras_model.add(keras.layers.Dense(units=num_classes))
keras_model.add(keras.layers.BatchNormalization())
keras_model.add(keras.layers.Activation('softmax'))
opt = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
keras_model.compile(optimizer=opt,loss="categorical_crossentropy", metrics=["accuracy"])
keras_model.fit(x_train.reshape(-1,28,28,1), y_train, epochs=num_epochs, callbacks=[learning_rate_reduction])
final_models["keras1"] = keras_model
keras_model.save("Model_MNIST_Keras_Lenet.h5")
cur_y_pred = keras_model.predict(x_valid.reshape(-1,28,28,1))
y_valid_argmax = np.argmax(y_valid, 1)
y_pred_argmax = np.argmax(cur_y_pred, 1)
y_correct = np.equal(y_valid_argmax, y_pred_argmax)
acc = y_correct.sum() /y_pred_argmax.shape[0]
return cur_y_pred, acc, y_valid_argmax,y_pred_argmax | Digit Recognizer |
1,639,629 | model = CatBoostClassifier(iterations=200,
depth=9,learning_rate=0.05,l2_leaf_reg=10,random_seed=42,
loss_function='Logloss',grow_policy='Lossguide',
max_leaves=39,
verbose=True,eval_metric='Accuracy',nan_mode='Min',cat_features=['Pclass','Sex','Q','S'])
model.fit(features_train, target_train,text_features=['Ticket','Name'],
plot=True,eval_set=(features_test,target_test),verbose=False)
print(accuracy_score(model.predict(features_train),target_train))
print(accuracy_score(model.predict(features_test),target_test))<drop_column> | model2 = final_models["keras2"]
x_valid = x_valid.reshape(-1,28,28,1)
y_pred_valid2 = np.zeros(( x_valid.shape[0],10))
for imodel in range(num_model):
y_pred_valid2 = y_pred_valid2 + model2_list[imodel].predict(x_valid)
y_pred_valid = y_pred_valid2
y_pred_valid_final = np.argmax(y_pred_valid, axis=1)
y_correct = np.equal(y_pred_valid_final, np.argmax(y_valid, axis=1))
acc_final = y_correct.sum() /y_valid.shape[0]
print(acc_final ) | Digit Recognizer |
1,639,629 | test_data = test_data.drop('PassengerId',axis=1)
dt = test_data
test_data<save_to_csv> | x_test = x_test.reshape(-1,28,28,1)
y_pred_test2 = np.zeros(( x_test.shape[0],10))
for imodel in range(num_model):
y_pred_test2 = y_pred_test2 + model2_list[imodel].predict(x_test)
| Digit Recognizer |
1,639,629 | submission=pd.DataFrame(model.predict(test_data),columns=['Survived'])
submission['PassengerId'] = hui
submission.Survived = submission.Survived.astype(int)
filename = 'Titanic Predictions1.csv'
submission.to_csv(filename,index=False)
print('Saved file: ' + filename )<load_from_csv> | y_pred_test = y_pred_test2
y_pred_test_final = np.argmax(y_pred_test, axis=1)
| Digit Recognizer |
1,639,629 | train = pd.read_csv("/kaggle/input/titanic/train.csv")
test = pd.read_csv("/kaggle/input/titanic/test.csv")
print(train.shape)
train.head()<count_missing_values> | dictionary_data = {"ImageId":np.arange(1, x_test.shape[0]+1), "Label":y_pred_test_final}
df_final = pd.DataFrame(dictionary_data)
df_final.to_csv("submission.csv", index=False ) | Digit Recognizer |
4,859,372 | train.isnull().sum()<drop_column> | train = pd.read_csv("../input/train.csv")
print(train.shape)
train.head() | Digit Recognizer |
4,859,372 | train = train.drop(["Cabin"],axis = 1)
print(train.shape)
train.head()<count_missing_values> | test = pd.read_csv("../input/test.csv")
print(test.shape)
test.head() | Digit Recognizer |
4,859,372 | train.isnull().sum()<count_missing_values> | Y_train = train["label"]
X_train = train.drop(labels = ["label"],axis = 1 ) | Digit Recognizer |
4,859,372 | train.isnull().sum()<categorify> | X_train = X_train / 255.0
test = test / 255.0
print("x_train shape: ",X_train.shape)
print("test shape: ",test.shape ) | Digit Recognizer |
4,859,372 | train['Embarked'].fillna(method = 'backfill', inplace = True )<data_type_conversions> | Y_train = to_categorical(Y_train, num_classes = 10 ) | Digit Recognizer |
4,859,372 | train['Age'].fillna(train['Age'].median() , inplace = True )<count_missing_values> | X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=2)
print("x_train shape",X_train.shape)
print("x_test shape",X_val.shape)
print("y_train shape",Y_train.shape)
print("y_test shape",Y_val.shape ) | Digit Recognizer |
4,859,372 | train.isnull().sum()<count_missing_values> | model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu', input_shape =(28,28,1)))
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax")) | Digit Recognizer |
4,859,372 | train.isnull().sum()<data_type_conversions> | optimizer = RMSprop(lr=0.001,rho=0.9, epsilon=1e-08, decay=0.0 ) | Digit Recognizer |
4,859,372 | def str_to_cat(training_df):
for p,q in training_df.items() :
if is_string_dtype(q):
training_df[p] = q.astype('category' ).cat.as_ordered()
return training_df<categorify> | model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"] ) | Digit Recognizer |
4,859,372 | def mydf_to_nums(training_df, feature, null_status):
if not is_numeric_dtype(feature):
training_df[null_status] = feature.cat.codes + 1
def mydf_imputer(training_df, feature, null_status, null_table):
if is_numeric_dtype(feature):
if pd.isnull(feature ).sum() or(null_status in null_table):
filler = null_table[null_status] if null_status in null_table else feature.median()
training_df[null_status] = feature.fillna(filler)
null_table[null_status] = filler
return null_table
def mydf_preprocessor(training_df, null_table):
if null_table is None:
null_table = dict()
for p,q in training_df.items() :
null_table = mydf_imputer(training_df, q, p, null_table)
for p,q in training_df.items() :
mydf_to_nums(training_df, q, p)
training_df = pd.get_dummies(training_df, dummy_na = True)
res = [training_df, null_table]
return res<categorify> | epochs = 20
batch_size = 378 | Digit Recognizer |
4,859,372 | train = str_to_cat(train)
train_df,my_table = mydf_preprocessor(train,null_table = None)
print(train_df.shape)
train_df.head()<count_duplicates> | datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_train ) | Digit Recognizer |
4,859,372 | train.duplicated().sum()<count_missing_values> | history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size),
epochs = epochs, validation_data =(X_val,Y_val), steps_per_epoch=X_train.shape[0] // batch_size ) | Digit Recognizer |
4,859,372 | test.isnull().sum()<count_missing_values> | results = model.predict(test)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label" ) | Digit Recognizer |
4,859,372 | <drop_column><EOS> | submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("submission.csv",index=False ) | Digit Recognizer |
14,651,554 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<data_type_conversions> | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from keras.layers import Dropout, Dense, Conv2D, AveragePooling2D, Flatten, MaxPooling2D
from keras.models import Sequential, load_model
from sklearn.metrics import accuracy_score | Digit Recognizer |
14,651,554 | test['Age'].fillna(test['Age'].median() , inplace = True )<feature_engineering> | train_data = pd.read_csv('../input/digit-recognizer/train.csv')
prediction = pd.read_csv('../input/digit-recognizer/test.csv')
submission1 = pd.read_csv('../input/digit-recognizer/sample_submission.csv' ) | Digit Recognizer |
14,651,554 | class_fares = test.groupby('Pclass')['Fare'].median()
test['median_fare'] = test['Pclass'].apply(lambda x: class_fares[x])
test['Fare'].fillna(test['median_fare'], inplace = True,)
del test['median_fare']<count_missing_values> | train_data.isnull().sum().sum() | Digit Recognizer |
14,651,554 | test.isnull().sum()<count_missing_values> | train_data, test_data = train_test_split(train_data, stratify=train_data["label"], test_size = 0.20)
train_data, cv_data = train_test_split(train_data, stratify=train_data["label"], test_size = 0.25)
train_data.shape, test_data.shape, cv_data.shape | Digit Recognizer |
14,651,554 | test.isnull().sum()<concatenate> | train_y = train_data['label']
train_x = train_data.drop(columns = ['label'])
cv_y = cv_data['label']
cv_x = cv_data.drop(columns = ['label'])
test_y = test_data['label']
test_x = test_data.drop(columns = ['label'] ) | Digit Recognizer |
14,651,554 | test = str_to_cat(test)
test,my_table = mydf_preprocessor(test,null_table = None)
print(test.shape)
test.head(5 )<count_duplicates> | train_x = np.array(train_x)
train_x = train_x.reshape(25200, 28, 28, 1)
train_y = np.array(pd.get_dummies(train_y))
cv_x = np.array(cv_x)
cv_x = cv_x.reshape(8400, 28, 28, 1)
cv_y = np.array(pd.get_dummies(cv_y))
test_x = np.array(test_x)
test_x = test_x.reshape(8400, 28, 28, 1 ) | Digit Recognizer |
14,651,554 | test.duplicated().sum()<drop_column> | prediction.isnull().sum().sum() | Digit Recognizer |
14,651,554 | train = train.drop('Name', axis = 1)
test = test.drop(columns=['Name'], axis = 1 )<categorify> | prediction = np.array(prediction)
prediction = prediction.reshape(28000, 28, 28, 1 ) | Digit Recognizer |
14,651,554 | X_train = train.drop('Survived', axis=1)
X_train = pd.get_dummies(X_train,columns=['Embarked','Pclass'])
Y_train = train['Survived']
print(X_train.shape,Y_train.shape )<normalization> | def model() :
model = Sequential()
model.add(Conv2D(32,kernel_size=3,activation='relu',input_shape=(28,28,1)))
model.add(BatchNormalization())
model.add(Conv2D(32,kernel_size=3,activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(32,kernel_size=5,strides=2,padding='same',activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Conv2D(64,kernel_size=3,activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64,kernel_size=3,activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64,kernel_size=5,strides=2,padding='same',activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Dense(10, activation='softmax'))
return model
model = model()
model.summary() | Digit Recognizer |
14,651,554 | scaler = StandardScaler().fit(X_train)
scaled_X = scaler.transform(X_train)
print(scaled_X )<categorify> | model.compile(loss = 'categorical_crossentropy', optimizer = "adam", metrics = ["accuracy"])
hist = model.fit(train_x, train_y, validation_data=(cv_x, cv_y), epochs = 40 ) | Digit Recognizer |
14,651,554 | x_test = pd.get_dummies(test,columns=['Embarked','Pclass'] )<normalization> | log_frame = pd.DataFrame(columns = ["Epoch", "Train_Loss", "Train_Accuracy", "CV_Loss", "CV_Accuracy"])
log_frame["Epoch"] = epoch_number
log_frame["Train_Loss"] = train_loss
log_frame["Train_Accuracy"] = train_acc
log_frame["CV_Loss"] = val_loss
log_frame["CV_Accuracy"] = val_acc
log_frame | Digit Recognizer |
14,651,554 | sc = StandardScaler().fit(x_test)
sc_X = sc.transform(x_test)
print(sc_X )<train_model> | test_predict = model.predict(test_x ) | Digit Recognizer |
14,651,554 | model_1 = RandomForestClassifier(n_jobs = -1, n_estimators = 10,
bootstrap = True)
model_1.fit(scaled_X,Y_train )<predict_on_test> | l = []
for i in range(test_predict.shape[0]):
j = test_predict[i].argmax()
l.append(j)
test_predict = pd.DataFrame(l ) | Digit Recognizer |
14,651,554 | pred = model_1.predict(scaled_X )<train_on_grid> | acc = accuracy_score(test_y, l)* 100
acc | Digit Recognizer |
14,651,554 | parameters = {'n_estimators': [20, 50, 60, 80, 90, 100, 120, 150, 200], 'max_features': ["auto", "sqrt", "log2"]}
rfc = RandomForestClassifier(random_state = 1)
cls = GridSearchCV(estimator = rfc, param_grid = parameters)
cls.fit(scaled_X, Y_train)
cls.best_params_<train_model> | prediction_predict = model.predict(prediction ) | Digit Recognizer |
14,651,554 | model_2 = RandomForestClassifier(n_estimators=80, max_features="auto")
model_2.fit(scaled_X, Y_train)
y_pred = model_2.predict(sc_X )<save_to_csv> | l = []
for i in range(prediction_predict.shape[0]):
j = prediction_predict[i].argmax()
l.append(j)
submission2 = pd.DataFrame(l)
submission2 = submission2.rename(columns = {0 : 'Label'} ) | Digit Recognizer |
14,651,554 | sub_df = pd.DataFrame({'PassengerId' : test["PassengerId"],'Survived' : y_pred})
submission = sub_df.to_csv('submission.csv',index=False )<set_options> | submission1.drop(columns = ['Label'], inplace = True)
submission = pd.concat([submission1, submission2], axis = 1 ) | Digit Recognizer |
14,651,554 | sns.set(style="ticks", context="talk")
warnings.filterwarnings('ignore' )<load_from_csv> | t = prediction.reshape(28000, 28, 28)
plt.imshow(t[2222])
print(" The label predicted is ", submission["Label"][2222] ) | Digit Recognizer |
14,651,554 | <feature_engineering><EOS> | submission.to_csv('./submission.csv',index=False ) | Digit Recognizer |
1,515,892 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<filter> | %matplotlib inline
| Digit Recognizer |
1,515,892 | data[data['Fare'].isna() ]<sort_values> | train, test = pd.read_csv("../input/train.csv"), pd.read_csv("../input/test.csv" ) | Digit Recognizer |
1,515,892 | data_corr = data.corr().abs().unstack().sort_values(kind = "quicksort", ascending = False ).reset_index()
data_corr.loc[data_corr['level_0'] == 'Fare']<categorify> | train['label'].value_counts() | Digit Recognizer |
1,515,892 | data['Fare'] = data['Fare'].fillna(data.groupby(['Pclass'])['Fare'].transform('median'))<count_missing_values> | train_pixels, test_pixels = train.iloc[:,1:].values.astype('float32'), test.values.astype('float32')
train_labels = train.iloc[:,0].values.astype('int32')
train_labels = train_labels.reshape(-1, 1)
print(f'train_pixels shape = {train_pixels.shape}')
print(f'test_pixels shape = {test_pixels.shape}')
print(f'train_labels shape = {train_labels.shape}' ) | Digit Recognizer |
1,515,892 | data.isna().sum()<feature_engineering> | train_pixels, test_pixels = train_pixels.reshape(-1, 28, 28, 1), test_pixels.reshape(-1, 28, 28, 1)
print(f'train_pixels shape = {train_pixels.shape}')
print(f'test_pixels shape = {test_pixels.shape}' ) | Digit Recognizer |
1,515,892 | data['Title'] = data.Name.apply(lambda x: re.search('([A-Z][a-z]+)\.', x ).group(1))
data['Title'].unique()<count_values> | train_pixels, test_pixels = train_pixels / 255.0, test_pixels / 255.0 | Digit Recognizer |
1,515,892 | data['Title'].value_counts(normalize = True ).round(3 )<count_missing_values> | train_labels = to_categorical(train_labels, num_classes = 10)
print(f'train_labels shape = {train_labels.shape}')
train_labels | Digit Recognizer |
1,515,892 | data['Age'].isna().sum()<filter> | train_pixels, val_pixels, train_labels, val_labels = train_test_split(train_pixels, train_labels, test_size = 0.1, random_state=None)
train_pixels.shape, train_labels.shape, val_pixels.shape, val_labels.shape, test_pixels.shape | Digit Recognizer |
1,515,892 | data[data['Age'] < 1]<feature_engineering> | m_train = train_pixels.shape[0]
m_val = val_pixels.shape[0]
m_test = test_pixels.shape[0]
n_x = test.shape[1]
n_y = train_labels.shape[1]
print(f" m_train = {m_train} / m_val = {m_val} / m_test = {m_test} / n_x = {n_x} / n_y = {n_y}" ) | Digit Recognizer |
1,515,892 | data.loc[data['Age'] < 1, 'Age'] = None
data['Age'].isna().sum()<sort_values> | datagen = ImageDataGenerator(
rotation_range = 10,
zoom_range = 0.1,
shear_range = 0.1,
width_shift_range = 0.1,
height_shift_range = 0.1 ) | Digit Recognizer |
1,515,892 | data_corr = data.corr().abs().unstack().sort_values(kind = "quicksort", ascending = False ).reset_index()
data_corr.loc[data_corr['level_0'] == 'Age']<data_type_conversions> | model = Sequential()
model.add(Conv2D(filters=6, kernel_size=5, padding='Same',
input_shape=(28,28,1)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=2, strides=2))
model.add(Conv2D(filters=16, kernel_size=5))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPool2D(pool_size=2, strides=2))
model.add(Flatten())
model.add(Dense(120))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Dense(84))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Dense(10))
model.add(BatchNormalization())
model.add(Activation('softmax'))
model.compile(optimizer=Adam(lr=1e-3),
loss='categorical_crossentropy',
metrics=['accuracy'])
lr_decay = ReduceLROnPlateau(monitor='loss',
patience=1, verbose=1,
factor=0.5, min_lr=1e-7)
History = model.fit(train_pixels, train_labels, epochs=40,
validation_data=(val_pixels, val_labels),
callbacks=[lr_decay], verbose=1)
train_loss, train_acc = model.evaluate(train_pixels, train_labels)
val_loss, val_acc = model.evaluate(val_pixels, val_labels)
print(f'model: train accuracy = {round(train_acc * 100, 4)}%')
print(f'model: val accuracy = {round(val_acc * 100, 4)}%')
print(f'model: val error = {round(( 1 - val_acc)* m_val)} examples')
| Digit Recognizer |
1,515,892 | <sort_values><EOS> | predictions = model.predict_classes(test_pixels)
submission = pd.DataFrame({"ImageId": list(range(1,len(predictions)+1)) ,
"Label" : predictions})
submission.to_csv("submission.csv", index=False, header=True)
submission.head() | Digit Recognizer |
6,485,130 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<groupby> | %reload_ext autoreload
%autoreload 2
%matplotlib inline
| Digit Recognizer |
6,485,130 | data[['Pclass', 'Embarked', 'Survived']].groupby(['Pclass', 'Embarked'] ).mean().round(2 )<feature_engineering> | print(torch.cuda.is_available() , torch.backends.cudnn.enabled ) | Digit Recognizer |
6,485,130 | print('Pclass: ', data[data['Cabin_group'] == 'T']['Pclass'].values)
data.loc[data['Cabin'] == 'T', 'Cabin_group'] = 'A'<groupby> | train_df = pd.read_csv(INPUT/"train.csv")
train_df.head(3 ) | Digit Recognizer |
6,485,130 | data[['Cabin_group', 'Pclass', 'Survived']].groupby(['Cabin_group', 'Pclass'] ).mean().round(2 )<categorify> | test_df = pd.read_csv(INPUT/"test.csv")
test_df.head(3 ) | Digit Recognizer |
6,485,130 | data['Sex_int'] = data['Sex'].replace({'male': 1, 'female': 0})
data['Embarked_int'] = data['Embarked'].replace({'S': 0, 'C': 1, 'Q':2})
data['Title_int'] = data['Title'].replace({'Mr': 0, 'Mrs': 1, 'Miss':2, 'Master':3, 'Special':4})
data['age_group_int'] = data['age_group'].replace({'Adults': 0, 'Middle age': 1, 'Infants':2, 'Adolescents':3, 'Preschool':4, 'Children':5, 'Seniors':6})
data['Cabin_group_int'] = data['Cabin_group'].replace({'unkown': 0, 'ABC': 1, 'DE':2, 'FG':3} )<filter> | TRAIN = Path("../train")
TEST = Path("../test" )
6,485,130 | data = data.loc[:,~data.columns.str.endswith('_int')]<data_type_conversions> | sorted(os.listdir(TRAIN)) | Digit Recognizer |
6,485,130 | dummy_features = ['Sex'
, 'Pclass'
, 'Embarked'
, 'Cabin_group'
, 'Title'
, 'age_group'
]
for col in dummy_features:
data[col] = data[col].astype(object)
drop_features = ['PassengerId', 'Ticket', 'Name', 'Cabin'
,'small_family_size'
,'Alone'
,'SibSp'
,'Parch'
,'Age'
]
data = pd.concat([data, pd.get_dummies(data[dummy_features], drop_first = True)], axis = 1, sort = False)
data.drop(columns = data[dummy_features], inplace = True)
data.drop(columns = data[drop_features], inplace = True)
data.tail()<correct_missing_values> | if os.path.isdir(TRAIN):
print('Train directory has been created')
else:
print('Train directory creation failed.')
if os.path.isdir(TEST):
print('Test directory has been created')
else:
print('Test directory creation failed.' ) | Digit Recognizer |
6,485,130 | train.dropna(inplace = True)
test.dropna(inplace = True )<split> | from PIL import Image | Digit Recognizer |
6,485,130 | y = train['Survived']
x = train.drop(columns = target)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.3, random_state = 42
, stratify = y
)<train_on_grid> | def pix2img(pix_data, filepath):
img_mat = pix_data.reshape(28,28)
img_mat = img_mat.astype(np.uint8())
img_dat = Image.fromarray(img_mat)
img_dat.save(filepath)
| Digit Recognizer |
6,485,130 | RF = ensemble.RandomForestClassifier()
params = {
'n_estimators':[n for n in range(100, 250, 50)]
,'max_depth':[n for n in range(3, 8)]
,'min_samples_leaf': [n for n in range(3, 6, 1)]
,'max_features' : [None]
,'random_state' : [42]
}
RF_model = GridSearchCV(RF, param_grid = params, cv = 5, n_jobs = -1 ).fit(x_train, y_train)
print("Best Hyper Parameters:",RF_model.best_params_)
RF_probs = RF_model.predict_proba(x_test)
RF_probs = RF_probs[:, 1]
RF_auc = roc_auc_score(y_test, RF_probs)
print('AUC: %.3f' % RF_auc)
RF_predictions = RF_model.predict(x_test ).astype(int)
RF_accuracy = accuracy_score(y_test, RF_predictions)
print("RF accuracy: %.3f" % RF_accuracy)
print("RF Recall: " + '%.3f' % recall_score(y_test, RF_predictions))
print("RF Precission: " + '%.3f' % precision_score(y_test, RF_predictions))
print("RF cohen_kappa_score: %.3f" % cohen_kappa_score(y_test, RF_predictions))
plt.figure(figsize =(8, 6))
RF_fpr, RF_tpr, RF_thresholds = roc_curve(y_test, RF_probs)
plt.plot([0, 1], [0, 1], linestyle = '--')
plt.plot(RF_fpr, RF_tpr, color = 'tab:green')
plt.show()<predict_on_test> | tfms = get_transforms(do_flip = False)
| Digit Recognizer |
6,485,130 | data['churn_proba'] = RF_model.best_estimator_.predict_proba(data[x.columns])[:,1]<save_to_csv> | print('test : ',TEST)
print('train: ', TRAIN)
print(type(TEST)) | Digit Recognizer |
6,485,130 | predict_RF = RF_model.predict(test ).astype(int)
submit_RF = pd.DataFrame({'PassengerId': testing['PassengerId'],
'Survived': predict_RF})
filename_RF = 'Titanic Prediction RF.csv'
submit_RF.to_csv(filename_RF,index=False)
print('Saved file: ' + filename_RF )<load_from_csv> | data = ImageDataBunch.from_folder(
path =("../train"),
test =("../test"),
valid_pct = 0.1,
bs = 256,
size = 28,
num_workers = 0,
ds_tfms = tfms
) | Digit Recognizer |
6,485,130 | train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head()<load_from_csv> | data.normalize(imagenet_stats)
| Digit Recognizer |
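Each row above pairs the code a Kaggle kernel author had already written (prompt) with the next cell they wrote (completion), plus the competition name. Pseudo-tags such as `<load_from_csv>` or `<train_model>` label the type of the cell that precedes them, and `<SOS>`/`<EOS>` mark kernel boundaries. Below is a minimal sketch of how such rows could be loaded and the tags split out; the file name `kaggle_kernel_cells.parquet` and the exact column names are assumptions inferred from the header above, not a documented interface.

```python
import re
import pandas as pd

# Hypothetical file name; columns assumed from the header above:
# kernel_id, prompt, completion, comp_name.
df = pd.read_parquet("kaggle_kernel_cells.parquet")

# Pseudo-tags like <load_from_csv> label the cell that precedes them;
# <SOS>/<EOS> mark the start and end of a kernel.
TAG_RE = re.compile(r"<(SOS|EOS|[a-z_]+)>")

def split_cells(prompt: str):
    """Split a prompt string into (code, tag) pairs."""
    parts = TAG_RE.split(prompt)
    # re.split with one capture group alternates code and tag:
    # [code0, tag0, code1, tag1, ..., trailing_code]
    return [(code.strip(), tag) for code, tag in zip(parts[0::2], parts[1::2])]

for _, row in df.head(3).iterrows():
    print(row["kernel_id"], row["comp_name"])
    for code, tag in split_cells(row["prompt"]):
        print(f"  [{tag}] {code[:60]!r}")
```

A completion-model training pipeline would typically feed `prompt` in as context and train on `completion`; the tag vocabulary doubles as a cheap cell-type classifier target.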