kernel_id (int64, 24.2k to 23.3M) | prompt (string, lengths 8 to 1.85M) | completion (string, lengths 1 to 182k) | comp_name (string, lengths 5 to 57) |
---|---|---|---|
8,655,915 | test = test.drop('Survived', axis = 1)
test.head()<data_type_conversions> | lr=1e-03 | Digit Recognizer |
8,655,915 | train['Cabin'] = train['Cabin'].astype('int' )<data_type_conversions> | learn.fit_one_cycle(5, slice(lr)) | Digit Recognizer |
8,655,915 | test['Cabin'] = test['Cabin'].astype('int' )<train_model> | learn.save('stage-4' ) | Digit Recognizer |
8,655,915 | X_train = train.drop('Survived', axis = 1)
Y_train = train['Survived']
X_test = test.drop('PassengerId', axis = 1 ).copy()
print("X_train shape: ", X_train.shape)
print("Y_train shape: ", Y_train.shape)
print("X_test shape: ", X_test.shape )<compute_train_metric> | learn.fit_one_cycle(5, slice(1e-5, lr/5)) | Digit Recognizer |
8,655,915 | logreg = LogisticRegression()
logreg.fit(X_train, Y_train)
Y_pred = logreg.predict(X_test)
acc_log = round(logreg.score(X_train, Y_train)* 100, 2)
acc_log<compute_train_metric> | learn.fit_one_cycle(10, 1e-05, wd=0.5 ) | Digit Recognizer |
8,655,915 | svc = SVC()
svc.fit(X_train, Y_train)
Y_pred = svc.predict(X_test)
acc_svc = round(svc.score(X_train, Y_train)* 100, 2)
acc_svc<predict_on_test> | class_score , y = learn.get_preds(DatasetType.Test ) | Digit Recognizer |
8,655,915 | knn = KNeighborsClassifier(n_neighbors = 5)
knn.fit(X_train, Y_train)
Y_pred = knn.predict(X_test)
acc_knn = round(knn.score(X_train, Y_train)* 100, 2)
acc_knn<predict_on_test> | probabilities = class_score[0].tolist()
[f"{index}: {probabilities[index]}" for index in range(len(probabilities)) ] | Digit Recognizer |
8,655,915 | gaussian = GaussianNB()
gaussian.fit(X_train, Y_train)
Y_pred = gaussian.predict(X_test)
acc_gaussian = round(gaussian.score(X_train, Y_train)* 100, 2)
acc_gaussian<predict_on_test> | class_score = np.argmax(class_score, axis=1 ) | Digit Recognizer |
8,655,915 | perceptron = Perceptron()
perceptron.fit(X_train, Y_train)
Y_pred = perceptron.predict(X_test)
acc_perceptron = round(perceptron.score(X_train, Y_train)* 100, 2)
acc_perceptron<compute_train_metric> | sample_submission = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv')
sample_submission.head() | Digit Recognizer |
8,655,915 | linear_svc = LinearSVC()
linear_svc.fit(X_train, Y_train)
Y_pred = linear_svc.predict(X_test)
acc_linear_svc = round(linear_svc.score(X_train, Y_train)* 100, 2)
acc_linear_svc<compute_train_metric> | ImageId = [os.path.splitext(path)[0] for path in os.listdir(TEST)]
ImageId = [int(path)for path in ImageId]
ImageId = [ID+1 for ID in ImageId] | Digit Recognizer |
8,655,915 | <choose_model_class><EOS> | submission = pd.DataFrame({
"ImageId": ImageId,
"Label": class_score
})
submission.to_csv("submission.csv", index=False ) | Digit Recognizer |
8,530,185 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<predict_on_test> | train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
sample_df = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv' ) | Digit Recognizer |
8,530,185 | random_forest = RandomForestClassifier(n_estimators = 100)
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_test)
acc_random_forest = round(random_forest.score(X_train, Y_train)* 100, 2)
acc_random_forest<choose_model_class> | class MnistDataset(Dataset):
def __init__(self, df, transforms=None, train=True):
self.train = train
self.transforms = transforms
self.X = df.loc[:, df.columns != 'label'].to_numpy(float)
self.X = torch.from_numpy(self.X)
if train:
self.y = df.get('label' ).to_numpy()
self.y = torch.from_numpy(self.y)
def __len__(self):
return self.X.shape[0]
def __getitem__(self, i):
x = self.X[i].view(1, 28, 28 ).expand(3, 28, 28)
if self.transforms: x = self.transforms(x)
if self.train: return x, self.y[i]
return x
| Digit Recognizer |
8,530,185 | catboost = CatBoostClassifier()
catboost.fit(X_train, Y_train)
Y_pred = catboost.predict(X_test)
acc_catboost = round(catboost.score(X_train, Y_train)* 100, 2 )<create_dataframe> | normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225] ) | Digit Recognizer |
8,530,185 | models = pd.DataFrame({'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression',
'Random Forest', 'Naive Bayes', 'Perceptron', 'Stochastic Gradient Descent',
'Linear SVC', 'Decision Tree', 'CatBoost'],
'Score': [acc_svc, acc_knn, acc_log, acc_random_forest, acc_gaussian, acc_perceptron,
acc_sgd, acc_linear_svc, acc_decision_tree, acc_catboost]})
models.sort_values(by = 'Score', ascending = False, ignore_index = True )<choose_model_class> | ds = MnistDataset(train_df, normalize)
len(ds ) | Digit Recognizer |
8,530,185 | classifiers = []
classifiers.append(LogisticRegression())
classifiers.append(SVC())
classifiers.append(KNeighborsClassifier(n_neighbors = 5))
classifiers.append(GaussianNB())
classifiers.append(Perceptron())
classifiers.append(LinearSVC())
classifiers.append(SGDClassifier())
classifiers.append(DecisionTreeClassifier())
classifiers.append(RandomForestClassifier())
classifiers.append(CatBoostClassifier())
len(classifiers )<compute_train_metric> | val_len = int(len(ds)*0.01)
train_len = len(ds)- val_len
train_ds, val_ds = random_split(ds, [train_len, val_len])
len(train_ds), len(val_ds ) | Digit Recognizer |
8,530,185 | cv_results = []
for classifier in classifiers:
cv_results.append(cross_val_score(classifier, X_train, Y_train, scoring = 'accuracy', cv = 10))<create_dataframe> | bs = 512
num_workers = 2
train_dl = DataLoader(train_ds, bs, num_workers=num_workers)
val_dl = DataLoader(val_ds, bs, num_workers=num_workers ) | Digit Recognizer |
8,530,185 | cv_res = pd.DataFrame({'Cross Validation Mean': cv_mean, 'Cross Validation Std': cv_std, 'Algorithm': ['Logistic Regression', 'Support Vector Machines', 'KNN', 'Gaussian Naive Bayes', 'Perceptron', 'Linear SVC', 'Stochastic Gradient Descent', 'Decision Tree', 'Random Forest', 'CatBoost']})
cv_res.sort_values(by = 'Cross Validation Mean', ascending = False, ignore_index = True )<train_on_grid> | resnet18 = models.resnet18(pretrained=True)
resnet18.fc | Digit Recognizer |
8,530,185 | param_grid = {'learning_rate': [0.03, 0.1],
'depth': [4, 6, 10],
'l2_leaf_reg': [1, 3, 5, 7, 9]}
grid = GridSearchCV(CatBoostClassifier() , param_grid = param_grid,
cv = 3, refit=True, verbose = True)
grid.fit(X_train, Y_train )<train_model> | lin_in = resnet18.fc.in_features
resnet18.fc = nn.Sequential(
nn.Linear(lin_in, 10)
) | Digit Recognizer |
8,530,185 | print("Best parameters: ", grid.best_params_)
print("Best estimator: ", grid.best_estimator_ )<predict_on_test> | device = torch.device('cuda:0')if torch.cuda.is_available() else 'cpu'
net = net.to(device)
device | Digit Recognizer |
8,530,185 | catboost = CatBoostClassifier(depth=10, l2_leaf_reg=5, learning_rate=0.1)
catboost.fit(X_train, Y_train)
Y_pred = catboost.predict(X_test)
acc_catboost = round(catboost.score(X_train, Y_train)* 100, 2 )<compute_train_metric> | epochs = 10
loss_fn = nn.CrossEntropyLoss()
o = optim.Adam(net.parameters() ,lr=0.001 ) | Digit Recognizer |
8,530,185 | cross_val_score(catboost, X_train, Y_train, scoring = 'accuracy', cv = 10 ).mean()<create_dataframe> | bs = 640
test_ds = MnistDataset(test_df, normalize, train = False)
test_dl = DataLoader(test_ds, bs ) | Digit Recognizer |
8,530,185 | output = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': Y_pred} )<save_to_csv> | outputs = []
with torch.no_grad() :
for images in test_dl:
images = images.to(device, dtype=torch.float)
out = net(images)
outputs.extend(out.argmax(dim=1 ).tolist() ) | Digit Recognizer |
8,530,185 | <train_model><EOS> | sample_df['Label'] = outputs
sample_df.to_csv('submission.csv', index=False ) | Digit Recognizer |
6,956,162 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<set_options> | warnings.filterwarnings('ignore')
%matplotlib inline
mpl.style.use('ggplot')
sns.set_style('white')
pylab.rcParams[ 'figure.figsize' ] = 8 , 6
| Digit Recognizer |
6,956,162 | pd.set_option('max_columns', 90)
PALETTE = ['
BACKCOLOR = '
sns.set_palette(PALETTE)
warnings.filterwarnings('ignore')
mpl.rcParams['figure.dpi'] = 120
mpl.rcParams['axes.spines.top'] = False
mpl.rcParams['axes.spines.right'] = False<load_from_csv> | train = pd.read_csv('../input/digit-recognizer/train.csv')
test = pd.read_csv('../input/digit-recognizer/test.csv' ) | Digit Recognizer |
6,956,162 | train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
all_data = pd.concat(( train, test)).reset_index(drop=True )<categorify> | df = train.copy() | Digit Recognizer |
6,956,162 | def multi_table(table_list):
return HTML(
f"<table><tr> {''.join(['<td>' + table._repr_html_() + '</td>' for table in table_list])} </tr></table>" )<count_values> | y = df.label.values.astype('int32')
df = df[df.columns[1:]].values.astype('float32')
X_train , X_test , y_train , y_test = train_test_split(df , y , test_size = 0.2 , random_state = 100 ) | Digit Recognizer |
6,956,162 | multi_table([pd.DataFrame(all_data[i].value_counts())for i in all_data.columns] )<define_variables> | test = test.values.astype('float32' ) | Digit Recognizer |
6,956,162 | numerical_vars = ['Age', 'SibSp', 'Parch', 'Fare']
ordinal_vars = ['Pclass']
nominal_vars = ['Survived', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked']<create_dataframe> | new = pd.read_csv('../input/digit-recognizer/train.csv' ) | Digit Recognizer |
6,956,162 | train0 = train[train.Survived == 0]
train1 = train[train.Survived == 1]
cnt = 0
detail_desc = []
for c in train.columns:
if c == 'PassengerId':
continue
if train[c].dtypes != 'object':
desc = pd.DataFrame(columns=['feature', 'data', 'type', 'count', 'mean', 'median', 'std', 'min', 'max', 'skew', 'null'])
desc.loc[0] = [c, 'Train', train[c].dtype.name, train[c].count() , train[c].mean() , train[c].median() , train[c].std() , train[c].min() , train[c].max() , train[c].skew() , train[c].isnull().sum() ]
desc.loc[1] = [c, 'All', train[c].dtype.name, all_data[c].count() , all_data[c].mean() , all_data[c].median() , all_data[c].std() , all_data[c].min() , all_data[c].max() , all_data[c].skew() , all_data[c].isnull().sum() ]
desc.loc[2] = [c, 'Target=0', train0[c].dtype.name, train0[c].count() , train0[c].mean() , train0[c].median() , train0[c].std() , train0[c].min() , train0[c].max() , train0[c].skew() , train0[c].isnull().sum() ]
desc.loc[3] = [c, 'Target=1', train1[c].dtype.name, train1[c].count() , train1[c].mean() , train1[c].median() , train1[c].std() , train1[c].min() , train1[c].max() , train1[c].skew() , train1[c].isnull().sum() ]
desc = desc.set_index(['feature', 'data'],drop=True)
detail_desc.append(desc.style.background_gradient() )<create_dataframe> | label_counter = new.label.value_counts()
print(label_counter ) | Digit Recognizer |
6,956,162 | train0 = train[train.Survived == 0]
train1 = train[train.Survived == 1]
cnt = 0
detail_desc = []
for c in train.columns:
if c == 'PassengerId':
continue
if train[c].dtypes == 'object':
desc = pd.DataFrame(columns=['feature', 'data', 'type', 'count', 'null', 'mode', 'value_count'])
desc.loc[0] = [c, 'Train', train[c].dtype.name, train[c].count() , train[c].isnull().sum() , train[c].mode() , train[c].value_counts() ]
desc = desc.set_index(['feature', 'data'],drop=True)
detail_desc.append(desc.style.background_gradient() )<create_dataframe> | X_train = X_train / 255
X_test = X_test / 255
test = test / 255 | Digit Recognizer |
6,956,162 | multi_table(detail_desc )<feature_engineering> | y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10 ) | Digit Recognizer |
6,956,162 | tmp_train = copy.deepcopy(train)
tmp_train['AgeBin'] = 6
for i in range(6):
tmp_train.loc[(tmp_train.Age >= 10*i)&(tmp_train.Age < 10*(i + 1)) , 'AgeBin'] = i
tmp_train.head(3 )<concatenate> | from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau | Digit Recognizer |
6,956,162 | cat_dist(tmp_train, var='AgeBin', hue='Survived', msg_show=False )<categorify> | model = Sequential()
model.add(Conv2D(32,(3, 3), activation='relu', padding='same',input_shape=(28 , 28 , 1)))
model.add(Conv2D(32,(3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(64,(3, 3), activation='relu', padding='same'))
model.add(Conv2D(64,(3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax')) | Digit Recognizer |
6,956,162 | tmp_train = copy.deepcopy(train)
tmp_train['FareBin'] = pd.cut(tmp_train.Fare, 10)
tmp_train['FareBin'] = LabelEncoder().fit_transform(tmp_train.FareBin)
tmp_train.head(3 )<concatenate> | model.compile(optimizer = RMSprop(lr=0.001),loss='categorical_crossentropy', metrics=['accuracy'] ) | Digit Recognizer |
6,956,162 | cat_dist(tmp_train, var='FareBin', hue='Survived', msg_show=False )<concatenate> | datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.0,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_train ) | Digit Recognizer |
6,956,162 | cat_dist(train, var='Embarked', hue='Survived' )<create_dataframe> | batch_size = 64
epochs = 10
lr_reduce = ReduceLROnPlateau(monitor='val_acc', factor=0.1, epsilon=0.0001, patience=1, verbose=1 ) | Digit Recognizer |
6,956,162 | tmp_all_data = copy.deepcopy(all_data)
t0 = pd.DataFrame(tmp_all_data.Name)
t1 = pd.DataFrame(tmp_all_data.Name.apply(lambda x: x.split(',')[1].split('.')[0].strip() ).value_counts())
multi_table([t0, t1] )<feature_engineering> | model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size),
steps_per_epoch=X_train.shape[0] // batch_size,
callbacks=[lr_reduce],
validation_data=(X_test, y_test),
epochs = epochs, verbose = 2 ) | Digit Recognizer |
6,956,162 | tmp_all_data['Title'] = tmp_all_data.Name.apply(lambda x: x.split(',')[1].split('.')[0].strip())
<count_values> | score = model.evaluate(X_test, y_test, verbose=0)
print('valid loss:', score[0])
print('valid accuracy:', score[1] ) | Digit Recognizer |
6,956,162 | tmp_train.Cabin.value_counts()<feature_engineering> | pred = model.predict(test ) | Digit Recognizer |
6,956,162 | tmp_train['CabinCnt'] = tmp_train.Cabin.apply(lambda x: 0 if pd.isna(x)else len(x.split(' ')))
tmp_train['CabinClass'] = tmp_train.Cabin.apply(lambda x: str(x)[0] )<create_dataframe> | pred_digits = np.argmax(pred , axis = 1)
ImageId = range(1 , len(pred_digits)+1 ) | Digit Recognizer |
6,956,162 | t0 = pd.DataFrame(tmp_train.CabinCnt.value_counts())
t1 = pd.DataFrame(tmp_train.CabinClass.value_counts())
multi_table([t0, t1] )<concatenate> | context = {"ImageId" : ImageId , "Label" : pred_digits }
ans = pd.DataFrame(context ) | Digit Recognizer |
6,956,162 | cat_dist(tmp_train, var='CabinCnt', hue='Survived', msg_show=False )<concatenate> | ans.to_csv("Predictions by CNN.csv", index=None ) | Digit Recognizer |
3,995,435 | cat_dist(tmp_train, var='CabinClass', hue='Survived', msg_show=False )<feature_engineering> | import keras
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
from keras.optimizers import RMSprop
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split | Digit Recognizer |
3,995,435 | tmp_train['IsNumericTicket'] = tmp_train.Ticket.apply(lambda x: 1 if x.isnumeric() else 0)
tmp_train['TicketType'] = tmp_train.Ticket.apply(lambda x: ''.join(x.split(' ')[:-1] ).replace('.','' ).replace('/','' ).lower() if len(x.split(' ')[:-1])> 0 else 0 )<concatenate> | train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
y_train = train['label']
x_train = train.drop(labels=['label'] ,axis=1)
del train
x_train.isnull().any().describe()
test.isnull().any().describe() | Digit Recognizer |
3,995,435 | cat_dist(tmp_train, var='IsNumericTicket', hue='Survived' )<drop_column> | x_train = x_train / 255.0
test = test / 255.0 | Digit Recognizer |
3,995,435 | all_data['Age'] = all_data.Age.fillna(train.Age.median())
all_data['Fare'] = all_data.Fare.fillna(train.Fare.median())
all_data.dropna(subset=['Embarked'], inplace=True)
cabins = all_data.Cabin
all_data.drop(['Cabin'], axis=1, inplace=True )<feature_engineering> | NUM_CLASSES = 10
y_train = to_categorical(y_train, num_classes=NUM_CLASSES ) | Digit Recognizer |
3,995,435 | all_data['CabinCnt'] = cabins.apply(lambda x: 0 if pd.isna(x)else len(x.split(' ')))
all_data['CabinClass'] = cabins.apply(lambda x: str(x)[0])
all_data['IsNumericTicket'] = all_data.Ticket.apply(lambda x: 1 if x.isnumeric() else 0)
all_data['TicketType'] = all_data.Ticket.apply(lambda x: ''.join(x.split(' ')[:-1] ).replace('.','' ).replace('/','' ).lower() if len(x.split(' ')[:-1])> 0 else 0)
all_data['Title'] = all_data.Name.apply(lambda x: x.split(',')[1].split('.')[0].strip())
all_data['Family'] = all_data.SibSp + all_data.Parch<normalization> | random_seed = 2
x_train, x_test, y_train, y_test = train_test_split(x_train, y_train, test_size=0.1, random_state=random_seed ) | Digit Recognizer |
3,995,435 | numeric_vars = ['Age', 'SibSp', 'Parch', 'Fare', 'CabinCnt', 'Family']
ordinal_vars = ['Pclass']
nominal_vars = ['Name', 'Sex', 'Ticket', 'Embarked', 'CabinClass', 'IsNumericTicket', 'TicketType', 'Title']
all_data[nominal_vars] = all_data[nominal_vars].astype('str')
for feature in numeric_vars:
all_data[feature] = np.log1p(all_data[feature])
scaler = StandardScaler()
numeric_vars = all_data.columns[(all_data.dtypes != 'object')&(all_data.columns != 'PassengerId')&(all_data.columns != 'Survived')&(all_data.columns != 'IsTrain')]
all_data[numeric_vars] = scaler.fit_transform(all_data[numeric_vars] )<prepare_x_and_y> | %matplotlib inline
print(x_train.shape)
test_show = plt.imshow(x_train[0][:,:,0] ) | Digit Recognizer |
3,995,435 | all_data.drop(['PassengerId', 'Name', 'Ticket'], axis=1, inplace=True)
data_dummies = pd.get_dummies(all_data)
X_train = data_dummies[data_dummies.Survived.notnull() ].drop(['Survived'], axis=1)
y_train = data_dummies[data_dummies.Survived.notnull() ].Survived
X_test = data_dummies[data_dummies.Survived.isnull() ].drop(['Survived'], axis=1 )<count_values> | model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(5,5), padding='Same', activation='relu', input_shape=(28, 28, 1)))
model.add(Conv2D(filters=32, kernel_size=(5,5), padding='Same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(3,3), padding='Same', activation='relu'))
model.add(Conv2D(filters=64, kernel_size=(3,3), padding='Same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(NUM_CLASSES, activation='softmax')) | Digit Recognizer |
3,995,435 | all_data.Title = all_data.Title.apply(lambda x: 'Others' if x in list(all_data.Title.value_counts() [all_data.Title.value_counts() < 8].index)else x)
all_data.TicketType = all_data.TicketType.apply(lambda x: 'Others' if x in list(all_data.TicketType.value_counts() [all_data.TicketType.value_counts() < 10].index)else x )<prepare_x_and_y> | model.compile(optimizer=RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0), loss='categorical_crossentropy', metrics=['accuracy'])
reduce_lr = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001)
epochs = 30
batch_size = 86 | Digit Recognizer |
3,995,435 | data_dummies = pd.get_dummies(all_data)
X_train = data_dummies[data_dummies.Survived.notnull() ].drop(['Survived'], axis=1)
X_test = data_dummies[data_dummies.Survived.isnull() ].drop(['Survived'], axis=1 )<train_model> | params = {
'rotation_range':10,
'zoom_range':0.1,
'width_shift_range':0.1,
'height_shift_range':0.1,
'featurewise_center':False,
'samplewise_center':False,
'featurewise_std_normalization':False,
'samplewise_std_normalization':False,
'zca_whitening':False,
'horizontal_flip':False,
'vertical_flip':False
}
datagen = ImageDataGenerator(**params)
datagen.fit(x_train ) | Digit Recognizer |
3,995,435 | allow_tuning = False<split> | hist = model.fit_generator(
datagen.flow(x_train, y_train, batch_size=batch_size),
epochs=epochs,
validation_data=(x_test, y_test),
verbose=1,
steps_per_epoch=x_train.shape[0] // batch_size,
callbacks=[reduce_lr]
) | Digit Recognizer |
3,995,435 | def xgb_gridsearch(params_grid_xgb, features, values, X, y, last=False):
x_train, x_test = train_test_split(X, test_size=.2, random_state=42)
y_train_tmp, y_test_tmp = train_test_split(y, test_size=.2, random_state=42)
cv = RepeatedStratifiedKFold(n_splits = 10, n_repeats = 3, random_state = 42)
model_xgb = XGBClassifier(use_label_encoder = False,
objective = 'binary:logistic')
for i in range(len(features)) :
params_grid_xgb[features[i]] = values[i]
search_xgb = GridSearchCV(model_xgb, params_grid_xgb, verbose = 0,
scoring = 'neg_log_loss', cv = cv ).fit(x_train, y_train_tmp, early_stopping_rounds = 15,
eval_set = [[x_test, y_test_tmp]],
eval_metric = 'logloss', verbose = False)
for i in range(len(features)) :
print(f"{features[i]}: {search_xgb.best_params_[features[i]]}")
if not last:
for k, v in search_xgb.best_params_.items() :
search_xgb.best_params_[k] = [v]
return search_xgb, search_xgb.best_params_<train_on_grid> | submission = pd.concat([pd.Series(range(1,28001), name="ImageId"), results], axis=1)
submission.to_csv('mnist_datagen.csv', index=False ) | Digit Recognizer |
3,971,606 | if allow_tuning:
params_knn = {
'n_neighbors' : range(1, 10),
'weights' : ['uniform', 'distance'],
'algorithm' : ['auto', 'ball_tree','kd_tree'],
'p' : [1,2]
}
model_knn = knn()
search_knn = GridSearchCV(model_knn, params_knn, cv=5, scoring='accuracy', n_jobs=-1, verbose=1 ).fit(X_train, y_train)
print(search_knn.best_params_ )<train_on_grid> | train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv")
print("Train size:{}\nTest size:{}".format(train.shape, test.shape))
x_train = train.drop(['label'], axis=1 ).values.astype('float32')
y_train = train['label'].values.astype('int32')
x_test = test.values.astype('float32')
x_train = x_train.reshape(x_train.shape[0], 28, 28)/ 255.0
x_test = x_test.reshape(x_test.shape[0], 28, 28)/ 255.0
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size = 0.05, random_state=42)
print(x_train.shape)
print(x_val.shape)
print(y_train.shape)
print(x_test.shape ) | Digit Recognizer |
3,971,606 | if allow_tuning:
params_logistic = {
'max_iter': [2000],
'penalty': ['l1', 'l2'],
'C': np.logspace(-4, 4, 20),
'solver': ['liblinear']
}
model_logistic = LogisticRegression()
search_logistic = GridSearchCV(model_logistic, params_logistic, cv=5, scoring='accuracy', n_jobs=-1, verbose=1 ).fit(X_train, y_train)
print(search_logistic.best_params_ )<train_on_grid> | x_train = x_train.reshape(x_train.shape[0], 28, 28,1)
x_val = x_val.reshape(x_val.shape[0], 28, 28,1)
x_test = x_test.reshape(x_test.shape[0], 28, 28,1)
print("Train size:{}
validation size:{}
Test size:{}".format(x_train.shape,x_val.shape, x_test.shape))
mean_px = x_train.mean().astype(np.float32)
std_px = x_train.std().astype(np.float32)
| Digit Recognizer |
3,971,606 | if allow_tuning:
params_svc = [{'kernel': ['rbf'], 'gamma': [.01,.1,.5, 1, 2, 5, 10], 'C': [.1, 1, 10, 100, 1000], 'probability': [True]},
{'kernel': ['poly'], 'degree' : [2, 3, 4, 5], 'C': [.01,.1, 1, 10, 100, 1000], 'probability': [True]}]
model_svc = SVC()
search_svc = GridSearchCV(model_svc, params_svc, cv=5, scoring='accuracy', n_jobs=-1, verbose=1 ).fit(X_train, y_train)
print(search_svc.best_params_ )<train_on_grid> | input = Input(shape=[28, 28, 1])
x = Conv2D(32,(5, 5), strides=1, padding='same', name='conv1' )(input)
x = BatchNormalization(momentum=0.1, epsilon=1e-5, gamma_initializer='uniform',name='batch1' )(x)
x = Activation('relu', name='relu1' )(x)
x = Conv2D(32,(5, 5), strides=1, padding='same', name='conv2' )(x)
x = BatchNormalization(momentum=0.1, epsilon=1e-5, gamma_initializer='uniform',name='batch2' )(x)
x = Activation('relu', name='relu2' )(x)
x = Conv2D(32,(5, 5), strides=1, padding='same', name='conv2add' )(x)
x = BatchNormalization(momentum=0.1, epsilon=1e-5, gamma_initializer='uniform',name='batch2add' )(x)
x = Activation('relu', name='relu2add' )(x)
x = Dropout(0.15 )(x)
x = MaxPool2D(pool_size=2, strides=2, padding='same' )(x)
x = Conv2D(64,(3, 3), strides=1, padding='same', name='conv3' )(x)
x = BatchNormalization(momentum=0.1, epsilon=1e-5, gamma_initializer='uniform',name='batch3' )(x)
x = Activation('relu', name='relu3' )(x)
x = Conv2D(64,(3, 3), strides=1, padding='same', name='conv4' )(x)
x = BatchNormalization(momentum=0.1, epsilon=1e-5, gamma_initializer='uniform',name='batch4' )(x)
x = Activation('relu', name='relu4' )(x)
x = Conv2D(32,(3, 3), strides=1, padding='same', name='conv5' )(x)
x = BatchNormalization(momentum=0.1, epsilon=1e-5, gamma_initializer='uniform',name='batch5' )(x)
x = Activation('relu', name='relu5' )(x)
x = Dropout(0.15 )(x)
x = MaxPool2D(pool_size=2, strides=2 )(x)
x = Flatten()(x)
x = Dense(100, name='Dense30' )(x)
x = Activation('relu', name='relu6' )(x)
x = Dropout(0.05 )(x)
x = Dense(10, name='Dense10' )(x)
x = Activation('softmax' )(x)
model = Model(inputs = input, outputs =x)
print(model.summary() ) | Digit Recognizer |
3,971,606 | if allow_tuning:
params_svc = {'kernel': ['rbf'], 'gamma': [i/10000 for i in range(90, 110)], 'C': range(50, 80, 10), 'probability': [True]}
model_svc = SVC()
search_svc = GridSearchCV(model_svc, params_svc, cv=5, scoring='accuracy', n_jobs=-1, verbose=1 ).fit(X_train, y_train)
print(search_svc.best_params_ )<choose_model_class> | checkpoint = ModelCheckpoint("best_weights.hdf5", monitor='val_acc', verbose=1, save_best_only=True, mode='max')
datagen = ImageDataGenerator(
rotation_range= 8,
zoom_range = 0.13,
width_shift_range=0.13,
height_shift_range=0.13)
epochs = 60
lr_initial = 0.0011
optimizer = Adam(lr=lr_initial, decay= lr_initial /(epochs*1.3))
model.compile(optimizer=optimizer,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
datagen.fit(x_train)
batch_size = 64
history = model.fit_generator(datagen.flow(x_train,y_train, batch_size=batch_size),
epochs = epochs, validation_data =(x_val,y_val),
verbose = 1, steps_per_epoch=x_train.shape[0] // batch_size, callbacks=[checkpoint])
model.load_weights("best_weights.hdf5")
| Digit Recognizer |
3,971,606 | <train_on_grid><EOS> | results = model.predict(x_test)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label")
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1)
submission.to_csv("MNIST-CNN-ENSEMBLE.csv",index=False ) | Digit Recognizer |
5,217,179 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<init_hyperparams> | !pip install neural-structured-learning
!pip install tensorflow-gpu==2.1.0 | Digit Recognizer |
5,217,179 | if allow_tuning:
params_xgb = {'n_estimators': [1000],
'learning_rate': [0.1],
'max_depth': [5],
'min_child_weight': [1],
'gamma': [0],
'subsample': [0.8],
'colsample_bytree': [0.8],
'n_jobs': [-1],
'objective': ['binary:logistic'],
'use_label_encoder': [False],
'eval_metric': ['logloss'],
'scale_pos_weight': [1]}
search_xgb, params_xgb = xgb_gridsearch(params_xgb,
['learning_rate'],
[[0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.15, 0.2]],
X_train, y_train)
search_xgb, params_xgb = xgb_gridsearch(params_xgb,
['max_depth', 'min_child_weight'],
[range(3, 10), range(1, 6)],
X_train, y_train)
search_xgb, params_xgb = xgb_gridsearch(params_xgb,
['gamma'],
[[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2]],
X_train, y_train)
search_xgb, params_xgb = xgb_gridsearch(params_xgb,
['subsample', 'colsample_bytree'],
[[i/100.0 for i in range(75,90,5)], [i/100.0 for i in range(75,90,5)]],
X_train, y_train)
search_xgb, params_xgb = xgb_gridsearch(params_xgb,
['reg_alpha'],
[[1e-5, 1e-2, 0.1, 1, 100]],
X_train, y_train)
params_xgb['n_estimators'] = [5000]
search_xgb, params_xgb = xgb_gridsearch(params_xgb,
['learning_rate'],
[[0.001, 0.005, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.15, 0.2]],
X_train, y_train, last=True)
x_train, x_test = train_test_split(X_train, test_size=.2, random_state=42)
y_train_tmp, y_test_tmp = train_test_split(y_train, test_size=.2, random_state=42)
model_xgb = XGBClassifier(**params_xgb)
model_xgb = model_xgb.fit(x_train, y_train_tmp, eval_set=[(x_test, y_test_tmp)], eval_metric=['logloss'], early_stopping_rounds=15, verbose=0)
search_xgb.best_estimator_.n_estimators = model_xgb.best_iteration<choose_model_class> | train = pd.read_csv('/kaggle/input/train.csv')
train.head() | Digit Recognizer |
5,217,179 | if allow_tuning:
model_knn = search_knn.best_estimator_
model_logistic = search_logistic.best_estimator_
model_svc = search_svc.best_estimator_
model_rf = search_rf.best_estimator_
model_xgb = search_xgb.best_estimator_
else:
model_knn = knn(algorithm='auto',
n_neighbors=9,
p=1,
weights='uniform')
model_logistic = LogisticRegression(C=0.08858667904100823,
max_iter=2000,
penalty='l2',
solver='liblinear')
model_svc = SVC(C=70,
gamma=0.0106,
kernel='rbf',
probability=True)
model_rf = RandomForestClassifier(bootstrap=True,
criterion='entropy',
max_depth=50, max_features=6,
min_samples_leaf=1,
min_samples_split=10,
n_estimators=100,
random_state=734)
model_xgb = XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=0.8,
enable_categorical=False, eval_metric='logloss', gamma=0.8,gpu_id=-1, importance_type=None, interaction_constraints='',
learning_rate=0.15, max_delta_step=0, max_depth=5,
min_child_weight=1, missing=np.nan, monotone_constraints='() ',
n_estimators=15, n_jobs=-1, num_parallel_tree=1, predictor='auto',
random_state=0, reg_alpha=1e-05, reg_lambda=1, scale_pos_weight=1,
subsample=0.8, tree_method='exact', use_label_encoder=False,
validate_parameters=1, verbosity=0)
models = {
'knn': model_knn,
'logistic': model_logistic,
'svc': model_svc,
'rf': model_rf,
'xgb': model_xgb
}<choose_model_class> | def get_labels(train):
return np.array(train['label'])
def get_features(train):
features = train.drop(['label'], axis=1)
features_normalized = features / 255
return features_normalized | Digit Recognizer |
5,217,179 | def select_models(start, cnt, goal, estimators, voting):
if cnt == goal:
estimators_copy = copy.deepcopy(estimators)
voting_name = f'{voting}_' + '_'.join([i[0] for i in list(estimators_copy)])
models[voting_name] = VotingClassifier(estimators=estimators_copy, voting=voting)
return
for i in range(start, 5):
estimators.append(list(models.items())[i])
select_models(i + 1, cnt + 1, goal, estimators, voting)
estimators.pop()<define_search_space> | def get_generator_dict(in_gen, should_augment=True):
if should_augment:
image_gen = ImageDataGenerator(fill_mode='reflect',
data_format='channels_last',
brightness_range=[0.5, 1.5])
else:
image_gen = ImageDataGenerator(fill_mode='reflect',
data_format='channels_last',
brightness_range=[1, 1])
for items in in_gen:
if len(items)== 2:
in_x, in_y = items
else:
in_x, in_y, weights = items
g_x = image_gen.flow(255 * in_x, in_y, batch_size=in_x.shape[0])
x, y = next(g_x)
yield {'feature': x / 255.0, 'label': y} | Digit Recognizer |
5,217,179 | select_models(0, 0, 2, [], 'hard')
select_models(0, 0, 3, [], 'hard')
select_models(0, 0, 4, [], 'hard')
select_models(0, 0, 5, [], 'hard')
select_models(0, 0, 2, [], 'soft')
select_models(0, 0, 3, [], 'soft')
select_models(0, 0, 4, [], 'soft')
select_models(0, 0, 5, [], 'soft' )<create_dataframe> | def get_model(shape, n_class):
model = Sequential()
model.add(Conv2D(64,(4,4), input_shape=shape))
model.add(MaxPool2D(( 2,2)))
model.add(BatchNormalization())
model.add(tf.keras.layers.Activation("selu")) ,
model.add(Conv2D(128,(3,3)))
model.add(MaxPool2D(( 2,2)))
model.add(BatchNormalization())
model.add(tf.keras.layers.Activation("selu")) ,
model.add(Conv2D(256,(2,2)))
model.add(MaxPool2D(( 2,2)))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Flatten())
model.add(tf.keras.layers.Activation("selu"))
model.add(Dense(64, kernel_initializer="he_normal", use_bias=False))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(tf.keras.layers.Activation("selu"))
model.add(Dense(32, kernel_initializer="he_normal", use_bias=False))
model.add(BatchNormalization())
model.add(Dense(n_class, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
| Digit Recognizer |
5,217,179 | result_by_model = pd.DataFrame({'model name': models.keys() , 'model': models.values() , 'score': 0} )<compute_train_metric> | def get_adv_model(shape, n_class):
model = Sequential([
Conv2D(64,(4,4), input_shape=shape, dynamic=True),
MaxPool2D(( 2,2)) ,
BatchNormalization() ,
tf.keras.layers.Activation("selu"),
Conv2D(128,(3,3)) ,
MaxPool2D(( 2,2)) ,
BatchNormalization() ,
tf.keras.layers.Activation("selu"),
Conv2D(256,(2,2)) ,
MaxPool2D(( 2,2)) ,
Dropout(0.2),
BatchNormalization() ,
Flatten() ,
tf.keras.layers.Activation("selu"),
Dense(64, kernel_initializer="he_normal", use_bias=False),
Dropout(0.2),
BatchNormalization() ,
tf.keras.layers.Activation("selu"),
Dense(32, kernel_initializer="he_normal", use_bias=False),
BatchNormalization() ,
Dense(n_class, activation='softmax')])
adv_config = nsl.configs.make_adv_reg_config(multiplier=0.2, adv_step_size=0.05)
adv_model = nsl.keras.AdversarialRegularization(model, adv_config=adv_config)
adv_model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'], run_eagerly=True)
return adv_model
| Digit Recognizer |
5,217,179 | for name, model in models.items() :
result_by_model.loc[result_by_model['model name'] == name, 'score'] = cross_val_score(model, X_train,y_train,cv=5 ).mean()<sort_values> | def get_sample_model(shape, n_class):
model = tf.keras.Sequential([
tf.keras.Input(shape, name='feature'),
tf.keras.layers.Flatten() ,
tf.keras.layers.Dense(128, activation=tf.nn.relu),
tf.keras.layers.Dense(n_class, activation=tf.nn.softmax)
])
adv_config = nsl.configs.make_adv_reg_config(multiplier=0.2, adv_step_size=0.05)
adv_model = nsl.keras.AdversarialRegularization(model, adv_config=adv_config)
adv_model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return adv_model | Digit Recognizer |
5,217,179 | result_by_model.sort_values('score', ascending=False ).reset_index(drop=True )<save_to_csv> | features = get_features(train)
labels = get_labels(train)
| Digit Recognizer |
5,217,179 | model_name = 'rf'
models[model_name].fit(X_train, y_train)
y_pred = models[model_name].predict(X_test ).astype('int')
submission = pd.DataFrame({'PassengerId': test.PassengerId,
'Survived': y_pred})
submission.to_csv('submission.csv', index = False )<import_modules> | model = get_adv_model(x[0].shape, 10)
print(model ) | Digit Recognizer |
5,217,179 | import numpy as np
import pandas as pd
from sklearn import ensemble
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_validate,GridSearchCV
import lightgbm as lgbm
from collections import Counter<load_from_csv> | train_x = x[:36000]
test_x = x[36000:]
labels_train = labels[:36000]
labels_test = labels[36000:] | Digit Recognizer |
5,217,179 | train = pd.read_csv("../input/titanic/train.csv")
test = pd.read_csv("../input/titanic/test.csv" )<prepare_x_and_y> | datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(train_x ) | Digit Recognizer |
5,217,179 | y = train['Survived']
X = train.drop(['Survived'], axis=1)
X_test = test<define_search_space> | earlyStop = tf.keras.callbacks.EarlyStopping(monitor='val_acc', patience=10)
reduceLROnPlateau = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001 ) | Digit Recognizer |
5,217,179 | params={"n_estimators":np.arange(50,200,10),
"max_depth":np.arange(1,11,1),
"learning_rate":[0.1,0.01,0.001]
}<train_on_grid> | model.fit(datagen.flow(train_x, labels_train, batch_size=32),
steps_per_epoch=len(train_x)// 32,
epochs=5)
| Digit Recognizer |
5,217,179 | xgb_est=lgbm.LGBMClassifier(
random_state=42,
objective='binary',
eval_metric="auc"
)
gr_xgb_est=GridSearchCV(xgb_est,param_grid=params,cv=5,n_jobs=-1,verbose=10)
gr_xgb_est.fit(X,y )<find_best_params> | model.evaluate({'feature': test_x, 'label': np.array(labels_test)} ) | Digit Recognizer |
5,217,179 | gr_xgb_est.best_estimator_.get_params()<predict_on_test> | test = pd.read_csv('/kaggle/input/test.csv')
test.head() | Digit Recognizer |
5,217,179 | pred_test = gr_xgb_est.predict(X_test )<save_to_csv> | test_normalized = test/ 255
x_test = np.array(test_normalized ).reshape(-1,28,28,1 ) | Digit Recognizer |
5,217,179 | submission = pd.read_csv('../input/titanic/gender_submission.csv')
submission['Survived'] =(pred_test > 0.5 ).astype(int)
submission.to_csv('sub.csv', index=False)
submission.head()<set_options> | lo = [0] * len(x_test)
predictions = model.predict({'feature':x_test, 'label':np.array(lo)} ) | Digit Recognizer |
5,217,179 | seed = 50
os.environ['PYTHONHASHSEED'] = str(seed)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.enabled = False<set_options> | response = []
for i, prediction in enumerate(predictions):
response.append([i + 1, np.argmax(prediction)])
resposta = pd.DataFrame(response, columns=['ImageId', 'Label'])
resposta.to_csv('output.csv', index=False)
| Digit Recognizer |
7,366,854 | device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device<load_from_csv> | print(tf.__version__ ) | Digit Recognizer |
7,366,854 | data_path = '/kaggle/input/plant-pathology-2020-fgvc7/'
train = pd.read_csv(data_path + 'train.csv')
test = pd.read_csv(data_path + 'test.csv')
submission = pd.read_csv(data_path + 'sample_submission.csv' )<split> | train=pd.read_csv(".. /input/digit-recognizer/train.csv")
test=pd.read_csv(".. /input/digit-recognizer/test.csv")
print("Train Shape: {}".format(train.shape))
print("Test Shape: {}".format(test.shape)) | Digit Recognizer |
7,366,854 | train, valid = train_test_split(train,
test_size=0.1,
stratify=train[['healthy', 'multiple_diseases', 'rust', 'scab']],
random_state=50 )<normalization> | print("Train Nulls: {}".format(train.isna().any().sum()))
print("Test Nulls: {}".format(test.isna().any().sum())) | Digit Recognizer |
7,366,854 | class ImageDataset(Dataset):
def __init__(self, df, img_dir='./', transform=None, is_test=False):
super().__init__()
self.df = df
self.img_dir = img_dir
self.transform = transform
self.is_test = is_test
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
img_id = self.df.iloc[idx, 0]
img_path = self.img_dir + img_id + '.jpg'
image = cv2.imread(img_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if self.transform is not None:
image = self.transform(image=image)['image']
if self.is_test:
return image
else:
label = np.argmax(self.df.iloc[idx, 1:5])
return image, label<import_modules> | y = train['label'].astype('int8')
X = train.drop(columns=['label'] ).astype('float16' ).values
X = X.reshape(42000, 28,28, 1 ) | Digit Recognizer |
7,366,854 | import albumentations as A
from albumentations.pytorch import ToTensorV2<categorify> | X_train, X_validation, y_train, y_validation = train_test_split(X, y, test_size=0.2 ) | Digit Recognizer |
7,366,854 | transform_test = A.Compose([
A.Resize(450, 650),
A.Normalize() ,
ToTensorV2()
] )<create_dataframe> | train_image_generator = ImageDataGenerator(
rescale=1./255
)
validation_image_generator = ImageDataGenerator(
rescale=1./255
)
train_image_gen = train_image_generator.flow(
x=X_train,
y=y_train,
)
validation_image_gen = validation_image_generator.flow(
x=X_validation,
y=y_validation,
) | Digit Recognizer |
7,366,854 | img_dir = '/kaggle/input/plant-pathology-2020-fgvc7/images/'
dataset_train = ImageDataset(train, img_dir=img_dir, transform=transform_train)
dataset_valid = ImageDataset(valid, img_dir=img_dir, transform=transform_test )<define_variables> | model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32,(3,3), activation='relu',padding='same', input_shape=(28, 28, 1)) ,
tf.keras.layers.Conv2D(64,(3,3), activation='relu',padding='same'),
tf.keras.layers.Conv2D(128,(3,3), activation='relu',padding='same'),
tf.keras.layers.MaxPooling2D(( 2,2)) ,
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Conv2D(64,(3,3), activation='relu',padding='same'),
tf.keras.layers.Conv2D(128,(3,3), activation='relu',padding='same'),
tf.keras.layers.MaxPooling2D(( 2,2)) ,
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Flatten() ,
tf.keras.layers.Dense(1024, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.001)) ,
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(512, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.001)) ,
tf.keras.layers.BatchNormalization() ,
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(10, activation='softmax')
] ) | Digit Recognizer |
7,366,854 | def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
g = torch.Generator()
g.manual_seed(0 )<load_pretrained> | tf.keras.backend.clear_session()
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(
train_image_gen,
epochs=5,
validation_data=validation_image_gen
) | Digit Recognizer |
7,366,854 | batch_size = 4
loader_train = DataLoader(dataset_train, batch_size=batch_size,
shuffle=True, worker_init_fn=seed_worker,
generator=g, num_workers=2)
loader_valid = DataLoader(dataset_valid, batch_size=batch_size,
shuffle=False, worker_init_fn=seed_worker,
generator=g, num_workers=2 )<install_modules> | validation_predict = model.predict(X_validation)
low_predictions = pd.DataFrame()
low_predictions['label'] = np.argmax(validation_predict, axis=1)
low_predictions['Confidence'] = np.max(validation_predict, axis=1)
low_index = low_predictions.sort_values(by=['Confidence'])[:10].index
low_labels = low_predictions.sort_values(by=['Confidence'])['label']
fig, axs = plt.subplots(2, 5, figsize=(24,9.5))
for i, low_label, ax in zip(low_index,low_labels, axs.flat):
image = X_validation[i].astype('float32' ).reshape(28, 28)
ax.imshow(image, cmap='gray')
ax.set_xlabel("True: {}".format(y_validation.iloc[i]))
ax.set_title("Guessed:{} ".format(low_label))
| Digit Recognizer |
7,366,854 | !pip install efficientnet-pytorch==0.7.1<load_pretrained> | train_image_generator = ImageDataGenerator(
rescale=1./255,
rotation_range=25,
width_shift_range=.1,
height_shift_range=.1,
zoom_range=0.1
)
validation_image_generator = ImageDataGenerator(
rescale=1./255,
rotation_range=25,
width_shift_range=.1,
height_shift_range=.1,
zoom_range=0.1
)
train_image_gen = train_image_generator.flow(
x=X_train,
y=y_train,
)
validation_image_gen = validation_image_generator.flow(
x=X_validation,
y=y_validation,
)
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate=0.001,
decay_steps=250,
decay_rate=1,
staircase=False
)
def get_optimizer() :
return tf.keras.optimizers.Adam(lr_schedule)
tf.keras.backend.clear_session()
model.compile(optimizer=get_optimizer() ,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
history = model.fit(
train_image_gen,
epochs=20,
validation_data=validation_image_gen
) | Digit Recognizer |
7,366,854 | model = EfficientNet.from_pretrained('efficientnet-b7', num_classes=4)
model = model.to(device )<import_modules> | X_test = test.astype('float16' ).values
X_test = X_test / 255.0
X_test = X_test.reshape(len(X_test), 28, 28, 1 ) | Digit Recognizer |
7,366,854 | <choose_model_class><EOS> | label_pred = model.predict_classes(X_test, verbose=0)
submission = pd.DataFrame()
submission['Label'] = label_pred
submission['ImageId'] = submission.index + 1
submission.to_csv('../working/output.csv', index=False ) | Digit Recognizer |
6,789,424 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<train_model> | from fastai import *
from fastai.vision import *
import pandas as pd | Digit Recognizer |
6,789,424 | epochs = 5
for epoch in range(epochs):
model.train()
epoch_train_loss = 0
for images, labels in tqdm(loader_train):
images = images.to(device)
labels = labels.to(device)
optimizer.zero_grad()
outputs = model(images)
loss = criterion(outputs, labels)
epoch_train_loss += loss.item()
loss.backward()
optimizer.step()
print(f'Epoch [{epoch+1}/{epochs}] - train loss: {epoch_train_loss/len(loader_train):.4f}')
model.eval()
epoch_valid_loss = 0
preds_list = []
true_onehot_list = []
with torch.no_grad() :
for images, labels in loader_valid:
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
loss = criterion(outputs, labels)
epoch_valid_loss += loss.item()
preds = torch.softmax(outputs.cpu() , dim=1 ).numpy()
true_onehot = torch.eye(4)[labels].cpu().numpy()
preds_list.extend(preds)
true_onehot_list.extend(true_onehot)
print(f'Epoch [{epoch+1}/{epochs}] - valid loss: {epoch_valid_loss/len(loader_valid):.4f} / valid ROC AUC: {roc_auc_score(true_onehot_list, preds_list):.4f}' )<load_pretrained> | df = pd.read_csv('../input/digit-recognizer/train.csv')
df2 = pd.read_csv('../input/mnist-in-csv/mnist_train.csv')
df2.columns = df.columns
df_temp = pd.concat([df,df2], join='outer',axis=0 ,ignore_index=False ,sort=False ).reset_index(drop=True)
df_temp.to_csv('train.csv',index=False ) | Digit Recognizer |
6,789,424 | dataset_test = ImageDataset(test, img_dir=img_dir,
transform=transform_test, is_test=True)
loader_test = DataLoader(dataset_test, batch_size=batch_size,
shuffle=False, worker_init_fn=seed_worker,
generator=g, num_workers=2 )<find_best_params> | df_temp.to_csv('train.csv',index=False ) | Digit Recognizer |
6,789,424 | model.eval()
preds = np.zeros(( len(test), 4))
with torch.no_grad() :
for i, images in enumerate(loader_test):
images = images.to(device)
outputs = model(images)
preds_part = torch.softmax(outputs.cpu() , dim=1 ).squeeze().numpy()
preds[i*batch_size:(i+1)*batch_size] += preds_part<save_to_csv> | class CustomImageItemList(ImageList):
def open(self, fn):
img = fn.reshape(28, 28)
img = np.stack(( img,)*3, axis=-1)
return Image(pil2tensor(img, dtype=np.float32))
@classmethod
def from_csv_custom(cls, path:PathOrStr, csv_name:str, imgIdx:int=1, header:str='infer', **kwargs)-> 'ItemList':
df = pd.read_csv(Path(path)/csv_name, header=header)
res = super().from_df(df, path=path, cols=0, **kwargs)
res.items = df.iloc[:,imgIdx:].apply(lambda x: x.values / 783.0, axis=1 ).values
return res | Digit Recognizer |
6,789,424 | submission[['healthy', 'multiple_diseases', 'rust', 'scab']] = preds
submission.to_csv('submission.csv', index=False )<install_modules> | test = CustomImageItemList.from_csv_custom(path=path, csv_name='test.csv', imgIdx=0)
data =(CustomImageItemList.from_csv_custom(path='.', csv_name='train.csv')
.split_by_rand_pct (.2)
.label_from_df(cols='label')
.add_test(test, label=0)
.transform(get_transforms(do_flip = False, max_rotate = 0.) , size=49)
.databunch(bs=1024, num_workers=16)
.normalize(mnist_stats))
data | Digit Recognizer |
6,789,424 | !pip install -q efficientnet<import_modules> | data.show_batch(rows=3, figsize=(12,9)) | Digit Recognizer |
6,789,424 | import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from kaggle_datasets import KaggleDatasets
import tensorflow as tf
import tensorflow.keras.layers as L
from sklearn import metrics
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from tensorflow.keras.applications.xception import Xception
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet_v2 import ResNet50V2, ResNet101V2, ResNet152V2
from tensorflow.keras.applications.nasnet import NASNetLarge
from efficientnet.tfkeras import EfficientNetB7, EfficientNetL2
from tensorflow.keras.layers import Flatten,Dense,Dropout,BatchNormalization
from tensorflow.keras.models import Model,Sequential
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Conv2D, MaxPooling2D, BatchNormalization
from keras.optimizers import Adam,SGD,Adagrad,Adadelta,RMSprop
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint<train_on_grid> | arch = models.resnet50
arch | Digit Recognizer |
6,789,424 | AUTO = tf.data.experimental.AUTOTUNE
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print('Running on TPU ', tpu.master())
except ValueError:
tpu = None
if tpu:
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
strategy = tf.distribute.get_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync)
GCS_DS_PATH = KaggleDatasets().get_gcs_path()
EPOCHS = 50
BATCH_SIZE = 8 * strategy.num_replicas_in_sync<define_variables> | learn = cnn_learner(data, arch,pretrained = False, metrics=[error_rate, accuracy], model_dir='.. /kaggle/working')
| Digit Recognizer |
6,789,424 | def format_path(st):
return GCS_DS_PATH + '/images/' + st + '.jpg'<load_from_csv> | lr = 1e-02 | Digit Recognizer |
6,789,424 | train = pd.read_csv('/kaggle/input/plant-pathology-2020-fgvc7/train.csv')
test = pd.read_csv('/kaggle/input/plant-pathology-2020-fgvc7/test.csv')
sub = pd.read_csv('/kaggle/input/plant-pathology-2020-fgvc7/sample_submission.csv')
train_paths = train.image_id.apply(format_path ).values
test_paths = test.image_id.apply(format_path ).values
train_labels = train.loc[:, 'healthy':].values
train_paths, valid_paths, train_labels, valid_labels = train_test_split(
train_paths, train_labels, test_size=0.06, random_state=2020)
train_paths = train.image_id.apply(format_path ).values
train_labels = train.loc[:, 'healthy':].values<normalization> | learn.fit_one_cycle(15, lr)
learn.save('stage-1-50' ) | Digit Recognizer |
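Each row above pairs a prompt cell with its completion cell, keyed by the source kernel_id and competition name. A minimal loading sketch follows, assuming a Hugging Face-style dataset with the four columns shown in the header; the repo id `user/kaggle-code-cells` is a hypothetical placeholder, not the real dataset name.

```python
# Minimal sketch for loading and filtering this dataset, assuming a
# Hugging Face-style layout; the repo id "user/kaggle-code-cells" is a
# hypothetical placeholder.
from datasets import load_dataset

ds = load_dataset("user/kaggle-code-cells", split="train")

# Each record pairs a prompt code cell with its completion cell,
# annotated with the source kernel_id and the competition name.
row = ds[0]
print(row["kernel_id"], row["comp_name"])
print(row["prompt"][:200])
print(row["completion"][:200])

# Keep only rows drawn from the Digit Recognizer competition.
digit_rows = ds.filter(lambda r: r["comp_name"] == "Digit Recognizer")
print(len(digit_rows))
```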