| kernel_id (int64, 24.2k–23.3M) | prompt (string, 8–1.85M chars) | completetion (string, 1–182k chars) | comp_name (string, 5–57 chars) |
---|---|---|---|
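A minimal sketch of how rows like those below can be loaded and inspected, assuming the table has been exported locally (the `kernels.parquet` filename is hypothetical; the column names, including the dataset's own `completetion` spelling, come from the header above):

```python
import pandas as pd

# Hypothetical local export of this table; column names match the header row.
df = pd.read_parquet("kernels.parquet")
print(df.columns.tolist())  # expected: ['kernel_id', 'prompt', 'completetion', 'comp_name']
print(df["comp_name"].value_counts().head())  # competitions represented in the dump
for _, row in df.head(3).iterrows():
    # each prompt cell ends with a <tag> naming the operation the completion performs
    print(row["kernel_id"], row["prompt"][-60:].replace("\n", " "))
```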
9,740,221 | dmatrix_train = xgb.DMatrix(X_train_pca, y_train)
dmatrix_val = xgb.DMatrix(X_val_pca, y_val)
dmatrix_test = xgb.DMatrix(X_test )<train_model> | input_shape = X_train[0].shape | Digit Recognizer |
9,740,221 | xgb_params = {
'eta': 0.05,
'max_depth': 5,
'subsample': 1.0,
'colsample_bytree': 0.7,
'objective': 'reg:squarederror',
'eval_metric': 'rmse',
'verbosity': 0
}
partial_model = xgb.train(xgb_params,
dmatrix_train,
num_boost_round=1000,
evals=[(dmatrix_val, 'val')],
early_stopping_rounds=20,
verbose_eval=20)
num_boost_round = partial_model.best_iteration<train_model> | early_stop = EarlyStopping(monitor = 'val_loss', mode = 'min', verbose = 2, patience = 0 ) | Digit Recognizer |
9,740,221 | model = xgb.train(dict(xgb_params, verbose=1), dmatrix_train, num_boost_round=num_boost_round )<compute_test_metric> | model1 = Sequential()
model1.add(Conv2D(filters = 32, kernel_size =(3,3), padding = 'valid', activation = 'relu', input_shape = input_shape))
model1.add(Conv2D(filters = 64, kernel_size =(3,3), padding = 'valid', activation = 'relu'))
model1.add(MaxPool2D(pool_size=(2,2)))
model1.add(Dropout(0.25))
model1.add(Conv2D(filters = 96, kernel_size =(3,3), padding = 'valid', activation = 'relu'))
model1.add(Conv2D(filters = 128, kernel_size =(5,5), padding = 'valid', activation = 'relu'))
model1.add(MaxPool2D(pool_size=(3,3)))
model1.add(Dropout(0.33))
model1.add(Flatten())
model1.add(Dense(128, activation='relu'))
model1.add(Dropout(0.50))
model1.add(Dense(10, activation='softmax'))
| Digit Recognizer |
9,740,221 | predict = np.exp(model.predict(dmatrix_val))
rmsle = np.sqrt(mean_squared_log_error(np.exp(y_val), predict))
print('RMSLE: {:.3f}'.format(rmsle))<split> | model1.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'] ) | Digit Recognizer |
9,740,221 | X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=2022)
X_test = df_test_encoded<concatenate> | model1.fit(X_train, y_train, epochs = 100, batch_size = 64, verbose = 1, callbacks = [early_stop], shuffle = True ) | Digit Recognizer |
9,740,221 | train_dataset = cb.Pool(X_train, y_train)
test_dataset = cb.Pool(X_val, y_val )<choose_model_class> | predictions1 = model1.predict(X_test ) | Digit Recognizer |
9,740,221 | model = cb.CatBoostRegressor(loss_function='RMSE' )<compute_test_metric> | model2 = Sequential()
model2.add(Conv2D(filters = 32, kernel_size =(3,3), padding = 'valid', activation = 'relu', input_shape = input_shape))
model2.add(MaxPool2D(pool_size=(2,2)))
model2.add(Dropout(0.25))
model2.add(Conv2D(filters = 64, kernel_size =(3,3), padding = 'valid', activation = 'relu'))
model2.add(Conv2D(filters = 64, kernel_size =(3,3), padding = 'valid', activation = 'relu'))
model2.add(MaxPool2D(pool_size=(2,2)))
model2.add(Dropout(0.33))
model2.add(Conv2D(filters = 128, kernel_size =(3,3), padding = 'valid', activation = 'relu'))
model2.add(MaxPool2D(pool_size=(2,2)))
model2.add(Dropout(0.4))
model2.add(Flatten())
model2.add(Dense(1024, activation='relu'))
model2.add(Dropout(0.50))
model2.add(Dense(10, activation='softmax')) | Digit Recognizer |
9,740,221 | pred = np.exp(model.predict(X_val))
rmsle = np.sqrt(mean_squared_log_error(np.exp(y_val), pred))
print('RMSLE: {:.3f}'.format(rmsle))<predict_on_test> | model2.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'] ) | Digit Recognizer |
9,740,221 | predict = np.exp(model.predict(X_test))
submission = pd.DataFrame({'id': id_test, 'price_doc': predict})
submission.head()<save_to_csv> | model2.fit(X_train, y_train, epochs = 100, batch_size = 128, verbose = 1, callbacks = [early_stop], shuffle = True ) | Digit Recognizer |
9,740,221 | submission.to_csv('Catboost.csv', index=False)<import_modules> | predictions2 = model2.predict(X_test) | Digit Recognizer |
9,740,221 | import zipfile<load_pretrained> | model3 = Sequential()
model3.add(Conv2D(filters = 32, kernel_size =(3,3), padding = 'valid', activation = 'relu', input_shape = input_shape))
model3.add(Conv2D(filters = 32, kernel_size =(3,3), padding = 'valid', activation = 'relu'))
model3.add(Dropout(0.25))
model3.add(Conv2D(filters = 64, kernel_size =(3,3), padding = 'valid', activation = 'relu'))
model3.add(Conv2D(filters = 64, kernel_size =(3,3), padding = 'valid', activation = 'relu'))
model3.add(MaxPool2D(pool_size=(2,2)))
model3.add(Dropout(0.33))
model3.add(Conv2D(filters = 96, kernel_size =(3,3), padding = 'valid', activation = 'relu'))
model3.add(Conv2D(filters = 96, kernel_size =(3,3), padding = 'valid', activation = 'relu'))
model3.add(MaxPool2D(pool_size=(2,2)))
model3.add(Dropout(0.4))
model3.add(Flatten())
model3.add(Dense(512, activation='relu'))
model3.add(Dropout(0.50))
model3.add(Dense(1024, activation='relu'))
model3.add(Dropout(0.50))
model3.add(Dense(10, activation='softmax')) | Digit Recognizer |
9,740,221 | train = zipfile.ZipFile('../input/sberbank-russian-housing-market/train.csv.zip', 'r')
test = zipfile.ZipFile('../input/sberbank-russian-housing-market/test.csv.zip', 'r')
macro = zipfile.ZipFile('../input/sberbank-russian-housing-market/macro.csv.zip', 'r')<load_from_csv> | model3.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) | Digit Recognizer |
9,740,221 | train.extract('train.csv')
test.extract('test.csv')
macro.extract('macro.csv' )<load_from_csv> | model3.fit(X_train, y_train, epochs = 100, batch_size = 128, verbose = 1, callbacks = [early_stop], shuffle = True ) | Digit Recognizer |
9,740,221 | train = pd.read_csv('./train.csv')
test = pd.read_csv('./test.csv')
macro = pd.read_csv('./macro.csv')<data_type_conversions> | predictions3 = model3.predict(X_test) | Digit Recognizer |
9,740,221 | train_mean_life_sq = train['life_sq'].mean()
train = train.fillna(value={'life_sq':train_mean_life_sq})
train = train.fillna(value={'floor':3})
train_mean_max_floor = train['max_floor'].mean()
train = train.fillna(value={'max_floor':train_mean_max_floor})
train = train.fillna(value={'material':1})
del train['timestamp'], train['id'], train['sub_area']<prepare_x_and_y> | predictions = (predictions1 + predictions2 + predictions3) / 3
Predictions = []
for i in range(0, len(predictions)):
    Predictions.append(predictions[i].argmax())
id = []
for i in range(1, len(Predictions) + 1):
    id.append(i) | Digit Recognizer |
9,740,221 | x_train = train.iloc[:, 0:5].copy()
y_train = train['price_doc'].copy()<create_dataframe> | output = pd.DataFrame({'ImageId': id, 'Label': Predictions})
output.to_csv('my_submission.csv', index=False)
print("Your submission was successfully saved!")
| Digit Recognizer |
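The per-row argmax loops in the row above can be expressed in vectorized NumPy; a minimal equivalent sketch (the three prediction arrays are assumed from the surrounding cells):

```python
import numpy as np
import pandas as pd

# Vectorized equivalent of the ensemble/argmax/ImageId loops above.
predictions = (predictions1 + predictions2 + predictions3) / 3  # assumed from the cells above
labels = predictions.argmax(axis=1)              # class with the highest averaged probability
ids = np.arange(1, len(labels) + 1)              # 1-based ImageId column
output = pd.DataFrame({'ImageId': ids, 'Label': labels})
```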
10,977,592 | del test['id'], test['timestamp'], test['sub_area']
test_mean_life_sq = test['life_sq'].mean()
test = test.fillna(value={'life_sq':test_mean_life_sq} )<prepare_x_and_y> | import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import matplotlib.cm as cm | Digit Recognizer |
10,977,592 | x_test = test.iloc[:, 0:5].copy()<choose_model_class> | train_data_raw = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test_data_raw = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
print('train_data_shape: ' + str(train_data_raw.shape))
print('test_data_shape: ' + str(test_data_raw.shape)) | Digit Recognizer |
10,977,592 | model = BaggingRegressor(xgb.XGBRegressor(colsample_bytree=0.4,
learning_rate=0.03,
max_depth=6,
min_child_weight=1.5,
n_estimators=390,
reg_alpha=0.75,
reg_lambda=0.45,
subsample=0.6,
seed=42,
tree_method='gpu_hist', gpu_id=0
), n_estimators=50)<train_model> | def pre_process_data(dataset, has_label=True):
    if has_label:
        data = dataset.iloc[:, 1:].values
    else:
        data = dataset.iloc[:, :].values
    data = data.astype(float)  # np.float was removed in NumPy 1.24; the builtin float keeps the old behaviour
    data = np.multiply(data, 1.0 / 255.0)
    data = data.reshape(data.shape[:1] + (28, 28, 1))
    return data
train_images = pre_process_data(train_data_raw)
print(train_images.shape)
print(train_images) | Digit Recognizer |
10,977,592 | k = model.fit(x_train, y_train)<predict_on_test> | def pre_process_labels(data):
    labels_flat = data.iloc[:, 0].values.ravel()
    num_classes = np.unique(labels_flat).shape[0]
    num_labels = labels_flat.shape[0]
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_flat.ravel()] = 1
    return labels_one_hot.astype(np.uint8)
train_labels = pre_process_labels(train_data_raw)
print(train_labels.shape)
print(train_labels) | Digit Recognizer |
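The flat-index trick in pre_process_labels above is equivalent to indexing an identity matrix; a small self-contained check (plain NumPy, values chosen for illustration):

```python
import numpy as np

labels = np.array([3, 0, 9, 3])   # illustrative labels
num_classes = 10

# flat-index one-hot, as in pre_process_labels: row i starts at flat offset i * num_classes
one_hot = np.zeros((labels.size, num_classes))
one_hot.flat[np.arange(labels.size) * num_classes + labels] = 1

# identity-matrix equivalent
assert (one_hot == np.eye(num_classes)[labels]).all()
```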
10,977,592 | preds = model.predict(x_test)<save_to_csv> | def fit(train_images, train_labels, config):
    NUM_EPOCHS = config['NUM_EPOCHS']
    BATCH_SIZE = config['BATCH_SIZE']
    LEARNING_RATE = config['LEARNING_RATE']
    DROPOUT_RATE = config['DROPOUT_RATE']
    NETWORK_WIDTH = config['NETWORK_WIDTH']
    initializer = tf.keras.initializers.GlorotNormal()
    model = keras.models.Sequential([
        keras.layers.Conv2D(32, kernel_size=(3, 3), activation='swish', kernel_initializer=initializer, input_shape=(28, 28, 1), use_bias=True),
        keras.layers.Conv2D(32, kernel_size=(3, 3), activation='swish', kernel_initializer=initializer, use_bias=True),
        keras.layers.MaxPooling2D(pool_size=(2, 2)),
        keras.layers.Dropout(DROPOUT_RATE),
        keras.layers.Conv2D(64, (3, 3), activation='swish', padding='same', use_bias=True),
        keras.layers.MaxPooling2D(pool_size=(2, 2)),
        keras.layers.Dropout(DROPOUT_RATE),
        keras.layers.Conv2D(512, (3, 3), activation='swish', padding='same', use_bias=True),
        keras.layers.MaxPooling2D(pool_size=(2, 2)),
        keras.layers.Dropout(DROPOUT_RATE),
        keras.layers.Conv2D(256, (2, 2), activation='swish', padding='same', use_bias=True),
        keras.layers.MaxPooling2D(pool_size=(2, 2)),
        keras.layers.Dropout(DROPOUT_RATE),
        keras.layers.Flatten(),
        keras.layers.Dense(128, activation='swish', kernel_regularizer=tf.keras.regularizers.l1(l=0.01)),
        keras.layers.Dropout(DROPOUT_RATE),
        keras.layers.Dense(10, activation=tf.nn.softmax),
    ])
    opt = tf.keras.optimizers.Adam(learning_rate=LEARNING_RATE)
    loss = tf.keras.losses.CategoricalCrossentropy()
    model.compile(
        optimizer=opt,
        loss=loss,
        metrics=['accuracy']
    )
    val_size = int(train_images.shape[0] * 0.2)
    val_images = train_images[:val_size, :, :]
    val_labels = train_labels[:val_size, :]
    train_images = train_images[val_size:, :, :]
    train_labels = train_labels[val_size:, :]
    hist = model.fit(
        x=train_images, y=train_labels,
        epochs=NUM_EPOCHS,
        batch_size=BATCH_SIZE,
        validation_steps=10,
        validation_data=(val_images, val_labels),
        verbose=1
    )
    show_history(hist)  # plotting helper defined elsewhere in this kernel
    loss, acc = model.evaluate(val_images, val_labels)
    return model, loss, acc | Digit Recognizer |
10,977,592 | submission = pd.DataFrame({'id':test1['id'],'price_doc':preds})
submission.to_csv('./submission.csv', index=False )<import_modules> | config = {
'BATCH_SIZE': 128,
'LEARNING_RATE': 0.001,
'DROPOUT_RATE': 0.4,
'NUM_EPOCHS': 50,
'NETWORK_WIDTH': 512
}
model, loss, acc = fit(train_images, train_labels, config)
print('Accuracy: ' + str(acc * 100)+ '%' ) | Digit Recognizer |
10,977,592 | import os
import datetime
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt<load_from_csv> | predictions = model.predict(test_data ) | Digit Recognizer |
10,977,592 | train = pd.read_csv('/kaggle/input/bike-sharing-demand/train.csv')
test = pd.read_csv('/kaggle/input/bike-sharing-demand/test.csv')
train.head()<data_type_conversions> | submissions=pd.DataFrame({"ImageId": list(range(1,len(predictions)+1)) ,
"Label": np.argmax(predictions, axis=1)})
submissions.to_csv("my_submissions.csv", index=False, header=True ) | Digit Recognizer |
10,951,671 | train['datetime'] = pd.to_datetime(train['datetime'])
test['datetime'] = pd.to_datetime(test['datetime'])
train.head()<feature_engineering> | train_url = '/kaggle/input/digit-recognizer/train.csv'
test_url = '/kaggle/input/digit-recognizer/test.csv'
train = pd.read_csv(train_url)
test = pd.read_csv(test_url ) | Digit Recognizer |
10,951,671 | def split_datetime(df):
df['year'] = df['datetime'].apply(lambda t: t.year)
df['month'] = df['datetime'].apply(lambda t: t.month)
df['day'] = df['datetime'].apply(lambda t: t.day)
df['dayofweek'] = df['datetime'].apply(lambda t: t.dayofweek)
df['hour'] = df['datetime'].apply(lambda t: t.hour)
df = df.drop(['datetime'], axis=1)
return df<string_transform> | X = train.drop('label', axis=1 ).astype('float32')
y = train['label']
X_sub = test.astype('float32' ) | Digit Recognizer |
10,951,671 | test = split_datetime(test)
train = split_datetime(train)
train = train.drop(['casual', 'registered'], axis=1)
train.head()<feature_engineering> | from sklearn.model_selection import train_test_split
import tensorflow.keras as keras
from keras import Sequential,layers,callbacks,preprocessing | Digit Recognizer |
10,951,671 | train['count'] = np.log1p(train['count'] )<filter> | X = X.values.reshape(-1,28,28,1)
X_sub = X_sub.values.reshape(-1,28,28,1)
y = keras.utils.to_categorical(y, 10)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=7, shuffle=True ) | Digit Recognizer |
10,951,671 | train = train[np.abs(train['count'] - train['count'].mean()) <= (3 * train['count'].std())]<drop_column> | def spatial_cnn():
    model = Sequential([
        layers.experimental.preprocessing.Rescaling(scale=1.0/255.0, input_shape=(28, 28, 1)),
        layers.Conv2D(32, (3, 3), activation='relu', padding='same'),
        layers.BatchNormalization(),
        layers.SpatialDropout2D(0.25),
        layers.Conv2D(32, (3, 3), activation='relu', padding='same'),
        layers.BatchNormalization(),
        layers.SpatialDropout2D(0.25),
        layers.Conv2D(32, (5, 5), (2, 2), activation='relu', padding='same'),
        layers.BatchNormalization(),
        layers.SpatialDropout2D(0.25),
        layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
        layers.BatchNormalization(),
        layers.SpatialDropout2D(0.25),
        layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
        layers.BatchNormalization(),
        layers.SpatialDropout2D(0.25),
        layers.Conv2D(64, (5, 5), (2, 2), activation='relu', padding='same'),
        layers.BatchNormalization(),
        layers.SpatialDropout2D(0.25),
        layers.Conv2D(128, (3, 3), activation='relu', padding='same'),
        layers.BatchNormalization(),
        layers.SpatialDropout2D(0.25),
        layers.Flatten(),
        layers.Dense(300, activation="relu"),
        layers.BatchNormalization(),
        layers.Dropout(0.4),
        layers.Dense(10, activation='softmax')],
        name='spatial_cnn')
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics='accuracy')
    model.summary()
    return model
model = spatial_cnn() | Digit Recognizer |
10,951,671 | drop_idx = train[(train['atemp'] > 20)&(train['atemp'] < 40)&(train['temp'] > 10)&(train['temp'] < 20)].index
train = train.drop(drop_idx )<normalization> | train_datagen = preprocessing.image.ImageDataGenerator(rotation_range=10, width_shift_range=0.1,
height_shift_range=0.1, shear_range=0.1,
zoom_range=0.1)
train_datagen.fit(X_train ) | Digit Recognizer |
10,951,671 | def scaling(df):
scaler = MinMaxScaler()
num_cols = ['temp', 'atemp', 'humidity', 'windspeed']
df[num_cols] = scaler.fit_transform(df[num_cols])
return df<normalization> | batch_size = 64
epochs = 45
steps = X_train.shape[0]//batch_size
history = dict()
flow = train_datagen.flow(X_train, y_train, batch_size=batch_size, shuffle=True)
changed = False
class cbs(callbacks.Callback):
    def on_epoch_end(self, epoch, logs):
        global changed
        if logs['val_loss'] <= 0.0205 and not changed:
            changed = True
            print("\nChanging LR\n")
            global reduce_lr
            reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.05, patience=2, min_lr=1.0e-10)
reduce_lr = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2, min_lr=1.0e-10)
early_stop = callbacks.EarlyStopping(monitor="val_loss", patience=10, mode="auto")
model_hist = model.fit_generator(flow, validation_data=(X_test, y_test), verbose=1, shuffle=True,
steps_per_epoch=steps, epochs=epochs, callbacks=[cbs() , reduce_lr, early_stop] ) | Digit Recognizer |
10,951,671 | train = scaling(train)
test = scaling(test )<split> | preds_sub = pd.DataFrame(data={"ImageId":list(range(1,X_sub.shape[0]+1)) ,"Label":(model.predict_classes(X_sub)) } ).astype(int ) | Digit Recognizer |
10,951,671 | X_train, X_test, y_train, y_test = train_test_split(train.drop(['count'], axis=1), train['count'], test_size=0.3 )<compute_test_metric> | preds_sub.to_csv("DigitRecog.csv", index=False, header=True ) | Digit Recognizer |
10,953,526 |
def rmsle(y, pred):
    log_y = np.log1p(y)
    log_pred = np.log1p(pred)
    squared_error = (log_y - log_pred) ** 2
    rmsle = np.sqrt(np.mean(squared_error))
    return rmsle<import_modules> | %matplotlib inline
np.random.seed(2)
| Digit Recognizer |
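The hand-rolled rmsle above should agree with scikit-learn's mean_squared_log_error under a square root; a quick sanity check (assumes scikit-learn is installed; the numbers are illustrative):

```python
import numpy as np
from sklearn.metrics import mean_squared_log_error

y = np.array([10.0, 100.0, 1000.0])
pred = np.array([12.0, 90.0, 1100.0])

manual = np.sqrt(np.mean((np.log1p(y) - np.log1p(pred)) ** 2))
from_sklearn = np.sqrt(mean_squared_log_error(y, pred))
assert np.isclose(manual, from_sklearn)
```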
10,953,526 | from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from sklearn.model_selection import GridSearchCV<train_on_grid> | trainv = pd.read_csv("../input/digit-recognizer/train.csv")
testv = pd.read_csv("../input/digit-recognizer/test.csv")
10,953,526 | def evaluate(reg_cls, params=None):
    reg = reg_cls()
    if params:
        reg = GridSearchCV(reg, param_grid=params, refit=True)
    reg.fit(X_train, y_train)
    pred = reg.predict(X_test)
    y_test_exp = np.expm1(y_test)
    pred_exp = np.expm1(pred)
    print('\n', reg_cls)
    if params:
        print(reg.best_params_)
        reg = reg.best_estimator_
    print(rmsle(y_test_exp, pred_exp))
    return reg, pred_exp<find_best_params> | Y_train = trainv["label"]
X_train = trainv.drop(labels=["label"], axis=1)
| Digit Recognizer |
10,953,526 | lr_reg, pred_lr = evaluate(LinearRegression)
rg_reg, pred_rg = evaluate(Ridge)
ls_reg, pred_ls = evaluate(Lasso)
rf_reg, pred_rf = evaluate(RandomForestRegressor)
gb_reg, pred_gb = evaluate(GradientBoostingRegressor)
xg_reg, pred_xg = evaluate(XGBRegressor)
lg_reg, pred_lg = evaluate(LGBMRegressor)
params = {'n_estimators': [100*i for i in range(1, 6)]}
xg_reg, pred_xg = evaluate(XGBRegressor, params)
lg_reg, pred_lg = evaluate(LGBMRegressor, params )<compute_test_metric> | X_train = X_train / 255.0
testv = testv / 255.0 | Digit Recognizer |
10,953,526 | feature_importances(xg_reg )<load_from_csv> | Y_train = to_categorical(Y_train, num_classes = 10 ) | Digit Recognizer |
10,953,526 | submission = pd.read_csv('.. /input/bike-sharing-demand/sampleSubmission.csv')
submission<predict_on_test> | X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=3 ) | Digit Recognizer |
10,953,526 | pred = lg_reg.predict(test)
pred_exp = np.expm1(pred)
print(pred_exp )<feature_engineering> | model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu', input_shape =(28,28,1)))
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax")) | Digit Recognizer |
10,953,526 | submission.loc[:, 'count'] = pred_exp
submission<save_to_csv> | optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"] ) | Digit Recognizer |
10,953,526 | submission.to_csv('submission.csv', index=False )<save_to_csv> | learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001)
epochs = 25
batch_size = 86 | Digit Recognizer |
10,953,526 | submission.to_csv('submission.csv', index=False )<import_modules> | datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_train ) | Digit Recognizer |
10,953,526 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns<load_from_csv> | history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size),
epochs = epochs, validation_data =(X_val,Y_val),
verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size
, callbacks=[learning_rate_reduction] ) | Digit Recognizer |
10,953,526 | train = pd.read_csv('/kaggle/input/bike-sharing-demand/train.csv', parse_dates=['datetime'])
test = pd.read_csv('/kaggle/input/bike-sharing-demand/test.csv', parse_dates=['datetime'])
sub = pd.read_csv('/kaggle/input/bike-sharing-demand/sampleSubmission.csv' )<create_dataframe> | results = model.predict(testv)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label")
| Digit Recognizer |
10,953,526 | new_tr = train.copy()
new_test = test.copy()<feature_engineering> | sample_submission = pd.read_csv('../input/digit-recognizer/sample_submission.csv')
sample_submission["Label"]=results
sample_submission.to_csv("submission.csv", index=False ) | Digit Recognizer |
10,695,343 | new_tr['year'] = new_tr['datetime'].dt.year
new_tr['month'] = new_tr['datetime'].dt.month
new_tr['day'] = new_tr['datetime'].dt.day
new_tr['hour'] = new_tr['datetime'].dt.hour
new_tr['minute'] = new_tr['datetime'].dt.minute
new_tr['second'] = new_tr['datetime'].dt.second
new_tr['dayofweek'] = new_tr['datetime'].dt.dayofweek<feature_engineering> | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
import cv2 as cv
import random
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
import tensorflow.keras
from tensorflow.keras.models import Sequential, load_model, Model
from tensorflow.keras import layers
from tensorflow.keras.optimizers import Adam, SGD, RMSprop
from tensorflow.keras.metrics import Precision, Recall, AUC
from tensorflow.keras.callbacks import ModelCheckpoint, Callback, EarlyStopping
| Digit Recognizer |
10,695,343 | new_test['year'] = new_test['datetime'].dt.year
new_test['month'] = new_test['datetime'].dt.month
new_test['day'] = new_test['datetime'].dt.day
new_test['hour'] = new_test['datetime'].dt.hour
new_test['minute'] = new_test['datetime'].dt.minute
new_test['second'] = new_test['datetime'].dt.second
new_test['dayofweek'] = new_test['datetime'].dt.dayofweek<define_variables> | train_df = pd.read_csv('../input/digit-recognizer/train.csv')
test_df = pd.read_csv('../input/digit-recognizer/test.csv')
train_df.head() | Digit Recognizer |
10,695,343 | feature_names = [ 'season', 'holiday', 'workingday', 'weather',
'temp', 'atemp', 'humidity', 'windspeed',
"year", "hour", "dayofweek"]<import_modules> | %%time
samples, columns = train_df.shape
X = np.zeros((samples, 28, 28, 1))
y_true = np.zeros((samples, 1))
for sample in tqdm(range(samples)):
    X[sample, :, :, :] = train_df.iloc[sample, 1:columns].values.reshape(28, 28, 1).astype('float32')
    y_true[sample, 0] = train_df.iloc[sample, 0] | Digit Recognizer |
10,695,343 | from lightgbm import LGBMRegressor
from sklearn.model_selection import train_test_split<feature_engineering> | values = train_df.label
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(values)
print('The original output labels', values)
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
onehot_encoded = onehot_encoder.fit_transform(integer_encoded)
samples, classes = onehot_encoded.shape
print("Number of vectors:", samples, "
Number of neurons / length of vector:", classes ) | Digit Recognizer |
10,695,343 | new_tr['log_count'] = np.log1p(new_tr['count'] )<split> | y = onehot_encoded | Digit Recognizer |
10,695,343 | X = new_tr[feature_names]
y = new_tr['log_count']
X_test_last = new_test[feature_names]
X_train, X_test, y_train, y_test = train_test_split(X, y )<choose_model_class> | %%time
def standard_norm(img):
    return (img - np.mean(img)) / np.std(img)
norm_X = np.zeros((samples, 28, 28, 1))
for sample in tqdm(range(samples)):
    norm_X[sample, :, :, :] = standard_norm(X[sample, :, :, :]).reshape(28, 28, 1)
| Digit Recognizer |
10,695,343 | model_lgbm = LGBMRegressor()<train_model> | def METRICS():
    metrics = ['accuracy',
               Precision(name='precision'),
               Recall(name='recall'),
               AUC(name='AUC')]
    return metrics
model = Sequential()
model.add(layers.Input(shape=(28, 28, 1)))
model.add(layers.Conv2D(32,(3,3), padding = 'same', activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling2D(pool_size =(2, 2)))
model.add(layers.Conv2D(64,(3,3), padding = 'same', activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.MaxPooling2D(pool_size =(2, 2)))
model.add(layers.Conv2D(128,(3,3), padding = 'same', activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.GlobalAveragePooling2D())
model.add(layers.Dense(classes,activation='softmax', name = 'output_layer'))
model.compile(Adam(lr=0.00100005134), metrics=METRICS(), loss='categorical_crossentropy')
model.summary() | Digit Recognizer |
10,695,343 | model_lgbm.fit(X_train, y_train)<compute_test_metric> | def split_data(X, Y):
    return train_test_split(X, Y, test_size=0.2, random_state=42)
def train_model(model, X, Y, epochs, bs):
    X_train, X_val, y_train, y_val = split_data(X, Y)
    STEP_SIZE_TRAIN = X_train.shape[0] // bs + 1
    STEP_SIZE_VAL = X_val.shape[0] // bs + 1
    es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5)
    train_history = model.fit(X_train, y_train,
                              steps_per_epoch=STEP_SIZE_TRAIN,
                              validation_data=(X_val, y_val),
                              validation_steps=STEP_SIZE_VAL,
                              epochs=epochs, shuffle=True,
                              )
    return train_history, model
| Digit Recognizer |
10,695,343 | model_lgbm.score(X_test, y_test )<choose_model_class> | epochs, bs = 20, 32
train_hist, final_model = train_model(model, norm_X, y, epochs, bs ) | Digit Recognizer |
10,695,343 | model_random = RandomForestRegressor()<train_model> | %%time
samples, columns = test_df.shape
X_test = np.zeros((samples, 28, 28, 1))
X_norm_test = np.zeros((samples, 28, 28, 1))
for sample in tqdm(range(samples)):
    X_test[sample, :, :, :] = test_df.iloc[sample, :].values.reshape(28, 28, 1).astype('float32')
    X_norm_test[sample, :, :, :] = standard_norm(X_test[sample, :, :].reshape(28, 28, 1)) | Digit Recognizer |
10,695,343 | model_random.fit(X_train, y_train )<compute_test_metric> |
class MNIST_CAM:
    def __init__(self, img):
        self.resize_width, self.resize_height, _ = img.shape
    def standard_norm(self, img):
        return ((img - np.mean(img)) / np.std(img))
    def feature_model(self, model):
        return Model(inputs=model.layers[0].input, outputs=model.layers[-3].output)
    def weight_tensor(self, model):
        final_outputs = model.layers[-1]
        return final_outputs.get_weights()[0]
    def predict_class(self, model, X):
        prob_vec = model.predict(X)
        return np.argmax(prob_vec[0])
    def generate_CAM(self, model, img):
        norm_img = self.standard_norm(img)
        Fmap_model = self.feature_model(model)
        Wtensor = self.weight_tensor(model)
        feature_map = Fmap_model.predict(norm_img.reshape(1, 28, 28, 1))
        label = self.predict_class(model, norm_img.reshape(1, 28, 28, 1))
        CAM = feature_map.dot(Wtensor[:, label])[0, :, :]
        return cv.resize(CAM,
                         (self.resize_width, self.resize_height),
                         interpolation=cv.INTER_CUBIC), label
    def generate_probvec(self, model, img):
        X = self.standard_norm(img)
        prob_vec = model.predict(X.reshape(1, 28, 28, 1))
        return prob_vec | Digit Recognizer |
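A hedged usage sketch for the MNIST_CAM helper above; final_model and the preprocessed X_test array are assumed from the surrounding cells of the same kernel:

```python
# Assumed available from the cells above: final_model (trained Keras model)
# and X_test with shape (samples, 28, 28, 1).
digit = X_test[0, :, :, :]                 # one 28x28x1 test image
cam_helper = MNIST_CAM(digit)              # resize target taken from the image shape
cam, label = cam_helper.generate_CAM(final_model, digit)
print("predicted:", label, "CAM shape:", cam.shape)  # CAM upsampled to 28x28
```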
10,695,343 | model_random.score(X_test, y_test )<compute_train_metric> | final_model.predict(X_norm_test[0,:,:,:].reshape(1,28,28,1)) | Digit Recognizer |
10,695,343 | scores = cross_val_score(model_random, X_test, y_test)
print("{} 교차 검증 점수 : {}".format(model_random,scores))<import_modules> | y_test,test_Ids = np.zeros(( samples,1)) , np.zeros(( samples,1))
for sample in tqdm(range(samples)) :
y_test[sample,0] = np.argmax(final_model.predict(X_norm_test[sample,:,:,:].reshape(1,28,28,1)))
test_Ids[sample,0] = int(sample+1 ) | Digit Recognizer |
10,695,343 | from sklearn.model_selection import LeaveOneOut, cross_val_score, KFold<choose_model_class> | label_df, pred_df = pd.DataFrame(test_Ids), pd.DataFrame(y_test)
sub_df = pd.concat([label_df, pred_df], axis = 1)
sub_df.iloc[:,:] = sub_df.iloc[:,:].astype('int')
sub_df.columns = ['ImageId', 'Label']
sub_df.head() | Digit Recognizer |
10,695,343 | <import_modules><EOS> | sub_df.to_csv('sample_submission.csv', index=False ) | Digit Recognizer |
10,932,097 | <SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<compute_train_metric> | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
from torchvision import transforms, models
from torch.utils.data import DataLoader, Dataset
from torch.autograd import Variable
from torch import nn, optim
from torch.optim import lr_scheduler
import torch.nn.functional as F
import os | Digit Recognizer |
10,932,097 | def model_val(model_name, model_obj):
    now_time = time.time()
    model_obj.fit(X_train, y_train)
    scores = cross_val_score(model_obj, X_test, y_test, cv=5,
                             scoring='neg_mean_squared_error')
    avg_score = np.abs(scores.mean())
    print("Average MSE: ", avg_score)
    pro_time = time.time() - now_time
    print("Elapsed time: {0:.3f}".format(pro_time))
    print("{} Score : {}".format(model_name, avg_score))
    return avg_score, pro_time<define_variables> | WORKING_FOLDER = "/kaggle/input/digit-recognizer"
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device ) | Digit Recognizer |
10,932,097 | model_list = ["RandomForestRegressor", "xgb_basic",
"lightgbm-model1", "lightgbm-model2"]
exe_model = []
model_score = []
model_time = []<find_best_params> | train_csv_df = pd.read_csv(os.path.join(WORKING_FOLDER, "train.csv"))
print(train_csv_df.label.head())
print(train_csv_df.info() ) | Digit Recognizer |
m_name = model_list[0]
if m_name not in exe_model:
    model = RandomForestRegressor(random_state=30)
    mse_score, p_time = model_val(m_name, model)
    exe_model.append(m_name)
    model_score.append(mse_score)
    model_time.append(p_time)
else:
    print(f"{m_name} already run")<import_modules> | n_feat_cols = 28 * 28
class MNIST_Dataset(Dataset):
    def __init__(self, dataframe, transform=transforms.Compose([
        transforms.ToPILImage(),
        transforms.RandomAffine(degrees=20, translate=(0.1, 0.1)),
        transforms.ToTensor(),
        transforms.Normalize(mean=(0.5,), std=(0.5,))
    ])):
        df = dataframe
        if len(df.columns) == n_feat_cols:
            self.X = df.values.reshape((-1, 28, 28)).astype(np.uint8)[:, :, :, None]
            self.y = None
        else:
            self.X = df.iloc[:, 1:].values.reshape((-1, 28, 28)).astype(np.uint8)[:, :, :, None]
            self.y = torch.from_numpy(df.iloc[:, 0].values)
        self.transform = transform
    def __len__(self):
        return len(self.X)
    def __getitem__(self, idx):
        if self.y is not None:
            return self.transform(self.X[idx]), self.y[idx]
        else:
            return self.transform(self.X[idx])
| Digit Recognizer |
10,932,097 | from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import AdaBoostRegressor<choose_model_class> | from torchvision.models.resnet import ResNet, BasicBlock  # needed by the subclass below
class MNIST_ResNet(ResNet):
    def __init__(self):
        super(MNIST_ResNet, self).__init__(BasicBlock, [2, 2, 2, 2], num_classes=10)
        # swap the stem for a single-channel (grayscale) input
        self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=1, padding=3, bias=False)
model = MNIST_ResNet()
model = model.to(device)
| Digit Recognizer |
10,932,097 | xg_reg = xgb.XGBRegressor(objective ='reg:linear',
colsample_bytree = 0.3,
learning_rate = 0.1,
max_depth = 3,
alpha = 0.1,
n_estimators = 1000)
mse_score, p_time = model_val('xgb_basic', xg_reg)
exe_model.append('xgb_basic')
model_score.append(mse_score)
model_time.append(p_time )<choose_model_class> | train_csv_labels = train_csv_df.iloc[:,0].values
batch_size = 256
train_full_dataset = MNIST_Dataset(train_csv_df)
train_full_loader = torch.utils.data.DataLoader(dataset=train_full_dataset, batch_size=batch_size, shuffle=True)
| Digit Recognizer |
10,932,097 | m_lgbm1 = lgb.LGBMRegressor()
mse_score, p_time = model_val('lightgbm-model1', m_lgbm1)
exe_model.append('lightgbm-model1')
model_score.append(mse_score)
model_time.append(p_time )<init_hyperparams> | epochs = 50
steps = 0
print_every = 132
train_losses, val_losses = [], []
step_size = 10
base_lr = 0.01
optimizer = optim.Adam(model.parameters() , lr=base_lr)
criterion = nn.CrossEntropyLoss()
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=0.1)
print("Training...")
for e in range(epochs):
    print("Epoch ", e)
    running_loss = 0
    for images, labels in train_full_loader:
        images = images.to(device)
        labels = labels.to(device)
        steps += 1
        optimizer.zero_grad()
        log_ps = model(images)
        loss = criterion(log_ps, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if steps % print_every == 0:
            train_losses.append(running_loss / len(train_full_loader))
            print("Epoch: {}/{}.. ".format(e + 1, epochs),
                  "Training Loss: {:.3f}.. ".format(train_losses[-1])
                  )
    exp_lr_scheduler.step()
| Digit Recognizer |
10,932,097 | hyperparameters = {'boosting_type': 'gbdt',
'colsample_bytree': 0.7250136792694301,
'is_unbalance': False,
'learning_rate': 0.013227664889528229,
'min_child_samples': 20,
'num_leaves': 56,
'reg_alpha': 0.7543896477745794,
'reg_lambda': 0.07152751159655985,
'subsample_for_bin': 240000,
'subsample': 0.5233384321711397,
'n_estimators': 2000}
m_lgbm2 = lgb.LGBMRegressor(**hyperparameters)
mse_score, p_time = model_val('lightgbm-model2', m_lgbm2)
exe_model.append('lightgbm-model2')
model_score.append(mse_score)
model_time.append(p_time )<train_model> | test_csv_df = pd.read_csv(os.path.join(WORKING_FOLDER, "test.csv"))
batch_size = 256
test_dataset = MNIST_Dataset(test_csv_df)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False ) | Digit Recognizer |
10,932,097 | print(exe_model)
print(model_score)
print(model_time)<define_variables> | submission = [['ImageId', 'Label']]
with torch.no_grad():
    model.eval()
    image_id = 1
    for images in test_loader:
        images = images.to(device)
        log_ps = model(images)
        ps = torch.exp(log_ps)
        top_p, top_class = ps.topk(1, dim=1)
        for prediction in top_class:
            submission.append([image_id, prediction.item()])
            image_id += 1
| Digit Recognizer |
10,932,097 | <init_hyperparams><EOS> | submission_df = pd.DataFrame(submission)
submission_df.columns = submission_df.iloc[0]
submission_df = submission_df.drop(0, axis=0)
submission_df.to_csv("submission.csv", index=False ) | Digit Recognizer |
10,938,950 | <SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<set_options> | %matplotlib inline | Digit Recognizer |
10,938,950 | %matplotlib inline
pd.set_option("display.max_columns", 100)
def fxn():
    warnings.warn("deprecated", DeprecationWarning)
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    fxn()
<load_from_csv> | X = pd.read_csv('../input/digit-recognizer/train.csv')
X_test = pd.read_csv('../input/digit-recognizer/test.csv')
X.shape, X_test.shape | Digit Recognizer |
10,938,950 | data=pd.read_csv('.. /input/bike-sharing-demand/train.csv')
<rename_columns> | X_train = X.drop(['label'],1)
Y_train = X['label']
X_train.shape | Digit Recognizer |
10,938,950 | data=data.rename(columns={col_names[5]: "Temperature(C)",col_names[6]:"Dew Temperature(C)",col_names[7]: "humidity(%)",
col_names[8]:"Wind Speed(m/s)",col_names[11]:"Number of rented bikes"
} )<count_missing_values> | X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train = X_train/255
X_test = X_test/255
X_train = X_train.reshape(-1, 28, 28, 1)
X_test = X_test.reshape(-1,28 ,28, 1)
X_train.shape, X_test.shape
| Digit Recognizer |
10,938,950 | data.isna().sum()<feature_engineering> | Y_train= tf.keras.utils.to_categorical(Y_train, 10)
Y_train.shape | Digit Recognizer |
10,938,950 | def convert_date(df):
df['datetime'] = pd.to_datetime(df['datetime'])
df['month'] = df['datetime'].dt.month
df['hour'] = df['datetime'].dt.hour
df['weekday'] = df['datetime'].dt.dayofweek
df["day"]=df["datetime"].dt.day
df["year"]=df["datetime"].dt.year
df['dayofweek'] = df['datetime'].dt.dayofweek
df['month_start'] = df['datetime'].dt.is_month_start
df['woy'] = df['datetime'].dt.isocalendar().week.astype(int)
return df<data_type_conversions> | x_train, val_x, y_train, val_y = train_test_split(X_train, Y_train, test_size=0.20 ) | Digit Recognizer |
10,938,950 | data=convert_date(data )<create_dataframe> | datagen = ImageDataGenerator(zoom_range = 0.1,
height_shift_range = 0.1,
width_shift_range = 0.1,
rotation_range = 15 ) | Digit Recognizer |
10,938,950 | df1VizSeasons=pd.DataFrame(data.groupby('season' ).sum() ['Number of rented bikes'].sort_values(ascending=False))
df1VizSeasons.style.background_gradient(cmap=sns.light_palette("red", as_cmap=True))<create_dataframe> | es = EarlyStopping(monitor='loss', patience=12)
filepath="/kaggle/working/bestmodel.h5"
md = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min' ) | Digit Recognizer |
10,938,950 | df1VizDays=pd.DataFrame(data.groupby('dayofweek' ).sum() ['Number of rented bikes'].sort_values(ascending=False))
df1VizDays.style.background_gradient(cmap=sns.light_palette("red", as_cmap=True))<drop_column> | epochs = 30
num_classes = 10
batch_size = 128
input_shape =(28, 28, 1)
adam = tf.keras.optimizers.Adam(0.001 ) | Digit Recognizer |
10,938,950 | def drop_cols(df):
df=df.drop(['datetime','casual','registered'],axis=1)
return df<drop_column> | i = Input(shape=input_shape)
x = Conv2D(32,(3, 3), strides=2, activation='relu' )(i)
x = Conv2D(64,(3, 3), strides=2, activation='relu' )(x)
x = Flatten()(x)
x = Dropout(0.2 )(x)
x = Dense(1024, activation='relu' )(x)
x = Dropout(0.2 )(x)
x = Dense(num_classes, activation='softmax')(x)  # the original used an undefined K; num_classes (=10) is set above
model = Model(i, x)
model.summary() | Digit Recognizer |
10,938,950 | data=drop_cols(data )<define_variables> | model.compile(optimizer=adam,
loss='categorical_crossentropy',
metrics=['accuracy'] ) | Digit Recognizer |
10,938,950 | features_plot=['Number of rented bikes', 'Temperature(C)', 'humidity(%)',
'Wind Speed(m/s)', 'Dew Temperature(C)',
]<define_variables> | History = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
epochs = epochs,
validation_data =(val_x, val_y),
callbacks = [es,md],
shuffle = True
) | Digit Recognizer |
10,938,950 | def find_skewed_boundaries(df, variable, distance):
IQR = df[variable].quantile(0.75)- df[variable].quantile(0.25)
lower_boundary = df[variable].quantile(0.25)-(IQR * distance)
upper_boundary = df[variable].quantile(0.75)+(IQR * distance)
return upper_boundary, lower_boundary<feature_engineering> | pred = model1.predict(X_test)
pred_class = np.argmax(pred,axis=1)
| Digit Recognizer |
10,938,950 | def additional(df):
df['ideal'] = df[['Temperature(C)', 'Wind Speed(m/s)']].apply(lambda x:(0, 1)[x['Temperature(C)'] > 27 and x['Wind Speed(m/s)'] < 30], axis = 1)
df['sticky'] = df[['humidity(%)', 'workingday']].apply(lambda x:(0, 1)[x['workingday'] == 1 and x['humidity(%)'] >= 60], axis = 1)
df["windchill"]=13.12+0.6215*df['Temperature(C)']-11.37*(df['Wind Speed(m/s)']*3.6)**0.16+0.3965*df['Temperature(C)'] \
*(df['Wind Speed(m/s)']*3.6)**0.16
df['newweather']=17.3+df['Temperature(C)']-0.11*df['humidity(%)']+0.34*df['Wind Speed(m/s)']
df['hour_sin'] = np.sin(2 * np.pi * df['hour']/23.0)
df['hour_cos'] = np.cos(2 * np.pi * df['hour']/23.0)
df['month_sin'] = np.sin(2 * np.pi * df['month']/12)
df['month_cos'] = np.cos(2 * np.pi * df['month']/12)
df['log_wind']=np.log1p(df['Wind Speed(m/s)'])
df['log_hum']=np.log1p(df['humidity(%)'])
return df<feature_engineering> | submissions=pd.DataFrame({"ImageId": list(range(1,len(pred_class)+1)) ,
"Label": pred_class})
submissions.to_csv("submissions.csv", index=False, header=True)
submissions
| Digit Recognizer |
9,568,647 | cond1 = data['workingday']== 'Yes'
cond2 = data['hour'].isin([7,8,9,17,18,19])
cond3 = data['hour'].isin([10,11,12,13,14,15,16])
cond4 = data['hour'].isin([0,1,2,3,4,5,6,20,21,22,23])
conditions =[~cond1,cond1&cond2,cond1&cond3,cond1&cond4]
vals = ['No','High','Medium','Low']
data['Map demand'] = np.select(conditions,vals)
<categorify> | import os
from pathlib import Path
from PIL import Image | Digit Recognizer |
9,568,647 | data=additional(data )<filter> | train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' ) | Digit Recognizer |
9,568,647 | cat_features = data.columns[data.dtypes=='object']
cat_features=list(cat_features)
cat_features<count_values> | def create_image_from_row(digit, dest_path):
mat = digit.reshape(28,28)
mat = mat.astype(np.uint8)
img = Image.fromarray(mat)
img.save(dest_path ) | Digit Recognizer |
9,568,647 | def count_category(name):
print(name)
print(data[name].value_counts())
print("-------------------------------------------------")
for i in cat_features:
count_category(i )<groupby> | for index, row in train.iterrows() :
label,digit = row[0], row[1:]
directory = images_train/str(label)
filename = f"{index}.jpg"
dest_path = directory/filename
digit = digit.values
create_image_from_row(digit, dest_path ) | Digit Recognizer |
9,568,647 | count_bikes_by_hour = data.groupby("hour")["Number of rented bikes"].sum()
print(count_bikes_by_hour )<groupby> | for index, row in test.iterrows() :
filename = f"{index}.jpg"
dest_path = images_test/filename
digit = row.values
create_image_from_row(digit, dest_path ) | Digit Recognizer |
9,568,647 | count_bikes_by_Holiday = data.groupby("holiday")["Number of rented bikes"].sum()
print(count_bikes_by_Holiday )<groupby> | print('A few samples of the training images.')
displayTrainingData() | Digit Recognizer |
9,568,647 | count_bikes_by_funcday = data.groupby("workingday")["Number of rented bikes"].sum()
print(count_bikes_by_funcday )<groupby> | from fastai.vision import * | Digit Recognizer |
9,568,647 | count_bikes_by_Season = data.groupby("season")["Number of rented bikes"].sum()
print(count_bikes_by_Season )<feature_engineering> | data = ImageDataBunch.from_folder(images, train='train', test='test', valid_pct=0.2, bs=32,
ds_tfms=get_transforms() , size=224, num_workers=4 ).normalize(imagenet_stats ) | Digit Recognizer |
9,568,647 | def rolling_mean(df,cols,window=3):
for col in cols:
new_col = col+'_rolled'
df[new_col] = df[col].rolling(window=window ).mean()
return df
<drop_column> | data.show_batch(rows=3, figsize=(7,8)) | Digit Recognizer |
9,568,647 | data = rolling_mean(data,['Temperature(C)','Wind Speed(m/s)',
'humidity(%)','weather'])
<categorify> | data.classes, data.c, len(data.train_ds), len(data.valid_ds ) | Digit Recognizer |
9,568,647 | def shifting(df,cols):
for col in cols:
new_col = col+'_shifted'
df[new_col] = df[col].shift(periods=-1)
return df
data = shifting(data,['Temperature(C)','Wind Speed(m/s)','weather', 'humidity(%)'])
def shifting_1(df,cols):
for col in cols:
new_col = col+'_shifted1'
df[new_col] = df[col].shift(periods=-2)
return df
data = shifting_1(data,['Temperature(C)','Wind Speed(m/s)','weather', 'humidity(%)'])
def shifting_2(df,cols):
for col in cols:
new_col = col+'_shifted2'
df[new_col] = df[col].shift(periods=-3)
return df
data = shifting_2(data,['Temperature(C)','Wind Speed(m/s)','weather', 'humidity(%)'] )<data_type_conversions> | learn = cnn_learner(data, models.resnet18, metrics=error_rate ) | Digit Recognizer |
9,568,647 | data=data.fillna(data.mean() )<categorify> | learn.fit_one_cycle(4 ) | Digit Recognizer |
9,568,647 | def conv_cat_features(df):
df['holiday'].replace({"No Holiday":1, "Holiday":0}, inplace=True)
df['workingday'].replace({"Yes":1, "No":0}, inplace=True)
data_cat_transformed = pd.get_dummies(df, drop_first=True)
return data_cat_transformed<categorify> | learn.save('stage-1' ) | Digit Recognizer |
9,568,647 | data_cat_transformed=conv_cat_features(data)
print("{} total features after one-hot encoding.".format(len(data_cat_transformed.columns)))
print(data_cat_transformed.columns )<split> | learn.lr_find() | Digit Recognizer |
9,568,647 | X = data_cat_transformed.drop('Number of rented bikes', axis=1)
y = data_cat_transformed['Number of rented bikes']
def custom_train_valid_split(data, cutoff_day=15):
train = data[data['day'] <= cutoff_day]
valid = data[data['day'] > cutoff_day]
return train,valid
train,valid = custom_train_valid_split(data, cutoff_day=15)
train_idx = train.index
valid_idx = valid.index
myCViterator = []
myCViterator.append(( train_idx, valid_idx))<compute_test_metric> | learn.fit_one_cycle(2, max_lr=slice(3e-5,3e-3)) | Digit Recognizer |