kernel_id (int64, 24.2k to 23.3M) | prompt (string, lengths 8 to 1.85M) | completion (string, lengths 1 to 182k) | comp_name (string, lengths 5 to 57) |
---|---|---|---|
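Each row pairs a Kaggle kernel id with a code context (`prompt`, ending in a cell-type tag such as `<load_from_csv>`), the code of the next notebook cell (`completion`), and the competition name (`comp_name`). Below is a minimal sketch of loading and inspecting one row, assuming the Hugging Face `datasets` library and a hypothetical repo id `user/kaggle-code-cells` standing in for this dataset's actual id:

```python
from datasets import load_dataset

# "user/kaggle-code-cells" is a placeholder; substitute the dataset's real repo id.
ds = load_dataset("user/kaggle-code-cells", split="train")

print(ds.column_names)  # expected: ['kernel_id', 'prompt', 'completion', 'comp_name']

row = ds[0]
print(row["kernel_id"], row["comp_name"])
print(row["prompt"][-200:])      # tail of the context code, ending in a cell-type tag
print(row["completion"][:200])   # head of the target cell to be generated
```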
9,568,647 | def RMSLE(y_train,y_pred):
return np.sqrt(mean_squared_log_error(y_train, y_pred))<load_from_csv> | learn.save('stage-2' ) | Digit Recognizer |
9,568,647 | test_df=pd.read_csv('../input/bike-sharing-demand/test.csv')
test_df_copy=test_df.copy()<rename_columns> | learn.load('stage-2'); | Digit Recognizer |
9,568,647 | test_df=test_df.rename(columns={col_names[5]: "Temperature(C)",col_names[6]:"Dew Temperature(C)",col_names[7]: "humidity(%)",
col_names[8]:"Wind Speed(m/s)"
} )<data_type_conversions> | interp = ClassificationInterpretation.from_learner(learn ) | Digit Recognizer |
9,568,647 | test_df=convert_date(test_df )<feature_engineering> | losses,idxs = interp.top_losses() | Digit Recognizer |
9,568,647 | cond1 = test_df['workingday']=='Yes'
cond2 = test_df['hour'].isin([7,8,9,17,18,19])
cond3 = test_df['hour'].isin([10,11,12,13,14,15,16])
cond4 = test_df['hour'].isin([0,1,2,3,4,5,6,20,21,22,23])
conditions =[~cond1,cond1&cond2,cond1&cond3,cond1&cond4]
vals = ['No','High','Medium','Low']
test_df['Map demand'] = np.select(conditions,vals )<drop_column> | class_score, y = learn.get_preds(DatasetType.Test ) | Digit Recognizer |
9,568,647 | test_df=test_df.drop(['datetime',],axis=1)
<concatenate> | probabilities = class_score[0].tolist()
[f"{index}: {probabilities[index]}" for index in range(len(probabilities)) ] | Digit Recognizer |
9,568,647 | test_df=additional(test_df )<drop_column> | class_score = np.argmax(class_score, axis=1 ) | Digit Recognizer |
9,568,647 | test_df = rolling_mean(test_df,['Temperature(C)','Wind Speed(m/s)','weather', 'humidity(%)'])
test_df = shifting(test_df,['Temperature(C)','Wind Speed(m/s)','weather', 'humidity(%)'])
test_df = shifting_1(test_df,['Temperature(C)','Wind Speed(m/s)','weather', 'humidity(%)'])
test_df = shifting_2(test_df,['Temperature(C)','Wind Speed(m/s)','weather', 'humidity(%)'])
test_df=test_df.fillna(test_df.mean() )<feature_engineering> | INPUT = Path("../input/digit-recognizer")
TEST = Path("../test" ) | Digit Recognizer |
9,568,647 | test_cat_transformed=conv_cat_features(test_df )<init_hyperparams> | ImageId = [os.path.splitext(path)[0] for path in os.listdir(images_test)]
ImageId = [int(path)for path in ImageId]
ImageId = [ID+1 for ID in ImageId] | Digit Recognizer |
9,568,647 | <train_on_grid><EOS> | submission = pd.DataFrame({
"ImageId": ImageId,
"Label": class_score
})
submission.to_csv("submission.csv", index=False)
display(submission.head(3))
display(submission.tail(3)) | Digit Recognizer |
10,774,499 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<create_dataframe> | %reload_ext autoreload
%autoreload 2
%matplotlib inline | Digit Recognizer |
10,774,499 | X_train=X.copy()
X_test=test_cat_transformed.copy()<create_dataframe> | warnings.simplefilter('ignore' ) | Digit Recognizer |
10,774,499 | X_train_cat = X_train.copy()
X_test_cat = X_test.copy()<choose_model_class> | from fastai import *
from fastai.vision import *
import torchvision.models | Digit Recognizer |
10,774,499 | xgb_v = XGBRegressor(
objective='count:poisson',random_state=42)
xgb= XGBRegressor(
objective='count:poisson',random_state=42,
subsample= 0.83, n_estimators= 3000,
min_child_weight= 16, max_depth= 5,
learning_rate= 0.055, gamma= 1, colsample_bytree= 0.5)
catboost_v=CatBoostRegressor(objective='Poisson',iterations=4000,l2_leaf_reg=1,learning_rate=0.054 , silent = True)
catboosttrial=CatBoostRegressor(objective='Poisson',iterations=4000,l2_leaf_reg=1,learning_rate=0.054,silent=True)
xgb_models={
"XGB_vanella":xgb_v,
"XGB_mod" : xgb }
cat_models ={
"Catbbost_mod" :catboosttrial,
"Catboost_Vanilla" :catboost_v}<compute_train_metric> | import os
from pathlib import Path
import pandas as pd | Digit Recognizer |
10,774,499 | columns = ['Classifier', 'RMSLE']
df_scores_cat = pd.DataFrame(columns=columns)
for name, model in cat_models.items() :
model.fit(X_train_cat, y)
Y_predicted = model.predict(X_train_cat)
rmsle = np.sqrt(mean_squared_log_error(y, Y_predicted))
df_row = pd.DataFrame([[name, rmsle]], columns=columns)
df_scores_cat = df_scores_cat.append(df_row, ignore_index=True )<compute_train_metric> | train_df = pd.read_csv(INPUT/"train.csv")
train_df.head(3 ) | Digit Recognizer |
10,774,499 | columns = ['Classifier', 'RMSLE']
df_scores_xgb = pd.DataFrame(columns=columns)
for name, model in xgb_models.items() :
model.fit(X_train, y)
Y_predicted = model.predict(X_train)
rmsle = np.sqrt(mean_squared_log_error(y, Y_predicted))
df_row = pd.DataFrame([[name, rmsle]], columns=columns)
df_scores_xgb = df_scores_xgb.append(df_row, ignore_index=True )<sort_values> | test_df = pd.read_csv(INPUT/"test.csv")
test_df.head(3 ) | Digit Recognizer |
10,774,499 | df_scores_cat.sort_values(by=['RMSLE'] )<sort_values> | TRAIN = Path("../train")
TEST = Path("../test" ) | Digit Recognizer |
10,774,499 | df_scores_xgb.sort_values(by=['RMSLE'] )<predict_on_test> | def saveDigit(digit, filepath):
digit = digit.reshape(28,28)
digit = digit.astype(np.uint8)
img = Image.fromarray(digit)
img.save(filepath ) | Digit Recognizer |
10,774,499 | y_cat_pred=catboosttrial.predict(X_test_cat )<predict_on_test> | for index, row in train_df.iterrows() :
label,digit = row[0], row[1:]
folder = TRAIN/str(label)
filename = f"{index}.jpg"
filepath = folder/filename
digit = digit.values
saveDigit(digit, filepath ) | Digit Recognizer |
10,774,499 | y_xgb_pred=xgb.predict(X_test )<data_type_conversions> | for index, digit in test_df.iterrows() :
folder = TEST
filename = f"{index}.jpg"
filepath = folder/filename
digit = digit.values
saveDigit(digit, filepath ) | Digit Recognizer |
10,774,499 |
<data_type_conversions> | tfms = get_transforms(do_flip=False ) | Digit Recognizer |
10,774,499 | test_df_copy['count'] = y_cat_pred.astype('int' )<filter> | data = ImageDataBunch.from_folder(
path = TRAIN,
test = TEST,
valid_pct = 0.2,
bs = 256,
size = 28,
num_workers = 5,
ds_tfms = tfms
).normalize(mnist_stats ) | Digit Recognizer |
10,774,499 | test_df_copy[test_df_copy["workingday"]=='No']<save_to_csv> | resnet_learn = Learner(data, torchvision.models.resnet50(pretrained=True), metrics=[error_rate, accuracy, top_k_accuracy], model_dir="/tmp/models", callback_fns=ShowGraph)
googlenet_learn = Learner(data, torchvision.models.googlenet(pretrained=True), metrics=[error_rate, accuracy, top_k_accuracy], model_dir="/tmp/models", callback_fns=ShowGraph)
resnext_learn = Learner(data, torchvision.models.resnext50_32x4d(pretrained=True), metrics=[error_rate, accuracy, top_k_accuracy], model_dir="/tmp/models", callback_fns=ShowGraph)
wideres_learn = Learner(data, torchvision.models.wide_resnet50_2(pretrained=True), metrics=[error_rate, accuracy, top_k_accuracy], model_dir="/tmp/models", callback_fns=ShowGraph)
mobilenet_learn = Learner(data, torchvision.models.mobilenet_v2(pretrained=True), metrics=[error_rate, accuracy, top_k_accuracy], model_dir="/tmp/models", callback_fns=ShowGraph ) | Digit Recognizer |
10,774,499 | test_df_copy[['datetime','count']].to_csv("submissions1.csv",index=False )<load_from_csv> | model = [resnet_learn, googlenet_learn, resnext_learn, wideres_learn, mobilenet_learn] | Digit Recognizer |
10,774,499 | data_path = '/kaggle/input/bike-sharing-demand/'
train = pd.read_csv(data_path + 'train.csv')
test = pd.read_csv(data_path + 'test.csv')
submission = pd.read_csv(data_path + 'sampleSubmission.csv' )<filter> | for learn in model:
learn.lr_find()
learn.recorder.plot(suggestion=True ) | Digit Recognizer |
10,774,499 | train = train[train['weather'] != 4]<concatenate> | resnet_learn.fit_one_cycle(10 ) | Digit Recognizer |
10,774,499 | all_data = pd.concat([train, test], ignore_index=True)
all_data<feature_engineering> | googlenet_learn.fit_one_cycle(10 ) | Digit Recognizer |
10,774,499 | all_data['date'] = all_data['datetime'].apply(lambda x: x.split() [0])
all_data['year'] = all_data['datetime'].apply(lambda x: x.split() [0].split('-')[0])
all_data['month'] = all_data['datetime'].apply(lambda x: x.split() [0].split('-')[1])
all_data['hour'] = all_data['datetime'].apply(lambda x: x.split() [1].split(':')[0])
all_data["weekday"] = all_data['date'].apply(lambda dateString : datetime.strptime(dateString,"%Y-%m-%d" ).weekday() )<drop_column> | resnext_learn.fit_one_cycle(10 ) | Digit Recognizer |
10,774,499 | drop_features = ['casual', 'registered', 'datetime', 'date', 'windspeed', 'month']
all_data = all_data.drop(drop_features, axis=1 )<prepare_x_and_y> | wideres_learn.fit_one_cycle(10 ) | Digit Recognizer |
10,774,499 | X_train = all_data[~pd.isnull(all_data['count'])]
X_test = all_data[pd.isnull(all_data['count'])]
X_train = X_train.drop(['count'], axis=1)
X_test = X_test.drop(['count'], axis=1)
y = train['count']<compute_test_metric> | mobilenet_learn.fit_one_cycle(10 ) | Digit Recognizer |
10,774,499 | def rmsle(y_true, y_pred, convertExp=True):
if convertExp:
y_true = np.exp(y_true)
y_pred = np.exp(y_pred)
log_true = np.nan_to_num(np.array([np.log(y+1)for y in y_true]))
log_pred = np.nan_to_num(np.array([np.log(y+1)for y in y_pred]))
output = np.sqrt(np.mean(( log_true - log_pred)**2))
return output<choose_model_class> | ImageId = [int(os.path.splitext(path)[0])+1 for path in os.listdir(TEST)] | Digit Recognizer |
10,774,499 | <train_on_grid><EOS> | model_name = ['resnet', 'googlenet', 'resnext', 'wideres', 'mobilenet']
i = 0
for learn in model:
class_score, y = learn.get_preds(DatasetType.Test)
class_score = np.argmax(class_score, axis=1)
submission = pd.DataFrame({"ImageId": ImageId,"Label": class_score})
submission.to_csv("submission_"+str(model_name[i])+".csv", index=False)
i += 1 | Digit Recognizer |
11,245,893 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<train_on_grid> | !pip install git+https://github.com/keras-team/keras-tuner.git -q | Digit Recognizer |
11,245,893 | log_y = np.log1p(y)
gridsearch_ridge_model.fit(X_train, log_y )<find_best_params> | def load_train(path):
data = pd.read_csv(path)
y = data["label"]
x = data.drop(labels=["label"], axis=1 ).values.reshape(-1, 28, 28, 1)
return x, y
def load_test(path):
data = pd.read_csv(path)
x = data.values.reshape(-1, 28, 28, 1)
return x
x_train, y_train = load_train("../input/digit-recognizer/train.csv")
x_test = load_test("../input/digit-recognizer/test.csv" ) | Digit Recognizer |
11,245,893 | print('Best parameters:', gridsearch_ridge_model.best_params_ )<compute_train_metric> | def augment_images(x, hp):
use_rotation = hp.Boolean('use_rotation')
if use_rotation:
x = layers.experimental.preprocessing.RandomRotation(
hp.Float('rotation_factor', min_value=0.05, max_value=0.2)
)(x)
use_zoom = hp.Boolean('use_zoom')
if use_zoom:
x = layers.experimental.preprocessing.RandomZoom(
hp.Float('zoom_factor', min_value=0.05, max_value=0.2)
)(x)
return x
def make_model(hp):
inputs = keras.Input(shape=(28, 28, 1))
x = layers.experimental.preprocessing.Rescaling(1./ 255 )(inputs)
x = layers.experimental.preprocessing.Resizing(64, 64 )(x)
x = augment_images(x, hp)
num_block = hp.Int('num_block', min_value=2, max_value=5, step=1)
num_filters = hp.Int('num_filters', min_value=32, max_value=128, step=32)
for i in range(num_block):
x = layers.Conv2D(
num_filters,
kernel_size=3,
activation='relu',
padding='same'
)(x)
x = layers.Conv2D(
num_filters,
kernel_size=3,
activation='relu',
padding='same'
)(x)
x = layers.MaxPooling2D(2 )(x)
reduction_type = hp.Choice('reduction_type', ['flatten', 'avg'])
if reduction_type == 'flatten':
x = layers.Flatten()(x)
else:
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(
units=hp.Int('num_dense_units', min_value=32, max_value=512, step=32),
activation='relu'
)(x)
x = layers.Dropout(
hp.Float('dense_dropout', min_value=0., max_value=0.7)
)(x)
outputs = layers.Dense(10 )(x)
model = keras.Model(inputs, outputs)
learning_rate = hp.Float('learning_rate', min_value=3e-4, max_value=3e-3)
optimizer = keras.optimizers.Adam(learning_rate=learning_rate)
model.compile(loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=optimizer,
metrics=[keras.metrics.SparseCategoricalAccuracy(name='acc')])
model.summary()
return model | Digit Recognizer |
11,245,893 | preds = gridsearch_ridge_model.best_estimator_.predict(X_train)
print(f'Ridge regression RMSLE: {rmsle(log_y, preds, True):.4f}' )<choose_model_class> | tuner = kt.tuners.RandomSearch(
make_model,
objective='val_acc',
max_trials=100,
overwrite=True)
callbacks=[keras.callbacks.EarlyStopping(monitor='val_acc', mode='max', patience=3, baseline=0.9)]
tuner.search(x_train, y_train, validation_split=0.2, callbacks=callbacks, verbose=1, epochs=100 ) | Digit Recognizer |
11,245,893 | lasso_model = Lasso()
lasso_alpha = 1/np.array([0.1, 1, 2, 3, 4, 10, 30, 100, 200, 300, 400, 800, 900, 1000])
lasso_params = {'max_iter':[3000], 'alpha':lasso_alpha}
gridsearch_lasso_model = GridSearchCV(estimator=lasso_model,
param_grid=lasso_params,
scoring=rmsle_scorer,
cv=5)
log_y = np.log1p(y)
gridsearch_lasso_model.fit(X_train, log_y)
print('Best parameters:', gridsearch_lasso_model.best_params_ )<compute_train_metric> | best_hp = tuner.get_best_hyperparameters() [0]
model = make_model(best_hp)
history = model.fit(x_train, y_train, validation_split=0.2, epochs=50 ) | Digit Recognizer |
11,245,893 | preds = gridsearch_lasso_model.best_estimator_.predict(X_train)
print(f'Lasso regression RMSLE: {rmsle(log_y, preds, True):.4f}' )<choose_model_class> | val_acc_per_epoch = history.history['val_acc']
best_epoch = val_acc_per_epoch.index(max(val_acc_per_epoch)) + 1
model = make_model(best_hp)
model.fit(x_train, y_train, epochs=best_epoch ) | Digit Recognizer |
11,245,893 | <compute_train_metric><EOS> | predictions = model.predict(x_test)
submission = pd.DataFrame({"ImageId": list(range(1, len(predictions)+ 1)) ,
"Label": np.argmax(predictions, axis=-1)})
submission.to_csv("submission.csv", index=False ) | Digit Recognizer |
5,866,910 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<save_to_csv> | %matplotlib inline
| Digit Recognizer |
5,866,910 | submission['count'] = np.exp(randomforest_preds)
submission.to_csv('submission.csv', index=False )<import_modules> | base_set = pd.read_csv('../input/digit-recognizer/train.csv' ) | Digit Recognizer |
5,866,910 | import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from xgboost import XGBRegressor
from sklearn.metrics import mean_squared_log_error
from sklearn.model_selection import GridSearchCV
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingRegressor
<load_from_csv> | label_column = 'label'
X = base_set.drop(columns=[label_column] ).values
Y = base_set[label_column] | Digit Recognizer |
5,866,910 | df=pd.read_csv('../input/bike-sharing-demand/train.csv')
test=pd.read_csv('../input/bike-sharing-demand/test.csv')
<count_missing_values> | X = X / 255
def reshape_data(np_array):
return np_array.reshape(len(np_array), 28, 28, 1)
X = reshape_data(X ) | Digit Recognizer |
5,866,910 | df.isnull().sum()<feature_engineering> | Y = to_categorical(Y, 10 ) | Digit Recognizer |
5,866,910 | df['Date_time'] = df['datetime'].astype('datetime64[ns]')
df['Year']=df.Date_time.dt.year
df['Month']=df.Date_time.dt.month
df['Day']=df.Date_time.dt.day
df['Hour']=df.Date_time.dt.hour
test['Date_time'] = test['datetime'].astype('datetime64[ns]')
test['Year']=test.Date_time.dt.year
test['Month']=test.Date_time.dt.month
test['Day']=test.Date_time.dt.day
test['Hour']=test.Date_time.dt.hour<count_values> | train_to_valtest_ratio =.32
validate_to_test_ratio =.5
(X_train,
X_validation_and_test,
Y_train,
Y_validation_and_test)= train_test_split(X, Y, test_size=train_to_valtest_ratio)
(X_validation,
X_test,
Y_validation,
Y_test)= train_test_split(X_validation_and_test, Y_validation_and_test, test_size=validate_to_test_ratio ) | Digit Recognizer |
5,866,910 | for i in df.columns:
print(i)
print(df[i].value_counts().sum())
print("***********************************************" )<feature_engineering> | model = Sequential([
Conv2D(64, kernel_size=3, padding='same', activation='relu', input_shape=X.shape[1:]),
Conv2D(64, kernel_size=3, padding='same', activation='relu'),
Conv2D(128, kernel_size=3, padding='same', activation='relu'),
MaxPooling2D(pool_size=2),
Conv2D(128, kernel_size=3, padding='same', activation='relu'),
Conv2D(192, kernel_size=3, padding='same', activation='relu'),
MaxPooling2D(pool_size=2),
Conv2D(192, kernel_size=5, padding='same', activation='relu'),
MaxPooling2D(pool_size=2, padding='same'),
Dropout(0.25),
Flatten() ,
Dense(256, activation='relu'),
Dropout(0.5),
Dense(10, activation='softmax')
])
model.summary() | Digit Recognizer |
5,866,910 | df = df.drop(columns=['holiday'])
test = test.drop(columns=['holiday'])
df['windspeed']=df['windspeed'].replace(0,df['windspeed'].median())
test['windspeed']=test['windspeed'].replace(0,test['windspeed'].median())
df['weekday'] = 0
for ind in df.index:
df['weekday'][ind]= calendar.weekday(df['Year'][ind], df['Month'][ind] , df['Day'][ind])
test['weekday'] = 0
for ind in test.index:
test['weekday'][ind]= calendar.weekday(test['Year'][ind], test['Month'][ind] , test['Day'][ind])
df['is_night'] = 0
df.loc[(df['Hour'] < 6)|(df['Hour'] > 20), 'is_night'] = 1
test['is_night'] = 0
test.loc[(test['Hour'] < 6)|(test['Hour'] > 20), 'is_night'] = 1
df["weekend"] = df["weekday"].apply(lambda x: 1 if x == 5 or x == 6 else 0)
test["weekend"] = test["weekday"].apply(lambda x: 1 if x == 5 or x == 6 else 0)
df["day_cos"] = np.cos(( df['Hour'])*(2 * np.pi / 24))
df["day_sin"] = np.sin(( df['Hour'])*(2 * np.pi / 24))
df["month_cos"] = np.cos(( df['Month'])*(2 * np.pi / 12))
df["month_sin"] = np.sin(( df['Month'])*(2 * np.pi / 12))
df["weekday_sin"] = np.sin(( df['weekday'])*(2 * np.pi / 7))
df["weekday_cos"] = np.cos(( df['weekday'])*(2 * np.pi / 7))
test["day_cos"] = np.cos(( test['Hour'])*(2 * np.pi / 24))
test["day_sin"] = np.sin(( test['Hour'])*(2 * np.pi / 24))
test["month_cos"] = np.cos(( test['Month'])*(2 * np.pi / 12))
test["month_sin"] = np.sin(( test['Month'])*(2 * np.pi / 12))
test["weekday_sin"] = np.sin(( test['weekday'])*(2 * np.pi / 7))
test["weekday_cos"] = np.cos(( test['weekday'])*(2 * np.pi / 7))
Q1_windspeed = df['windspeed'].quantile(0.25)
Q3_windspeed = df['windspeed'].quantile(0.75)
IQR1 = Q3_windspeed - Q1_windspeed
Q1_causal = df['casual'].quantile(0.25)
Q3_causal = df['casual'].quantile(0.75)
IQR2 = Q3_causal - Q1_causal
Q1_registered = df['registered'].quantile(0.25)
Q3_registered = df['registered'].quantile(0.75)
IQR3 = Q3_registered - Q1_registered
df.loc[df.windspeed > 16.997900, 'windspeed'] = np.nan
df['windspeed'].fillna(1.5*IQR1,inplace=True)
df.loc[df.casual > 49.000000, 'casual'] = np.nan
df['casual'].fillna(1.5*IQR2,inplace=True)
df.loc[df.registered >222.000000, 'registered'] = np.nan
df['registered'].fillna(1.5*IQR3,inplace=True )<prepare_x_and_y> | model.compile(optimizer='adam',
loss='categorical_crossentropy',
metrics=['accuracy'] ) | Digit Recognizer |
5,866,910 | train_df, val_df = train_test_split(df, test_size=0.2, random_state=42,shuffle=True)
X_train = train_df.drop(columns=['count'])
y_train = train_df['count']
X_val = val_df.drop(columns=['count'])
y_val = val_df['count']
print(list(train_df.columns))<drop_column> | reduce_learning_reducer = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.3, min_lr=0.00001)
early_stopper = EarlyStopping(monitor = 'val_loss', min_delta = 1e-10, patience = 10, verbose = 1, restore_best_weights = True)
training_result = model.fit(X_train, Y_train,
batch_size=96,
epochs=150,
validation_data=(X_validation, Y_validation),
callbacks=[reduce_learning_reducer, early_stopper] ) | Digit Recognizer |
5,866,910 | X_train = X_train [['weekend','is_night','weekday','temp','season', 'workingday', 'weather','humidity', 'windspeed','Year', 'Month', 'Day', 'Hour']]
X_val = X_val [list(X_train.columns)]
X_test = test[list(X_train.columns)]<compute_train_metric> | test_result = model.test_on_batch(X_test, Y_test)
test_result | Digit Recognizer |
5,866,910 | RegModel=XGBRegressor(learning_rate = 0.07,max_depth = 10, alpha = 6, n_estimators = 350)
y_train = np.log(train_df['count']+1)
RegModel.fit(X_train,y_train)
train_predict=RegModel.predict(X_train)
train_predict=np.exp(train_predict)
Error1= np.sqrt(mean_squared_log_error(np.exp(y_train), train_predict))
print('RMSLE Training:' + str(Error1))
predict= RegModel.predict(X_val)
predict=np.exp(predict)
Error= np.sqrt(mean_squared_log_error(y_val , predict))
print('RMSLE Validation:' + str(Error))<save_to_csv> | score = model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1] ) | Digit Recognizer |
5,866,910 | y_test_predicted = RegModel.predict(X_test)
y_test_predicted=np.exp(y_test_predicted)
test['count'] = y_test_predicted.astype(int)
test[['datetime', 'count']].to_csv('/kaggle/working/submission.csv', index=False)
<set_options> | benchmark = pd.read_csv('../input/digit-recognizer/test.csv')
benchmark = benchmark / 255
benchmark = reshape_data(benchmark.values ) | Digit Recognizer |
5,866,910 | %matplotlib inline<load_from_csv> | prediction = model.predict(benchmark ) | Digit Recognizer |
5,866,910 | train = pd.read_csv('/kaggle/input/bike-sharing-demand/train.csv')
test = pd.read_csv('/kaggle/input/bike-sharing-demand/test.csv' )<drop_column> | submission = pd.DataFrame({
'ImageId': [i + 1 for i in range(len(benchmark)) ],
'Label': [values.argmax() for values in prediction]
})
submission.head() | Digit Recognizer |
5,866,910 | train.drop(['casual', 'registered'], axis=1, inplace=True )<data_type_conversions> | submission.to_csv('submission.csv', index=False ) | Digit Recognizer |
5,866,910 | train['datetime'] = pd.to_datetime(train['datetime'], errors='coerce')
test['datetime'] = pd.to_datetime(test['datetime'], errors='coerce')
<define_variables> | submission.to_csv('submission.csv', index=False ) | Digit Recognizer |
8,212,338 | categorical_cols=['season','holiday','workingday','weather']
numerical_cols=['temp','atemp','humidity','windspeed']
label='count'<feature_engineering> | sample_submission = pd.read_csv("../input/digit-recognizer/sample_submission.csv")
test = pd.read_csv("../input/digit-recognizer/test.csv")
train = pd.read_csv("../input/digit-recognizer/train.csv" ) | Digit Recognizer |
8,212,338 | train['Month'] = train['datetime'].dt.month
test['Month'] = test['datetime'].dt.month
train['Year'] = train['datetime'].dt.year
test['Year'] = test['datetime'].dt.year
train['WeekDay'] = train['datetime'].dt.weekday
test['WeekDay'] = test['datetime'].dt.weekday
train['Hour'] = train['datetime'].dt.hour
test['Hour'] = test['datetime'].dt.hour<categorify> | x = np.ndarray(shape =(train.values.shape[0], 28, 28, 1))
x_label = np.ndarray(shape = train.values.shape[0])
i = 0
for img in train.values:
x[i, :, :, 0] = np.reshape(img[1:],(28, 28)) /255
x_label[i] = img[0]
i += 1 | Digit Recognizer |
8,212,338 | def encodetime(train,test,col,label):
d3=train[[col,label]].groupby(col ).mean()
d3.sort_values(by='count',ascending=False)
plt.scatter(x=d3.index,y=d3['count'])
plt.xlabel(col)
d3=d3.sort_values(by='count')
d3['w']=np.arange(train[col].nunique())
dic=dict(zip(d3.index,d3['w']))
train[col]=train[col].map(dic)
test[col]=test[col].map(dic )<categorify> | rot_gen = ImageDataGenerator(rotation_range = 25, width_shift_range = 3, height_shift_range = 3,
brightness_range = [0.65, 1.5], shear_range = 15, zoom_range = 0.15)
x_aug = rot_gen.flow(x, batch_size=1 ) | Digit Recognizer |
8,212,338 | encodetime(train,test,'Year',label )<categorify> | | Digit Recognizer |
8,212,338 | encodetime(train,test,'Month',label)
<categorify> | x_train, x_test, y_train, y_test = train_test_split(x, x_label, test_size=0.2, random_state=42 ) | Digit Recognizer |
8,212,338 | encodetime(train,test,'Hour',label)
<categorify> | Y_train = ks.utils.to_categorical(y_train)
Y_test = ks.utils.to_categorical(y_test ) | Digit Recognizer |
8,212,338 | encodetime(train,test,'WeekDay',label)
<feature_engineering> | new = rot_gen.flow(x_train, Y_train, batch_size = 10 ) | Digit Recognizer |
8,212,338 | features=['holiday', 'workingday', 'weather', 'temp', 'atemp', 'Hour']
def lag(data,features, shift):
for feature in features:
data['lag_'+str(shift)+'_'+feature] = data[feature].shift(shift )<create_dataframe> | for i in range(10):
print(np.argmax(lbl[i])) | Digit Recognizer |
8,212,338 |
<count_missing_values> | aug = rot_gen.flow(x_train, Y_train, batch_size = 40)
x_train_aug = np.ndarray(shape =(x_train.shape[0]*41, 28, 28, 1))
Y_train_aug = np.ndarray(shape =(Y_train.shape[0]*41, 10))
x_train_aug[: x_train.shape[0], :, :, :] = x_train
Y_train_aug[: x_train.shape[0], :] = Y_train
for i in range(x_train.shape[0] - 1):
nn = aug.next()
x_train_aug[x_train.shape[0] + i*40: x_train.shape[0] +(i + 1)*40, :, :, :] = nn[0]/255
Y_train_aug[x_train.shape[0] + i*40: x_train.shape[0] +(i + 1)*40, :] = nn[1] | Digit Recognizer |
8,212,338 | train.isnull().sum()<count_missing_values> | class makestats(Callback):
def on_epoch_end(self, batch, logs=None):
if batch % 5 == 0 and batch != 0:
YY_test = np.argmax(Y_test, axis=1)
y_pred = model.predict(x_test)
print(classification_report(YY_test, np.argmax(y_pred, axis = 1)))
print(confusion_matrix(( YY_test), y_pred.argmax(axis=1)) ) | Digit Recognizer |
8,212,338 | train.isnull().sum()<train_model> | inputA = Input(shape =(28, 28, 1))
x = Conv2D(32, kernel_size=(5, 5), strides=(1),
activation='relu',
padding = 'same' )(inputA)
x = MaxPooling2D(pool_size=(2, 2), strides=(2))(x)
x = Dropout(0.3 )(x)
x = Conv2D(64,(5, 5), activation='relu' )(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(0.3 )(x)
x = Flatten()(x)
x = Dense(1000, activation='relu' )(x)
x = Dropout(0.3 )(x)
z = Dense(500, activation="relu" )(x)
x = Dropout(0.3 )(x)
z = Dense(10, activation="softmax" )(z)
model = Model(inputs=inputA, outputs=z)
epochs = 40
lrate = 0.0003
decay = lrate/epochs
sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'] ) | Digit Recognizer |
8,212,338 | train.fillna(-1, inplace=True)
test.fillna(-1, inplace=True )<split> | model.fit(x_train_aug, Y_train_aug, validation_data=(x_test, Y_test), verbose = 1,
epochs=epochs, batch_size=32, callbacks = [history, makestats() ] ) | Digit Recognizer |
8,212,338 | X = train.drop(['count','datetime'],axis=1)
xtest = test.drop(['datetime'],axis=1)
y = train['count']
tss = TimeSeriesSplit(n_splits = 5)
for train_index, test_index in tss.split(X):
X_train, X_valid = X.iloc[train_index, :], X.iloc[test_index,:]
y_train, y_valid = y.iloc[train_index], y.iloc[test_index]<split> | TEST = np.ndarray(shape =(test.values.shape[0], 28, 28, 1))
i = 0
for img in test.values:
TEST[i, :, :, 0] = np.reshape(img,(28, 28)) /255
i += 1 | Digit Recognizer |
8,212,338 | xt,xv,yt,yv=train_test_split(X,y,test_size=0.2,random_state=0 )<train_model> | y_pred = model.predict(TEST ) | Digit Recognizer |
8,212,338 | def mk_model_xgb(xt,xv,yt,yv,func1,func2,lr=1,min_child_weight =25,colsample_bytree = 0.8,md=None):
model =XGBRegressor(colsample_bytree = colsample_bytree, learning_rate = lr,min_child_weight =min_child_weight, max_depth=md)
ytt=yt.apply(func1)
model.fit(xt,ytt)
ypt=np.apply_along_axis(func2,arr=model.predict(xt),axis=0)
ypv=np.apply_along_axis(func2,arr=model.predict(xv),axis=0)
print('training r2:',r2_score(yt,ypt))
print('Validation r2:',r2_score(yv,ypv))
print('training rmsle:',np.sqrt(msle(yt,ypt)))
print('validation rmsle:',np.sqrt(msle(yv,ypv)))
return model<train_model> | preds = []
for yy in y_pred:
preds.append(np.argmax(yy)) | Digit Recognizer |
8,212,338 | _=mk_model_xgb(X_train,X_valid,y_train,y_valid,func1=trans,func2=rev_trans,lr=0.2,min_child_weight =20,colsample_bytree = 0.8,md=20 )<split> | out_file = open("predictions.csv", "w")
out_file.write("ImageId,Label
")
for i in range(len(preds)) :
out_file.write(str(i+1)+ "," + str(int(preds[i])) + "
")
out_file.close() | Digit Recognizer |
11,142,293 | _=mk_model_xgb(xt,xv,yt,yv,func1=trans,func2=rev_trans,lr=0.2,min_child_weight =20,colsample_bytree = 0.8,md=20 )<train_model> | %matplotlib inline
| Digit Recognizer |
11,142,293 | model=XGBRegressor(colsample_bytree = 0.8, learning_rate = 0.2,min_child_weight =20, max_depth=20 ).fit(X,y.apply(trans))
<predict_on_test> | mnist_train = pd.read_csv(".. /input/digit-recognizer/train.csv")
mnist_test = pd.read_csv(".. /input/digit-recognizer/test.csv" ) | Digit Recognizer |
11,142,293 | log_y_pred = model.predict(xtest )<concatenate> | mnist_train_data = mnist_train.loc[:, "pixel0":]
mnist_train_label = mnist_train.loc[:, "label"]
mnist_train_data = mnist_train_data/255.0
mnist_test = mnist_test/255.0 | Digit Recognizer |
11,142,293 | yp=np.apply_along_axis(rev_trans,arr=log_y_pred,axis=0)
yp = np.round(yp )<prepare_output> | nclasses = mnist_train_label.max() - mnist_train_label.min() + 1
mnist_train_label = np_utils.to_categorical(mnist_train_label, num_classes = nclasses)
print("Shape of y_train after encoding: ", mnist_train_label.shape ) | Digit Recognizer |
11,142,293 | submission = test[['datetime']].copy()
submission['count'] = yp
submission.head()<save_to_csv> | x_train=mnist_train_data
x_test=mnist_test_arr
y_train=mnist_train_label
x_train.shape | Digit Recognizer |
11,142,293 | submission.to_csv('/kaggle/working/submission.csv', index=False )<import_modules> | x_val=x_train[33600:]
x=x_train[:33600]
y_val= y_train[33600:]
y=y_train[:33600]
y.shape | Digit Recognizer |
11,142,293 | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.feature_selection import VarianceThreshold
from sklearn.linear_model import ElasticNet
import matplotlib.pyplot as plt
from sklearn.preprocessing import QuantileTransformer, quantile_transform
from sklearn.compose import TransformedTargetRegressor
from sklearn.linear_model import RidgeCV
from sklearn.compose import ColumnTransformer
import xgboost
from sklearn.metrics import mean_squared_log_error<feature_engineering> | from tensorflow.keras.preprocessing.image import ImageDataGenerator | Digit Recognizer |
11,142,293 | def trans(x,l1=0.3,l2=0):
if l1!=0:
return(( x+l2)**l1-1)/l1
else:
return np.log(x+l2)
def rev_trans(x,l1=0.3,l2=0):
return(x*l1+1)**(1/l1)-l2
<load_from_csv> | datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(x ) | Digit Recognizer |
11,142,293 | df = pd.read_csv("/kaggle/input/bike-sharing-demand/train.csv" )<feature_engineering> | from keras.optimizers import SGD | Digit Recognizer |
11,142,293 | df['windspeed'] = np.where(df['windspeed']==0.0,8.9981,df['windspeed'] )<feature_engineering> | model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32,(5,5), activation='relu', input_shape=(x_train.shape[1:])) ,
tf.keras.layers.Conv2D(32,(5,5), activation='relu'),
tf.keras.layers.MaxPooling2D(pool_size=(2,2), strides=(2,2)) ,
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Conv2D(64,(5,5), activation='relu'),
tf.keras.layers.Conv2D(64,(5,5), activation='relu'),
tf.keras.layers.MaxPooling2D(pool_size=(2,2), strides=(2,2)) ,
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Flatten() ,
tf.keras.layers.Dense(2048, activation='relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(10, activation='softmax')
])
learning_r= 0.01
weight_decay=1e-6
momentum=0.5
sgd= SGD(lr=learning_r,decay=weight_decay,momentum=momentum, nesterov=True)
model.compile(loss='categorical_crossentropy',optimizer=sgd,metrics=['accuracy'])
model.summary()
| Digit Recognizer |
11,142,293 | df['humidity'] = np.where(df['humidity']==0.0,8,df['humidity'] )<drop_column> | hist= model.fit(datagen.flow(x,y),epochs=50, validation_data=(x_val,y_val), verbose=1 ) | Digit Recognizer |
11,142,293 | df.drop(columns='temp',inplace = True )<feature_engineering> | p = model.predict(x_test ) | Digit Recognizer |
11,142,293 | df['windspeed'] = np.log(df['windspeed'] )<count_values> | p_t = []
for i in p:
p_t.append(np.argmax(i))
| Digit Recognizer |
11,142,293 | <feature_engineering><EOS> | submission = pd.DataFrame({
"ImageId": mnist_test.index+1,
"Label": p_t
})
submission.to_csv('my_submission.csv', index=False ) | Digit Recognizer |
11,152,622 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<count_values> | deviceCount = torch.cuda.device_count()
print(deviceCount)
cuda0 = None
if deviceCount > 0:
print(torch.cuda.get_device_name(0))
cuda0 = torch.device('cuda:0' ) | Digit Recognizer |
11,152,622 | df['weather'].value_counts()<data_type_conversions> | df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
print(df.shape)
dataset_size = df.shape[0]
df.head() | Digit Recognizer |
11,152,622 | df['datetime'] = pd.to_datetime(df['datetime'] )<feature_engineering> | x = df.drop('label', axis=1)
x = x.values.reshape(dataset_size, 1, 28, 28)
y = df['label'].values.reshape(dataset_size)
df = None | Digit Recognizer |
11,152,622 | df['Hour'] = df['datetime'].dt.hour
df['DayOfWeek'] = df['datetime'].dt.dayofweek
df['Year'] = df['datetime'].dt.year
df['WeekOfYear'] = df['datetime'].dt.isocalendar().week
df['Month'] = df['datetime'].dt.month<feature_engineering> | cross_validation_ratio = 0.05
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=cross_validation_ratio, random_state=93)
x = None
y = None | Digit Recognizer |
11,152,622 | hourMonthSeason = df.pivot_table(index=['season','Month','DayOfWeek','Hour'],values = 'count',aggfunc=np.mean ).reset_index()
hourMonthSeason['y_SMH'] = hourMonthSeason['count']<merge> | start_time = time.time()
def shift_image(image, dx, dy):
shifted_image = shift(image, [dy, dx], cval=0, mode="constant")
return shifted_image
augmented_x = []
augmented_y = []
random.seed(103)
percentage_processed = 0
for i in range(len(x_train)) :
augmented_x.append(x_train[i].astype(int))
augmented_y.append(y_train[i].astype(int))
for j in range(5):
image = x_train[i].reshape(( 28, 28))
x_shift = random.randint(-1, 1)
y_shift = random.randint(-1, 1)
rotation_deg = float(random.randint(-15, 15))
image = rotate(image, angle=rotation_deg, cval=0, mode="constant", preserve_range=True)
image = np.rint(image)
image = image.astype(int)
image = shift_image(image, x_shift, y_shift)
image = image.reshape(( 1, 28, 28))
augmented_x.append(image)
augmented_y.append(y_train[i].astype(int))
if(i+1)/len(x_train)*100 >= percentage_processed+10:
print(f'{int(( i+1)/len(x_train)*100):3}% images processed')
percentage_processed += 10
train_dataset_size = len(augmented_x)
test_dataset_size = len(x_test)
x_train = None
y_train = None
print(f'\nDuration: {time.time() - start_time:.0f} seconds' ) | Digit Recognizer |
11,152,622 | df = pd.merge(df,hourMonthSeason[['season','Month','DayOfWeek','Hour','y_SMH']],on = ['season','Month','DayOfWeek','Hour'],how = 'left' )<drop_column> | random.seed(93)
for i in range(len(augmented_x)) :
index = random.randint(0, len(augmented_x)-1)
tempx = augmented_x[i]
tempy = augmented_y[i]
augmented_x[i] = augmented_x[index]
augmented_y[i] = augmented_y[index]
augmented_x[index] = tempx
augmented_y[index] = tempy | Digit Recognizer |
11,152,622 | X = df.copy()
X.drop(columns = ['WeekOfYear','casual', 'count', 'registered','holiday'],inplace = True )<split> | trainingDataset = TensorDataset(x_train, y_train)
testDataset = TensorDataset(x_test, y_test)
trainloader = DataLoader(trainingDataset, batch_size=512, shuffle=True)
testloader = DataLoader(testDataset, batch_size=512, shuffle=False ) | Digit Recognizer |
11,152,622 | y_log=trans(df['count'])
X_train, X_eval, y_train, y_eval=train_test_split(X, y_log, test_size=0.3, random_state=42 )<choose_model_class> | class ConvolutionalNetwork(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 16, 3, 1)
self.conv2 = nn.Conv2d(16, 32, 3, 1)
self.fc1 = nn.Linear(5*5*32, 140)
self.fc2 = nn.Linear(140, 80)
self.fc3 = nn.Linear(80,10)
self.dropout1 = nn.Dropout(p=0.5)
self.dropout2 = nn.Dropout(p=0.5)
self.bn1 = nn.BatchNorm1d(140)
self.bn2 = nn.BatchNorm1d(80)
self.conv1_bn = nn.BatchNorm2d(16)
self.conv2_bn = nn.BatchNorm2d(32)
def forward(self, X):
X = self.conv1(X)
X = self.conv1_bn(X)
X = F.relu(X)
X = F.max_pool2d(X, 2, 2)
X = self.conv2(X)
X = self.conv2_bn(X)
X = F.relu(X)
X = F.max_pool2d(X, 2, 2)
X = X.view(-1, 5*5*32)
X = self.fc1(X)
X = self.bn1(X)
X = F.relu(X)
X = self.dropout1(X)
X = self.fc2(X)
X = self.bn2(X)
X = F.relu(X)
X = self.dropout2(X)
X = self.fc3(X)
return X | Digit Recognizer |
11,152,622 | xgb_best = xgboost.XGBRegressor(colsample_bytree= 0.7,
learning_rate =0.03,
max_depth= 5,
min_child_weight= 0.4,
n_estimators= 250,
nthread= 4,
objective= 'reg:linear',
silent= 1,
subsample= 0.7)
xgb_best.fit(X_train,y_train)
pred = xgb_best.predict(X_eval)
print(r2_score(y_eval,pred))
print(np.sqrt(mean_squared_error(y_eval,pred)))
<load_from_csv> | torch.manual_seed(103)
torch.cuda.manual_seed(103)
model = ConvolutionalNetwork()
if cuda0 != None:
model.to(cuda0)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters() , lr=0.001)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
print(model ) | Digit Recognizer |
11,152,622 | df_test = pd.read_csv("/kaggle/input/bike-sharing-demand/test.csv" )<feature_engineering> | start_time = time.time()
epochs = 45
train_losses = []
test_losses = []
train_correct = []
test_correct = []
for i in range(epochs):
model.train()
epoch_start_time = time.time()
trn_corr = 0
tst_corr = 0
total = 0
currentLoss = 0
for currentBatch in enumerate(trainloader):
bno = currentBatch[0] + 1
x = currentBatch[1][0]
y = currentBatch[1][1]
y_pred = model(x)
loss = criterion(y_pred, y)
lambdaParam = torch.tensor(0.05)
l2_reg = torch.tensor(0.)
if cuda0 != None:
lambdaParam = lambdaParam.cuda()
l2_reg = l2_reg.cuda()
for param in model.parameters() :
if cuda0 != None:
l2_reg += torch.norm(param ).cuda()
else:
l2_reg += torch.norm(param)
loss += lambdaParam * l2_reg
y_pred = F.log_softmax(y_pred, dim=1)
predicted = torch.max(y_pred.data, 1)[1]
batch_corr =(predicted == y ).sum()
trn_corr += batch_corr
optimizer.zero_grad()
loss.backward()
optimizer.step()
currentLoss += loss.item()
total += len(currentBatch[1][1])
if bno%100 == 0 or bno==1:
printStr = f'epoch: {i+1} batch: {bno:3} loss: {loss.item() :10.8f} accuracy: {trn_corr.item() /total*100:6.3f}%'
print(printStr)
train_losses.append(currentLoss/bno)
train_correct.append(trn_corr.item())
currentLoss = 0
model.eval()
with torch.no_grad() :
for currentBatch in enumerate(testloader):
bno = currentBatch[0] + 1
x = currentBatch[1][0]
y = currentBatch[1][1]
y_pred = model(x)
predicted = torch.max(y_pred.data, 1)[1]
tst_corr +=(predicted == y ).sum()
loss = criterion(y_pred, y)
currentLoss += loss.item()
test_losses.append(currentLoss/bno)
test_correct.append(tst_corr.item())
print('Summary of Epoch {}:'.format(i+1))
print(f'Train Loss: {train_losses[i]:10.8f} Train Accuracy: {train_correct[i]/train_dataset_size*100:6.3f}%')
print(f'Test Loss: {test_losses[i]:10.8f} Test Accuracy: {test_correct[i]/test_dataset_size*100:6.3f}%')
print(f'Epoch Duration: {time.time() - epoch_start_time:.0f} seconds')
print('')
scheduler.step()
print(f'\nDuration: {time.time() - start_time:.0f} seconds' ) | Digit Recognizer |
11,152,622 | df_test['windspeed'] = np.where(df_test['windspeed']==0.0,8.9981,df_test['windspeed'])
df_test['humidity'] = np.where(df_test['humidity']==0.0,8,df_test['humidity'] )<feature_engineering> | x_test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
x_test = x_test.values.reshape(28000, 1, 28, 28)
x_test = torch.FloatTensor(x_test)
if cuda0 != None:
x_test = x_test.cuda() | Digit Recognizer |
11,152,622 | df_test.drop(columns='temp',inplace = True)
df_test['windspeed'] = np.log(df_test['windspeed'])
df_test['weather'] = np.where(df_test['weather']==4,3,df_test['weather'] )<feature_engineering> | with torch.no_grad() :
model.eval()
y_pred = model(x_test)
predictions = torch.max(y_pred.data, 1)[1]
predictions = predictions.cpu().detach().numpy()
ids = [id+1 for id in range(len(predictions)) ]
output = pd.DataFrame({'ImageId': ids, 'Label': predictions} ) | Digit Recognizer |
11,152,622 | <merge><EOS> | output.to_csv('/kaggle/working/my_submission.csv', index=False ) | Digit Recognizer |
11,129,033 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<drop_column> | %matplotlib inline
| Digit Recognizer |
11,129,033 | X_test = df_test.copy()
test_date = X_test['datetime']
X_test.drop(columns = ['WeekOfYear','datetime','holiday'],inplace = True )<drop_column> | train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' ) | Digit Recognizer |
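The excerpts above show the prompt markup: `<SOS>` opens a kernel (followed by the evaluation metric and Kaggle data source), `<EOS>` closes it, and tags such as `<feature_engineering>` name the kind of cell the completion should contain. A hedged sketch of assembling one supervised training pair from a row, using a toy row with illustrative values shaped like the data above:

```python
# Toy row shaped like this dataset's rows; the values are illustrative only.
row = {
    "kernel_id": 9568647,
    "prompt": "test_df = pd.read_csv('../input/test.csv')<drop_column>",
    "completion": "test_df = test_df.drop(['datetime'], axis=1)",
    "comp_name": "Digit Recognizer",
}

def to_training_pair(row):
    # Condition on the competition name plus the tagged code context;
    # the completion is the next notebook cell the model should produce.
    source = f"competition: {row['comp_name']}\n{row['prompt']}"
    target = row["completion"]
    return source, target

src, tgt = to_training_pair(row)
print(src)
print(tgt)
```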