kernel_id (int64, 24.2k–23.3M) | prompt (string, lengths 8–1.85M) | completion (string, lengths 1–182k) | comp_name (string, lengths 5–57) |
---|---|---|---|
11,955,886 | np.allclose(out.iloc[:,0].values, out2.iloc[:,0].values, atol=0.01 )<define_variables> | print("Highest Validation Accuracy so far : {}%".format(round(100*max(history.history['val_accuracy']), 2)) ) | Digit Recognizer |
11,955,886 | rows = 32
columns = 40
batch_size = 100
epochs = 50
base_path = root_path + "/working/models"
if not os.path.exists(base_path):
os.makedirs(base_path)
train_size = train_set.shape[0]
validation_size = validation_set.shape[0]
steps_per_epoch = train_size//batch_size
lr = 1e-3
tensorboard_dir=base_path + "/logs"
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=tensorboard_dir)
checkpoint_filepath = os.path.join(base_path, 'cp-{epoch:04d}.ckpt')
checkpoint_callback = keras.callbacks.ModelCheckpoint(
filepath= checkpoint_filepath,
save_best_only=True,
save_weights_only=True,
monitor='val_acc',
mode='max',
verbose=1)
reduce_lr_callback = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=3, min_lr=1e-5, verbose=1)
earlystop_callback = keras.callbacks.EarlyStopping(
monitor="val_loss",
min_delta=1e-3,
patience=5,
verbose=1)
optimizer = keras.optimizers.Adam(learning_rate = lr)
loss_fn = keras.losses.SparseCategoricalCrossentropy()
acc_metric = keras.metrics.SparseCategoricalAccuracy(name='acc')  # named 'acc' so the checkpoint's monitor='val_acc' resolves
model = build_model(len(classes_map),(rows, columns, 1))
model.compile(optimizer = optimizer, loss = loss_fn, metrics= [acc_metric])
model.summary()<define_variables> | modelacc.sort(reverse=True)
modelacc | Digit Recognizer |
11,955,886 |
<train_model> | pred=model.predict([test])
soln=[]
for i in range(len(pred)) :
soln.append(np.argmax(pred[i])) | Digit Recognizer |
11,955,886 | history = model.fit(train_datagen,
steps_per_epoch= steps_per_epoch,
epochs = epochs,
validation_data = validation_datagen,
validation_steps = validation_size//batch_size,
callbacks=[earlystop_callback, reduce_lr_callback, checkpoint_callback, tensorboard_callback],
use_multiprocessing=True )<set_options> | final = pd.DataFrame()
final['ImageId']=[i+1 for i in test_x.index]
final['Label']=soln
final.to_csv('mnistcnn.csv', index=False ) | Digit Recognizer |
12,060,595 | shutil.rmtree(train_path)
gc.collect()<load_pretrained> | %matplotlib inline
if torch.cuda.is_available() :
torch.backends.cudnn.deterministic = True | Digit Recognizer |
12,060,595 | if not os.path.exists(root_path + '/working/test/'):
os.makedirs(root_path + '/working/test/')
Archive(root_path + '/input/tensorflow-speech-recognition-challenge/test.7z' ).extractall(root_path + '/working')
test_path = root_path + '/working/test'<predict_on_test> | train_df = pd.read_csv("../input/digit-recognizer/train.csv")
test_df = pd.read_csv("../input/digit-recognizer/test.csv") | Digit Recognizer |
12,060,595 | test_data,test_labels,_ = get_image_list(test_path)
test_data = test_data[0]
test_labels = test_labels[0]
test_datagen = data_generator(test_data, test_labels, batch_size, None)
test_size = len(test_data)
test_steps = np.ceil(test_size /(batch_size))
y_pred = model.predict_generator(test_datagen, steps = test_steps, verbose=1 )<prepare_output> | train_df.iloc[:,1:] | Digit Recognizer |
12,060,595 | y_labs = np.argmax(y_pred, axis=1 )<define_variables> | class MNISTDataset(Dataset):
def __init__(self, dataframe,
transform = transforms.Compose([transforms.ToPILImage() ,
transforms.ToTensor() ,
transforms.Normalize(mean=(0.5,), std=(0.5,)) ])
):
df = dataframe
self.n_pixels = 784
if len(df.columns)== self.n_pixels:
self.X = df.values.reshape(( -1,28,28)).astype(np.uint8)[:,:,:,None]
self.y = None
else:
self.X = df.iloc[:,1:].values.reshape(( -1,28,28)).astype(np.uint8)[:,:,:,None]
self.y = torch.from_numpy(df.iloc[:,0].values)
self.transform = transform
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
if self.y is not None:
return self.transform(self.X[idx]), self.y[idx]
else:
return self.transform(self.X[idx] ) | Digit Recognizer |
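A minimal usage sketch for the `MNISTDataset` class above (assumed context: pandas, torch, and the Kaggle digit-recognizer CSV layout with a `label` column plus 784 pixel columns); the class infers train vs. test mode from the column count:

```python
import pandas as pd
from torch.utils.data import DataLoader

train_df = pd.read_csv("../input/digit-recognizer/train.csv")  # 785 columns -> labels kept
test_df = pd.read_csv("../input/digit-recognizer/test.csv")    # 784 columns -> self.y is None

train_loader = DataLoader(MNISTDataset(train_df), batch_size=64, shuffle=True)
images, labels = next(iter(train_loader))
print(images.shape, labels.shape)  # torch.Size([64, 1, 28, 28]) torch.Size([64])
```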
12,060,595 | inv_map = {v: k for k, v in classes_map.items()}<normalization> | class MNISTResNet(ResNet):
def __init__(self):
super().__init__(BasicBlock, [2, 2, 2, 2], num_classes=10)
self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=1, padding=3,bias=False)
model = MNISTResNet()
print(model ) | Digit Recognizer |
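A quick sanity check for the single-channel ResNet above — a sketch assuming `ResNet`/`BasicBlock` come from `torchvision.models.resnet` as in standard usage. Replacing `conv1` with a 1-channel, `stride=1` convolution keeps 28×28 grayscale inputs from being downsampled away before the residual stages:

```python
import torch

dummy = torch.randn(4, 1, 28, 28)  # a batch of 4 grayscale MNIST-sized images
model = MNISTResNet()
with torch.no_grad():
    out = model(dummy)
print(out.shape)  # torch.Size([4, 10]) -- one logit per digit class
```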
12,060,595 | train_audio_sample = test_data[16]
x,sr = librosa.load(train_audio_sample, sr = 16000)
ipd.Audio(x, rate=sr )<normalization> | def train(train_loader, model, criterion, optimizer, epoch):
model.train()
loss_train = 0
for batch_idx,(data, target)in enumerate(train_loader):
if torch.cuda.is_available() :
data = data.cuda()
target = target.cuda()
output = model(data)
loss = criterion(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if(batch_idx + 1)% 100 == 0:
print('Train Epoch: {} [{}/{}({:.0f}%)]\tLoss: {:.6f}'.format(
epoch,(batch_idx + 1)* len(data), len(train_loader.dataset),
100.*(batch_idx + 1)/ len(train_loader), loss.data.item()))
| Digit Recognizer |
12,060,595 | train_audio_sample = test_data[100]
x,sr = librosa.load(train_audio_sample, sr = 16000)
ipd.Audio(x, rate=sr )<normalization> | def validate(val_loader, model, criterion):
model.eval()
loss = 0
correct = 0
for _,(data, target)in enumerate(val_loader):
if torch.cuda.is_available() :
data = data.cuda()
target = target.cuda()
output = model(data)
loss += criterion(output, target ).data.item()
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
loss /= len(val_loader.dataset)
print('\nOn Val set Average loss: {:.4f}, Accuracy: {}/{} ({:.3f}%)\n'.format(
loss, correct, len(val_loader.dataset),
100.0 * float(correct)/ len(val_loader.dataset)))
| Digit Recognizer |
12,060,595 | train_audio_sample = test_data[1001]
x,sr = librosa.load(train_audio_sample, sr = 16000)
ipd.Audio(x, rate=sr )<filter> | train_transforms = transforms.Compose(
[transforms.ToPILImage() ,
transforms.ToTensor() ,
transforms.Normalize(mean=(0.5,), std=(0.5,)) ])
val_test_transforms = transforms.Compose(
[transforms.ToPILImage() ,
transforms.ToTensor() ,
transforms.Normalize(mean=(0.5,), std=(0.5,)) ] ) | Digit Recognizer |
12,060,595 | inv_map[y_labs[1001]]<set_options> | total_epoches = 20
step_size = 5
base_lr = 0.01
batch_size = 64
optimizer = optim.Adam(model.parameters() , lr=base_lr)
criterion = nn.CrossEntropyLoss()
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=0.1)
if torch.cuda.is_available() :
model = model.cuda()
criterion = criterion.cuda() | Digit Recognizer |
12,060,595 | shutil.rmtree(test_path )<save_to_csv> | def split_dataframe(dataframe=None, fraction=0.9, rand_seed=1):
df_1 = dataframe.sample(frac=fraction, random_state=rand_seed)
df_2 = dataframe.drop(df_1.index)
return df_1, df_2 | Digit Recognizer |
12,060,595 | my_submission = pd.DataFrame({'fname': test_data, 'label': [inv_map[x] for x in y_labs]})
my_submission.to_csv('submission.csv', index=False )<set_options> | for epoch in range(total_epoches):
print("
Train Epoch {}: lr = {}".format(epoch, exp_lr_scheduler.get_lr() [0]))
train_df_new, val_df = split_dataframe(dataframe=train_df, fraction=0.9, rand_seed=epoch)
train_dataset = MNISTDataset(train_df_new, transform=train_transforms)
val_dataset = MNISTDataset(val_df, transform=val_test_transforms)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size, shuffle=True)
val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
batch_size=batch_size, shuffle=False)
train(train_loader=train_loader, model=model, criterion=criterion, optimizer=optimizer, epoch=epoch)
validate(val_loader=val_loader, model=model, criterion=criterion)
exp_lr_scheduler.step()
| Digit Recognizer |
12,060,595 | for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
<load_from_csv> | def prediction(test_loader, model):
model.eval()
test_pred = torch.LongTensor()
for i, data in enumerate(test_loader):
if torch.cuda.is_available() :
data = data.cuda()
output = model(data)
pred = output.cpu().data.max(1, keepdim=True)[1]
test_pred = torch.cat(( test_pred, pred), dim=0)
return test_pred | Digit Recognizer |
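One refinement worth noting for the `prediction` helper above (a sketch, not part of the original kernel): wrapping inference in `torch.no_grad()` skips autograd bookkeeping, saving memory and time at prediction:

```python
import torch

def predict_no_grad(test_loader, model):
    model.eval()
    preds = []
    with torch.no_grad():  # no gradient graph is built during inference
        for data in test_loader:
            if torch.cuda.is_available():
                data = data.cuda()
            preds.append(model(data).argmax(dim=1, keepdim=True).cpu())
    return torch.cat(preds, dim=0)
```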
12,060,595 | train = pd.read_csv('/kaggle/input/restaurant-revenue-prediction/train.csv.zip')
test = pd.read_csv('/kaggle/input/restaurant-revenue-prediction/test.csv.zip')
train_Id = train.Id
test_Id = test.Id
train.drop('Id', axis=1, inplace=True)
test.drop('Id', axis=1, inplace=True )<set_options> | test_batch_size = 64
test_dataset = MNISTDataset(test_df, transform=val_test_transforms)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=test_batch_size, shuffle=False)
test_pred = prediction(test_loader, model)
test_pred_df = pd.DataFrame(np.c_[np.arange(1, len(test_dataset)+1), test_pred.numpy() ],
columns=['ImageId', 'Label'])
print(test_pred_df.head() ) | Digit Recognizer |
12,060,595 | pd.options.display.max_columns = None
pd.options.display.max_rows = 80
pd.options.display.float_format = '{:.2f}'.format
%matplotlib inline
warnings.filterwarnings('ignore')
%matplotlib inline<data_type_conversions> | def create_download_link(df, title = "Download CSV file", filename = "data.csv"):
csv = df.to_csv(index=False)
b64 = base64.b64encode(csv.encode())
payload = b64.decode()
html = '<a download="{filename}" href="data:text/csv;base64,{payload}" target="_blank">{title}</a>'
html = html.format(payload=payload,title=title,filename=filename)
return HTML(html)
create_download_link(test_pred_df, filename="submission.csv" ) | Digit Recognizer |
12,060,595 | train['pd_date'] = pd.to_datetime(train['Open Date'], format='%m/%d/%Y')
train['Open_Year'] = train['pd_date'].dt.strftime('%Y')
train['Open_Month'] = train['pd_date'].dt.strftime('%m')
train["Open Date"] = pd.to_datetime(train["Open Date"])
train["Day"] = train["Open Date"].apply(lambda x:x.day)
train["kijun"] = "2015-04-27"
train["kijun"] = pd.to_datetime(train["kijun"])
train["BusinessPeriod"] =(train["kijun"] - train["Open Date"] ).apply(lambda x: x.days)
train = train.drop('kijun', axis=1)
train = train.drop('pd_date',axis=1)
train = train.drop('Open Date',axis=1 )<data_type_conversions> | test_pred_df.to_csv('submission.csv', index=False ) | Digit Recognizer |
12,061,993 | test['pd_date'] = pd.to_datetime(test['Open Date'], format='%m/%d/%Y')
test['Open_Year'] = test['pd_date'].dt.strftime('%Y')
test['Open_Month'] = test['pd_date'].dt.strftime('%m')
test["Open Date"] = pd.to_datetime(test["Open Date"])
test["Day"] = test["Open Date"].apply(lambda x:x.day)
test["kijun"] = "2015-04-27"
test["kijun"] = pd.to_datetime(test["kijun"])
test["BusinessPeriod"] =(test["kijun"] - test["Open Date"] ).apply(lambda x: x.days)
test = test.drop('kijun', axis=1)
test = test.drop('pd_date',axis=1)
test = test.drop('Open Date',axis=1 )<count_values> | train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' ) | Digit Recognizer |
12,061,993 | train.dtypes.value_counts()<count_unique_values> | from sklearn.model_selection import train_test_split | Digit Recognizer |
12,061,993 | train.nunique(axis=0 )<define_variables> | from sklearn.model_selection import train_test_split | Digit Recognizer |
12,061,993 | print(f'categorical variables: {cats}')
print(f'numerical variables: {nums}' )<define_variables> | X = train.drop('label',axis=1)
y = train['label'] | Digit Recognizer |
12,061,993 | nominal_list =cats
num_list = nums
<drop_column> | X = X/255
test = test/255 | Digit Recognizer |
12,061,993 | train = train.drop('Open_Month',axis=1)
test= test.drop('Open_Month',axis=1)
nominal_list.remove('Open_Month' )<feature_engineering> | from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Dense, Flatten
from tensorflow.keras.callbacks import EarlyStopping | Digit Recognizer |
12,061,993 | mean_revenue_per_city = train[['City', 'revenue']].groupby('City', as_index=False ).mean()
mean_revenue_per_city.head()
mean_revenue_per_city['revenue'] = mean_revenue_per_city['revenue'].apply(lambda x: int(x/1e6))
mean_revenue_per_city
mean_dict = dict(zip(mean_revenue_per_city.City, mean_revenue_per_city.revenue))
mean_dict<count_values> | early_stop = EarlyStopping(monitor='accuracy',mode='max',min_delta=0.005,verbose=7,patience=5 ) | Digit Recognizer |
12,061,993 | l1_l2_and = set(city_train_list)& set(city_test_list)
print(l1_l2_and)
print(len(l1_l2_and))<concatenate> | model = Sequential()
model.add(Conv2D(64, 3, activation='relu', input_shape=(28, 28, 1)))
model.add(MaxPooling2D(( 2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(32, 3, activation='relu'))
model.add(MaxPooling2D(( 2, 2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128,activation='relu'))
model.add(Dense(10,activation='softmax'))
model.compile(optimizer='adam',loss='sparse_categorical_crossentropy',metrics=['accuracy'] ) | Digit Recognizer |
12,061,993 | len(set(city_test_list ).difference(city_train_list))<concatenate> | model.fit(X,y, epochs=1000, callbacks=[early_stop] ) | Digit Recognizer |
12,061,993 | len(set(city_train_list ).difference(city_test_list))<predict_on_test> | predictions = model.predict_classes(test ) | Digit Recognizer |
12,061,993 | def adjust_cities(full_data, train, k):
relevant_pvars = ["P1", "P2", "P11", "P19", "P20", "P23","P30"]
train = train.loc[:, relevant_pvars]
kmeans = cluster.KMeans(n_clusters=k)
kmeans.fit(train)
full_data['City_Cluster'] = kmeans.predict(full_data.loc[:, relevant_pvars])
return full_data<concatenate> | df = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv' ) | Digit Recognizer |
12,061,993 | num_train = train.shape[0]
num_test = test.shape[0]
print(num_train, num_test)
full_data = pd.concat([train, test], ignore_index=True )<drop_column> | df['Label'] = predictions | Digit Recognizer |
12,061,993 | full_data = adjust_cities(full_data, train, 20)
full_data
full_data = full_data.drop(['City'], axis=1)<train_model> | df.to_csv('submission.csv', line_terminator='\r\n', index=False) | Digit Recognizer |
12,001,799 | train = full_data[:num_train]
test = full_data[num_train:]
print("Train :",train.shape)
print("Test:",test.shape)
test<feature_engineering> | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D, Flatten, BatchNormalization, Dropout
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import itertools | Digit Recognizer |
12,001,799 | mean_revenue_per_city = train[['City_Cluster', 'revenue']].groupby('City_Cluster', as_index=False ).mean()
mean_revenue_per_city.head()
mean_revenue_per_city['revenue'] = mean_revenue_per_city['revenue'].apply(lambda x: int(x/1e6))
mean_revenue_per_city
mean_dict = dict(zip(mean_revenue_per_city.City_Cluster, mean_revenue_per_city.revenue))
mean_dict<create_dataframe> | train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' ) | Digit Recognizer |
12,001,799 | city_rev = []
for i in full_data['City_Cluster']:
for key, value in mean_dict.items() :
if i == key:
city_rev.append(value)
df_city_rev = pd.DataFrame({'city_rev':city_rev})
full_data = pd.concat([full_data,df_city_rev],axis=1)
full_data.head
nominal_list.extend(['City_Cluster'])
nominal_list.remove('City' )<categorify> | X_train = train.iloc[:,1:]
y_train = train.iloc[:,0] | Digit Recognizer |
12,001,799 | le = LabelEncoder()
le_count = 0
for i in range(len(nominal_list)) :
if len(list(full_data[nominal_list[i]].unique())) <= 2:
le.fit(full_data[nominal_list[i]])
full_data[nominal_list[i]] = le.transform(full_data[nominal_list[i]])
le_count += 1
print('%d columns were label encoded.' % le_count )<categorify> | X_train = X_train.values.reshape(-1, 28, 28, 1)/255.
test = test.values.reshape(-1, 28, 28, 1)/255.
y_train = to_categorical(y_train, 10 ) | Digit Recognizer |
12,001,799 | full_data = pd.get_dummies(full_data)
print('full_dataing Features shape: ', full_data.shape )<randomize_order> | random_seed = 0
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=random_seed ) | Digit Recognizer |
12,001,799 | def tukey_outliers(x):
q1 = np.percentile(x,25)
q3 = np.percentile(x,75)
iqr = q3-q1
min_range = q1 - iqr*1.5
max_range = q3 + iqr*1.5
outliers = x[(x<min_range)|(x>max_range)]
return outliers<sort_values> | datagen = ImageDataGenerator(
rotation_range=10,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.1
) | Digit Recognizer |
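A small worked example of the `tukey_outliers` fence above, on made-up data: with Q1 = 2 and Q3 = 4 the IQR is 2, so the fences sit at 2 − 1.5·2 = −1 and 4 + 1.5·2 = 7, and only the 100 is flagged:

```python
import numpy as np
import pandas as pd

x = pd.Series([1, 2, 2, 3, 3, 4, 4, 100])  # 100 is the obvious outlier
print(tukey_outliers(x))  # -> index 7, value 100
```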
12,001,799 | skewed_data = train[num_list].apply(lambda x: skew(x)).sort_values(ascending=False)
skewed_data[:10]<train_model> | model = Sequential()
model.add(Conv2D(32,(5,5), padding='same', input_shape=X_train.shape[1:], activation='relu'))
model.add(Conv2D(32,(5,5), padding='same', activation='relu'))
model.add(MaxPool2D(2,2))
model.add(Conv2D(64,(3,3), padding='same', activation='relu'))
model.add(Conv2D(64,(3,3), padding='same', activation='relu'))
model.add(MaxPool2D(2,2))
model.add(Conv2D(64,(3,3), padding='same', activation='relu'))
model.add(Conv2D(64,(3,3), padding='same', activation='relu'))
model.add(MaxPool2D(2,2))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.summary() | Digit Recognizer |
12,001,799 | train = full_data[:num_train]
test = full_data[num_train:]
print("Train :",train.shape)
print("Test:",test.shape )<train_model> | model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'] ) | Digit Recognizer |
12,001,799 | ( X_train, X_test, y_train, y_test)= train_test_split(train_X, train_y , test_size = 0.3 , random_state = 0)
print("X_train: "+str(X_train.shape))
print("X_test: "+str(X_test.shape))
print("y_train: "+str(y_train.shape))
print("y_test: "+str(y_test.shape))<import_modules> | EPOCHS = 20
BATCH_SIZE = 20
callback_list = [
ReduceLROnPlateau(monitor='val_loss', factor=0.25, patience=1, verbose=1, mode='auto',
min_delta=0.0001)
]
history = model.fit(datagen.flow(X_train, y_train, batch_size=BATCH_SIZE),
epochs=EPOCHS,
callbacks=callback_list,
validation_data=(X_val, y_val),
steps_per_epoch=X_train.shape[0] // BATCH_SIZE ) | Digit Recognizer |
12,001,799 | <choose_model_class><EOS> | results = model.predict(test)
results = np.argmax(results, axis=1)
results = pd.Series(results, name='Label')
submission = pd.concat([pd.Series(range(1,28001), name='ImageID'), results], axis=1)
submission.to_csv('submission.csv', index=False ) | Digit Recognizer |
11,983,532 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<sort_values> | pd.set_option('display.max_rows', 1000)
warnings.filterwarnings("ignore")
dftrain = pd.read_csv('../input/digit-recognizer/train.csv')
dftest = pd.read_csv('../input/digit-recognizer/test.csv') | Digit Recognizer |
11,983,532 | print(cv_res.sort_values(ascending=False, by='CrossValMeans'))<train_model> | IMG_SIZE = 28
x_train = dftrain.iloc[:,1:]
x_train = x_train.values.reshape(-1, IMG_SIZE, IMG_SIZE, 1)
y_train = dftrain.iloc[:,0]
x_test = dftest
x_test = x_test.values.reshape(-1, IMG_SIZE, IMG_SIZE, 1)
x_train = x_train/255.0
x_test = x_test/255.0 | Digit Recognizer |
11,983,532 | def objective(trial):
params = {
'alpha': trial.suggest_loguniform("alpha", 0.1, 5),
'fit_intercept': trial.suggest_categorical('fit_intercept', [True, False]),
'normalize': trial.suggest_categorical('normalize', [True, False]),
}
reg = Ridge(**params)
reg.fit(X_train, y_train)
y_pred = reg.predict(X_test)
mae = mean_absolute_error(y_test, y_pred)
return mae
<find_best_params> | X_train, X_val, y_train, y_val = train_test_split(x_train, y_train, test_size = 0.1, random_state = 42)
datagen = ImageDataGenerator(
rotation_range=10,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.1)
datagen.fit(X_train ) | Digit Recognizer |
11,983,532 | study = optuna.create_study()
study.optimize(objective, n_trials=100)
print(f'best score: {study.best_value:.4f}, best params: {study.best_params}' )<find_best_model_class> | earlystopping = EarlyStopping(monitor ="val_accuracy",
mode = 'auto', patience = 30,
restore_best_weights = True)
model = Sequential()
model.add(Conv2D(128,(3, 3), input_shape = x_train.shape[1:]))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
model.add(Conv2D(512,(3, 3)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
model.add(Flatten())
model.add(Dense(512))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax')) | Digit Recognizer |
11,983,532 | params = {'alpha': 1.7927096237808111, 'fit_intercept': True, 'normalize': True}
reg = Ridge(**params)
reg.fit(X_train, y_train)
prediction_log = reg.predict(test_X)
prediction =np.exp(prediction_log)
print(prediction )<save_to_csv> | plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True ) | Digit Recognizer |
11,983,532 | submission = pd.DataFrame({"Id":test_Id, "Prediction":prediction})
submission.to_csv("submission.csv", index=False )<load_from_csv> | model.compile(optimizer = 'adam',
loss = 'sparse_categorical_crossentropy',
metrics = ['accuracy'])
EPOCHS = 1000
BATCH_SIZE=64
history = model.fit(datagen.flow(X_train, y_train), epochs=EPOCHS, batch_size=BATCH_SIZE, validation_data=(X_val, y_val), callbacks=[earlystopping] ) | Digit Recognizer |
11,983,532 | df = pd.read_csv('/kaggle/input/restaurant-revenue-prediction/train.csv.zip')
df.shape<load_from_csv> | print("Max.Validation Accuracy: {}%".format(round(100*max(history.history['val_accuracy']), 2)) ) | Digit Recognizer |
11,983,532 | test_data = pd.read_csv('/kaggle/input/restaurant-revenue-prediction/test.csv.zip')
test_data.shape<count_missing_values> | predictions = model.predict([x_test])
solutions = []
for i in range(len(predictions)) :
solutions.append(np.argmax(predictions[i])) | Digit Recognizer |
11,983,532 | df.sum().isnull()<feature_engineering> | final = pd.DataFrame()
final['ImageId']=[i+1 for i in dftest.index]
final['Label']=solutions
final.to_csv('submission.csv', index=False ) | Digit Recognizer |
11,828,766 | test_data.loc[test_data['Type']=='MB', 'Type'] = 'DT'<data_type_conversions> | sample_sub = '../input/digit-recognizer/sample_submission.csv'
test = '../input/digit-recognizer/test.csv'
train = '../input/digit-recognizer/train.csv' | Digit Recognizer |
11,828,766 | df['Open Date'] = pd.to_datetime(df['Open Date'])
test_data['Open Date'] = pd.to_datetime(test_data['Open Date'] )<drop_column> | %matplotlib inline
| Digit Recognizer |
11,828,766 | df.drop('Id',axis=1,inplace=True)
df.drop('City', axis=1, inplace = True)
test_data.drop('City', axis = 1, inplace = True)
df.head()<feature_engineering> | train_data = pd.read_csv(train)
test_data = pd.read_csv(test ) | Digit Recognizer |
11,828,766 | launch_date = datetime.datetime(2015, 3, 23)
df['Days Open'] =(launch_date - df['Open Date'] ).dt.days / 1000
test_data['Days Open'] =(launch_date - test_data['Open Date'] ).dt.days / 1000
df.drop('Open Date', axis=1, inplace=True)
test_data.drop('Open Date', axis=1, inplace=True )<categorify> | y_train = train_data ['label']
X_train = train_data.drop(labels = ["label"], axis = 1)
X_test = test_data | Digit Recognizer |
11,828,766 | columnsToEncode = df.select_dtypes(include=[object] ).columns
df = pd.get_dummies(df, columns=columnsToEncode, drop_first=False)
test_data = pd.get_dummies(test_data, columns=columnsToEncode, drop_first=False )<split> | X_train = X_train/255
X_test = X_test/255 | Digit Recognizer |
11,828,766 | df['revenue'] = np.log1p(df['revenue'])
X, y = df.drop('revenue', axis=1), df['revenue']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42 )<train_model> | y_train = to_categorical(y_train, num_classes = 10 ) | Digit Recognizer |
11,828,766 | clf = LinearRegression().fit(X_train, y_train)
clf.score(X_train, y_train )<train_on_grid> | datagen = ImageDataGenerator(
rotation_range = 10,
zoom_range = 0.1,
width_shift_range = 0.1,
height_shift_range = 0.1 ) | Digit Recognizer |
11,828,766 | params_grid = {
'max_depth': [10, 30, 35, 50, 65, 75, 100],
'max_features': [.3,.4,.5,.6],
'min_samples_leaf': [3, 4, 5],
'min_samples_split': [8, 10, 12],
'n_estimators': [30, 50, 100, 200]
}
rf_regressor = RandomForestRegressor()
rf_cv_regressor = GridSearchCV(rf_regressor, params_grid, scoring='neg_root_mean_squared_error', cv = 10, n_jobs = -1)
rf_cv_regressor.fit(X_train, y_train)
print('Best params {}'.format(rf_cv_regressor.best_params_))
print('Best score {}'.format(rf_cv_regressor.best_score_))<train_model> | model = Sequential()
model.add(Conv2D(32,kernel_size=3, activation='relu', input_shape=(28,28,1)))
model.add(BatchNormalization())
model.add(Conv2D(32,kernel_size=3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(32,kernel_size=5, strides=2, padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Conv2D(64, kernel_size=3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=3, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, kernel_size=5, strides=2, padding='same' ,activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128, activation ='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax')) | Digit Recognizer |
11,828,766 | rf_regressor = RandomForestRegressor(max_depth = 10,
max_features = 0.6,
min_samples_leaf = 3,
min_samples_split = 8,
n_estimators = 50)
rf_regressor.fit(X_train, y_train )<compute_test_metric> | model.compile(optimizer = "adam", loss = "categorical_crossentropy", metrics = ["accuracy"] ) | Digit Recognizer |
11,828,766 | y_train_pred = rf_regressor.predict(X_train)
y_pred = rf_regressor.predict(X_test)
train_rmse = np.sqrt(mean_squared_error(y_train_pred, y_train))
test_rmse = np.sqrt(mean_squared_error(y_test, y_pred))
print(f'Train RMSE: {train_rmse:.4f}')
print(f'Test RMSE: {test_rmse:.4f}' )<save_to_csv> | X_train, X_val, Y_train, Y_val = train_test_split(X_train, y_train, test_size = 0.2, random_state = 64 ) | Digit Recognizer |
11,828,766 | submission = pd.DataFrame(columns=['Id','Prediction'])
submission['Id'] = test_data['Id']
rf_pred = rf_regressor.predict(test_data.drop('Id', axis=1))
submission['Prediction'] = np.expm1(rf_pred)
submission.to_csv('submission_rf.csv',index=False )<import_modules> | history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size = 64),
epochs = 50, steps_per_epoch = X_train.shape[0]//64, validation_data =(X_val, Y_val), verbose=1 ) | Digit Recognizer |
11,828,766 | for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
<load_from_csv> | predictions = model.predict(X_test)
predictions = np.argmax(predictions, axis = 1)
predictions = pd.Series(predictions, name = "Label" ) | Digit Recognizer |
11,828,766 | <prepare_x_and_y><EOS> | submission = pd.concat([pd.Series(range(1, 28001), name = "ImageId"), predictions], axis = 1)
submission.to_csv("MNIST_top_CNN_submission.csv", index = False ) | Digit Recognizer |
11,950,572 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<train_on_grid> | import numpy
import pandas
import matplotlib.pyplot as plt
from keras.utils import to_categorical
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping | Digit Recognizer |
11,950,572 | model_grid = {'max_depth': [None, 8], 'min_samples_split': [4,9,16], 'min_samples_leaf':[1,4], 'max_features':['sqrt', 0.5, None]}
type_model = ExtraTreesClassifier(n_estimators=25, random_state=SEED)
grid = RandomizedSearchCV(type_model, model_grid, n_iter=10, cv=5, scoring="roc_auc")
grid.fit(X, y)
print("Best parameters for Type Model:")
print(grid.best_params_)
type_model.set_params(**grid.best_params_)
type_model.fit(X, y)
imputations = type_model.predict(tofill.drop(["T_FC", "T_IL", "revenue"], axis=1))
df.loc[(df.T_FC==0)&(df.T_IL==0), "T_FC"] = imputations
df = df.drop(["T_IL"], axis=1)
print("% labeled FC in the training set:"), df.T_FC.mean()
print("% of imputed values labeled FC:"), np.mean(imputations )<categorify> | train_data = pandas.read_csv('.. /input/digit-recognizer/train.csv')
test_data = pandas.read_csv('.. /input/digit-recognizer/test.csv' ) | Digit Recognizer |
11,950,572 | print("Pre-binarizing columns:"), len(df.columns)
for col in df.columns:
if col[0] == 'P':
print(col), len(df[col].unique()), "unique values"
df = df.join(pd.get_dummies(df[col], prefix=col))
df = df.drop([col, df.columns[-1]], axis=1)
print("Post-binarizing columns:"), len(df.columns )<prepare_x_and_y> | train_y = to_categorical(train_data["label"])
train_x = train_data.loc[:, train_data.columns != "label"]
train_x /= 256 | Digit Recognizer |
11,950,572 | train = df.loc[pd.notnull(df.revenue)]
test = df.loc[pd.isnull(df.revenue)].drop(['revenue'], axis=1)
y = train.revenue.apply(np.sqrt)
X = train.drop(["revenue"], axis=1 )<train_on_grid> | callback = EarlyStopping(monitor='loss', patience=8, restore_best_weights=True ) | Digit Recognizer |
11,950,572 | model_grid = [{'normalize': [True, False], 'alpha': np.logspace(0,10)}]
model = Ridge()
grid = GridSearchCV(model, model_grid, cv=LeaveOneOut() , scoring='neg_mean_squared_error')
grid.fit(X, y)
print("Best parameters set found on development set:")
print(grid.best_params_)
model.set_params(**grid.best_params_)
model.fit(X, y )<save_to_csv> | model = Sequential()
model.add(Conv2D(64,(3,3), activation='relu', input_shape=(28,28, 1)))
model.add(MaxPooling2D(( 2,2)))
model.add(Conv2D(64,(3,3), activation='relu'))
model.add(MaxPooling2D(( 2,2)))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(256, activation='relu'))
model.add(Dense(10, activation='sigmoid')) | Digit Recognizer |
11,950,572 | submission = pd.DataFrame(columns=['Prediction'],index=test.index, data=model.predict(test))
submission.Prediction = submission.Prediction.apply(np.square)
submission.index.name='Id'
submission.to_csv("TFI_Ridge.csv")
submission.describe().astype(int )<set_options> | model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'] ) | Digit Recognizer |
11,950,572 | plt.style.use("seaborn-whitegrid")
plt.rc("figure", autolayout=True)
plt.rc(
"axes",
labelweight="bold",
labelsize="large",
titleweight="bold",
titlesize=14,
titlepad=10,
)
warnings.filterwarnings('ignore' )<load_from_csv> | datagen = ImageDataGenerator(
rotation_range=10,
zoom_range=0.1,
width_shift_range=0.1,
height_shift_range=0.1
)
datagen.fit(train_x ) | Digit Recognizer |
11,950,572 | def load_data() :
data_dir = Path(".. /input/house-prices-advanced-regression-techniques/")
df_train = pd.read_csv(data_dir / "train.csv", index_col="Id")
df_test = pd.read_csv(data_dir / "test.csv", index_col="Id")
df = pd.concat([df_train, df_test])
df = clean(df)
df = encode(df)
df = impute(df)
df_train = df.loc[df_train.index, :]
df_test = df.loc[df_test.index, :]
return df_train, df_test<load_from_csv> | history = model.fit(datagen.flow(train_x, train_y, batch_size=32), epochs=80, callbacks=[callback] ) | Digit Recognizer |
11,950,572 | data_dir = Path("../input/house-prices-advanced-regression-techniques/")
df = pd.read_csv(data_dir / "train.csv", index_col="Id")
df.Exterior2nd.unique()<feature_engineering> | test_data /= 256
test_x = test_data.values.reshape(-1, 28, 28, 1)
y_pred = model.predict(test_x)
y_pred = numpy.argmax(y_pred, axis=1)
y_pred = pandas.Series(y_pred,name='Label')
submission = pandas.concat([pandas.Series(range(1, 28001), name='ImageId'), y_pred], axis=1 ) | Digit Recognizer |
11,950,572 | def clean(df):
df["Exterior2nd"] = df["Exterior2nd"].replace({"Brk Cmn": "BrkComm"})
df["GarageYrBlt"] = df["GarageYrBlt"].where(df.GarageYrBlt <= 2010, df.YearBuilt)
df.rename(columns={
"1stFlrSF": "FirstFlrSF",
"2ndFlrSF": "SecondFlrSF",
"3SsnPorch": "Threeseasonporch",
},inplace=True
)
return df<define_variables> | submission.to_csv('my_submission.csv', index=False ) | Digit Recognizer |
11,870,532 | features_nom = [
"MSSubClass",
"MSZoning",
"Street",
"Alley",
"LandContour",
"LotConfig",
"Neighborhood",
"Condition1",
"Condition2",
"BldgType",
"HouseStyle",
"RoofStyle",
"RoofMatl",
"Exterior1st",
"Exterior2nd",
"MasVnrType",
"Foundation",
"Heating",
"CentralAir",
"GarageType",
"MiscFeature",
"SaleType",
"SaleCondition"
]
five_levels = ["Po", "Fa", "TA", "Gd", "Ex"]
ten_levels = list(range(10))
ordered_levels = {
"OverallQual": ten_levels,
"OverallCond": ten_levels,
"ExterQual": five_levels,
"ExterCond": five_levels,
"BsmtQual": five_levels,
"BsmtCond": five_levels,
"HeatingQC": five_levels,
"KitchenQual": five_levels,
"FireplaceQu": five_levels,
"GarageQual": five_levels,
"GarageCond": five_levels,
"PoolQC": five_levels,
"LotShape": ["Reg", "IR1", "IR2", "IR3"],
"LandSlope": ["Sev", "Mod", "Gtl"],
"BsmtExposure": ["No", "Mn", "Av", "Gd"],
"BsmtFinType1": ["Unf", "LwQ", "Rec", "BLQ", "ALQ", "GLQ"],
"BsmtFinType2": ["Unf", "LwQ", "Rec", "BLQ", "ALQ", "GLQ"],
"Functional": ["Sal", "Sev", "Maj1", "Maj2", "Mod", "Min2", "Min1", "Typ"],
"GarageFinish": ["Unf", "RFn", "Fin"],
"PavedDrive": ["N", "P", "Y"],
"Utilities": ["NoSeWa", "NoSewr", "AllPub"],
"CentralAir": ["N", "Y"],
"Electrical": ["Mix", "FuseP", "FuseF", "FuseA", "SBrkr"],
"Fence": ["MnWw", "GdWo", "MnPrv", "GdPrv"]
}
ordered_levels = {key: ["None"] + value for key, value in
ordered_levels.items() }
def encode(df):
for name in features_nom:
df[name] = df[name].astype("category")
if "None" not in df[name].cat.categories:
df[name].cat.add_categories("None", inplace=True)
for name, levels in ordered_levels.items() :
df[name] = df[name].astype(CategoricalDtype(levels,
ordered=True))
return df<categorify> | X_train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
X_test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
print('Shape of the training data: ', X_train.shape)
print('Shape of the test data: ', X_test.shape ) | Digit Recognizer |
11,870,532 | def impute(df):
for name in df.select_dtypes("number"):
df[name] = df[name].fillna(0)
for name in df.select_dtypes("category"):
df[name] = df[name].fillna("None")
return df<split> | y_train = X_train['label']
X_train.drop(labels = ['label'], axis=1, inplace=True ) | Digit Recognizer |
11,870,532 | df_train, df_test = load_data()<compute_train_metric> | print('Null values in training data: ',X_train.isna().any().sum())
print('Null values in test data: ',X_test.isna().any().sum())
X_train = X_train / 255.0
X_test = X_test / 255.0 | Digit Recognizer |
11,870,532 | def score_dataset(X, y, model=XGBRegressor()):
for colname in X.select_dtypes(["category"]):
X[colname] = X[colname].cat.codes
log_y = np.log(y)
score = cross_val_score(
model,
X,
log_y,
cv=5,
scoring="neg_mean_squared_error"
)
score = -1 * score.mean()
score = np.sqrt(score)
return score<compute_test_metric> | y_train = to_categorical(y_train, num_classes=10 ) | Digit Recognizer |
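The `score_dataset` helper above cross-validates RMSE on `log(y)`, which is exactly how the House Prices leaderboard scores (RMSE between log-predictions and log-actuals) and is close to classic RMSLE, which uses `log1p`. A quick numeric sketch with made-up prices:

```python
import numpy as np

y_true = np.array([100000.0, 200000.0])
y_pred = np.array([110000.0, 180000.0])

rmse_on_logs = np.sqrt(np.mean((np.log(y_true) - np.log(y_pred)) ** 2))
rmsle = np.sqrt(np.mean((np.log1p(y_true) - np.log1p(y_pred)) ** 2))
print(rmse_on_logs, rmsle)  # ~0.1005 for both -- indistinguishable at this scale
```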
11,870,532 | X = df_train.copy()
y = X.pop("SalePrice")
baseline_score = score_dataset(X, y)
print(f"Baseline score: {baseline_score:.5f} RMSLE" )<prepare_x_and_y> | X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size = 0.1 ) | Digit Recognizer |
11,870,532 | X = df_train.copy()
y = X.pop("SalePrice")
mi_scores = make_mi_scores(X, y)
mi_scores.head()<drop_column> | model = keras.models.Sequential([layers.Conv2D(32,(3,3), activation="relu",padding='same', input_shape=(28,28,1)) ,
layers.MaxPooling2D(2,2),
layers.Dropout(0.25),
layers.Conv2D(64,(3,3), activation="relu",padding='same'),
layers.MaxPooling2D(2,2),
layers.Dropout(0.25),
layers.Flatten() ,
layers.BatchNormalization() ,
layers.Dense(128, activation="elu", kernel_initializer="he_normal"),
layers.BatchNormalization() ,
layers.Dense(32, activation="elu", kernel_initializer="he_normal"),
layers.BatchNormalization() ,
layers.Dense(10, activation="softmax")])
model.summary() | Digit Recognizer |
11,870,532 | def drop_uninformative(df, mi_scores):
return df.loc[:, mi_scores > 0.0]<create_dataframe> | optimizer = Adam(learning_rate=0.001, epsilon=1e-07)
model.compile(optimizer = optimizer, loss = 'categorical_crossentropy', metrics=['accuracy'])
earlyStopping = EarlyStopping(monitor='val_accuracy', patience=10, verbose=0, mode='auto')
mcp = ModelCheckpoint('.mdl_wts.hdf5', save_best_only=True, monitor='val_accuracy', mode='auto')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_accuracy', factor=0.1, patience=7, verbose=1, epsilon=1e-4, mode='auto' ) | Digit Recognizer |
11,870,532 | X = df_train.copy()
y = X.pop("SalePrice")
X = drop_uninformative(X, mi_scores)
score_dataset(X, y )<categorify> | datagen = ImageDataGenerator(
rotation_range=5,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=5,
zoom_range=0.1)
datagen.fit(X_train ) | Digit Recognizer |
11,870,532 | def label_encode(df):
X = df.copy()
for colname in X.select_dtypes(["category"]):
X[colname] = X[colname].cat.codes
return X<categorify> | Batch_size=100
Epochs = 100
history = model.fit_generator(datagen.flow(X_train, y_train, batch_size=Batch_size),
epochs = Epochs,
validation_data =(X_val,y_val),
verbose = 2,
steps_per_epoch=X_train.shape[0]//Batch_size,
callbacks = [earlyStopping, mcp, reduce_lr_loss] ) | Digit Recognizer |
11,870,532 | def mathematical_transforms(df):
X = pd.DataFrame()
X["LivLotRatio"] = df.GrLivArea / df.LotArea
X["Spaciousness"] =(df.FirstFlrSF + df.SecondFlrSF)/ df.TotRmsAbvGrd
return X
def interactions(df):
X = pd.get_dummies(df.BldgType, prefix="Bldg")
X = X.mul(df.GrLivArea, axis=0)
return X
def counts(df):
X = pd.DataFrame()
X["PorchTypes"] = df[[
"WoodDeckSF",
"OpenPorchSF",
"EnclosedPorch",
"Threeseasonporch",
"ScreenPorch"
]].gt(0.0 ).sum(axis=1)
return X
def break_down(df):
X = pd.DataFrame()
X["MSClass"] = df.MSSubClass.str.split("_", n=1, expand=True)[0]
return X
def group_transforms(df):
X = pd.DataFrame()
X["MedNhbdArea"] = df.groupby("Neighborhood")["GrLivArea"].transform("median")
X["MedCondQual"] = df.groupby("OverallQual")["OverallCond"].transform("median")
X["MedBsmt"] = df.groupby("BsmtQual")["TotalBsmtSF"].transform("median")
return X<create_dataframe> | model.load_weights(filepath = '.mdl_wts.hdf5')
scores = model.evaluate(X_val, y_val, callbacks = [earlyStopping, mcp, reduce_lr_loss] ) | Digit Recognizer |
11,870,532 | def apply_pca(X, standardize=True):
if standardize:
X =(X - X.mean(axis=0)) / X.std(axis=0)
pca = PCA()
X_pca = pca.fit_transform(X)
component_names = [f"PC{i+1}" for i in range(X_pca.shape[1])]
X_pca = pd.DataFrame(X_pca, columns=component_names)
loadings = pd.DataFrame(
pca.components_.T,
columns=component_names,
index=X.columns,
)
return pca, X_pca, loadings
def plot_variance(pca, width=8, dpi=100):
fig, axs = plt.subplots(1, 2)
n = pca.n_components_
grid = np.arange(1, n + 1)
evr = pca.explained_variance_ratio_
axs[0].bar(grid, evr)
axs[0].set(
xlabel="Component", title="% Explained Variance", ylim=(0.0, 1.0)
)
cv = np.cumsum(evr)
axs[1].plot(np.r_[0, grid], np.r_[0, cv], "o-")
axs[1].set(
xlabel="Component", title="% Cumulative Variance", ylim=(0.0, 1.0)
)
fig.set(figwidth=8, dpi=100)
return axs<feature_engineering> | for layer in model.layers:
if 'conv' in layer.name:
filters, biases = layer.get_weights()
print('Layer: ', layer.name, filters.shape)
print('Filter size:(', filters.shape[0], ',', filters.shape[1], ')')
print('Channels in this layer: ', filters.shape[2])
print('Number of filters: ', filters.shape[3])
count = 1
plt.figure(figsize =(18, 4))
for i in range(filters.shape[3]):
ax = plt.subplot(4, filters.shape[3]//4, count)
ax.set_xticks([])
ax.set_yticks([])
plt.imshow(filters[:,:,0, i], cmap=plt.cm.binary)
count+=1
plt.show() | Digit Recognizer |
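A usage sketch for the `apply_pca`/`plot_variance` helpers above (with the `columns=` fix applied, and assuming `from sklearn.decomposition import PCA` sits alongside the notebook's other imports), run on a few correlated size/age features:

```python
import matplotlib.pyplot as plt

features = ["GarageArea", "YearRemodAdd", "TotalBsmtSF", "GrLivArea"]
pca, X_pca, loadings = apply_pca(df_train[features])
print(loadings.round(2))  # how each original feature weighs into each component
plot_variance(pca)
plt.show()
```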
11,870,532 | def pca_inspired(df):
X = pd.DataFrame()
X["Feature1"] = df.GrLivArea + df.TotalBsmtSF
X["Feature2"] = df.YearRemodAdd * df.TotalBsmtSF
return X
def pca_components(df, features):
X = df.loc[:, features]
_, X_pca, _ = apply_pca(X)
return X_pca
pca_features = [
"GarageArea",
"YearRemodAdd",
"TotalBsmtSF",
"GrLivArea",
]<create_dataframe> | print('total number of layers',len(model.layers)) | Digit Recognizer |
11,870,532 | def indicate_outliers(df):
X_new = pd.DataFrame()
X_new["Outlier"] =(df.Neighborhood == "Edwards")&(df.SaleCondition == "Partial")
return X_new<categorify> | layer_outputs = [layer.output for layer in model.layers[0:6]]
activation_model = models.Model(inputs = model.input, outputs = layer_outputs ) | Digit Recognizer |
11,870,532 | class CrossFoldEncoder:
def __init__(self, encoder, **kwargs):
self.encoder_ = encoder
self.kwargs_ = kwargs
self.cv_ = KFold(n_splits=5)
def fit_transform(self, X, y, cols):
self.fitted_encoders_ = []
self.cols_ = cols
X_encoded = []
for idx_encode, idx_train in self.cv_.split(X):
fitted_encoder = self.encoder_(cols=cols, **self.kwargs_)
fitted_encoder.fit(
X.iloc[idx_encode, :], y.iloc[idx_encode],
)
X_encoded.append(fitted_encoder.transform(X.iloc[idx_train, :])[cols])
self.fitted_encoders_.append(fitted_encoder)
X_encoded = pd.concat(X_encoded)
X_encoded.columns = [name + "_encoded" for name in X_encoded.columns]
return X_encoded
def transform(self, X):
X_encoded_list = []
for fitted_encoder in self.fitted_encoders_:
X_encoded = fitted_encoder.transform(X)
X_encoded_list.append(X_encoded[self.cols_])
X_encoded = reduce(
lambda x, y: x.add(y, fill_value=0), X_encoded_list
)/ len(X_encoded_list)
X_encoded.columns = [name + "_encoded" for name in X_encoded.columns]
return X_encoded<categorify> | img_tensor = X_test[4].reshape(-1, 28, 28, 1)
activations = activation_model.predict(img_tensor ) | Digit Recognizer |
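A usage sketch for `CrossFoldEncoder` above, assuming the `category_encoders` package supplies the wrapped encoder (any encoder taking a `cols=` argument fits). Fitting on one fold and transforming the complementary rows keeps each row's own target out of its encoding:

```python
from category_encoders import MEstimateEncoder

encoder = CrossFoldEncoder(MEstimateEncoder, m=1)
X_encoded = encoder.fit_transform(X, y, cols=["Neighborhood", "MSSubClass"])
X = X.join(X_encoded)                        # adds Neighborhood_encoded, MSSubClass_encoded
X_test_encoded = encoder.transform(df_test)  # averaged across the 5 fold encoders
```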
11,870,532 | <prepare_x_and_y><EOS> | submissions = pd.DataFrame({"ImageId": list(range(1,len(test_pred)+1)) ,
"Label": test_pred})
submissions.to_csv("submission.csv", index=False, header=True ) | Digit Recognizer |
10,345,485 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<init_hyperparams> | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from keras.models import Sequential
from keras.layers import Conv2D, Lambda, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten
from keras.layers.normalization import BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
| Digit Recognizer |
10,345,485 | def objective(trial):
xgb_params = dict(
max_depth=trial.suggest_int("max_depth", 2, 10),
learning_rate=trial.suggest_float("learning_rate", 1e-4, 1e-1, log=True),
n_estimators=trial.suggest_int("n_estimators", 1000, 8000),
min_child_weight=trial.suggest_int("min_child_weight", 1, 10),
colsample_bytree=trial.suggest_float("colsample_bytree", 0.2, 1.0),
subsample=trial.suggest_float("subsample", 0.2, 1.0),
reg_alpha=trial.suggest_float("reg_alpha", 1e-4, 1e2, log=True),
reg_lambda=trial.suggest_float("reg_lambda", 1e-4, 1e2, log=True),
)
xgb = XGBRegressor(tree_method="gpu_hist", gpu_id=0, **xgb_params)
return score_dataset(X_train, y_train, xgb)
study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=20)
xgb_params = study.best_params<save_to_csv> | train = pd.read_csv('.. /input/train.csv')
test = pd.read_csv('.. /input/test.csv')
sub = pd.read_csv('.. /input/sample_submission.csv')
| Digit Recognizer |
10,345,485 | X_train, X_test = create_features(df_train, df_test)
y_train = df_train.loc[:, "SalePrice"]
xgb = XGBRegressor(tree_method="gpu_hist", gpu_id=0, **xgb_params)
xgb.fit(X_train, np.log(y))
predictions = np.exp(xgb.predict(X_test))
output = pd.DataFrame({'Id': X_test.index, 'SalePrice': predictions})
output.to_csv('my_submission.csv', index=False)
print("Your submission was successfully saved!" )<import_modules> | print(f"Training data size is {train.shape}
Testing data size is {test.shape}" ) | Digit Recognizer |
10,345,485 | warnings.filterwarnings('ignore' )<compute_test_metric> | X = train.drop(['label'], 1 ).values
y = train['label'].values
test_x = test.values | Digit Recognizer |
10,345,485 | def evaluation_metric(predicted_value, actual_value)-> float:
rmse = mean_squared_error(actual_value,predicted_value, squared=False)
return rmse
def verify_remodelling(remodelled_year:int, built_year:int):
if remodelled_year > built_year:
return True
elif remodelled_year == built_year:
return False
else:
return -1
def selling_period(month_sold: int)-> str:
if month_sold >= 3 and month_sold <= 5:
return 'spring'
elif month_sold >= 6 and month_sold <= 8:
return 'summer'
elif month_sold >= 9 and month_sold <= 11:
return 'autumn'
else:
return 'winter'
RANDOM_STATE = 0<load_from_csv> | X = X / 255.0
test_x = test_x / 255.0 | Digit Recognizer |
10,345,485 | train_set = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
test_set = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv' )<feature_engineering> | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=0 ) | Digit Recognizer |
10,345,485 | train_set['total_square_footage'] = train_set['TotalBsmtSF'] + train_set['GrLivArea']
test_set['total_square_footage'] = test_set['TotalBsmtSF'] + test_set['GrLivArea']
train_set['remodelling_done'] = train_set.apply(lambda x: verify_remodelling(x['YearRemodAdd'], x['YearBuilt']), axis=1)
test_set['remodelling_done'] = test_set.apply(lambda x: verify_remodelling(x['YearRemodAdd'], x['YearBuilt']), axis=1)
train_set['selling_season'] = train_set.apply(lambda x: selling_period(x['MoSold']), axis=1)
test_set['selling_season'] = test_set.apply(lambda x: selling_period(x['MoSold']), axis=1)
train_set['total_area_of_house'] = train_set['TotalBsmtSF'] + train_set['1stFlrSF'] + train_set['2ndFlrSF']
test_set['total_area_of_house'] = test_set['TotalBsmtSF'] + test_set['1stFlrSF'] + test_set['2ndFlrSF']
train_set['age_of_building'] = train_set['YearBuilt'].apply(lambda x: pd.datetime.now().year - x)
test_set['age_of_building'] = test_set['YearBuilt'].apply(lambda x: pd.datetime.now().year - x)
<count_missing_values> | mean = np.mean(X_train)
std = np.std(X_train)
def standardize(x):
return(x-mean)/std | Digit Recognizer |
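The `standardize` helper above closes over the training mean and std; a sketch of how it would typically be applied so every split is scaled with the training statistics (avoiding leakage from the validation and test sets):

```python
# Apply the training-set mean/std to all splits
X_train = standardize(X_train)
X_test = standardize(X_test)
test_x = standardize(test_x)
```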
10,345,485 | train_set[ordinal_cat_features].isnull().sum()<categorify> | epochs = 50
batch_size = 64 | Digit Recognizer |
10,345,485 | for col in ordinal_cat_features:
train_set[col] = train_set[col].map(ordinal_ranking)
test_set[col] = test_set[col].map(ordinal_ranking)
train_set[col] = train_set[col].fillna(0)
test_set[col] = test_set[col].fillna(0 )<count_missing_values> | model=Sequential()
model.add(Conv2D(filters=64, kernel_size =(3,3), activation="relu", input_shape=(28,28,1)))
model.add(Conv2D(filters=64, kernel_size =(3,3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization())
model.add(Conv2D(filters=128, kernel_size =(3,3), activation="relu"))
model.add(Conv2D(filters=128, kernel_size =(3,3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(BatchNormalization())
model.add(Conv2D(filters=256, kernel_size =(3,3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Flatten())
model.add(BatchNormalization())
model.add(Dense(512,activation="relu"))
model.add(Dense(10,activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"] ) | Digit Recognizer |
10,345,485 | train_set[continious_features].isnull().sum()<count_missing_values> | datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
train_gen = datagen.flow(X_train, y_train, batch_size=batch_size)
test_gen = datagen.flow(X_test, y_test, batch_size=batch_size ) | Digit Recognizer |
10,345,485 | test_set[continious_features].isnull().sum()<feature_engineering> | history = model.fit_generator(train_gen,
epochs = epochs,
steps_per_epoch = X_train.shape[0] // batch_size,
validation_data = test_gen,
validation_steps = X_test.shape[0] // batch_size ) | Digit Recognizer |