kernel_id | prompt | completion | comp_name |
---|---|---|---|
3,593,820 | test_df = pd.merge(test_df, Tourney_Seeds, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left')
test_df.rename(columns={'Seed':'Seed1'}, inplace=True)
test_df = test_df.drop('TeamID', axis=1)
test_df = pd.merge(test_df, Tourney_Seeds, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left')
test_df.rename(columns={'Seed':'Seed2'}, inplace=True)
test_df = test_df.drop('TeamID', axis=1 )<save_to_csv> | def get_coords(imgpnts):
return(( imgpnts+1)*48 ).numpy() | Facial Keypoints Detection |
3,593,820 | test_df = pd.merge(test_df, RegularSeason_Compact_Results_Final, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left')
test_df.rename(columns={'Score':'ScoreT1'}, inplace=True)
test_df = test_df.drop('TeamID', axis=1)
test_df = pd.merge(test_df, RegularSeason_Compact_Results_Final, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left')
test_df.rename(columns={'Score':'ScoreT2'}, inplace=True)
test_df = test_df.drop('TeamID', axis=1)
test_df
test_df.to_csv('test_df_Test.csv', index=False )<data_type_conversions> | test_img = open_image(test_path/'1600.jpg')
pred = learn.predict(test_img)
test_img.show(y=pred[0] ) | Facial Keypoints Detection |
3,593,820 | test_df['Seed1'] = test_df['Seed1'].str.extract(r'(\d+)', expand=True)
test_df['Seed2'] = test_df['Seed2'].str.extract(r'(\d+)', expand=True)
test_df.Seed1 = pd.to_numeric(test_df.Seed1, errors='coerce')
test_df.Seed2 = pd.to_numeric(test_df.Seed2, errors='coerce' )<feature_engineering> | test_img = open_image(test_path/'1600.jpg')
pred = learn.predict(test_img)
test_img.show(y=ImagePoints(FlowField(test_img.size,torch.from_numpy(get_coords(pred[1])))) ) | Facial Keypoints Detection |
3,593,820 | test_df['Seed_diff'] = test_df['Seed1'] - test_df['Seed2']
test_df['ScoreT_diff'] = test_df['ScoreT1'] - test_df['ScoreT2']
test_df = test_df.drop(['ID', 'Pred', 'Season', 'WTeamID', 'LTeamID'], axis=1)
test_df<prepare_x_and_y> | a=list(train_csv.columns.values)
a.remove('Image')
a.append('ImageId' ) | Facial Keypoints Detection |
3,593,820 | X = tourney_result_Final1.drop('result', axis=1)
y = tourney_result_Final1.result<normalization> | test_preds = pd.DataFrame(columns=a ) | Facial Keypoints Detection |
3,593,820 | df = pd.concat([X, test_df], axis=0, sort=False ).reset_index(drop=True)
df_log = pd.DataFrame(
preprocessing.MinMaxScaler().fit_transform(df),
columns=df.columns,
index=df.index
)
train_log, test_log = df_log.iloc[:len(X),:], df_log.iloc[len(X):,:].reset_index(drop=True )<train_on_grid> | f = IntProgress(min=0, max=test_csv.count() [0])
display(f)
for test_index in range(test_csv.count() [0]):
timg = open_image(test_path/(str(test_index)+'.jpg'))
pred = learn.predict(timg)
a=np.abs(flp(get_coords(pred[1] ).reshape(1,-1)[0]))
a=np.append(a,test_csv.loc[test_index].ImageId)
test_preds.loc[test_index]=a
f.value+=1 | Facial Keypoints Detection |
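(Note: flp in the loop above is never defined in this dump. fastai's ImagePoints flow stores coordinates as (row, col) pairs while the submission columns are ordered x then y, so a plausible sketch of the missing helper, under that assumption, is:)

# Hypothetical helper, not part of the original kernel: swap each (row, col) pair of the
# flattened prediction into (x, y) order to match the keypoint column names.
import numpy as np

def flp(flat_points):
    pts = np.asarray(flat_points).reshape(-1, 2)  # (row, col) pairs
    return pts[:, ::-1].reshape(-1)               # x1, y1, x2, y2, ...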
3,593,820 | logreg = LogisticRegression()
logreg.fit(train_log, y)
coeff_logreg = pd.DataFrame(train_log.columns.delete(0))
coeff_logreg.columns = ['feature']
coeff_logreg["score_logreg"] = pd.Series(logreg.coef_[0])
coeff_logreg.sort_values(by='score_logreg', ascending=False )<predict_on_test> | test_preds.ImageId=test_preds.ImageId.astype('int')
test_preds.head() | Facial Keypoints Detection |
3,593,820 | y_logreg_train = logreg.predict(train_log)
y_logreg_pred = logreg.predict_proba(test_log )<train_model> | sub = pd.DataFrame(columns=['RowId','Location'])
for index,row in id_lookup.iterrows() :
fname = row.FeatureName
trow=test_preds.loc[test_preds['ImageId']==row.ImageId]
sub.loc[index]=[row.RowId,trow.iloc[0][fname]] | Facial Keypoints Detection |
3,593,820 | clf = RandomForestClassifier(n_estimators=200,max_depth=50)
clf.fit(train_log, y)
clf_probs = clf.predict_proba(test_log )<prepare_output> | sub.RowId=sub.RowId.astype('int')
sub.head() | Facial Keypoints Detection |
3,593,820 | <load_from_csv><EOS> | sub.to_csv("sub.csv",index=False ) | Facial Keypoints Detection |
10,870,991 | <SOS> metric: RMSE Kaggle data source: facial-keypoints-detection<save_to_csv> | !unzip ../input/facial-keypoints-detection/test.zip
!unzip ../input/facial-keypoints-detection/training.zip | Facial Keypoints Detection |
10,870,991 | submission_df.to_csv('submission_New8.csv', index=False )<set_options> | %matplotlib inline | Facial Keypoints Detection |
10,870,991 | pd.set_option('max_columns', None)
plt.style.use('fivethirtyeight')
%matplotlib inline
py.init_notebook_mode(connected=True)
warnings.filterwarnings('ignore')
print("Libraries imported!" )<train_model> | device = torch.device('cuda:0' ) | Facial Keypoints Detection |
10,870,991 | class BaseModel(object):
def __init__(self, train_df, test_df, target, features, categoricals=[],
n_splits=3, cv_method="KFold", group=None, task="regression",
parameter_tuning=False, scaler=None, verbose=True):
self.train_df = train_df
self.test_df = test_df
self.target = target
self.features = features
self.n_splits = n_splits
self.categoricals = categoricals
self.cv_method = cv_method
self.group = group
self.task = task
self.parameter_tuning = parameter_tuning
self.scaler = scaler
self.cv = self.get_cv()
self.verbose = verbose
self.params = self.get_params()
self.y_pred, self.score, self.model, self.oof, self.y_val, self.fi_df = self.fit()
def train_model(self, train_set, val_set):
raise NotImplementedError
def get_params(self):
raise NotImplementedError
def convert_dataset(self, x_train, y_train, x_val, y_val):
raise NotImplementedError
def convert_x(self, x):
return x
def calc_metric(self, y_true, y_pred):
if self.task == "classification":
return log_loss(y_true, y_pred)
elif self.task == "regression":
return np.sqrt(mean_squared_error(y_true, y_pred))
def get_cv(self):
if self.cv_method == "KFold":
cv = KFold(n_splits=self.n_splits, shuffle=True, random_state=42)
return cv.split(self.train_df)
elif self.cv_method == "StratifiedKFold":
cv = StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=42)
return cv.split(self.train_df, self.train_df[self.target])
elif self.cv_method == "TimeSeriesSplit":
cv = TimeSeriesSplit(max_train_size=None, n_splits=self.n_splits)
return cv.split(self.train_df)
elif self.cv_method == "GroupKFold":
            cv = GroupKFold(n_splits=self.n_splits)  # GroupKFold does not accept shuffle/random_state
return cv.split(self.train_df, self.train_df[self.target], self.group)
elif self.cv_method == "StratifiedGroupKFold":
cv = StratifiedGroupKFold(n_splits=self.n_splits, shuffle=True, random_state=42)
return cv.split(self.train_df, self.train_df[self.target], self.group)
def fit(self):
oof_pred = np.zeros(( self.train_df.shape[0],))
y_vals = np.zeros(( self.train_df.shape[0],))
y_pred = np.zeros(( self.test_df.shape[0],))
if self.group is not None:
if self.group in self.features:
self.features.remove(self.group)
if self.group in self.categoricals:
self.categoricals.remove(self.group)
fi = np.zeros(( self.n_splits, len(self.features)))
if self.scaler is not None:
numerical_features = [f for f in self.features if f not in self.categoricals]
self.train_df[numerical_features] = self.train_df[numerical_features].fillna(self.train_df[numerical_features].median())
self.test_df[numerical_features] = self.test_df[numerical_features].fillna(self.test_df[numerical_features].median())
self.train_df[self.categoricals] = self.train_df[self.categoricals].fillna(self.train_df[self.categoricals].mode().iloc[0])
self.test_df[self.categoricals] = self.test_df[self.categoricals].fillna(self.test_df[self.categoricals].mode().iloc[0])
if self.scaler == "MinMax":
scaler = MinMaxScaler()
elif self.scaler == "Standard":
scaler = StandardScaler()
df = pd.concat([self.train_df[numerical_features], self.test_df[numerical_features]], ignore_index=True)
scaler.fit(df[numerical_features])
x_test = self.test_df.copy()
x_test[numerical_features] = scaler.transform(x_test[numerical_features])
x_test = [np.absolute(x_test[i])for i in self.categoricals] + [x_test[numerical_features]]
else:
x_test = self.test_df[self.features]
for fold,(train_idx, val_idx)in enumerate(self.cv):
x_train, x_val = self.train_df.loc[train_idx, self.features], self.train_df.loc[val_idx, self.features]
y_train, y_val = self.train_df.loc[train_idx, self.target], self.train_df.loc[val_idx, self.target]
if self.scaler is not None:
x_train[numerical_features] = scaler.transform(x_train[numerical_features])
x_val[numerical_features] = scaler.transform(x_val[numerical_features])
x_train = [np.absolute(x_train[i])for i in self.categoricals] + [x_train[numerical_features]]
x_val = [np.absolute(x_val[i])for i in self.categoricals] + [x_val[numerical_features]]
train_set, val_set = self.convert_dataset(x_train, y_train, x_val, y_val)
model, importance = self.train_model(train_set, val_set)
fi[fold, :] = importance
conv_x_val = self.convert_x(x_val)
y_vals[val_idx] = y_val
oof_pred[val_idx] = model.predict(conv_x_val ).reshape(oof_pred[val_idx].shape)
x_test = self.convert_x(x_test)
y_pred += model.predict(x_test ).reshape(y_pred.shape)/ self.n_splits
print('Partial score of fold {} is: {}'.format(fold, self.calc_metric(y_val, oof_pred[val_idx])))
fi_df = pd.DataFrame()
for n in np.arange(self.n_splits):
tmp = pd.DataFrame()
tmp["features"] = self.features
tmp["importance"] = fi[n, :]
tmp["fold"] = n
fi_df = pd.concat([fi_df, tmp], ignore_index=True)
gfi = fi_df[["features", "importance"]].groupby(["features"] ).mean().reset_index()
fi_df = fi_df.merge(gfi, on="features", how="left", suffixes=('', '_mean'))
loss_score = self.calc_metric(self.train_df[self.target], oof_pred)
if self.verbose:
print('Our oof loss score is: ', loss_score)
return y_pred, loss_score, model, oof_pred, y_vals, fi_df
def plot_feature_importance(self, rank_range=[1, 50]):
fig, ax = plt.subplots(1, 1, figsize=(10, 20))
sorted_df = self.fi_df.sort_values(by = "importance_mean", ascending=False ).reset_index().iloc[self.n_splits *(rank_range[0]-1): self.n_splits * rank_range[1]]
sns.barplot(data=sorted_df, x ="importance", y ="features", orient='h')
ax.set_xlabel("feature importance")
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
        return sorted_df<train_model> | class FacialKeypointsTrainDataset(Dataset):
def __init__(self, data_file, transform=None):
self.transform = transform
dataset = pd.read_csv(data_file)
dataset.fillna(method='ffill', inplace=True)
self.images = dataset['Image']
dataset = dataset.drop(['Image'], axis=1)
self.positions_name = list(dataset.columns)
self.positions = dataset.to_numpy()
def __len__(self):
return len(self.positions)
def __getitem__(self, index):
x = self.images[index]
x = np.array([float(x)for x in x.split(' ')])
x = x.reshape(( 96, 96))
img = np.stack(( x, x, x), axis=-1)/ 255.
y = self.positions[index]
sample = {'img': img, 'landmarks': y}
if self.transform:
sample = self.transform(sample)
return sample
def show_samples(self, indices, title=None, count=10):
plt.figure(figsize=(count*3, 3))
display_indices = indices[:count]
if title:
plt.suptitle(title)
for i, index in enumerate(display_indices):
sample = self.__getitem__(index)
x, y = sample['img'], sample['landmarks']
x = np.asarray(x)
y = np.asarray(y)
y = y.reshape(( y.size // 2, 2))
plt.subplot(1, count, i + 1)
if self.transform:
plt.imshow(np.transpose(x,(1, 2, 0)))
else:
plt.imshow(x)
plt.scatter(y[:, 0], y[:, 1], s=15, marker='.', c='r')
plt.grid(False)
plt.axis('off' ) | Facial Keypoints Detection |
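(Note: the training loop later in this kernel feeds sample['img'] straight into a ResNet, and show_samples transposes (C, H, W) back to (H, W, C) whenever a transform is set, so a tensor-conversion transform is implied but never shown in this dump; the dataset instantiation shown later also omits the transform argument, so treat the following as an assumption about a cell that was not captured. A minimal sketch:)

# Hypothetical transform, not captured in this dump: convert a sample to CHW float tensors
# so the PyTorch DataLoader and the ResNet below can consume it.
import numpy as np
import torch

class ToTensor:
    def __call__(self, sample):
        img, landmarks = sample['img'], sample['landmarks']
        img = np.transpose(img, (2, 0, 1)).copy()  # HWC -> CHW
        return {'img': torch.from_numpy(img).float(),
                'landmarks': torch.from_numpy(np.asarray(landmarks)).float()}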
10,870,991 | class LgbModel(BaseModel):
def train_model(self, train_set, val_set):
verbosity = 100 if self.verbose else 0
model = lgb.train(self.params, train_set, num_boost_round = 5000, valid_sets=[train_set, val_set], verbose_eval=verbosity)
fi = model.feature_importance(importance_type="gain")
return model, fi
def convert_dataset(self, x_train, y_train, x_val, y_val):
train_set = lgb.Dataset(x_train, y_train, categorical_feature=self.categoricals)
val_set = lgb.Dataset(x_val, y_val, categorical_feature=self.categoricals)
return train_set, val_set
def get_params(self):
params = {
'num_leaves': 127,
'min_data_in_leaf': 50,
'max_depth': -1,
'learning_rate': 0.005,
"boosting_type": "gbdt",
"bagging_seed": 11,
"verbosity": -1,
'random_state': 42,
}
if self.task == "regression":
params["objective"] = "regression"
params["metric"] = "rmse"
elif self.task == "classification":
params["objective"] = "binary"
params["metric"] = "binary_logloss"
if self.parameter_tuning == True:
def objective(trial):
train_x, test_x, train_y, test_y = train_test_split(self.train_df[self.features],
self.train_df[self.target],
test_size=0.3, random_state=42)
dtrain = lgb.Dataset(train_x, train_y, categorical_feature=self.categoricals)
dtest = lgb.Dataset(test_x, test_y, categorical_feature=self.categoricals)
hyperparams = {'num_leaves': trial.suggest_int('num_leaves', 24, 1024),
'boosting_type': 'gbdt',
'objective': params["objective"],
'metric': params["metric"],
'max_depth': trial.suggest_int('max_depth', 4, 16),
'min_child_weight': trial.suggest_int('min_child_weight', 1, 20),
'feature_fraction': trial.suggest_uniform('feature_fraction', 0.4, 1.0),
'bagging_fraction': trial.suggest_uniform('bagging_fraction', 0.4, 1.0),
'bagging_freq': trial.suggest_int('bagging_freq', 1, 7),
'min_child_samples': trial.suggest_int('min_child_samples', 5, 100),
'lambda_l1': trial.suggest_loguniform('lambda_l1', 1e-8, 10.0),
'lambda_l2': trial.suggest_loguniform('lambda_l2', 1e-8, 10.0),
'early_stopping_rounds': 100
}
model = lgb.train(hyperparams, dtrain, valid_sets=dtest, verbose_eval=500)
pred = model.predict(test_x)
if self.task == "classification":
return log_loss(test_y, pred)
elif self.task == "regression":
return np.sqrt(mean_squared_error(test_y, pred))
study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=50)
print('Number of finished trials: {}'.format(len(study.trials)))
print('Best trial:')
trial = study.best_trial
print(' Value: {}'.format(trial.value))
print(' Params: ')
for key, value in trial.params.items() :
print(' {}: {}'.format(key, value))
params = trial.params
params["learning_rate"] = 0.001
plot_optimization_history(study)
        return params<train_model> | class FacialKeypointsTestDataset(Dataset):
def __init__(self, data_file):
self.dataset = pd.read_csv(data_file)
def __len__(self):
return len(self.dataset)
def __getitem__(self, index):
data_sr = self.dataset.iloc[index]
img_id = int(data_sr['ImageId'])
x = data_sr['Image']
x = np.array([float(x)for x in x.split(' ')])
x = x.reshape(( 96, 96))
img = np.stack(( x, x, x), axis=-1)/ 255.
img = np.transpose(img,(2, 0, 1)).copy()
img_t = torch.from_numpy(img ).type(torch.FloatTensor)
return img_t, img_id
def show_samples(self, indices, title=None, count=10):
plt.figure(figsize=(count*3, 3))
display_indices = indices[:count]
if title:
plt.suptitle(title)
for i, index in enumerate(display_indices):
x, y = self.__getitem__(index)
x = np.asarray(x)
plt.subplot(1, count, i + 1)
plt.imshow(np.transpose(x,(1, 2, 0)))
plt.title(f'Image Id {y}')
plt.grid(False)
plt.axis('off' ) | Facial Keypoints Detection |
10,870,991 | class CatbModel(BaseModel):
def train_model(self, train_set, val_set):
verbosity = 100 if self.verbose else 0
if self.task == "regression":
model = CatBoostRegressor(**self.params)
elif self.task == "classification":
model = CatBoostClassifier(**self.params)
model.fit(train_set['X'], train_set['y'], eval_set=(val_set['X'], val_set['y']),
verbose=verbosity, cat_features=self.categoricals)
return model, model.get_feature_importance()
def convert_dataset(self, x_train, y_train, x_val, y_val):
train_set = {'X': x_train, 'y': y_train}
val_set = {'X': x_val, 'y': y_val}
return train_set, val_set
def get_params(self):
params = { 'task_type': "CPU",
'learning_rate': 0.01,
'iterations': 1000,
'random_seed': 42,
'use_best_model': True
}
if self.task == "regression":
params["loss_function"] = "RMSE"
elif self.task == "classification":
params["loss_function"] = "Logloss"
        return params<normalization> | train_dataset = FacialKeypointsTrainDataset('./training.csv')
test_dataset = FacialKeypointsTestDataset('./test.csv')
train_plot_indices = np.random.choice(len(train_dataset), 10)
test_plot_indices = np.random.choice(len(test_dataset), 10)
train_dataset.show_samples(
train_plot_indices, title='Samples from Facial Keypoints Train Dataset', count=7)
test_dataset.show_samples(
test_plot_indices, title='Samples from Facial Keypoints Test Dataset', count=7 ) | Facial Keypoints Detection |
10,870,991 | class Mish(Layer):
def __init__(self, **kwargs):
super(Mish, self ).__init__(**kwargs)
def build(self, input_shape):
super(Mish, self ).build(input_shape)
def call(self, x):
return x * K.tanh(K.softplus(x))
def compute_output_shape(self, input_shape):
return input_shape
class LayerNormalization(keras.layers.Layer):
def __init__(self,
center=True,
scale=True,
epsilon=None,
gamma_initializer='ones',
beta_initializer='zeros',
gamma_regularizer=None,
beta_regularizer=None,
gamma_constraint=None,
beta_constraint=None,
**kwargs):
super(LayerNormalization, self ).__init__(**kwargs)
self.supports_masking = True
self.center = center
self.scale = scale
if epsilon is None:
epsilon = K.epsilon() * K.epsilon()
self.epsilon = epsilon
self.gamma_initializer = keras.initializers.get(gamma_initializer)
self.beta_initializer = keras.initializers.get(beta_initializer)
self.gamma_regularizer = keras.regularizers.get(gamma_regularizer)
self.beta_regularizer = keras.regularizers.get(beta_regularizer)
self.gamma_constraint = keras.constraints.get(gamma_constraint)
self.beta_constraint = keras.constraints.get(beta_constraint)
self.gamma, self.beta = None, None
def get_config(self):
config = {
'center': self.center,
'scale': self.scale,
'epsilon': self.epsilon,
'gamma_initializer': keras.initializers.serialize(self.gamma_initializer),
'beta_initializer': keras.initializers.serialize(self.beta_initializer),
'gamma_regularizer': keras.regularizers.serialize(self.gamma_regularizer),
'beta_regularizer': keras.regularizers.serialize(self.beta_regularizer),
'gamma_constraint': keras.constraints.serialize(self.gamma_constraint),
'beta_constraint': keras.constraints.serialize(self.beta_constraint),
}
base_config = super(LayerNormalization, self ).get_config()
return dict(list(base_config.items())+ list(config.items()))
def compute_output_shape(self, input_shape):
return input_shape
def compute_mask(self, inputs, input_mask=None):
return input_mask
def build(self, input_shape):
shape = input_shape[-1:]
if self.scale:
self.gamma = self.add_weight(
shape=shape,
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
name='gamma',
)
if self.center:
self.beta = self.add_weight(
shape=shape,
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
name='beta',
)
super(LayerNormalization, self ).build(input_shape)
def call(self, inputs, training=None):
mean = K.mean(inputs, axis=-1, keepdims=True)
variance = K.mean(K.square(inputs - mean), axis=-1, keepdims=True)
std = K.sqrt(variance + self.epsilon)
outputs =(inputs - mean)/ std
if self.scale:
outputs *= self.gamma
if self.center:
outputs += self.beta
return outputs<train_model> | batch_size = 32
data_size = len(train_dataset)
validation_fraction =.2
val_split_size = int(np.floor(validation_fraction * data_size))
indeces = list(range(data_size))
np.random.shuffle(indeces)
val_indeces, train_indeces = indeces[:val_split_size], indeces[val_split_size:]
test_indeces = list(range(len(test_dataset)))
print(f'train size = {len(train_indeces)}, validation size = {len(val_indeces)}')
print(f'test size = {len(test_indeces)}')
train_sampler = SubsetRandomSampler(train_indeces)
val_sampler = SubsetRandomSampler(val_indeces)
test_sampler = SubsetRandomSampler(test_indeces)
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=batch_size,
sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=batch_size,
sampler=val_sampler)
test_loader = torch.utils.data.DataLoader(test_dataset,
batch_size=batch_size,
sampler=test_sampler ) | Facial Keypoints Detection |
10,870,991 | class NeuralNetworkModel(BaseModel):
def train_model(self, train_set, val_set):
inputs = []
embeddings = []
embedding_out_dim = self.params['embedding_out_dim']
n_neuron = self.params['hidden_units']
for i in self.categoricals:
input_ = Input(shape=(1,))
embedding = Embedding(int(np.absolute(self.train_df[i] ).max() + 1), embedding_out_dim, input_length=1 )(input_)
embedding = Reshape(target_shape=(embedding_out_dim,))(embedding)
inputs.append(input_)
embeddings.append(embedding)
input_numeric = Input(shape=(len(self.features)- len(self.categoricals),))
embedding_numeric = Dense(n_neuron )(input_numeric)
embedding_numeric = Mish()(embedding_numeric)
inputs.append(input_numeric)
embeddings.append(embedding_numeric)
x = Concatenate()(embeddings)
for i in np.arange(self.params['hidden_layers'] - 1):
x = Dense(n_neuron //(2 *(i+1)) )(x)
x = Mish()(x)
x = Dropout(self.params['hidden_dropout'] )(x)
x = LayerNormalization()(x)
if self.task == "regression":
out = Dense(1, activation="linear", name = "out" )(x)
loss = "mse"
elif self.task == "classification":
out = Dense(1, activation='sigmoid', name = 'out' )(x)
loss = "binary_crossentropy"
model = Model(inputs=inputs, outputs=out)
model.compile(loss=loss, optimizer=Adam(lr=1e-04, beta_1=0.9, beta_2=0.999, decay=1e-04))
er = EarlyStopping(patience=10, min_delta=1e-4, restore_best_weights=True, monitor='val_loss')
ReduceLR = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, epsilon=1e-4, mode='min')
model.fit(train_set['X'], train_set['y'], callbacks=[er, ReduceLR],
epochs=self.params['epochs'], batch_size=self.params['batch_size'],
validation_data=[val_set['X'], val_set['y']])
fi = np.zeros(len(self.features))
return model, fi
def convert_dataset(self, x_train, y_train, x_val, y_val):
train_set = {'X': x_train, 'y': y_train}
val_set = {'X': x_val, 'y': y_val}
return train_set, val_set
def get_params(self):
params = {
'input_dropout': 0.0,
'hidden_layers': 2,
'hidden_units': 128,
'embedding_out_dim': 4,
'hidden_activation': 'relu',
'hidden_dropout': 0.05,
'batch_norm': 'before_act',
'optimizer': {'type': 'adam', 'lr': 0.001},
'batch_size': 128,
'epochs': 80
}
return params<load_from_csv> | nn_model = models.resnet18(pretrained=True)
nn_model.fc = nn.Linear(nn_model.fc.in_features, 30)
nn_model = nn_model.type(torch.FloatTensor)
nn_model = nn_model.to(device)
*old_params, new_params = nn_model.parameters() | Facial Keypoints Detection |
10,870,991 | data_dict = {}
for i in glob.glob('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WDataFiles_Stage1/*'):
name = i.split('/')[-1].split('.')[0]
if name != 'WTeamSpellings':
data_dict[name] = pd.read_csv(i)
else:
data_dict[name] = pd.read_csv(i, encoding='cp1252' )<feature_engineering> | def train_model(model, train_loader, val_loader, loss, optimizer, scheduler, num_epoch, plot_epoch):
loss_history = []
train_rmse_history = []
val_rmse_history = []
best_model = None
best_val_rmse = None
indices = np.random.choice(batch_size, 10)
for epoch in range(num_epoch):
print(f'Epoch {epoch + 1:2d} / {num_epoch:2d}', end=' ')
model.train()
average_loss = 0
average_rmse = 0
for i_step, sample in enumerate(train_loader):
x, y = sample['img'], sample['landmarks']
x_gpu = x.to(device)
y_gpu = y.to(device)
prediction = model(x_gpu)
loss_value = loss(prediction, y_gpu)
optimizer.zero_grad()
loss_value.backward()
optimizer.step()
average_loss += loss_value.item()
rmse = mean_squared_error(
y,
prediction.cpu().detach().numpy() ,
squared=False)
average_rmse += rmse
        average_loss = average_loss / (i_step + 1)  # i_step is zero-based, so the batch count is i_step + 1
        train_rmse = average_rmse / (i_step + 1)
val_rmse = compute_rmse(model, val_loader)
loss_history.append(average_loss)
train_rmse_history.append(train_rmse)
val_rmse_history.append(val_rmse)
print(f'Loss = {average_loss:.4f}, Train RMSE = {train_rmse:.4f}, Val RMSE = {val_rmse:.4f}')
if best_val_rmse is None:
best_val_rmse = val_rmse
best_model = copy.deepcopy(model)
if val_rmse < best_val_rmse:
best_val_rmse = val_rmse
best_model = copy.deepcopy(model)
scheduler.step()
if(( epoch + 1)% plot_epoch)== 0:
plot_results(model, val_loader, indices, title=f'Examples for epoch {epoch + 1}')
print(f' Best val RMSE = {best_val_rmse:.4f}')
return loss_history, train_rmse_history, val_rmse_history, best_val_rmse, best_model
def compute_rmse(model, val_loader):
average_rmse = 0
model.eval()
for i_step, sample in enumerate(val_loader):
x, y = sample['img'], sample['landmarks']
x_gpu = x.to(device)
with torch.no_grad() :
prediction = model(x_gpu)
rmse = mean_squared_error(
y,
prediction.cpu().detach().numpy() ,
squared=False)
average_rmse += rmse
    val_rmse = float(average_rmse) / (i_step + 1)
return val_rmse
def plot_results(model, val_loader, indices, title=None, count=10):
plt.figure(figsize=(count*3, 3))
display_indices = indices[:count]
if title:
plt.suptitle(title)
model.eval()
for i_step, samples in enumerate(val_loader):
x_gpu, _ = samples['img'], samples['landmarks']
x_gpu = x_gpu.to(device)
with torch.no_grad() :
prediction = model(x_gpu)
ys = prediction.cpu().detach().numpy()
xs = x_gpu.cpu().detach().numpy()
for i, index in enumerate(display_indices):
x = xs[index]
y = ys[index]
y = y.reshape(( y.size // 2, 2))
plt.subplot(1, count, i + 1)
if x.shape[0] == 3:
plt.imshow(np.transpose(x,(1, 2, 0)))
else:
plt.imshow(x)
plt.scatter(y[:, 0], y[:, 1], s=15, marker='.', c='r')
plt.grid(False)
plt.axis('off')
plt.show()
break | Facial Keypoints Detection |
10,870,991 | data_dict['WNCAATourneySeeds']['Seed'] = data_dict['WNCAATourneySeeds']['Seed'].apply(lambda x: int(x[1:3]))
data_dict[fname].head()<load_from_csv> | loss = nn.MSELoss().to(device)
optimizer = optim.Adam([
{'params': old_params, 'lr': 0.0001},
{'params': new_params}
], lr=0.001, weight_decay=0.01)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=1.)
(loss_history, train_rmse_history, val_rmse_history,
best_val_rmse, best_model)= train_model(nn_model, train_loader, val_loader, loss,
optimizer, scheduler, num_epoch=50, plot_epoch=3 ) | Facial Keypoints Detection |
10,870,991 | test = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv')
print(test.shape)
test.head()<feature_engineering> | def test_dataset_prediction(test_loader, submissions_data_file,
lookup_table_data_file, positions_name, model):
submission = pd.read_csv(submissions_data_file)
lookup_table = pd.read_csv(lookup_table_data_file)
model.eval()
for i_step,(x, img_id)in enumerate(test_loader):
x_gpu = x.to(device)
with torch.no_grad() :
prediction = model(x_gpu)
prediction = np.asarray(prediction.cpu())
for b_id in range(prediction.shape[0]):
for p_id in range(len(positions_name)) :
p_name = positions_name[p_id]
location = prediction[b_id, p_id]
row_id = lookup_table.loc[(
(lookup_table['FeatureName'] == p_name)&
(lookup_table['ImageId'] == img_id[b_id].item())
)]['RowId'].to_numpy()
if row_id.size == 1:
                    submission.loc[int(row_id[0]) - 1, 'Location'] = location  # .loc avoids chained-assignment issues
return submission | Facial Keypoints Detection |
10,870,991 | test = test.drop(['Pred'], axis=1)
test['Season'] = test['ID'].apply(lambda x: int(x.split('_')[0]))
test['WTeamID'] = test['ID'].apply(lambda x: int(x.split('_')[1]))
test['LTeamID'] = test['ID'].apply(lambda x: int(x.split('_')[2]))
test.head()<merge> | test_submissions = test_dataset_prediction(
    test_loader, '../input/facial-keypoints-detection/SampleSubmission.csv',
    '../input/facial-keypoints-detection/IdLookupTable.csv',
train_dataset.positions_name, best_model)
test_submissions.to_csv('./submission.csv', index=False ) | Facial Keypoints Detection |
4,893,231 | <merge><EOS> | class Data_Clean(object):
def __init__(self, trainPath, testPath):
self.train = pd.read_csv(trainPath)
self.test = pd.read_csv(testPath)
self.data_assemble = []
self.column = self.train.columns.drop('Image')
def seperate_data(self):
for column_name in self.train.columns.drop('Image'):
data = self.train[[column_name, 'Image']].dropna()
self.data_assemble.append([data[column_name], data['Image']])
self.test = self.test['Image']
def reshape_image(self):
for data in self.data_assemble:
data[1] = data[1].apply(lambda im: np.fromstring(im, sep=' ', dtype=float))
values = data[1].values / 255.0
values = np.vstack(values ).reshape(-1, 96, 96, 1)
data[1] = values
self.test = self.test.apply(lambda img: np.fromstring(img, sep=' ', dtype=float))
self.test = self.test.values / 255.0
self.test = np.vstack(self.test ).reshape(-1, 96, 96, 1)
def run(self):
self.seperate_data()
self.reshape_image()
return self.data_assemble, self.test, self.column
class CNN(object):
def __init__(self, train, test, columns_list):
self.train = train
self.test = test
self.columns_list = columns_list
def define_CNN(self):
model = Sequential()
model.add(Convolution2D(filters=32, kernel_size=(3, 3), padding='same', use_bias=False, input_shape=(96, 96, 1)))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(Convolution2D(filters=32, kernel_size=(3, 3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Convolution2D(filters=64, kernel_size=(3, 3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(Convolution2D(filters=64, kernel_size=(3, 3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Convolution2D(filters=96, kernel_size=(3, 3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(Convolution2D(filters=96, kernel_size=(3, 3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Convolution2D(filters=128, kernel_size=(3, 3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(Convolution2D(filters=128, kernel_size=(3, 3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Convolution2D(filters=256, kernel_size=(3, 3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(Convolution2D(filters=256, kernel_size=(3, 3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Convolution2D(filters=512, kernel_size=(3, 3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(Convolution2D(filters=512, kernel_size=(3, 3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(1))
self.model = model
def Adam(self, epochs, batchSize, xtrain, xvalidation, ytrain, yvalidation):
self.model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mae'])
self.model.fit(xtrain, ytrain, batch_size=batchSize, epochs=epochs, validation_data=(xvalidation, yvalidation),
verbose=0)
def show_a_result(self, ypred, ytest):
plt.imshow(ytest.reshape(96, 96))
ypred = copy.deepcopy(ypred)
xy = np.split(ypred, 30)
predx, predy = [], []
for i in range(0, 30, 2):
predx.append(xy[i])
predy.append(xy[i+1])
plt.plot(predx, predy, 'o', color='red')
plt.show()
def make_output(self, ypred):
pred = ypred
        dataPath = '../input/IdLookupTable.csv'
lookid_data = pd.read_csv(dataPath)
lookid_list = list(lookid_data['FeatureName'])
imageID = list(lookid_data['ImageId'] - 1)
rowid = lookid_data['RowId']
rowid = list(rowid)
feature = []
for f in lookid_list:
feature.append(f)
preded = []
for x, y in zip(imageID, feature):
preded.append(pred[y].loc[x])
rowid = pd.Series(rowid, name='RowId')
loc = pd.Series(preded, name='Location')
submission = pd.concat([rowid, loc], axis=1)
submission.to_csv('Utkarsh.csv', index=False)
def run(self):
ypred = pd.DataFrame(index = [i for i in range(1783)] ,columns=self.columns_list)
for index, data in enumerate(self.train):
label = data[0]
columns_name = self.columns_list[index]
train = data[1]
xtrain, xvalidation, ytrain, yvalidation = train_test_split(train, label, test_size=0.1, random_state=9)
self.define_CNN()
print(columns_name, ' training started:')
self.Adam(epochs=100, batchSize=128, xtrain=xtrain, xvalidation=xvalidation, ytrain=ytrain, yvalidation=yvalidation)
ypred[columns_name] = self.model.predict(self.test)
self.make_output(ypred)
self.show_a_result(ypred.loc[159], self.test[159])
if __name__ == '__main__':
    trainPath = '../input/training/training.csv'
    testPath = '../input/test/test.csv'
data_clean = Data_Clean(trainPath, testPath)
train, test, columns_list = data_clean.run()
cnn = CNN(train, test, columns_list)
cnn.run()
| Facial Keypoints Detection |
11,193,217 | <SOS> metric: RMSE Kaggle data source: facial-keypoints-detection<merge> | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import albumentations as A
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Input, Dense, Flatten
from tensorflow.keras.layers import Convolution2D, MaxPool2D, Dropout, BatchNormalization, Dropout, LeakyReLU
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications import ResNet50
from tensorflow.python.keras.utils.data_utils import Sequence | Facial Keypoints Detection |
11,193,217 | cols_to_use = gameCities.columns.difference(test.columns ).tolist() + ["Season", "WTeamID", "LTeamID"]
test = test.merge(gameCities[cols_to_use].drop_duplicates(subset=["Season", "WTeamID", "LTeamID"]),
how="left", on=["Season", "WTeamID", "LTeamID"])
del gameCities
gc.collect()
test.head()
cols_to_use = data_dict["WSeasons"].columns.difference(test.columns ).tolist() + ["Season"]
test = test.merge(data_dict["WSeasons"][cols_to_use].drop_duplicates(subset=["Season"]),
how="left", on=["Season"])
test.head()
cols_to_use = data_dict["WTeams"].columns.difference(test.columns ).tolist()
test = test.merge(data_dict["WTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]),
how="left", left_on=["WTeamID"], right_on=["TeamID"])
test.drop(['TeamID'], axis=1, inplace=True)
test = test.merge(data_dict["WTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]),
how="left", left_on=["LTeamID"], right_on=["TeamID"], suffixes=('_W', '_L'))
test.drop(['TeamID'], axis=1, inplace=True)
test.head()
cols_to_use = data_dict['WNCAATourneySeeds'].columns.difference(test.columns ).tolist() + ['Season']
test = test.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season","TeamID"]),
how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'])
test.drop(['TeamID'], axis=1, inplace=True)
test = test.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season","TeamID"]),
how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], suffixes=('_W', '_L'))
test.drop(['TeamID'], axis=1, inplace=True)
print(test.shape)
test.head()<drop_column> | SEED = 1
np.random.seed(SEED)
tf.random.set_seed(SEED)
random.seed(SEED)
if SEED is not None:
os.environ['PYTHONHASHSEED'] = str(SEED ) | Facial Keypoints Detection |
11,193,217 | not_exist_in_test = [c for c in train.columns.values.tolist() if c not in test.columns.values.tolist() ]
print(not_exist_in_test)
train = train.drop(not_exist_in_test, axis=1)
train.head()<groupby> | !mkdir data/
!unzip -q /kaggle/input/facial-keypoints-detection/training.zip -d data/train
!unzip -q /kaggle/input/facial-keypoints-detection/test.zip -d data/test | Facial Keypoints Detection |
11,193,217 | team_win_score = regularSeason.groupby(['Season', 'WTeamID'] ).agg({'WScore':['sum', 'count', 'var']} ).reset_index()
team_win_score.columns = [' '.join(col ).strip() for col in team_win_score.columns.values]
team_loss_score = regularSeason.groupby(['Season', 'LTeamID'] ).agg({'LScore':['sum', 'count', 'var']} ).reset_index()
team_loss_score.columns = [' '.join(col ).strip() for col in team_loss_score.columns.values]
del regularSeason
gc.collect()<merge> | def format_dataset(dataframe):
X = np.array([ format_image(x)for x in dataframe['Image'] ])
if len(dataframe.columns)> 2:
y = dataframe.drop('Image', axis=1 ).values
return X, y
return X
def format_image(img_row):
img = img_row.split(' ')
img = np.array(img, dtype=np.float32)
img = img.reshape(( 96,96,1))
img = img / 255.
return img
def format_keypoints(keypoint):
return(keypoint - 48.) / 48.
def unformat_keypoints(keypoint):
return keypoint*48 + 48
def show_sample(img, keypoints, axis=None, color='b'):
if axis is None:
fig, axis = plt.subplots()
axis.scatter(keypoints[0::2], keypoints[1::2], s=10, c=color)
axis.imshow(img.squeeze() , cmap='gray')
def show_random_samples(X, y, n_rows=2, n_cols=5):
fig = plt.figure(figsize=(2*n_cols, 2*n_rows), dpi=100)
for i, idx in enumerate(np.random.randint(0, len(y), n_rows*n_cols)) :
axis = fig.add_subplot(n_rows, n_cols, i+1, xticks=[], yticks=[])
show_sample(X[idx], y[idx], axis=axis)
axis.set_title(f'Sample | Facial Keypoints Detection |
11,193,217 | train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID'])
train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID'])
train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID'])
train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID'])
train.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True)
train.head()<merge> | train_dir = 'data/train/training.csv'
test_dir = 'data/test/test.csv'
train_data = pd.read_csv(train_dir)
test_data = pd.read_csv(test_dir ) | Facial Keypoints Detection |
11,193,217 | test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID'])
test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID'])
test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID'])
test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID'])
test.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True)
test.head()<feature_engineering> | print(f'Train sample: {len(train_data)}')
print('Percentage of missing values:')
train_data.isna().mean().round(4)* 100 | Facial Keypoints Detection |
11,193,217 | def preprocess(df):
df['x_score'] = df['WScore sum_x'] + df['LScore sum_y']
df['y_score'] = df['WScore sum_y'] + df['LScore sum_x']
df['x_count'] = df['WScore count_x'] + df['LScore count_y']
df['y_count'] = df['WScore count_y'] + df['WScore count_x']
df['x_var'] = df['WScore var_x'] + df['LScore count_y']
df['y_var'] = df['WScore var_y'] + df['WScore var_x']
return df
train = preprocess(train)
test = preprocess(test )<drop_column> | X_train, y_train = format_dataset(train_data)
X_test = format_dataset(test_data)
print(X_train.shape, y_train.shape, X_test.shape ) | Facial Keypoints Detection |
11,193,217 | train_win = train.copy()
train_los = train.copy()
train_win = train_win[['Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L',
'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']]
train_los = train_los[['Seed_L', 'Seed_W', 'TeamName_L', 'TeamName_W',
'y_score', 'x_score', 'x_count', 'y_count', 'x_var', 'y_var']]
train_win.columns = ['Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2',
'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
train_los.columns = ['Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2',
'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
test = test[['ID', 'Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L',
'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']]
test.columns = ['ID', 'Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2',
'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']<feature_engineering> | show_random_samples(X_train, y_train ) | Facial Keypoints Detection |
11,193,217 | def feature_engineering(df):
df['Seed_diff'] = df['Seed_1'] - df['Seed_2']
df['Score_diff'] = df['Score_1'] - df['Score_2']
df['Count_diff'] = df['Count_1'] - df['Count_2']
df['Var_diff'] = df['Var_1'] - df['Var_2']
df['Mean_score1'] = df['Score_1'] / df['Count_1']
df['Mean_score2'] = df['Score_2'] / df['Count_2']
df['Mean_score_diff'] = df['Mean_score1'] - df['Mean_score2']
df['FanoFactor_1'] = df['Var_1'] / df['Mean_score1']
df['FanoFactor_2'] = df['Var_2'] / df['Mean_score2']
return df
train_win = feature_engineering(train_win)
train_los = feature_engineering(train_los)
test = feature_engineering(test )<concatenate> | def create_small_dense_network() :
model = Sequential()
model.add(Input(shape=(96, 96, 1)))
model.add(Flatten())
model.add(Dense(units=100, activation='relu'))
model.add(Dense(units=30, activation=None))
return model | Facial Keypoints Detection |
11,193,217 | data = pd.concat(( train_win, train_los)).reset_index(drop=True)
print(data.shape)
data.head()<categorify> | model1 = create_small_dense_network()
es = EarlyStopping(monitor='val_loss', patience=10)
mc = ModelCheckpoint('best_model1.h5', monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=True)
model1.compile(optimizer='adam', loss='mean_squared_error', metrics=['mae'])
hist1 = model1.fit(X_train, y_train, epochs=500, batch_size=256, verbose=0, validation_split=0.2, callbacks=[es, mc] ) | Facial Keypoints Detection |
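(Note: the next cells call show_random_preds, which never appears in this dump. A minimal sketch, assuming it overlays a model's predicted keypoints on randomly chosen test images via the show_sample helper defined earlier, might be:)

# Hypothetical helper, not part of the original kernel: plot predicted keypoints
# on random test images, mirroring show_random_samples above.
import numpy as np
import matplotlib.pyplot as plt

def show_random_preds(model, X, n_rows=2, n_cols=5):
    preds = model.predict(X)  # (n_samples, 30) pixel coordinates
    fig = plt.figure(figsize=(2 * n_cols, 2 * n_rows), dpi=100)
    for i, idx in enumerate(np.random.randint(0, len(X), n_rows * n_cols)):
        axis = fig.add_subplot(n_rows, n_cols, i + 1, xticks=[], yticks=[])
        show_sample(X[idx], preds[idx], axis=axis, color='r')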
11,193,217 | categoricals = ["TeamName_1", "TeamName_2"]
for c in categoricals:
le = LabelEncoder()
data[c] = data[c].fillna("NaN")
data[c] = le.fit_transform(data[c])
test[c] = le.transform(test[c])
data.head()<drop_column> | model1.load_weights('best_model1.h5')
show_random_preds(model1, X_test ) | Facial Keypoints Detection |
11,193,217 | target = 'result'
features = data.columns.values.tolist()
features.remove(target )<train_on_grid> | def create_convnet(n_outputs=30):
model = Sequential()
model.add(Convolution2D(32,(5,5), padding='same', input_shape=(96,96,1)))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(Convolution2D(32,(5,5), padding='same'))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Convolution2D(64,(3,3), padding='same'))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(Convolution2D(64,(3,3), padding='same'))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Convolution2D(96,(3,3), padding='same'))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(Convolution2D(96,(3,3), padding='same'))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Convolution2D(128,(3,3),padding='same'))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(Convolution2D(128,(3,3),padding='same'))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Convolution2D(256,(3,3),padding='same'))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(Convolution2D(256,(3,3),padding='same'))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Convolution2D(512,(3,3), padding='same'))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(Convolution2D(512,(3,3), padding='same'))
model.add(LeakyReLU(alpha=0.1))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(n_outputs))
return model | Facial Keypoints Detection |
11,193,217 | nn = NeuralNetworkModel(data, test, target, features, categoricals=categoricals, n_splits=10,
cv_method="StratifiedKFold", group=None, task="classification", scaler="MinMax", verbose=True )<train_model> | model2 = create_convnet()
es = EarlyStopping(monitor='val_loss', patience=10)
mc = ModelCheckpoint('best_model2.h5', monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=True)
model2.compile(optimizer=Adam() , loss='mean_squared_error', metrics=['mae'])
hist2 = model2.fit(X_train, y_train, epochs=50, batch_size=128, verbose=0, validation_split=0.10, callbacks=[es, mc] ) | Facial Keypoints Detection |
11,193,217 | lgbm = LgbModel(data, test, target, features, categoricals=categoricals, n_splits=10,
cv_method="StratifiedKFold", group=None, task="classification", scaler=None, verbose=True )<create_dataframe> | model2.load_weights('best_model2.h5')
show_random_preds(model2, X_test ) | Facial Keypoints Detection |
11,193,217 | catb = CatbModel(data, test, target, features, categoricals=categoricals, n_splits=10,
cv_method="StratifiedKFold", group=None, task="classification", scaler=None, verbose=True )<load_from_csv> | class DataLoader(Sequence):
def __init__(self, X, y, batch_size, augmentations=None, as_rgb=False):
self.X, self.y = X, y
self.batch_size = batch_size
self.augment = augmentations
self.shuffle = True
self.as_rgb = as_rgb
self.on_epoch_end()
def __len__(self):
return int(np.ceil(len(self.X)/ float(self.batch_size)))
def __getitem__(self, idx):
indexes = self.indexes[idx*self.batch_size:(idx+1)*self.batch_size]
batch_X = self.X[indexes,...]
batch_y = self.y[indexes, :]
if self.as_rgb:
batch_X = np.tile(batch_X, reps=(1,1,1,3))
if self.augment is not None:
keypoints = np.array([ tuple(zip(point[::2], point[1::2])) for point in batch_y ])
transformed = [ self.augment(image=x, keypoints=y)for x,y in zip(batch_X, keypoints)]
batch_X = np.stack([ z['image'] for z in transformed ], axis=0)
batch_y = np.stack([ np.array(z['keypoints'] ).flatten(order='C')for z in transformed ], axis=0)
return batch_X, batch_y
def on_epoch_end(self):
self.indexes = np.arange(len(self.X))
if self.shuffle == True:
np.random.shuffle(self.indexes ) | Facial Keypoints Detection |
11,193,217 | submission_df = pd.read_csv('.. /input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv')
submission_df['Pred'] = 0.7 * lgbm.y_pred + 0.2 * catb.y_pred + 0.1 * nn.y_pred
submission_df<save_to_csv> | X_train2, X_valid, y_train2, y_valid = train_test_split(X_train, y_train, test_size=0.10, shuffle=True)
transform = A.Compose([
A.ShiftScaleRotate(rotate_limit=30, p=0.5),
A.RandomBrightnessContrast(p=0.5),
A.GaussianBlur(p=0.5),
A.GaussNoise(var_limit=(1e-5, 1e-3), p=0.5),
], keypoint_params=A.KeypointParams(format='xy', remove_invisible=False))
train_loader = DataLoader(X_train2, y_train2, batch_size=128, augmentations=transform)
print(X_train2.shape, y_train2.shape)
print(X_valid.shape, y_valid.shape)
x_batch, y_batch = train_loader[1]
show_random_samples(x_batch.squeeze() , y_batch ) | Facial Keypoints Detection |
11,193,217 | submission_df.to_csv('submission.csv', index=False )<set_options> | es = EarlyStopping(monitor='val_loss', patience=20)
mc = ModelCheckpoint('best_model3.h5', monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=True)
model3 = create_convnet()
model3.compile(optimizer=Adam() , loss='mean_squared_error', metrics=['mae'])
hist3 = model3.fit(train_loader, steps_per_epoch=len(train_loader),
validation_data=(X_valid, y_valid),
epochs=500, verbose=0, callbacks=[es, mc] ) | Facial Keypoints Detection |
11,193,217 | pd.set_option('max_columns', None)
plt.style.use('fivethirtyeight')
%matplotlib inline
py.init_notebook_mode(connected=True)
warnings.filterwarnings('ignore')
print("Libraries imported!" )<train_model> | model3.load_weights('best_model3.h5')
show_random_preds(model3, X_test ) | Facial Keypoints Detection |
11,193,217 | class BaseModel(object):
def __init__(self, train_df, test_df, target, features, categoricals=[],
n_splits=3, cv_method="KFold", group=None, task="regression",
parameter_tuning=False, scaler=None, verbose=True):
self.train_df = train_df
self.test_df = test_df
self.target = target
self.features = features
self.n_splits = n_splits
self.categoricals = categoricals
self.cv_method = cv_method
self.group = group
self.task = task
self.parameter_tuning = parameter_tuning
self.scaler = scaler
self.cv = self.get_cv()
self.verbose = verbose
self.params = self.get_params()
self.y_pred, self.score, self.model, self.oof, self.y_val, self.fi_df = self.fit()
def train_model(self, train_set, val_set):
raise NotImplementedError
def get_params(self):
raise NotImplementedError
def convert_dataset(self, x_train, y_train, x_val, y_val):
raise NotImplementedError
def convert_x(self, x):
return x
def calc_metric(self, y_true, y_pred):
if self.task == "classification":
return log_loss(y_true, y_pred)
elif self.task == "regression":
return np.sqrt(mean_squared_error(y_true, y_pred))
def get_cv(self):
if self.cv_method == "KFold":
cv = KFold(n_splits=self.n_splits, shuffle=True, random_state=42)
return cv.split(self.train_df)
elif self.cv_method == "StratifiedKFold":
cv = StratifiedKFold(n_splits=self.n_splits, shuffle=True, random_state=42)
return cv.split(self.train_df, self.train_df[self.target])
elif self.cv_method == "TimeSeriesSplit":
cv = TimeSeriesSplit(max_train_size=None, n_splits=self.n_splits)
return cv.split(self.train_df)
elif self.cv_method == "GroupKFold":
            cv = GroupKFold(n_splits=self.n_splits)  # GroupKFold does not accept shuffle/random_state
return cv.split(self.train_df, self.train_df[self.target], self.group)
elif self.cv_method == "StratifiedGroupKFold":
cv = StratifiedGroupKFold(n_splits=self.n_splits, shuffle=True, random_state=42)
return cv.split(self.train_df, self.train_df[self.target], self.group)
def fit(self):
oof_pred = np.zeros(( self.train_df.shape[0],))
y_vals = np.zeros(( self.train_df.shape[0],))
y_pred = np.zeros(( self.test_df.shape[0],))
if self.group is not None:
if self.group in self.features:
self.features.remove(self.group)
if self.group in self.categoricals:
self.categoricals.remove(self.group)
fi = np.zeros(( self.n_splits, len(self.features)))
if self.scaler is not None:
numerical_features = [f for f in self.features if f not in self.categoricals]
self.train_df[numerical_features] = self.train_df[numerical_features].fillna(self.train_df[numerical_features].median())
self.test_df[numerical_features] = self.test_df[numerical_features].fillna(self.test_df[numerical_features].median())
self.train_df[self.categoricals] = self.train_df[self.categoricals].fillna(self.train_df[self.categoricals].mode().iloc[0])
self.test_df[self.categoricals] = self.test_df[self.categoricals].fillna(self.test_df[self.categoricals].mode().iloc[0])
if self.scaler == "MinMax":
scaler = MinMaxScaler()
elif self.scaler == "Standard":
scaler = StandardScaler()
df = pd.concat([self.train_df[numerical_features], self.test_df[numerical_features]], ignore_index=True)
scaler.fit(df[numerical_features])
x_test = self.test_df.copy()
x_test[numerical_features] = scaler.transform(x_test[numerical_features])
x_test = [np.absolute(x_test[i])for i in self.categoricals] + [x_test[numerical_features]]
else:
x_test = self.test_df[self.features]
for fold,(train_idx, val_idx)in enumerate(self.cv):
x_train, x_val = self.train_df.loc[train_idx, self.features], self.train_df.loc[val_idx, self.features]
y_train, y_val = self.train_df.loc[train_idx, self.target], self.train_df.loc[val_idx, self.target]
if self.scaler is not None:
x_train[numerical_features] = scaler.transform(x_train[numerical_features])
x_val[numerical_features] = scaler.transform(x_val[numerical_features])
x_train = [np.absolute(x_train[i])for i in self.categoricals] + [x_train[numerical_features]]
x_val = [np.absolute(x_val[i])for i in self.categoricals] + [x_val[numerical_features]]
train_set, val_set = self.convert_dataset(x_train, y_train, x_val, y_val)
model, importance = self.train_model(train_set, val_set)
fi[fold, :] = importance
conv_x_val = self.convert_x(x_val)
y_vals[val_idx] = y_val
oof_pred[val_idx] = model.predict(conv_x_val ).reshape(oof_pred[val_idx].shape)
x_test = self.convert_x(x_test)
y_pred += model.predict(x_test ).reshape(y_pred.shape)/ self.n_splits
print('Partial score of fold {} is: {}'.format(fold, self.calc_metric(y_val, oof_pred[val_idx])))
fi_df = pd.DataFrame()
for n in np.arange(self.n_splits):
tmp = pd.DataFrame()
tmp["features"] = self.features
tmp["importance"] = fi[n, :]
tmp["fold"] = n
fi_df = pd.concat([fi_df, tmp], ignore_index=True)
gfi = fi_df[["features", "importance"]].groupby(["features"] ).mean().reset_index()
fi_df = fi_df.merge(gfi, on="features", how="left", suffixes=('', '_mean'))
loss_score = self.calc_metric(self.train_df[self.target], oof_pred)
if self.verbose:
print('Our oof loss score is: ', loss_score)
return y_pred, loss_score, model, oof_pred, y_vals, fi_df
def plot_feature_importance(self, rank_range=[1, 50]):
fig, ax = plt.subplots(1, 1, figsize=(10, 20))
sorted_df = self.fi_df.sort_values(by = "importance_mean", ascending=False ).reset_index().iloc[self.n_splits *(rank_range[0]-1): self.n_splits * rank_range[1]]
sns.barplot(data=sorted_df, x ="importance", y ="features", orient='h')
ax.set_xlabel("feature importance")
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
return sorted_df<train_model> | def create_specialist(n_outputs=30, weights=None, freeze=False):
model = create_convnet()
if weights is not None:
model.load_weights(weights)
if freeze:
for layers in model.layers[:-10]:
layers.trainable = False
if n_outputs != 30:
        model.pop()  # Sequential.pop() removes the last Dense(30); popping from model.layers does not rewire the model
model.add(Dense(n_outputs))
return model
def train_specialist(model, keypoints_range, model_name):
train_data = pd.read_csv(train_dir)
select_col_idx = list(range(*keypoints_range[model_name])) + [-1]
subdata = train_data.iloc[:, select_col_idx]
subdata = subdata.dropna()
X_train, y_train = format_dataset(subdata)
X_train2, X_valid, y_train2, y_valid = train_test_split(X_train, y_train, test_size=0.10, shuffle=True)
transform = A.Compose([A.ShiftScaleRotate(rotate_limit=30, p=0.5),
A.RandomBrightnessContrast(p=0.5),
A.GaussianBlur(p=0.5),
A.GaussNoise(var_limit=(1e-5, 1e-3), p=0.5)],
keypoint_params=A.KeypointParams(format='xy', remove_invisible=False))
train_loader = DataLoader(X_train2, y_train2, batch_size=128, augmentations=transform)
es = EarlyStopping(monitor='val_loss', patience=10)
mc = ModelCheckpoint(f'best_model_{model_name}.h5', monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=True)
model.compile(optimizer=Adam() , loss='mean_squared_error', metrics=['mae'])
hist = model.fit(train_loader, steps_per_epoch=len(train_loader),
validation_data=(X_valid, y_valid),
epochs=250, verbose=0, callbacks=[es, mc])
model.load_weights(f'best_model_{model_name}.h5')
return model, hist
class ConcatenateSpecialists:
def __init__(self, models):
self.models = models
def predict(self, X):
return np.hstack([ m.predict(X)for m in self.models ])
| Facial Keypoints Detection |
11,193,217 | class LgbModel(BaseModel):
def train_model(self, train_set, val_set):
verbosity = 100 if self.verbose else 0
model = lgb.train(self.params, train_set, num_boost_round = 5000, valid_sets=[train_set, val_set], verbose_eval=verbosity)
fi = model.feature_importance(importance_type="gain")
return model, fi
def convert_dataset(self, x_train, y_train, x_val, y_val):
train_set = lgb.Dataset(x_train, y_train, categorical_feature=self.categoricals)
val_set = lgb.Dataset(x_val, y_val, categorical_feature=self.categoricals)
return train_set, val_set
def get_params(self):
params = {
'num_leaves': 127,
'min_data_in_leaf': 50,
'max_depth': -1,
'learning_rate': 0.005,
"boosting_type": "gbdt",
"bagging_seed": 11,
"verbosity": -1,
'random_state': 42,
}
if self.task == "regression":
params["objective"] = "regression"
params["metric"] = "rmse"
elif self.task == "classification":
params["objective"] = "binary"
params["metric"] = "binary_logloss"
if self.parameter_tuning == True:
def objective(trial):
train_x, test_x, train_y, test_y = train_test_split(self.train_df[self.features],
self.train_df[self.target],
test_size=0.3, random_state=42)
dtrain = lgb.Dataset(train_x, train_y, categorical_feature=self.categoricals)
dtest = lgb.Dataset(test_x, test_y, categorical_feature=self.categoricals)
hyperparams = {'num_leaves': trial.suggest_int('num_leaves', 24, 1024),
'boosting_type': 'gbdt',
'objective': params["objective"],
'metric': params["metric"],
'max_depth': trial.suggest_int('max_depth', 4, 16),
'min_child_weight': trial.suggest_int('min_child_weight', 1, 20),
'feature_fraction': trial.suggest_uniform('feature_fraction', 0.4, 1.0),
'bagging_fraction': trial.suggest_uniform('bagging_fraction', 0.4, 1.0),
'bagging_freq': trial.suggest_int('bagging_freq', 1, 7),
'min_child_samples': trial.suggest_int('min_child_samples', 5, 100),
'lambda_l1': trial.suggest_loguniform('lambda_l1', 1e-8, 10.0),
'lambda_l2': trial.suggest_loguniform('lambda_l2', 1e-8, 10.0),
'early_stopping_rounds': 100
}
model = lgb.train(hyperparams, dtrain, valid_sets=dtest, verbose_eval=500)
pred = model.predict(test_x)
if self.task == "classification":
return log_loss(test_y, pred)
elif self.task == "regression":
return np.sqrt(mean_squared_error(test_y, pred))
study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=50)
print('Number of finished trials: {}'.format(len(study.trials)))
print('Best trial:')
trial = study.best_trial
print(' Value: {}'.format(trial.value))
print(' Params: ')
for key, value in trial.params.items() :
print(' {}: {}'.format(key, value))
params = trial.params
params["learning_rate"] = 0.001
plot_optimization_history(study)
return params<train_model> | specialist_keypoints = {'eyes_centers':(0,4), 'eyes_corners':(4,12), 'eyebrows':(12,20), 'nose':(20,22), 'mouth':(22,30)}
models = {}
for region, keypoint_ids in specialist_keypoints.items() :
print(f'Training model {region}...')
model = create_specialist(n_outputs=keypoint_ids[1]-keypoint_ids[0], weights='best_model3.h5', freeze=False)
models[region] = train_specialist(model, specialist_keypoints, region ) | Facial Keypoints Detection |
11,193,217 | class CatbModel(BaseModel):
def train_model(self, train_set, val_set):
verbosity = 100 if self.verbose else 0
if self.task == "regression":
model = CatBoostRegressor(**self.params)
elif self.task == "classification":
model = CatBoostClassifier(**self.params)
model.fit(train_set['X'], train_set['y'], eval_set=(val_set['X'], val_set['y']),
verbose=verbosity, cat_features=self.categoricals)
return model, model.get_feature_importance()
def convert_dataset(self, x_train, y_train, x_val, y_val):
train_set = {'X': x_train, 'y': y_train}
val_set = {'X': x_val, 'y': y_val}
return train_set, val_set
def get_params(self):
params = { 'task_type': "CPU",
'learning_rate': 0.01,
'iterations': 1000,
'random_seed': 42,
'use_best_model': True
}
if self.task == "regression":
params["loss_function"] = "RMSE"
elif self.task == "classification":
params["loss_function"] = "Logloss"
return params<normalization> | model = model3
layer_names = [layer.name for layer in model.layers]
intermediate_outputs = [layer.output for layer in model.layers]
visualization_model = Model(inputs = model.input, outputs = intermediate_outputs)
n = 10
x = X_test[0:2]
feature_maps = visualization_model.predict(x)
for layer_name, feature_map in zip(layer_names, feature_maps):
if len(feature_map.shape)== 4:
n_features = feature_map.shape[-1]
size = feature_map.shape[ 1]
display_grid = np.zeros(( size, size * n))
for i, idx in enumerate(np.random.randint(0, n_features, n)) :
x = feature_map[0, :, :, i]
x -= x.mean()
x /= x.std()
x *= 64
x += 128
x = np.clip(x, 0, 255 ).astype('uint8')
display_grid[:, i * size :(i + 1)* size] = x
scale = 3
plt.figure(figsize=(scale * n, scale))
plt.title(layer_name)
plt.grid(False)
plt.imshow(display_grid, aspect='auto', cmap='viridis' ) | Facial Keypoints Detection |
11,193,217 | class Mish(Layer):
def __init__(self, **kwargs):
super(Mish, self ).__init__(**kwargs)
def build(self, input_shape):
super(Mish, self ).build(input_shape)
def call(self, x):
return x * K.tanh(K.softplus(x))
def compute_output_shape(self, input_shape):
return input_shape
class LayerNormalization(keras.layers.Layer):
def __init__(self,
center=True,
scale=True,
epsilon=None,
gamma_initializer='ones',
beta_initializer='zeros',
gamma_regularizer=None,
beta_regularizer=None,
gamma_constraint=None,
beta_constraint=None,
**kwargs):
super(LayerNormalization, self ).__init__(**kwargs)
self.supports_masking = True
self.center = center
self.scale = scale
if epsilon is None:
epsilon = K.epsilon() * K.epsilon()
self.epsilon = epsilon
self.gamma_initializer = keras.initializers.get(gamma_initializer)
self.beta_initializer = keras.initializers.get(beta_initializer)
self.gamma_regularizer = keras.regularizers.get(gamma_regularizer)
self.beta_regularizer = keras.regularizers.get(beta_regularizer)
self.gamma_constraint = keras.constraints.get(gamma_constraint)
self.beta_constraint = keras.constraints.get(beta_constraint)
self.gamma, self.beta = None, None
def get_config(self):
config = {
'center': self.center,
'scale': self.scale,
'epsilon': self.epsilon,
'gamma_initializer': keras.initializers.serialize(self.gamma_initializer),
'beta_initializer': keras.initializers.serialize(self.beta_initializer),
'gamma_regularizer': keras.regularizers.serialize(self.gamma_regularizer),
'beta_regularizer': keras.regularizers.serialize(self.beta_regularizer),
'gamma_constraint': keras.constraints.serialize(self.gamma_constraint),
'beta_constraint': keras.constraints.serialize(self.beta_constraint),
}
base_config = super(LayerNormalization, self ).get_config()
return dict(list(base_config.items())+ list(config.items()))
def compute_output_shape(self, input_shape):
return input_shape
def compute_mask(self, inputs, input_mask=None):
return input_mask
def build(self, input_shape):
shape = input_shape[-1:]
if self.scale:
self.gamma = self.add_weight(
shape=shape,
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
name='gamma',
)
if self.center:
self.beta = self.add_weight(
shape=shape,
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
name='beta',
)
super(LayerNormalization, self ).build(input_shape)
def call(self, inputs, training=None):
mean = K.mean(inputs, axis=-1, keepdims=True)
variance = K.mean(K.square(inputs - mean), axis=-1, keepdims=True)
std = K.sqrt(variance + self.epsilon)
outputs =(inputs - mean)/ std
if self.scale:
outputs *= self.gamma
if self.center:
outputs += self.beta
return outputs<train_model> | def create_submission_file(model, X_test, save_name='model_preds'):
predictions = model.predict(X_test)
print(f'Shape: {predictions.shape} - Min: {predictions.min() } - Max: {predictions.max() }')
predictions[predictions > 96] = 96
lookid_data = pd.read_csv('/kaggle/input/facial-keypoints-detection/IdLookupTable.csv')
image_id = list(lookid_data['ImageId']-1)
landmark_names = list(lookid_data['FeatureName'])
landmark_ids = [ landmark_names.index(f)for f in landmark_names ]
expected_preds = [ predictions[x,y] for x,y in zip(image_id, landmark_ids)]
rowid = pd.Series(lookid_data['RowId'], name = 'RowId')
loc = pd.Series(expected_preds, name = 'Location')
submission = pd.concat([rowid, loc], axis = 1)
submission.to_csv(f'{save_name}.csv',index = False)
print(f'Successfully created {save_name}.csv !' ) | Facial Keypoints Detection |
11,193,217 | <load_from_csv><EOS> | create_submission_file(model1, X_test, 'model_preds1')
create_submission_file(model2, X_test, 'model_preds2')
create_submission_file(model3, X_test, 'model_preds3')
create_submission_file(model4, X_test, 'model_preds4' ) | Facial Keypoints Detection |
10,697,594 | <SOS> metric: RMSE Kaggle data source: facial-keypoints-detection<feature_engineering> | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import clear_output
from tqdm import tqdm
import os
import keras | Facial Keypoints Detection |
10,697,594 | data_dict['WNCAATourneySeeds']['Seed'] = data_dict['WNCAATourneySeeds']['Seed'].apply(lambda x: int(x[1:3]))
data_dict[fname].head()<load_from_csv> | Train_Dir = '/kaggle/input/facial-keypoints-detection/training.zip'
Test_Dir = '/kaggle/input/facial-keypoints-detection/test.zip'
lookid_dir = '/kaggle/input/facial-keypoints-detection/IdLookupTable.csv'
train_data = pd.read_csv(Train_Dir)
test_data = pd.read_csv(Test_Dir)
lookid_data = pd.read_csv(lookid_dir)
os.listdir('.. /input' ) | Facial Keypoints Detection |
10,697,594 | test = pd.read_csv('.. /input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv')
print(test.shape)
test.head()<feature_engineering> | train_data.isnull().sum() | Facial Keypoints Detection |
10,697,594 | test = test.drop(['Pred'], axis=1)
test['Season'] = test['ID'].apply(lambda x: int(x.split('_')[0]))
test['WTeamID'] = test['ID'].apply(lambda x: int(x.split('_')[1]))
test['LTeamID'] = test['ID'].apply(lambda x: int(x.split('_')[2]))
test.head()<merge> | feature_8 = ['left_eye_center_x','left_eye_center_y','right_eye_center_x','right_eye_center_y','nose_tip_x','nose_tip_y','mouth_center_bottom_lip_x','mouth_center_bottom_lip_y','Image']
train_8 = train_data[feature_8].dropna().reset_index()
train_30=train_data.dropna().reset_index() | Facial Keypoints Detection |
10,697,594 | gameCities = pd.merge(data_dict['WGameCities'], data_dict['Cities'], how='left', on=['CityID'])
cols_to_use = gameCities.columns.difference(train.columns ).tolist() + ["Season", "WTeamID", "LTeamID"]
train = train.merge(gameCities[cols_to_use], how="left", on=["Season", "WTeamID", "LTeamID"])
train.head()
cols_to_use = data_dict["WSeasons"].columns.difference(train.columns ).tolist() + ["Season"]
train = train.merge(data_dict["WSeasons"][cols_to_use], how="left", on=["Season"])
train.head()
cols_to_use = data_dict["WTeams"].columns.difference(train.columns ).tolist()
train = train.merge(data_dict["WTeams"][cols_to_use], how="left", left_on=["WTeamID"], right_on=["TeamID"])
train.drop(['TeamID'], axis=1, inplace=True)
train = train.merge(data_dict["WTeams"][cols_to_use], how="left", left_on=["LTeamID"], right_on=["TeamID"], suffixes=('_W', '_L'))
train.drop(['TeamID'], axis=1, inplace=True)
print(train.shape)
train.head()<merge> | def str_to_int(train1):
images = train1.Image.values
del train1['Image']
del train1['index']
y=train1.values
x = []
for i in tqdm(images):
q=[int(j)for j in i.split() ]
x.append(q)
x=np.array(x)
x=x.reshape(-1,96,96,1)
x=x/255.0
return([x,y] ) | Facial Keypoints Detection |
10,697,594 | cols_to_use = data_dict['WNCAATourneySeeds'].columns.difference(train.columns ).tolist() + ['Season']
train = train.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season","TeamID"]),
how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'])
train.drop(['TeamID'], axis=1, inplace=True)
train = train.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season","TeamID"]),
how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], suffixes=('_W', '_L'))
train.drop(['TeamID'], axis=1, inplace=True)
print(train.shape)
train.head()<merge> | from keras.models import Sequential
from keras.layers import Activation,Convolution2D,MaxPooling2D,BatchNormalization,Flatten,Dense,Dropout,Conv2D,MaxPool2D,ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU | Facial Keypoints Detection |
10,697,594 | cols_to_use = gameCities.columns.difference(test.columns ).tolist() + ["Season", "WTeamID", "LTeamID"]
test = test.merge(gameCities[cols_to_use].drop_duplicates(subset=["Season", "WTeamID", "LTeamID"]),
how="left", on=["Season", "WTeamID", "LTeamID"])
del gameCities
gc.collect()
test.head()
cols_to_use = data_dict["WSeasons"].columns.difference(test.columns ).tolist() + ["Season"]
test = test.merge(data_dict["WSeasons"][cols_to_use].drop_duplicates(subset=["Season"]),
how="left", on=["Season"])
test.head()
cols_to_use = data_dict["WTeams"].columns.difference(test.columns ).tolist()
test = test.merge(data_dict["WTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]),
how="left", left_on=["WTeamID"], right_on=["TeamID"])
test.drop(['TeamID'], axis=1, inplace=True)
test = test.merge(data_dict["WTeams"][cols_to_use].drop_duplicates(subset=["TeamID"]),
how="left", left_on=["LTeamID"], right_on=["TeamID"], suffixes=('_W', '_L'))
test.drop(['TeamID'], axis=1, inplace=True)
test.head()
cols_to_use = data_dict['WNCAATourneySeeds'].columns.difference(test.columns ).tolist() + ['Season']
test = test.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season","TeamID"]),
how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'])
test.drop(['TeamID'], axis=1, inplace=True)
test = test.merge(data_dict['WNCAATourneySeeds'][cols_to_use].drop_duplicates(subset=["Season","TeamID"]),
how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], suffixes=('_W', '_L'))
test.drop(['TeamID'], axis=1, inplace=True)
print(test.shape)
test.head()<drop_column> | def create_model(out=8):
model = Sequential()
model.add(Convolution2D(32,(3,3), padding='same', use_bias=False, input_shape=(96,96,1)))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(Convolution2D(32,(3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Convolution2D(64,(3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(Convolution2D(64,(3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Convolution2D(96,(3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(Convolution2D(96,(3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Convolution2D(128,(3,3),padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(Convolution2D(128,(3,3),padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Convolution2D(256,(3,3),padding='same',use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(Convolution2D(256,(3,3),padding='same',use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Convolution2D(512,(3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(Convolution2D(512,(3,3), padding='same', use_bias=False))
model.add(LeakyReLU(alpha = 0.1))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(512,activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(out))
model.summary()
model.compile(optimizer = 'adam' , loss = "mean_squared_error", metrics=["mae"])
return model | Facial Keypoints Detection |
10,697,594 | not_exist_in_test = [c for c in train.columns.values.tolist() if c not in test.columns.values.tolist() ]
print(not_exist_in_test)
train = train.drop(not_exist_in_test, axis=1)
train.head()<groupby> | model_30 = create_model(out=30)
model_8 = create_model(out=8 ) | Facial Keypoints Detection |
10,697,594 | team_win_score = regularSeason.groupby(['Season', 'WTeamID'] ).agg({'WScore':['sum', 'count', 'var']} ).reset_index()
team_win_score.columns = [' '.join(col ).strip() for col in team_win_score.columns.values]
team_loss_score = regularSeason.groupby(['Season', 'LTeamID'] ).agg({'LScore':['sum', 'count', 'var']} ).reset_index()
team_loss_score.columns = [' '.join(col ).strip() for col in team_loss_score.columns.values]
del regularSeason
gc.collect()<merge> | LR_callback = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', patience=4, verbose=10, factor=.4, min_lr=.00001)
EarlyStop_callback = keras.callbacks.EarlyStopping(patience=15, restore_best_weights=True ) | Facial Keypoints Detection |
10,697,594 | train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID'])
train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID'])
train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID'])
train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID'])
train.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True)
train.head()<merge> | history = model_8.fit(X_train_8,Y_train_8,validation_split=.1,batch_size=64,epochs=50,callbacks=[LR_callback,EarlyStop_callback] ) | Facial Keypoints Detection |
10,697,594 | test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID'])
test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID'])
test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID'])
test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID'])
test.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True)
test.head()<feature_engineering> | history = model_30.fit(X_train_30,Y_train_30,validation_split=.1,batch_size=64,epochs=50,callbacks=[LR_callback,EarlyStop_callback])
| Facial Keypoints Detection |
10,697,594 | def preprocess(df):
df['x_score'] = df['WScore sum_x'] + df['LScore sum_y']
df['y_score'] = df['WScore sum_y'] + df['LScore sum_x']
df['x_count'] = df['WScore count_x'] + df['LScore count_y']
df['y_count'] = df['WScore count_y'] + df['WScore count_x']
df['x_var'] = df['WScore var_x'] + df['LScore var_x']
df['y_var'] = df['WScore var_y'] + df['LScore var_y']
return df
train = preprocess(train)
test = preprocess(test )<drop_column> | y_hat_30 = model_30.predict(x)
y_hat_8 = model_8.predict(x)
print('Predictions shape', y_hat_30.shape)
print('Predictions shape', y_hat_8.shape ) | Facial Keypoints Detection |
10,697,594 | train_win = train.copy()
train_los = train.copy()
train_win = train_win[['Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L',
'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']]
train_los = train_los[['Seed_L', 'Seed_W', 'TeamName_L', 'TeamName_W',
'y_score', 'x_score', 'x_count', 'y_count', 'x_var', 'y_var']]
train_win.columns = ['Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2',
'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
train_los.columns = ['Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2',
'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
test = test[['ID', 'Seed_W', 'Seed_L', 'TeamName_W', 'TeamName_L',
'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']]
test.columns = ['ID', 'Seed_1', 'Seed_2', 'TeamName_1', 'TeamName_2',
'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']<feature_engineering> | feature_8_index=[0,1,2,3,20,21,28,29]
for i in range(8):
y_hat_30[:,feature_8_index[i]] = y_hat_8[:,i] | Facial Keypoints Detection |
10,697,594 | def feature_engineering(df):
df['Seed_diff'] = df['Seed_1'] - df['Seed_2']
df['Score_diff'] = df['Score_1'] - df['Score_2']
df['Count_diff'] = df['Count_1'] - df['Count_2']
df['Var_diff'] = df['Var_1'] - df['Var_2']
df['Mean_score1'] = df['Score_1'] / df['Count_1']
df['Mean_score2'] = df['Score_2'] / df['Count_2']
df['Mean_score_diff'] = df['Mean_score1'] - df['Mean_score2']
df['FanoFactor_1'] = df['Var_1'] / df['Mean_score1']
df['FanoFactor_2'] = df['Var_2'] / df['Mean_score2']
return df
train_win = feature_engineering(train_win)
train_los = feature_engineering(train_los)
test = feature_engineering(test )<concatenate> | required_features = list(lookid_data['FeatureName'])
imageID = list(lookid_data['ImageId']-1)
feature_to_num = dict(zip(required_features[0:30], range(30)) ) | Facial Keypoints Detection |
10,697,594 | data = pd.concat(( train_win, train_los)).reset_index(drop=True)
print(data.shape)
data.head()<categorify> | feature_ind = []
for f in required_features:
feature_ind.append(feature_to_num[f] ) | Facial Keypoints Detection |
10,697,594 | categoricals = ["TeamName_1", "TeamName_2"]
for c in categoricals:
le = LabelEncoder()
data[c] = data[c].fillna("NaN")
data[c] = le.fit_transform(data[c])
test[c] = le.transform(test[c])
data.head()<drop_column> | required_pred = []
for x,y in zip(imageID,feature_ind):
required_pred.append(y_hat_30[x, y] ) | Facial Keypoints Detection |
10,697,594 | target = 'result'
features = data.columns.values.tolist()
features.remove(target )<train_on_grid> | rowid = lookid_data['RowId']
loc30 = pd.Series(required_pred,name = 'Location')
submission = pd.concat([rowid,loc30],axis = 1)
submission.to_csv('Predictions.csv',index = False ) | Facial Keypoints Detection |
4,220,305 | nn = NeuralNetworkModel(data, test, target, features, categoricals=categoricals, n_splits=10,
cv_method="StratifiedKFold", group=None, task="classification", scaler="MinMax", verbose=True )<train_model> | Train_Dir = '.. /input/training/training.csv'
Test_Dir = '.. /input/test/test.csv'
lookid_dir = '.. /input/IdLookupTable.csv'
train_data = pd.read_csv(Train_Dir)
test_data = pd.read_csv(Test_Dir)
lookid_data = pd.read_csv(lookid_dir)
os.listdir('.. /input')
train_data.fillna(method = 'ffill',inplace = True)
imag = []
for i in range(0,7049):
img = train_data['Image'][i].split(' ')
img = ['0' if x == '' else x for x in img]
imag.append(img)
image_list = np.array(imag,dtype = 'float')
X_train = image_list.reshape(-1,96,96)
training = train_data.drop('Image',axis = 1)
y_train = []
for i in range(0,7049):
y = training.iloc[i,:]
y_train.append(y)
Y_train = np.array(y_train,dtype = 'float' ) | Facial Keypoints Detection |
4,220,305 | lgbm = LgbModel(data, test, target, features, categoricals=categoricals, n_splits=10,
cv_method="StratifiedKFold", group=None, task="classification", scaler=None, verbose=True )<create_dataframe> | Facial Keypoints Detection |
|
4,220,305 | catb = CatbModel(data, test, target, features, categoricals=categoricals, n_splits=10,
cv_method="StratifiedKFold", group=None, task="classification", scaler=None, verbose=True )<load_from_csv> | def log_test(net,test_iter,ctx):
test_l = 0
for X,y in test_iter:
X, y = X.as_in_context(ctx), y.as_in_context(ctx)
l = loss(net(X),y)
test_l = l.mean().asscalar()
return test_l
def train(net, X_train, Y_train,num_epochs, trainer, batch_size, ctx):
train_files,val_files = train_test_split(range(len(X_train)) ,test_size=0.1,shuffle=True)
train_data,train_label = nd.array(X_train[train_files]),nd.array(Y_train[train_files])
val_data,val_label = nd.array(X_train[val_files]),nd.array(Y_train[val_files])
train_iter = gdata.DataLoader(gdata.ArrayDataset(train_data,train_label),batch_size,shuffle=True)
test_iter = gdata.DataLoader(gdata.ArrayDataset(val_data,val_label),batch_size)
train_ls,test_ls=[],[]
print('train on:',ctx)
for epoch in range(num_epochs):
start = time.time()
train_sum_l =0
for X,y in train_iter:
X, y = X.expand_dims(axis=1 ).as_in_context(ctx), y.as_in_context(ctx)
with autograd.record() :
l = loss(net(X),y)
l.backward()
trainer.step(batch_size)
train_loss = loss(net(train_data.expand_dims(axis=1 ).as_in_context(ctx)) ,train_label.as_in_context(ctx)).mean().asscalar()
train_ls.append(train_loss)
print()
if val_files:
test_ls.append(loss(net(val_data.expand_dims(axis=1 ).as_in_context(ctx)) ,val_label.as_in_context(ctx)).mean().asscalar())
else:
test_ls.append(train_loss)
print('epoch %d, train loss %.4f, test loss %.3f, time %.1f sec' %(epoch + 1,train_ls[-1], test_ls[-1], time.time() - start))
return train_ls, test_ls | Facial Keypoints Detection |
4,220,305 | submission_df = pd.read_csv('.. /input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/WSampleSubmissionStage1_2020.csv')
submission_df['Pred'] = 0.7 * lgbm.y_pred + 0.2 * catb.y_pred + 0.1 * nn.y_pred
submission_df<save_to_csv> | ctx=mx.gpu(0)
resnet = vision.resnet34_v1(pretrained=False, ctx=mx.cpu() ) | Facial Keypoints Detection |
4,220,305 | submission_df.to_csv('submission.csv', index=False )<import_modules> | fine_net = resnet.features
fine_net.add(nn.Dense(30))
| Facial Keypoints Detection |
4,220,305 | import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import random
import os<set_options> | lr, num_epochs = 0.001, 500
batch_size=128
loss = gloss.L2Loss()
net = fine_net
net.initialize(force_reinit=True,ctx=ctx, init=init.Xavier())
net.collect_params().reset_ctx(ctx)
trainer = gluon.Trainer(net.collect_params() ,'adam',{'learning_rate':lr} ) | Facial Keypoints Detection |
4,220,305 | print("TF version: ", tf.__version__)
if tf.__version__ < "2.0.0":
tf.enable_eager_execution()
print("Eager execution enabled.")
else:
print("Eager execution enabled by default.")
if tf.test.gpu_device_name() :
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
else:
print("Please install GPU version of TF" )<define_variables> | train_ls,test_ls = train(net, X_train, Y_train,num_epochs, trainer, batch_size, ctx ) | Facial Keypoints Detection |
4,220,305 | def setseed(seed = 0):
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
SEED = 0
setseed(SEED)
setseed()<load_from_csv> | Y_test = net(nd.array(X_test ).expand_dims(axis=1 ).as_in_context(ctx))
Y_test[0] | Facial Keypoints Detection |
4,220,305 | <import_modules><EOS> | lookid_list = list(lookid_data['FeatureName'])
imageID = list(lookid_data['ImageId']-1)
pre_list = list(Y_test.asnumpy())
rowid = lookid_data['RowId']
rowid=list(rowid)
feature = []
for f in list(lookid_data['FeatureName']):
feature.append(lookid_list.index(f))
preded = []
for x,y in zip(imageID,feature):
preded.append(pre_list[x][y] if pre_list[x][y]<96 else 96)
rowid = pd.Series(rowid,name = 'RowId')
loc = pd.Series(preded,name = 'Location')
submission = pd.concat([rowid,loc],axis = 1)
submission.to_csv('resnet18_submission_0616.csv',index = False ) | Facial Keypoints Detection |
8,233,083 | <SOS> metric: RMSE Kaggle data source: facial-keypoints-detection<string_transform> | %matplotlib inline | Facial Keypoints Detection |
8,233,083 | def clean_sentences(df):
reviews = []
for sent in tqdm(df['Phrase']):
review_text = re.sub("[^a-zA-Z]"," ", sent)
words = word_tokenize(review_text.lower())
lemmatizer = WordNetLemmatizer()
lemma_words = [lemmatizer.lemmatize(i)for i in words]
reviews.append(lemma_words)
return(reviews )<string_transform> | train_zip_path = '/kaggle/input/facial-keypoints-detection/training.zip'
test_zip_path = '/kaggle/input/facial-keypoints-detection/test.zip'
Id_table_path = '/kaggle/input/facial-keypoints-detection/IdLookupTable.csv'
sample_sub_path = '/kaggle/input/facial-keypoints-detection/SampleSubmission.csv'
extracted_files_path = '/kaggle/working' | Facial Keypoints Detection |
8,233,083 | %%time
train_sentences = clean_sentences(train)
test_sentences = clean_sentences(test)
print(len(train_sentences))
print(len(test_sentences))<string_transform> | with zipfile.ZipFile(train_zip_path, 'r')as zip_ref:
zip_ref.extractall(extracted_files_path)
with zipfile.ZipFile(test_zip_path, 'r')as zip_ref:
zip_ref.extractall(extracted_files_path ) | Facial Keypoints Detection |
8,233,083 | print(train['Phrase'][0])
print(' '.join(train_sentences[0]))<categorify> | train_csv = pd.read_csv(extracted_files_path + '/training.csv')
test_csv = pd.read_csv(extracted_files_path + '/test.csv')
looktable_csv = pd.read_csv(Id_table_path ) | Facial Keypoints Detection |
8,233,083 | target = train.Sentiment.values
y_target = to_categorical(target)
num_classes = y_target.shape[1]<import_modules> | feature_8 = ['left_eye_center_x', 'left_eye_center_y',
'right_eye_center_x','right_eye_center_y',
'nose_tip_x', 'nose_tip_y',
'mouth_center_bottom_lip_x',
'mouth_center_bottom_lip_y', 'Image']
train_8_csv = train_csv[feature_8].dropna().reset_index()
train_30_csv = train_csv.dropna().reset_index() | Facial Keypoints Detection |
8,233,083 | from sklearn.model_selection import train_test_split<import_modules> | def str_to_array(pd_series):
data_size = len(pd_series)
X = np.zeros(shape=(data_size,96,96,1), dtype=np.float32)
for i in tqdm(range(data_size)) :
img_str = pd_series[i]
img_list = img_str.split(' ')
img_array = np.array(img_list, dtype=np.float32)
img_array = img_array.reshape(96,96,1)
X[i] = img_array
return X | Facial Keypoints Detection |
8,233,083 | from sklearn.model_selection import train_test_split<split> | X_train_30 = str_to_array(train_30_csv['Image'])
labels_30 = train_30_csv.drop(['index','Image'], axis=1)
y_train_30 = labels_30.to_numpy(dtype=np.float32)
print('X_train with 30 feature shape: ', X_train_30.shape)
print('y_train with 30 feature shape: ', y_train_30.shape ) | Facial Keypoints Detection |
8,233,083 | X_train, X_val, y_train, y_val = train_test_split(train_sentences,
y_target,
test_size = 0.2,
stratify = y_target )<count_unique_values> | X_train_8 = str_to_array(train_8_csv['Image'])
labels_8 = train_8_csv.drop(['index','Image'], axis=1)
y_train_8 = labels_8.to_numpy(dtype=np.float32)
print('X_train with 8 feature shape: ', X_train_8.shape)
print('y_train with 8 feature shape: ', y_train_8.shape ) | Facial Keypoints Detection |
8,233,083 | unique_words = set()
len_max = 0
for sent in tqdm(X_train):
unique_words.update(sent)
if(len_max < len(sent)) :
len_max = len(sent)
print('Number of vocabs: ', len(list(unique_words)))
print('Max length of text is: ', len_max )<define_variables> | def create_model(output_n = 30):
model = keras.models.Sequential([
keras.layers.InputLayer(input_shape=[96,96,1]),
keras.layers.Conv2D(filters=32, kernel_size=[5,5], padding='same', use_bias=False),
keras.layers.LeakyReLU(alpha =.1),
keras.layers.BatchNormalization() ,
keras.layers.Conv2D(filters=32, kernel_size=[5,5], padding='same', use_bias=False),
keras.layers.LeakyReLU(alpha =.1),
keras.layers.BatchNormalization() ,
keras.layers.MaxPool2D(pool_size=[2,2]),
keras.layers.Conv2D(filters=64, kernel_size=[3,3], padding='same', use_bias=False),
keras.layers.LeakyReLU(alpha =.1),
keras.layers.BatchNormalization() ,
keras.layers.Conv2D(filters=64, kernel_size=[3,3], padding='same', use_bias=False),
keras.layers.LeakyReLU(alpha =.1),
keras.layers.BatchNormalization() ,
keras.layers.MaxPool2D(pool_size=[2,2]),
keras.layers.Conv2D(filters=128, kernel_size=[3,3], padding='same', use_bias=False),
keras.layers.LeakyReLU(alpha =.1),
keras.layers.BatchNormalization() ,
keras.layers.Conv2D(filters=128, kernel_size=[3,3], padding='same', use_bias=False),
keras.layers.LeakyReLU(alpha =.1),
keras.layers.BatchNormalization() ,
keras.layers.MaxPool2D(pool_size=[2,2]),
keras.layers.Conv2D(filters=256, kernel_size=[3,3], padding='same', use_bias=False),
keras.layers.LeakyReLU(alpha =.1),
keras.layers.BatchNormalization() ,
keras.layers.Conv2D(filters=256, kernel_size=[3,3], padding='same', use_bias=False),
keras.layers.LeakyReLU(alpha =.1),
keras.layers.BatchNormalization() ,
keras.layers.MaxPool2D(pool_size=[2,2]),
keras.layers.Conv2D(filters=512, kernel_size=[3,3], padding='same', use_bias=False),
keras.layers.LeakyReLU(alpha =.1),
keras.layers.BatchNormalization() ,
keras.layers.Conv2D(filters=512, kernel_size=[3,3], padding='same', use_bias=False),
keras.layers.LeakyReLU(alpha =.1),
keras.layers.BatchNormalization() ,
keras.layers.Flatten() ,
keras.layers.Dense(units=512, activation='relu'),
keras.layers.Dropout (.1),
keras.layers.Dense(units=output_n),
])
model.compile(optimizer = 'adam' , loss = "mean_squared_error", metrics=["mae"])
return model | Facial Keypoints Detection |
8,233,083 | vocab_size = len(list(unique_words))
embedding_dim = 300
max_length = len_max
trunc_type='post'
padding_type='post'
oov_tok = "<OOV>"<string_transform> | model_30 = create_model(output_n=30)
model_8 = create_model(output_n=8 ) | Facial Keypoints Detection |
8,233,083 | %%time
tokenizer = Tokenizer(num_words = vocab_size,
                      filters = '!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',  # assumed Keras default filter string
oov_token = oov_tok,
char_level = False)
tokenizer.fit_on_texts(list(X_train))
X_train = tokenizer.texts_to_sequences(X_train)
X_train = pad_sequences(X_train,
maxlen = max_length,
padding = padding_type,
truncating = trunc_type)
X_val = tokenizer.texts_to_sequences(X_val)
X_val = pad_sequences(X_val,
maxlen = max_length,
padding = padding_type,
truncating = trunc_type)
X_test = tokenizer.texts_to_sequences(test_sentences)
X_test = pad_sequences(X_test,
maxlen = max_length,
padding = padding_type,
truncating = trunc_type )<choose_model_class> | LR_callback = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', patience=4, verbose=10, factor=.4, min_lr=.00001)
EarlyStop_callback = keras.callbacks.EarlyStopping(patience=15, restore_best_weights=True ) | Facial Keypoints Detection |
8,233,083 | model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length = max_length),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(128, dropout = 0.8, recurrent_dropout=0.8, return_sequences=True)) ,
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, dropout = 0.5, recurrent_dropout=0.5, return_sequences=False)) ,
tf.keras.layers.Dense(128, activation = 'relu'),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(num_classes, activation = 'softmax')
])
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
model.summary()<choose_model_class> | history = model_30.fit(X_train_30, y_train_30, validation_split=.1, batch_size=64, epochs=100, callbacks=[LR_callback,EarlyStop_callback] ) | Facial Keypoints Detection |
8,233,083 | early_stopping = EarlyStopping(min_delta = 0.001,
mode = 'max',
monitor = 'val_acc',
patience = 2)
callback = [early_stopping]<train_model> | history = model_8.fit(X_train_8, y_train_8, validation_split=.1, batch_size=64, epochs=100, callbacks=[LR_callback,EarlyStop_callback] ) | Facial Keypoints Detection |
8,233,083 | %%time
num_epochs = 5
history = model.fit(X_train,
y_train,
validation_data =(X_val, y_val),
epochs = num_epochs,
batch_size = 256,
verbose = 1,
callbacks = callback )<define_variables> | X_test = str_to_array(test_csv['Image'])
print('X_test shape: ', X_test.shape ) | Facial Keypoints Detection |
8,233,083 | test_id = test['PhraseId']<predict_on_test> | y_hat_30 = model_30.predict(X_test)
y_hat_8 = model_8.predict(X_test)
print('Predictions shape', y_hat_30.shape)
print('Predictions shape', y_hat_8.shape ) | Facial Keypoints Detection |
8,233,083 | %%time
y_pred = np.argmax(model.predict(X_test), axis=-1 )<save_to_csv> | feature_8_ind = [0, 1, 2, 3, 20, 21, 28, 29]
for i in range(8):
print('Copy "{}" feature column from y_hat_8 --> y_hat_30'.format(feature_8[i]))
y_hat_30[:,feature_8_ind[i]] = y_hat_8[:,i] | Facial Keypoints Detection |
8,233,083 | submission = pd.DataFrame({'PhraseId': test_id, 'Sentiment': y_pred})
submission.to_csv('movie_review_prediction_5EP_MLBDLSTM_submission.csv', index=False)
submission.head()<set_options> | required_features = list(looktable_csv['FeatureName'])
imageID = list(looktable_csv['ImageId']-1)
feature_to_num = dict(zip(required_features[0:30], range(30)) ) | Facial Keypoints Detection |
8,233,083 | py.init_notebook_mode(connected=True)
pd.set_option('max_columns', 50 )<define_variables> | feature_ind = []
for f in required_features:
feature_ind.append(feature_to_num[f] ) | Facial Keypoints Detection |
8,233,083 | datadir = Path('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament')
stage1dir = datadir/'MDataFiles_Stage1'<load_from_csv> | required_pred = []
for x,y in zip(imageID,feature_ind):
required_pred.append(y_hat_30[x, y] ) | Facial Keypoints Detection |
8,233,083 | <load_from_csv><EOS> | rowid = looktable_csv['RowId']
loc30 = pd.Series(required_pred,name = 'Location')
submission = pd.concat([rowid,loc30],axis = 1)
submission.to_csv('Merged_Predictions.csv',index = False ) | Facial Keypoints Detection |
15,039,251 | <SOS> metric: RMSE Kaggle data source: facial-keypoints-detection<load_from_csv> | mpl.style.use('seaborn-darkgrid' ) | Facial Keypoints Detection |
15,039,251 | tourney_seeds_df = pd.read_csv(stage1dir/'MNCAATourneySeeds.csv')
tourney_seeds_df<load_from_csv> | df_train = pd.read_csv('.. /input/facial-keypoints-detection/training.zip')
df_train.head(1 ) | Facial Keypoints Detection |
15,039,251 | regular_season_results_df = pd.read_csv(stage1dir/'MRegularSeasonCompactResults.csv')
tournament_results_df = pd.read_csv(stage1dir/'MNCAATourneyCompactResults.csv' )<load_from_csv> | feature_col, target_cols = 'Image', list(df_train.drop('Image', axis=1 ).columns ) | Facial Keypoints Detection |
15,039,251 | sample_submission = pd.read_csv(datadir/'MSampleSubmissionStage1_2020.csv')
sample_submission<load_from_csv> | IMG_WIDTH = 96
IMG_HEIGHT = 96
IMG_CHANNELS = 1
images = np.array(df_train[feature_col].str.split().tolist() , dtype='float' ).reshape(-1, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)
labels = df_train[target_cols].to_numpy() | Facial Keypoints Detection |
15,039,251 | regular_season_detailed_results_df = pd.read_csv(stage1dir/'MRegularSeasonDetailedResults.csv')
tournament_detailed_results_df = pd.read_csv(stage1dir/'MNCAATourneyDetailedResults.csv' )<load_from_csv> | normalized_images = images / 255 . | Facial Keypoints Detection |
15,039,251 | cities_df = pd.read_csv(stage1dir/'Cities.csv')
mgame_cities_df = pd.read_csv(stage1dir/'MGameCities.csv' )<load_from_csv> | train_images, valid_images, train_labels, valid_labels = train_test_split(normalized_images, labels, test_size=0.1, random_state=7 ) | Facial Keypoints Detection |
15,039,251 | massey_df = pd.read_csv(stage1dir/'MMasseyOrdinals.csv')
massey_df<load_from_csv> | Input,
Conv2D,
MaxPool2D,
Dense,
BatchNormalization,
ReLU,
Dropout,
Flatten,
Dropout,
Concatenate,
GlobalAvgPool2D
)
def inception_module(inputs, f1, f2):
x1 = Conv2D(f1, 3, padding='same' )(inputs)
x1 = BatchNormalization()(x1)
x1 = ReLU()(x1)
x2 = Conv2D(f2, 5, padding='same' )(inputs)
x2 = BatchNormalization()(x2)
x2 = ReLU()(x2)
return Concatenate()([x1, x2] ) | Facial Keypoints Detection |
15,039,251 | event2015_df = pd.read_csv(datadir/'MEvents2015.csv')
<load_from_csv> | def build_model() :
inputs = Input(( 96, 96, 1))
x = inception_module(inputs, 64, 32)
x = MaxPool2D()(x)
x = inception_module(x, 64, 32)
x = MaxPool2D()(x)
x = inception_module(x, 128, 32)
x = MaxPool2D()(x)
x = inception_module(x, 128, 32)
x = MaxPool2D()(x)
x = inception_module(x, 256, 64)
x = MaxPool2D()(x)
x = Flatten()(x)
x = Dense(1024, kernel_regularizer=L2(l2=0.05))(x)
x = BatchNormalization()(x)
x = ReLU()(x)
x = Dense(512, kernel_regularizer=L2(l2=0.02))(x)
x = BatchNormalization()(x)
x = ReLU()(x)
x = Dense(128, kernel_regularizer=L2(l2=0.01))(x)
x = BatchNormalization()(x)
x = ReLU()(x)
x = Dense(30 )(x)
model = tf.keras.Model(inputs, outputs=x)
return model
model = build_model()
model.summary() | Facial Keypoints Detection |