kernel_id      int64     24.2k to 23.3M
prompt         string    lengths 8 to 1.85M
completetion   string    lengths 1 to 182k
comp_name      string    lengths 5 to 57
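The column summary above can be checked with a short inspection snippet. This is a minimal sketch, assuming the rows have been exported to a CSV file (the file name kaggle_code_cells.csv is hypothetical) and that pandas is available; it reproduces the dtype and value/length ranges listed in the table, keeping the dataset's own spelling of the completetion column.

import pandas as pd

# Hypothetical export of the dataset; substitute the real file or loader.
df = pd.read_csv("kaggle_code_cells.csv")

# kernel_id is an integer column; report its value range.
print("kernel_id", df["kernel_id"].dtype, df["kernel_id"].min(), df["kernel_id"].max())

# The remaining columns are strings; report their length ranges.
for col in ["prompt", "completetion", "comp_name"]:
    lengths = df[col].astype(str).str.len()
    print(col, "lengths", lengths.min(), "to", lengths.max())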
10,966,859
test = pd.read_csv('../input/titanic/test.csv') X_test = test.copy() X_test['Fare'] = np.log1p(X_test.Fare) get_titles(X_test )<drop_column>
xnli_processed = xnli.map(preprocess_xnli, batched=True) xnli_encoded = xnli_processed.map(convert_to_features, batched=True, remove_columns=['premise', 'hypothesis']) xnli_encoded.set_format("torch", columns=['attention_mask', 'input_ids', 'token_type_ids', 'label'] )
Contradictory, My Dear Watson
10,966,859
X_test_imp = pd.DataFrame(missing_transformer.transform(X_test)) X_test_imp.columns = numerical + categorical X_test.drop(numerical + categorical, axis = 1, inplace = True) X_test = pd.concat([X_test, X_test_imp], axis = 1 )<categorify>
snli_encoded = snli.map(convert_to_features, batched=True, remove_columns=['premise', 'hypothesis']) snli_encoded.set_format("torch", columns=['attention_mask', 'input_ids', 'token_type_ids', 'label'] )
Contradictory, My Dear Watson
10,966,859
X_test_enc = pd.DataFrame(ordinal_encoder.fit_transform(X_test[categorical])) X_test_enc.columns = categorical X_test.drop(categorical, axis = 1, inplace = True) X_test = pd.concat([X_test, X_test_enc], axis = 1) X_test = pd.get_dummies(X_test, columns = categorical, dtype = np.int64, drop_first = True )<save_to_csv>
train_dataset = nlp.load_dataset('csv', data_files=['../input/contradictory-my-dear-watson/train.csv'])['train'] print(train_dataset.num_rows) print(train_dataset.column_names) drop_columns = train_dataset.column_names[:-1] encoded_train_dataset = train_dataset.map(convert_to_features, batched=True, remove_columns=drop_columns) encoded_train_dataset.set_format("torch", columns=['attention_mask', 'input_ids', 'token_type_ids', 'label']) print(encoded_train_dataset.num_rows) print(encoded_train_dataset.column_names )
Contradictory, My Dear Watson
10,966,859
final_clf.fit(X[features], y) final_preds = final_clf.predict(X_test[features]) final_submission = pd.DataFrame({'PassengerId' : X_test.PassengerId, 'Survived' : final_preds}) final_submission.to_csv('may12_finalpreds.csv', index = False )<load_from_csv>
train_dataset = nlp.concatenate_datasets([mnli_encoded, xnli_encoded, snli_encoded, encoded_train_dataset ]) print(train_dataset.num_rows) print(train_dataset.column_names )
Contradictory, My Dear Watson
10,966,859
train_data_initial = pd.read_csv('../input/titanic/train.csv', index_col='PassengerId') test_data = pd.read_csv('../input/titanic/test.csv', index_col='PassengerId') train_data_initial.head()<drop_column>
train_dataset.cleanup_cache_files() del mnli, mnli_encoded del xnli, xnli_encoded, xnli_processed del snli, snli_encoded gc.collect()
Contradictory, My Dear Watson
10,966,859
train_data_initial.drop(columns = ['Name','Ticket','Cabin'], inplace=True )<create_dataframe>
class DatasetRetriever(Dataset): def __init__(self, dataset:nlp.arrow_dataset.Dataset): self.dataset = dataset self.ids = self.dataset['input_ids'] self.mask = self.dataset['attention_mask'] self.type_ids = self.dataset['token_type_ids'] self.targets = self.dataset["label"] def __len__(self): return self.dataset.num_rows def __getitem__(self, index): ids = self.ids[index] mask = self.mask[index] type_ids = self.type_ids[index] targets = self.targets[index] return { 'ids':torch.tensor(ids), 'mask':torch.tensor(mask), 'type_ids':torch.tensor(type_ids), 'targets':targets }
Contradictory, My Dear Watson
10,966,859
train_data = train_data_initial.copy()<count_values>
class XLMRoberta(nn.Module): def __init__(self, num_labels, multisample): super(XLMRoberta, self ).__init__() output_hidden_states = False self.num_labels = num_labels self.multisample= multisample self.roberta = XLMRobertaModel.from_pretrained("xlm-roberta-large", output_hidden_states=output_hidden_states, num_labels=1) self.layer_norm = nn.LayerNorm(1024*2) self.dropout = nn.Dropout(p=0.2) self.high_dropout = nn.Dropout(p=0.5) self.classifier = nn.Linear(1024*2, self.num_labels) def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None): outputs = self.roberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds) average_pool = torch.mean(outputs[0], 1) max_pool, _ = torch.max(outputs[0], 1) concatenate_layer = torch.cat(( average_pool, max_pool), 1) normalization = self.layer_norm(concatenate_layer) if self.multisample: logits = torch.mean( torch.stack( [self.classifier(self.dropout(normalization)) for _ in range(5)], dim=0, ), dim=0, ) else: logits = self.dropout(normalization) logits = self.classifier(logits) outputs = logits return outputs
Contradictory, My Dear Watson
10,966,859
train_data['Age'].fillna(value = train_data['Age'].mean() , inplace = True) train_data['Embarked'].fillna(value = train_data['Embarked'].value_counts().idxmax() , inplace = True )<define_variables>
class AverageMeter(object): def __init__(self, name, fmt=':f'): self.name = name self.fmt = fmt self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def __str__(self): fmtstr = '{name} {val' + self.fmt + '}({avg' + self.fmt + '})' return fmtstr.format(**self.__dict__) class ProgressMeter(object): def __init__(self, num_batches, meters, prefix=""): self.batch_fmtstr = self._get_batch_fmtstr(num_batches) self.meters = meters self.prefix = prefix def display(self, batch): entries = [self.prefix + self.batch_fmtstr.format(batch)] entries += [str(meter)for meter in self.meters] print('\t'.join(entries)) def _get_batch_fmtstr(self, num_batches): num_digits = len(str(num_batches // 1)) fmt = '{:' + str(num_digits)+ 'd}' return '[' + fmt + '/' + fmt.format(num_batches)+ ']' def accuracy(output, target, topk=(1,)) : with torch.no_grad() : maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target.view(1, -1 ).expand_as(pred)) res = [] for k in topk: correct_k = correct[:k].view(-1 ).float().sum(0, keepdim=True) res.append(correct_k.mul_(100.0 / batch_size)) return res
Contradictory, My Dear Watson
10,966,859
s =(train_data.dtypes == 'object') object_cols = list(s[s].index) print("Categorical variables:") print(object_cols )<categorify>
def get_model_optimizer(model): def is_backbone(name): return "roberta" in name optimizer_grouped_parameters = [ {'params': [param for name, param in model.named_parameters() if is_backbone(name)], 'lr': LR}, {'params': [param for name, param in model.named_parameters() if not is_backbone(name)], 'lr': 1e-3} ] optimizer = AdamW( optimizer_grouped_parameters, lr=LR, weight_decay=1e-2 ) return optimizer
Contradictory, My Dear Watson
10,966,859
label_train_data = train_data.copy() label_encoder = LabelEncoder() for col in object_cols: label_train_data[col] = label_encoder.fit_transform(train_data[col] )<prepare_x_and_y>
def loss_fn(outputs, targets): return nn.CrossEntropyLoss()(outputs, targets )
Contradictory, My Dear Watson
10,966,859
target_col = 'Survived' y = label_train_data[target_col] X = label_train_data.drop(columns=[target_col]) X.head()<split>
def train_loop_fn(train_loader, model, optimizer, device, scheduler, epoch=None): batch_time = AverageMeter('Time', ':6.3f') data_time = AverageMeter('Data', ':6.3f') losses = AverageMeter('Loss', ':.4e') top1 = AverageMeter('Acc@1', ':6.2f') progress = ProgressMeter( len(train_loader), [batch_time, data_time, losses, top1], prefix="[xla:{}]Train: Epoch: [{}]".format(xm.get_ordinal() , epoch) ) model.train() end = time.time() for i, data in enumerate(train_loader): data_time.update(time.time() -end) ids, mask, type_ids, targets = data["input_ids"], data["attention_mask"], data['token_type_ids'], data["label"] ids = ids.to(device, dtype=torch.long) mask = mask.to(device, dtype=torch.long) type_ids = type_ids.to(device, dtype=torch.long) targets = targets.to(device, dtype=torch.float) optimizer.zero_grad() outputs = model( input_ids = ids, attention_mask = mask, token_type_ids = type_ids ) loss = loss_fn(outputs, targets) loss.backward() xm.optimizer_step(optimizer) loss = loss_fn(outputs, targets) acc1= accuracy(outputs, targets, topk=(1,)) losses.update(loss.item() , ids.size(0)) top1.update(acc1[0].item() , ids.size(0)) scheduler.step() batch_time.update(time.time() - end) end = time.time() if i % 50 == 0: progress.display(i) del loss del outputs del ids del mask del targets gc.collect()
Contradictory, My Dear Watson
10,966,859
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0 )<define_search_space>
def eval_loop_fn(validation_loader, model, device): model.eval() batch_time = AverageMeter('Time', ':6.3f') losses = AverageMeter('Loss', ':.4e') top1 = AverageMeter('Acc@1', ':6.2f') learning_rate = AverageMeter('LR',':2.8f') progress = ProgressMeter( len(validation_loader), [batch_time, losses, top1], prefix='[xla:{}]Validation: '.format(xm.get_ordinal())) with torch.no_grad() : end = time.time() for i, data in enumerate(validation_loader): ids, mask, type_ids, targets = data["input_ids"], data["attention_mask"], data['token_type_ids'], data["label"] ids = ids.to(device, dtype=torch.long) mask = mask.to(device, dtype=torch.long) type_ids = type_ids.to(device, dtype=torch.long) targets = targets.to(device, dtype=torch.float) outputs = model( input_ids = ids, attention_mask = mask, token_type_ids = type_ids ) loss = loss_fn(outputs, targets) acc1= accuracy(outputs, targets, topk=(1,)) losses.update(loss.item() , ids.size(0)) top1.update(acc1[0].item() , ids.size(0)) batch_time.update(time.time() - end) end = time.time() if i % 50 == 0: progress.display(i) del loss del outputs del ids del mask del targets gc.collect()
Contradictory, My Dear Watson
10,966,859
preds_dict = [] for n_estimators in range(100,1001,100): for max_depth in range(6, 70,10): for max_leaf_nodes in range(5, 500, 50): parameters = {'n_estimators': n_estimators, 'max_depth': max_depth, 'max_leaf_nodes': max_leaf_nodes } model = RandomForestClassifier(**parameters) model.fit(X_train, y_train) preds = model.predict(X_valid) prediction = {} prediction['n_estimators'] = n_estimators prediction['max_depth'] = max_depth prediction['max_leaf_nodes'] = max_leaf_nodes prediction['accuracy_score'] = accuracy_score(y_true=y_valid, y_pred=preds) preds_dict.append(prediction) print(preds_dict )<define_variables>
WRAPPED_MODEL = xmp.MpModelWrapper(XLMRoberta(num_labels=3, multisample=False)) dataset = train_dataset.train_test_split(test_size=0.1) train_dataset = dataset['train'] valid_dataset = dataset['test'] train_dataset.set_format("torch", columns=['attention_mask', 'input_ids', 'token_type_ids', 'label']) valid_dataset.set_format("torch", columns=['attention_mask', 'input_ids', 'token_type_ids', 'label'] )
Contradictory, My Dear Watson
10,966,859
count = 0 indexIs = 0 maxValue = preds_dict[0]['accuracy_score'] for i in preds_dict: if maxValue < i['accuracy_score']: print(count , ' :', i['accuracy_score']) maxValue = i['accuracy_score'] indexIs = count count = count + 1 print(count ,': Max Val is :',maxValue) print(preds_dict[indexIs] )<load_from_csv>
def _run() : xm.master_print('Starting Run...') train_sampler = DistributedSampler( train_dataset, num_replicas=xm.xrt_world_size() , rank=xm.get_ordinal() , shuffle=False ) train_data_loader = DataLoader( train_dataset, batch_size=TRAIN_BATCH_SIZE, sampler=train_sampler, drop_last=True, num_workers=0 ) xm.master_print('Train Loader Created.') valid_sampler = DistributedSampler( valid_dataset, num_replicas=xm.xrt_world_size() , rank=xm.get_ordinal() , shuffle=False ) valid_data_loader = DataLoader( valid_dataset, batch_size=VALID_BATCH_SIZE, sampler=valid_sampler, drop_last=True, num_workers=0 ) xm.master_print('Valid Loader Created.') num_train_steps = int(len(train_dataset)/ TRAIN_BATCH_SIZE / xm.xrt_world_size()) device = xm.xla_device() model = WRAPPED_MODEL.to(device) xm.master_print('Done Model Loading.') optimizer = get_model_optimizer(model) scheduler = get_cosine_schedule_with_warmup( optimizer, num_warmup_steps = 0, num_training_steps = num_train_steps * EPOCHS ) xm.master_print(f'Num Train Steps= {num_train_steps}, XRT World Size= {xm.xrt_world_size() }.') for epoch in range(EPOCHS): para_loader = pl.ParallelLoader(train_data_loader, [device]) xm.master_print('Parallel Loader Created.Training...') train_loop_fn(para_loader.per_device_loader(device), model, optimizer, device, scheduler, epoch ) xm.master_print("Finished training epoch {}".format(epoch)) para_loader = pl.ParallelLoader(valid_data_loader, [device]) xm.master_print('Parallel Loader Created.Validating...') eval_loop_fn(para_loader.per_device_loader(device), model, device ) if epoch == EPOCHS-1: xm.master_print('Saving Model.. ') xm.save(model.state_dict() , "model.bin") xm.master_print('Model Saved.') if METRICS_DEBUG: xm.master_print(met.metrics_report() , flush=True )
Contradictory, My Dear Watson
10,966,859
<drop_column><EOS>
def _mp_fn(rank, flags): _run() FLAGS={} xmp.spawn(_mp_fn, args=(FLAGS,), nprocs=8, start_method='fork' )
Contradictory, My Dear Watson
8,005,170
<SOS> metric: logloss Kaggle data source: google-cloud-ncaa-march-madness-2020-division-1-mens-tournament<define_variables>
warnings.filterwarnings("ignore")
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
prediction.index = range(0,len(prediction))<load_from_csv>
result = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneyCompactResults.csv') result = result.drop(columns=['WLoc', 'NumOT', 'DayNum']) result.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
sub_format = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-2/submission.csv' )<drop_column>
seeds = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneySeeds.csv') seeds.Seed = seeds.Seed.map(lambda string : int(string[1:3])) seeds.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
sub_format = sub_format['ForecastId']<concatenate>
team_name = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MTeamSpellings.csv', encoding='cp1252') team_name = team_name.drop_duplicates(subset=['TeamID'], keep='last' ).reset_index(drop=True) team_name.TeamNameSpelling = team_name.TeamNameSpelling.map(lambda string : string.upper()) team_name.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
final = pd.concat([sub_format,prediction],axis=1 )<save_to_csv>
Wname = team_name.rename(columns={'TeamNameSpelling':'Wteam_name', 'TeamID':'WTeamID'}) team_result = pd.merge(left=result, right=Wname, how='left', on=['WTeamID']) win_by_year = team_result.groupby(['Season', 'Wteam_name'])\ ['WScore'].count().reset_index() \ .rename(columns={'Wteam_name':'Team', 'WScore':'Win_matches'}) df = win_by_year[win_by_year.Win_matches == win_by_year.Win_matches.max() ].drop(columns=['Win_matches'] ).set_index('Season') df
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
final.to_csv('submission.csv',index=False )<set_options>
win_by_year = win_by_year.groupby(['Team', 'Season'] ).Win_matches.sum() \ .unstack(fill_value=0 ).cumsum(axis=1 ).sort_values(by=2019, ascending=False ).head(15) cum_win = pd.melt(win_by_year, value_vars=win_by_year.columns, value_name="Win_matches") cum_win['Team'] = list(win_by_year.index)*35 cum_win = cum_win[['Season', 'Team', 'Win_matches']]
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
pd.set_option('display.max.columns',3000 )<load_from_csv>
Wseeds = seeds.rename(columns={'TeamID':'WTeamID', 'Seed':'WSeed'}) Lseeds = seeds.rename(columns={'TeamID':'LTeamID', 'Seed':'LSeed'}) data = pd.merge(left=result, right=Wseeds, how='left', on=['Season', 'WTeamID']) data = pd.merge(left=data, right=Lseeds, on=['Season', 'LTeamID']) data.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
train=pd.read_csv('/kaggle/input/knit-hacks/train.csv') test=pd.read_csv('/kaggle/input/knit-hacks/test.csv' )<count_values>
scores = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MRegularSeasonCompactResults.csv') scores.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
train['Col2'].value_counts() /train.shape[0]<count_missing_values>
season_score = result_scores.groupby(['Season', 'TeamID'])['Score'].sum() season_score.sort_values(ascending=False ).head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
train.isnull().sum().any()<define_variables>
result_scores.sort_values(by='Score', ascending=False ).head(15 )
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
cat_col=[col for col in train.columns if train[col].dtype=='O']<drop_column>
team_name[team_name.TeamID.isin([1258, 1328])]
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
train=train.drop(cat_col,axis=1 )<create_dataframe>
data = pd.merge(data, season_score, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') data = data.rename(columns={'Score':'WScoreT'}) data = pd.merge(data, season_score, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') data = data.rename(columns={'Score':'LScoreT'}) data = data.drop(columns=['WScore', 'LScore']) data.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
test_new=test.copy(deep=True )<drop_column>
Wdata = data.drop(columns=['Season', ]) Wdata.rename(columns={'WSeed':'Seed1', 'LSeed':'Seed2', 'WScoreT':'ScoreT1', 'LScoreT':'ScoreT2', 'WTeamID':'TeamID_1', 'LTeamID': 'TeamID_2'}, inplace=True) Wdata.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
test_new=test_new.drop(cat_col,axis=1 )<correct_missing_values>
Ldata = data[['LTeamID', 'WTeamID', 'LSeed', 'WSeed', 'LScoreT', 'WScoreT']] Ldata.rename(columns={'LTeamID':'TeamID_1', 'WTeamID':'TeamID_2', 'LSeed':'Seed1', 'WSeed':'Seed2', 'LScoreT':'ScoreT1', 'WScoreT':'ScoreT2',}, inplace=True) Ldata.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
train=train.fillna(0 )<correct_missing_values>
Wdata['Seed_diff'] = Wdata['Seed1'] - Wdata['Seed2'] Wdata['ScoreT_diff'] = Wdata['ScoreT1'] - Wdata['ScoreT2'] Ldata['Seed_diff'] = Ldata['Seed1'] - Ldata['Seed2'] Ldata['ScoreT_diff'] = Ldata['ScoreT1'] - Ldata['ScoreT2']
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
test_new=test_new.fillna(0 )<install_modules>
Wdata['result'] = 1 Ldata['result'] = 0 train = pd.concat(( Wdata, Ldata)).reset_index(drop=True) train.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
! pip install -U sliced<create_dataframe>
test = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv') test['Season'] = test.ID.map(lambda string : int(string.split('_')[0])) test['TeamID_1'] = test.ID.map(lambda string : int(string.split('_')[1])) test['TeamID_2'] = test.ID.map(lambda string : int(string.split('_')[2])) test = test.drop(columns=['ID']) test.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
train_copy=train.copy(deep=True )<drop_column>
test = pd.merge(test, seeds, left_on=['Season', 'TeamID_1'], right_on=['Season', 'TeamID'], how='left') test.rename(columns={'Seed':'Seed1'}, inplace=True) test = test.drop('TeamID', axis=1) test = pd.merge(test, seeds, left_on=['Season', 'TeamID_2'], right_on=['Season', 'TeamID'], how='left') test.rename(columns={'Seed':'Seed2'}, inplace=True) test = test.drop('TeamID', axis=1) test = pd.merge(test, season_score, left_on=['Season', 'TeamID_1'], right_on=['Season', 'TeamID'], how='left') test.rename(columns={'Score':'ScoreT1'}, inplace=True) test = pd.merge(test, season_score, left_on=['Season', 'TeamID_2'], right_on=['Season', 'TeamID'], how='left') test.rename(columns={'Score':'ScoreT2'}, inplace=True) test['Seed_diff'] = test['Seed1'] - test['Seed2'] test['ScoreT_diff'] = test['ScoreT1'] - test['ScoreT2'] test = test.drop(columns=['Pred', 'Season']) test.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
train_copy=train_copy.drop('Col2',axis=1 )<normalization>
X_train = train.drop(columns=['result']) y_train = train.result X_test = test.copy() data_full = pd.concat([X_train, X_test]) data_full.shape
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
vt=VarianceThreshold(threshold=0) vt_x=vt.fit_transform(train_copy )<categorify>
OH_cols = ['TeamID_1', 'TeamID_2'] OH_full = pd.get_dummies( data_full[OH_cols], columns=OH_cols, drop_first=True, dummy_na=True, sparse=True, ).sparse.to_coo()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
vt_test=vt.transform(test_new )<choose_model_class>
retain_full = data_full.drop(columns=OH_cols) retain_full = retain_full/retain_full.max() retain_full.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
sir = SlicedInverseRegression(n_directions=2) <train_model>
encoded_full = scipy.sparse.hstack([OH_full, retain_full, retain_full**2] ).tocsr() print(encoded_full.shape) encoded_train = encoded_full[:len(X_train)] encoded_test = encoded_full[len(X_train):]
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
sir.fit(vt_x,train['Col2'] )<normalization>
submission = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv') submission.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
X_sir=sir.transform(vt_x )<normalization>
LGB = LGBMClassifier( n_estimators=10000, learning_rate =0.005, max_depth=-1, objective= 'binary', eval_metric='cross_entropy', first_metric_only=True, )
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
X_test=sir.transform(vt_test )<import_modules>
n_folds = 10 cv = StratifiedKFold(n_splits=n_folds, shuffle=True) losses = [] LGB_predicts = [] for i,(train,valid)in enumerate(cv.split(encoded_train, y_train)) : LGB.fit(encoded_train[train], y_train[train], eval_set=[(encoded_train[train], y_train[train]),(encoded_train[valid, :], y_train[valid])], verbose=False) test_pred = LGB.predict_proba(encoded_test)[:,1] LGB_predicts.append(test_pred) LGB_predicts = np.asarray(LGB_predicts) LGB_predict = np.mean(LGB_predicts, axis=0 )
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
from sklearn.linear_model import LogisticRegression<train_model>
submission.Pred = LGB_predict submission.to_csv(f'LGB.csv', index=False )
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
lr=LogisticRegression().fit(X_sir,train['Col2'] )<create_dataframe>
opt_result = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneyCompactResults.csv') opt_result = opt_result[opt_result.Season > 2014].reset_index() opt_result = opt_result[['Season', 'WTeamID', 'LTeamID']] opt_result.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
submission=pd.DataFrame({'Col1':test.Col1,'Col2':lr.predict(X_test)} )<save_to_csv>
for index, match in opt_result.iterrows() : opt_result.loc[index, 'ID'] = f"{match.Season}_{'_'.join(str(num)for num in sorted([match.WTeamID,match.LTeamID])) }" opt_result.loc[index, 'Pred'] = 1 if match.WTeamID < match.LTeamID else 0 opt_result = opt_result[['ID', 'Pred']] opt_result.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
submission.to_csv('submission_reduction.csv',index=False )<load_from_csv>
opt_test = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv' ).drop(columns='Pred') opt_test.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,005,170
<feature_engineering><EOS>
predict = pd.merge(left=opt_test, right=opt_result, how='left', on='ID') predict.Pred = predict.Pred.fillna(0) predict.to_csv('Phan_Viet_Hoang.csv', index=False )
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
<SOS> metric: logloss Kaggle data source: google-cloud-ncaa-march-madness-2020-division-1-mens-tournament<import_modules>
py.init_notebook_mode(connected=True) pd.set_option('max_columns', 50 )
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
import matplotlib.pyplot as plt<data_type_conversions>
datadir = Path('/kaggle/input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament') stage1dir = datadir/'MDataFiles_Stage1'
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
datetime_str = '01/22/20 00:00:00' datetime_object = datetime.strptime(datetime_str, '%m/%d/%y %H:%M:%S') data['days']=pd.to_datetime(data['Date'] ).sub(datetime_object)/np.timedelta64(1, 'D') test['days']=pd.to_datetime(test['Date'] ).sub(datetime_object)/np.timedelta64(1, 'D' )<sort_values>
teams_df = pd.read_csv(stage1dir/'MTeams.csv') print('teams_df', teams_df.shape) teams_df.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
data.loc[(data['Province_State']=='')&(data['Country_Region']=='India'),:].sort_values(by="Date" )<import_modules>
seasons_df = pd.read_csv(stage1dir/'MSeasons.csv' )
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
from statsmodels.tsa.arima_model import ARIMA <data_type_conversions>
tourney_seeds_df = pd.read_csv(stage1dir/'MNCAATourneySeeds.csv') tourney_seeds_df
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
data['Date']=pd.to_datetime(data['Date']) test['Date']=pd.to_datetime(test['Date'] )<create_dataframe>
regular_season_results_df = pd.read_csv(stage1dir/'MRegularSeasonCompactResults.csv') tournament_results_df = pd.read_csv(stage1dir/'MNCAATourneyCompactResults.csv' )
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
pd.DataFrame(data.loc[data['Country_Region']=='Afghanistan',['ConfirmedCases']] ).reset_index(drop=True )<count_missing_values>
sample_submission = pd.read_csv(datadir/'MSampleSubmissionStage1_2020.csv') sample_submission
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
data.isna().sum(axis=0 )<filter>
regular_season_detailed_results_df = pd.read_csv(stage1dir/'MRegularSeasonDetailedResults.csv') tournament_detailed_results_df = pd.read_csv(stage1dir/'MNCAATourneyDetailedResults.csv' )
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
data['ConfirmedCases'][data['Country_Region']==''][51:]<count_values>
cities_df = pd.read_csv(stage1dir/'Cities.csv') mgame_cities_df = pd.read_csv(stage1dir/'MGameCities.csv' )
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
data['ConfirmedCases'][data['Country_Region']=='India'].value_counts()<create_dataframe>
massey_df = pd.read_csv(stage1dir/'MMasseyOrdinals.csv') massey_df
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
pd.DataFrame(data.loc[data['Country_Region']=='India',['ConfirmedCases']] )<data_type_conversions>
event2015_df = pd.read_csv(datadir/'MEvents2015.csv')
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
datetime_str = '03/22/20 00:00:00' datetime_object = datetime.strptime(datetime_str, '%m/%d/%y %H:%M:%S' )<import_modules>
players_df = pd.read_csv(datadir/'MPlayers.csv') players_df
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
from datetime import timedelta<import_modules>
team_coaches_df = pd.read_csv(stage1dir/'MTeamCoaches.csv') print('team_coaches_df', team_coaches_df.shape) team_coaches_df.iloc[80:85]
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
import math<compute_test_metric>
conferences_df = pd.read_csv(stage1dir/'Conferences.csv') team_conferences_df = pd.read_csv(stage1dir/'MTeamConferences.csv' )
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
def rmsle(y, y_pred): assert len(y)== len(y_pred) terms_to_sum = [(math.log(y_pred[i] + 1)- math.log(y[i] + 1)) ** 2.0 for i,pred in enumerate(y_pred)] return(sum(terms_to_sum)*(1.0/len(y)))** 0.5<train_model>
team_conferences_df[team_conferences_df['TeamID'] == 1102]
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
def evaluate_arima_model(X,forecast_days, arima_order): X=[x for x in X] train_size = int(len(X)* 0.9) train, test1 = X[0:train_size], X[train_size:] history=train model = ARIMA(history, order=arima_order) model_fit = model.fit(disp=0) predictions = list() predictions =model_fit.forecast(steps=len(test1)) [0] model = ARIMA(X, order=arima_order) model_fit = model.fit(disp=0) if np.isnan(model_fit.forecast(steps=forecast_days)[0] ).sum() >0: return float('inf') error = rmsle(test1, predictions) return error<find_best_params>
conference_tourney_games_df = pd.read_csv(stage1dir/'MConferenceTourneyGames.csv') conference_tourney_games_df
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
def evaluate_models(dataset,forcast_days, p_values, d_values, q_values): best_score, best_cfg = float("inf"),(0,0,0) for p in p_values: for d in d_values: for q in q_values: order =(p,d,q) try: mse = evaluate_arima_model(dataset,forcast_days, order) if mse < best_score: best_score, best_cfg = mse, order except: continue print('Best ARIMA%s MSE=%.3f' %(best_cfg, best_score)) return best_cfg, best_score<set_options>
secondary_tourney_teams_df = pd.read_csv(stage1dir/'MSecondaryTourneyTeams.csv') secondary_tourney_teams_df
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
warnings.filterwarnings('ignore' )<feature_engineering>
secondary_tourney_results_df = pd.read_csv(stage1dir/'MSecondaryTourneyCompactResults.csv') secondary_tourney_results_df
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
test['ConfirmedCases']=0 test['Fatalities']=0<filter>
team_spellings_df = pd.read_csv(stage1dir/'MTeamSpellings.csv',encoding="cp1252" )
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
sliced_data=data.loc[(data['Province_State']=='')&(data['Country_Region']=='India'),:]<import_modules>
tourney_slots_df = pd.read_csv(stage1dir/'MNCAATourneySlots.csv') tourney_seed_round_slots_df = pd.read_csv(stage1dir/'MNCAATourneySeedRoundSlots.csv' )
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
from pandas import read_csv from pandas import datetime from matplotlib import pyplot from statsmodels.tsa.arima_model import ARIMA from sklearn.metrics import mean_squared_error from math import sqrt from time import time from sklearn.metrics import mean_squared_error<remove_duplicates>
tourney_slots_df[(tourney_slots_df['Season'] == 1985)&(tourney_slots_df['Slot'].str.startswith('R1W')) ]
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
country='India' state='' sliced_data=data.loc[(data['Province_State']==state)&(data['Country_Region']==country),:] test_sliced=test.loc[(test['Province_State']==state)&(test['Country_Region']==country),:] sliced_data=sliced_data.drop_duplicates() sliced_data=sliced_data.reset_index(drop=True) sliced_data=sliced_data.sort_values(by='Date') if sliced_data.loc[sliced_data['ConfirmedCases']>0,:].shape[0]>0: sliced_data=sliced_data.loc[sliced_data['ConfirmedCases']>0,:] sliced_data=sliced_data.reset_index(drop=True) max_date_train=sliced_data['Date'].max() max_date_test=test_sliced['Date'].max() forcast_days=int(( max_date_test-max_date_train)/np.timedelta64(1, 'D')) history=sliced_data['ConfirmedCases'].to_list() if len(history)==1: history.append(history[0]) best_cfg,best_score=evaluate_models(history,forcast_days,range(10),range(7),range(7)) preds=[] model = ARIMA(history, order=best_cfg) model_fit = model.fit(disp=0) preds=model_fit.forecast(steps=forcast_days)[0] preds=[round(p)if p>0 else 0 for p in preds] dates=[max_date_train+timedelta(days=day+1)for day in range(forcast_days)] predictions=pd.DataFrame() predictions['Date']=dates predictions['ConfirmedCases']=preds test_sliced=test_sliced.merge(sliced_data[['Date','ConfirmedCases']], on='Date',how='left') test_sliced['ConfirmedCases']=test_sliced['ConfirmedCases_y'] del test_sliced['ConfirmedCases_y'] del test_sliced['ConfirmedCases_x'] test_sliced=test_sliced.merge(predictions, on='Date',how='left') test_sliced['ConfirmedCases_x'][test_sliced['ConfirmedCases_x'].isna() ]=test_sliced['ConfirmedCases_y'][test_sliced['ConfirmedCases_x'].isna() ] test_sliced['ConfirmedCases']=test_sliced['ConfirmedCases_x'] del test_sliced['ConfirmedCases_y'] del test_sliced['ConfirmedCases_x'] sliced_data_bck=sliced_data.copy() if sliced_data.loc[sliced_data['Fatalities']>0,:].shape[0]>0: sliced_data=sliced_data.loc[sliced_data['Fatalities']>0,:] sliced_data=sliced_data.reset_index(drop=True) max_date_train=sliced_data['Date'].max() max_date_test=test_sliced['Date'].max() forcast_days=int(( max_date_test-max_date_train)/np.timedelta64(1, 'D')) history=sliced_data['Fatalities'].to_list() if len(history)==1: history.append(history[0]) best_cfg,best_score=evaluate_models(history,forcast_days,range(5),range(5),range(5)) preds=[] model=None model = ARIMA(history, order=best_cfg) model_fit = model.fit(disp=0) preds=model_fit.forecast(steps=forcast_days)[0] preds=[round(p)if p>0 else 0 for p in preds] dates=[max_date_train+timedelta(days=day+1)for day in range(forcast_days)] predictions_f=pd.DataFrame() predictions_f['Date']=dates predictions_f['Fatalities']=preds test_sliced=test_sliced.merge(sliced_data_bck[['Date','Fatalities']], on='Date',how='left') test_sliced['Fatalities']=test_sliced['Fatalities_y'] del test_sliced['Fatalities_y'] del test_sliced['Fatalities_x'] test_sliced=test_sliced.merge(predictions_f, on='Date',how='left') test_sliced['Fatalities_x'][test_sliced['Fatalities_x'].isna() ]=test_sliced['Fatalities_y'][test_sliced['Fatalities_x'].isna() ] test_sliced['Fatalities']=test_sliced['Fatalities_x'] del test_sliced['Fatalities_y'] del test_sliced['Fatalities_x'] test=test.merge(test_sliced,on='ForecastId',how='left') test['ConfirmedCases_x'][test['ConfirmedCases_y'].notna() ]=test['ConfirmedCases_y'][test['ConfirmedCases_y'].notna() ] test['Fatalities_x'][test['Fatalities_y'].notna() ]=test['Fatalities_y'][test['Fatalities_y'].notna() ] new_cols=[] for col in test.columns: if col[-2:]=='_y': del test[col] elif col[-2:]=='_x': 
new_cols.append(col[:-2]) else: new_cols.append(col) test.columns=new_cols test.loc[(test['Province_State']==state)&(test['Country_Region']==country),:].head() plt.plot('Date', 'ConfirmedCases', data=sliced_data, color='blue', linewidth=2) plt.plot('Date','ConfirmedCases',data=test_sliced,color='orange',linewidth=2) plt.plot('Date', 'Fatalities', data=sliced_data, color='purple', linewidth=2) plt.plot('Date','Fatalities',data=test_sliced,color='red',linewidth=2) plt.show()<filter>
tournament_results2015_df = tournament_results_df.query("Season >= 2015") tournament_results2015_df
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
test.loc[(test['Province_State']==state)&(test['Country_Region']==country),['Country_Region','Date','ConfirmedCases','Fatalities']]<feature_engineering>
for key, row in tournament_results2015_df.iterrows() : if row['WTeamID'] < row['LTeamID']: id_name = str(row['Season'])+ '_' + str(row['WTeamID'])+ '_' + str(row['LTeamID']) sample_submission.loc[sample_submission['ID'] == id_name, 'Pred'] = 1.0 else: id_name = str(row['Season'])+ '_' + str(row['LTeamID'])+ '_' + str(row['WTeamID']) sample_submission.loc[sample_submission['ID'] == id_name, 'Pred'] = 0.0
Google Cloud & NCAA® ML Competition 2020-NCAAM
7,995,295
<remove_duplicates><EOS>
sample_submission.to_csv('submission.csv', index=False )
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
<SOS> metric: logloss Kaggle data source: google-cloud-ncaa-march-madness-2020-division-1-mens-tournament<save_to_csv>
lgb_num_leaves_max = 255 lgb_in_leaf = 50 lgb_lr = 0.0001 lgb_bagging = 7 xgb_max_depth = 20 xgb_min_child_weight = 75 xgb_lr = 0.0005 xgb_num_boost_round_max = 4000 w_lgb = 0.6 w_xgb = 0.3 w_logreg = 1 - w_lgb - w_xgb w_logreg
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
output=pd.DataFrame() output['ForecastId']=test['ForecastId'].astype(int) output['ConfirmedCases']=test['ConfirmedCases'].astype(int) output['Fatalities']=test['Fatalities'].astype(int) output.to_csv('submission.csv',index=False )<load_from_csv>
warnings.filterwarnings("ignore" )
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
train_dataset = pd.read_csv('/kaggle/input/titanic/train.csv') test_dataset = pd.read_csv('/kaggle/input/titanic/test.csv') X_train = train_dataset.iloc[:, [False,False,True,False,True,True,True,True,False,True,False,False]].values y_train = train_dataset.iloc[:, 1].values X_test = test_dataset.iloc[:, [False,True,False,True,True,True,True,False,True,False,False]].values<categorify>
tourney_result = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneyCompactResults.csv') tourney_result = tourney_result[tourney_result['Season'] < 2015] tourney_seed = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneySeeds.csv') tourney_seed = tourney_seed[tourney_seed['Season'] < 2015] tourney_result = tourney_result.drop(['DayNum', 'WScore', 'LScore', 'WLoc', 'NumOT'], axis=1) tourney_result = pd.merge(tourney_result, tourney_seed, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={'Seed':'WSeed'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) tourney_result = pd.merge(tourney_result, tourney_seed, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={'Seed':'LSeed'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1 )
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
ct = ColumnTransformer(transformers = [('encoder',OneHotEncoder() ,[1])],remainder = 'passthrough') <normalization>
def get_seed(x): return int(x[1:3]) tourney_result['WSeed'] = tourney_result['WSeed'].map(lambda x: get_seed(x)) tourney_result['LSeed'] = tourney_result['LSeed'].map(lambda x: get_seed(x))
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
X_train = np.array(ct.fit_transform(X_train)) X_test = np.array(ct.fit_transform(X_test))<train_model>
season_result = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MRegularSeasonCompactResults.csv') season_result = season_result[season_result['Season'] < 2015] season_win_result = season_result[['Season', 'WTeamID', 'WScore']] season_lose_result = season_result[['Season', 'LTeamID', 'LScore']] season_win_result.rename(columns={'WTeamID':'TeamID', 'WScore':'Score'}, inplace=True) season_lose_result.rename(columns={'LTeamID':'TeamID', 'LScore':'Score'}, inplace=True) season_result = pd.concat(( season_win_result, season_lose_result)).reset_index(drop=True) season_score = season_result.groupby(['Season', 'TeamID'])['Score'].sum().reset_index()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
imputer = SimpleImputer(missing_values = np.nan, strategy = 'mean') imputer.fit(X_train) X_train = imputer.transform(X_train) imputer.fit(X_test) X_test = imputer.transform(X_test )<normalization>
tourney_result = pd.merge(tourney_result, season_score, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={'Score':'WScoreT'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1) tourney_result = pd.merge(tourney_result, season_score, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') tourney_result.rename(columns={'Score':'LScoreT'}, inplace=True) tourney_result = tourney_result.drop('TeamID', axis=1 )
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test )<feature_engineering>
tourney_win_result = tourney_result.drop(['Season', 'WTeamID', 'LTeamID'], axis=1) tourney_win_result.rename(columns={'WSeed':'Seed1', 'LSeed':'Seed2', 'WScoreT':'ScoreT1', 'LScoreT':'ScoreT2'}, inplace=True )
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
for i in range(len(X_train)) : if X_train[i][0]>X_train[i][1]: X_train[i][0] = 1 X_train[i][1] = 0 else: X_train[i][0] = 0 X_train[i][1] = 1 print(X_train )<feature_engineering>
tourney_lose_result = tourney_win_result.copy() tourney_lose_result['Seed1'] = tourney_win_result['Seed2'] tourney_lose_result['Seed2'] = tourney_win_result['Seed1'] tourney_lose_result['ScoreT1'] = tourney_win_result['ScoreT2'] tourney_lose_result['ScoreT2'] = tourney_win_result['ScoreT1']
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
for i in range(len(X_test)) : if X_test[i][0]>X_test[i][1]: X_test[i][0] = 1 X_test[i][1] = 0 else: X_test[i][0] = 0 X_test[i][1] = 1 print(X_test )<train_model>
tourney_win_result['Seed_diff'] = tourney_win_result['Seed1'] - tourney_win_result['Seed2'] tourney_win_result['ScoreT_diff'] = tourney_win_result['ScoreT1'] - tourney_win_result['ScoreT2'] tourney_lose_result['Seed_diff'] = tourney_lose_result['Seed1'] - tourney_lose_result['Seed2'] tourney_lose_result['ScoreT_diff'] = tourney_lose_result['ScoreT1'] - tourney_lose_result['ScoreT2']
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
classifier = KNeighborsClassifier(n_neighbors=5, metric='minkowski',p=2) classifier.fit(X_train, y_train )<predict_on_test>
tourney_win_result['result'] = 1 tourney_lose_result['result'] = 0 train_df = pd.concat(( tourney_win_result, tourney_lose_result)).reset_index(drop=True) train_df
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
y_test = classifier.predict(X_test) print(y_test )<save_to_csv>
test_df = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MSampleSubmissionStage1_2020.csv') sub = test_df.copy()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
output = pd.DataFrame({'PassengerId': test_dataset.PassengerId, 'Survived': y_test}) output.to_csv('my_submission.csv', index=False) print("Your submission was successfully saved!" )<set_options>
test_df['Season'] = test_df['ID'].map(lambda x: int(x[:4])) test_df['WTeamID'] = test_df['ID'].map(lambda x: int(x[5:9])) test_df['LTeamID'] = test_df['ID'].map(lambda x: int(x[10:14]))
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) class Model1Xgb: def __init__(self): self.model = None def fit(self, tr_x, tr_y, va_x, va_y): params = { 'booster': 'gbtree', 'objective': 'binary:logistic', 'eta': 0.1, 'gamma': 0.1, 'reg_alpha':0.01 , 'reg_lambda': 0, 'min_child_weight': 4, 'max_depth': 9, 'subsample': 0.8, 'colsample_bytree': 0.75, 'scale_pos_weight': 1, 'random_state': 1 } num_round = 30 dtrain = xgb.DMatrix(tr_x, label=tr_y) dvalid = xgb.DMatrix(va_x, label=va_y) watchlist = [(dtrain, 'train'),(dvalid, 'eval')] self.model = xgb.train(params, dtrain, num_round, evals=watchlist) def predict(self, x): data = xgb.DMatrix(x) pred = self.model.predict(data) return pred class Model1NN: def __init__(self, params): self.params = params self.model = None self.scaler = None def fit(self, tr_x, tr_y, va_x, va_y): input_dropout = self.params['input_dropout'] hidden_layers = int(self.params['hidden_layers']) hidden_units = int(self.params['hidden_units']) hidden_activation = self.params['hidden_activation'] hidden_dropout = self.params['hidden_dropout'] batch_norm = self.params['batch_norm'] optimizer_type = self.params['optimizer']['type'] optimizer_lr = self.params['optimizer']['lr'] batch_size = int(self.params['batch_size']) self.scaler = StandardScaler() self.scaler.fit(tr_x) batch_size = 32 epochs = 20 tr_x = self.scaler.transform(tr_x) va_x = self.scaler.transform(va_x) model = Sequential() model.add(Dropout(input_dropout, input_shape=(tr_x.shape[1],))) for i in range(hidden_layers): model.add(Dense(hidden_units)) if batch_norm == 'before_act': model.add(BatchNormalization()) if hidden_activation == 'prelu': model.add(PReLU()) elif hidden_activation == 'relu': model.add(ReLU()) else: raise NotImplementedError model.add(Dropout(hidden_dropout)) model.add(Dense(1, activation='sigmoid')) if optimizer_type == 'sgd': optimizer = SGD(lr=optimizer_lr, decay=1e-6, momentum=0.9, nesterov=True) elif optimizer_type == 'adam': optimizer = Adam(lr=optimizer_lr, beta_1=0.9, beta_2=0.999, decay=0.) 
else: raise NotImplementedError model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy']) history = model.fit(tr_x, tr_y, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(va_x, va_y)) self.model = model def predict(self, x): x = self.scaler.transform(x) pred = self.model.predict_proba(x ).reshape(-1) return pred class Model2Linear: def __init__(self): self.model = None self.scaler = None def fit(self, tr_x, tr_y, va_x, va_y): self.scaler = StandardScaler() self.scaler.fit(tr_x) tr_x = self.scaler.transform(tr_x) self.model = LogisticRegression(solver='lbfgs', C=100, random_state=1) self.model.fit(tr_x, tr_y) def predict(self, x): x = self.scaler.transform(x) pred = self.model.predict_proba(x)[:, 1] return pred def predict_cv(model, train_x, train_y, test_x): preds = [] preds_test = [] va_idxes = [] kf = KFold(n_splits=4, shuffle=True, random_state=1) for i,(tr_idx, va_idx)in enumerate(kf.split(train_x)) : tr_x, va_x = train_x.iloc[tr_idx], train_x.iloc[va_idx] tr_y, va_y = train_y.iloc[tr_idx], train_y.iloc[va_idx] model.fit(tr_x, tr_y, va_x, va_y) pred = model.predict(va_x) preds.append(pred) pred_test = model.predict(test_x) preds_test.append(pred_test) va_idxes.append(va_idx) va_idxes = np.concatenate(va_idxes) preds = np.concatenate(preds, axis=0) order = np.argsort(va_idxes) pred_train = preds[order] preds_test = np.mean(preds_test, axis=0) return pred_train, preds_test train_df = pd.read_csv('.. /input/titanic/train.csv') test_df = pd.read_csv('.. /input/titanic/test.csv') train_y = train_df['Survived'] train_df = train_df.drop(["Ticket", "Cabin"], axis=1) test_df = test_df.drop(["Ticket", "Cabin"], axis=1) combine = [train_df, test_df] for dataset in combine: dataset['Title'] = dataset.Name.str.extract('([A-Za-z]+)\.', expand=False) df_list = pd.crosstab(train_df['Title'], train_df['Sex']) for dataset in combine: dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare') dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss') dataset['Title'] = dataset['Title'].replace('Ms', 'Miss') dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs') train_df[['Title', 'Survived']].groupby(['Title'], as_index=False ).mean() title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5} combine = [train_df, test_df] for dataset in combine: dataset["Title"] = dataset["Title"].map({"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}) dataset['Title'] = dataset['Title'].fillna(0) train_df = train_df.drop(['Name', 'PassengerId'], axis=1) test_df = test_df.drop(['Name'], axis=1) combine = [train_df, test_df] for dataset in combine: dataset['Sex'] = dataset['Sex'].map({'female': 1, 'male': 0} ).astype(int) train_df['AgeBand'] = pd.cut(train_df['Age'], 5) train_df[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False ).mean().sort_values(by='AgeBand', ascending=True) for dataset in combine: dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0 dataset.loc[(dataset['Age'] > 16)&(dataset['Age'] <= 32), 'Age'] = 1 dataset.loc[(dataset['Age'] > 32)&(dataset['Age'] <= 48), 'Age'] = 2 dataset.loc[(dataset['Age'] > 48)&(dataset['Age'] <= 64), 'Age'] = 3 dataset.loc[ dataset['Age'] > 64, 'Age'] train_df = train_df.drop(['AgeBand'], axis=1) combine = [train_df, test_df] for dataset in combine: dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1 train_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False ).mean().sort_values(by='Survived', 
ascending=False) for dataset in combine: dataset['IsAlone'] = 0 dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1 train_df[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False ).mean() train_df = train_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1) test_df = test_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1) combine = [train_df, test_df] for dataset in combine: dataset['Age*Class'] = dataset.Age * dataset.Pclass freq_port = train_df.Embarked.dropna().mode() [0] for dataset in combine: dataset['Embarked'] = dataset['Embarked'].fillna(freq_port) train_df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False ).mean().sort_values(by='Survived', ascending=False) for dataset in combine: dataset['Embarked'] = dataset['Embarked'].map({'S': 0, 'C': 1, 'Q': 2} ).astype(int) test_df['Fare'].fillna(test_df['Fare'].dropna().median() , inplace=True) train_df['FareBand'] = pd.qcut(train_df['Fare'], 4) train_df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False ).mean().sort_values(by='FareBand', ascending=True) combine = [train_df, test_df] for dataset in combine: dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0 dataset.loc[(dataset['Fare'] > 7.91)&(dataset['Fare'] <= 14.454), 'Fare'] = 1 dataset.loc[(dataset['Fare'] > 14.454)&(dataset['Fare'] <= 31), 'Fare'] = 2 dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3 dataset['Fare'] = dataset['Fare'].astype(int) train_df = train_df.drop(['FareBand','Survived'], axis=1) test_df = test_df.drop(['PassengerId'], axis=1) train_df['Age'] = train_df['Age'].fillna(train_df['Age'].mean()) train_df['Age*Class'] = train_df['Age*Class'].fillna(train_df['Age*Class'].mean()) test_df['Age'] = test_df['Age'].fillna(test_df['Age'].mean()) test_df['Age*Class'] = test_df['Age*Class'].fillna(test_df['Age*Class'].mean()) train_x = train_df.copy() test_x = test_df.copy() train_nn = pd.read_csv('.. /input/titanic/train.csv') train_x_nn = train_df.copy() train_y_nn = train_nn['Survived'] test_x_nn = pd.read_csv('.. /input/titanic/test.csv') test_x_nn = test_df.copy() base_param = { 'input_dropout': 0.05, 'hidden_layers': 2.0, 'hidden_units': 96.0, 'hidden_activation': 'prelu', 'hidden_dropout': 0.05, 'batch_norm': 'before_act', 'optimizer': {'type': 'adam', 'lr': 0.00037640141509672924}, 'batch_size': 32.0} model_1a = Model1Xgb() pred_train_1a, pred_test_1a = predict_cv(model_1a, train_x, train_y, test_x) model_1b = Model1NN(base_param) pred_train_1b, pred_test_1b = predict_cv(model_1b, train_x_nn, train_y_nn, test_x_nn) print(f'logloss: {log_loss(train_y, pred_train_1a, eps=1e-7):.4f}') print(f'logloss: {log_loss(train_y, pred_train_1b, eps=1e-7):.4f}') train_x_2 = pd.DataFrame({'pred_1a': pred_train_1a, 'pred_1b': pred_train_1b}) test_x_2 = pd.DataFrame({'pred_1a': pred_test_1a, 'pred_1b': pred_test_1b}) model_2 = Model2Linear() pred_train_2, pred_test_2 = predict_cv(model_2, train_x_2, train_y, test_x_2) print(f'logloss: {log_loss(train_y, pred_train_2, eps=1e-7):.4f}') pred_test_2 = np.where(pred_test_2 < 0.5, 0, 1) pred_test_2 = pred_test_2.tolist() print(pred_test_2) sub = pd.read_csv('.. /input/titanic/gender_submission.csv') sub['Survived'] = list(map(int, pred_test_2)) sub.to_csv('submission_stacking3.csv', index=False) <load_from_csv>
tourney_seed = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MNCAATourneySeeds.csv') tourney_seed = tourney_seed[tourney_seed['Season'] > 2014]
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
train = pd.read_csv(".. /input/house-prices-advanced-regression-techniques/train.csv") test = pd.read_csv(".. /input/house-prices-advanced-regression-techniques/test.csv" )<train_model>
season_result = pd.read_csv('../input/google-cloud-ncaa-march-madness-2020-division-1-mens-tournament/MDataFiles_Stage1/MRegularSeasonCompactResults.csv') season_result = season_result[season_result['Season'] > 2014] season_win_result = season_result[['Season', 'WTeamID', 'WScore']] season_lose_result = season_result[['Season', 'LTeamID', 'LScore']] season_win_result.rename(columns={'WTeamID':'TeamID', 'WScore':'Score'}, inplace=True) season_lose_result.rename(columns={'LTeamID':'TeamID', 'LScore':'Score'}, inplace=True) season_result = pd.concat(( season_win_result, season_lose_result)).reset_index(drop=True) season_score = season_result.groupby(['Season', 'TeamID'])['Score'].sum().reset_index()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
data_size_train = train.memory_usage().sum() / 1024 / 1024 print("Data memory size: %.2f MB" % data_size_train )<train_model>
test_df = pd.merge(test_df, tourney_seed, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Seed':'Seed1'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, tourney_seed, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Seed':'Seed2'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, season_score, left_on=['Season', 'WTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Score':'ScoreT1'}, inplace=True) test_df = test_df.drop('TeamID', axis=1) test_df = pd.merge(test_df, season_score, left_on=['Season', 'LTeamID'], right_on=['Season', 'TeamID'], how='left') test_df.rename(columns={'Score':'ScoreT2'}, inplace=True) test_df = test_df.drop('TeamID', axis=1 )
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
data_size_test = test.memory_usage().sum() / 1024 / 1024 print("Data memory size: %.2f MB" % data_size_test )<categorify>
test_df['Seed1'] = test_df['Seed1'].map(lambda x: get_seed(x)) test_df['Seed2'] = test_df['Seed2'].map(lambda x: get_seed(x)) test_df['Seed_diff'] = test_df['Seed1'] - test_df['Seed2'] test_df['ScoreT_diff'] = test_df['ScoreT1'] - test_df['ScoreT2'] test_df = test_df.drop(['ID', 'Pred', 'Season', 'WTeamID', 'LTeamID'], axis=1) test_df
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
train = train.drop(["PoolQC", "MiscFeature", "Alley", "Fence", "Street", "Alley"], axis = 1) none_values_train = ["FireplaceQu", "GarageCond", "GarageType", "GarageFinish", "GarageQual", "BsmtExposure", "BsmtFinType2", "BsmtFinType1", "BsmtCond", "BsmtQual", "MasVnrType", "Electrical"] for value in none_values_train: train = train.fillna({value : "None"}) zero_values_train = ["LotFrontage", "GarageYrBlt", "MasVnrArea"] for value in zero_values_train: train = train.fillna({value : 0} )<categorify>
X = train_df.drop('result', axis=1) y = train_df.result
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
test = test.drop(["PoolQC", "MiscFeature", "Alley", "Fence", "Street", "Alley"], axis = 1) none_values_test = ["FireplaceQu", "GarageCond", "GarageType", "GarageFinish", "GarageQual", "BsmtExposure", "BsmtFinType2", "BsmtFinType1", "BsmtCond", "BsmtQual", "MasVnrType", "Electrical", "MSZoning", "Utilities", "Functional", "Exterior2nd", "SaleType", "Exterior1st", "KitchenQual"] for value in none_values_test: test = test.fillna({value : "None"}) zero_values_test = ["LotFrontage", "GarageYrBlt", "MasVnrArea", "BsmtHalfBath", "BsmtFullBath", "BsmtFinSF2", "BsmtFinSF1", "TotalBsmtSF", "GarageArea", "GarageCars", "BsmtUnfSF"] for value in zero_values_test: test = test.fillna({value : 0} )<drop_column>
params_lgb = {'num_leaves': lgb_num_leaves_max, 'min_data_in_leaf': lgb_in_leaf, 'objective': 'binary', 'max_depth': -1, 'learning_rate': lgb_lr, "boosting_type": "gbdt", "bagging_seed": lgb_bagging, "metric": 'logloss', "verbosity": -1, 'random_state': 42, }
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
train = train.drop(train[train["GrLivArea"] > 4000].index )<data_type_conversions>
NFOLDS = 10 folds = KFold(n_splits=NFOLDS) columns = X.columns splits = folds.split(X, y) y_preds_lgb = np.zeros(test_df.shape[0]) y_train_lgb = np.zeros(X.shape[0]) y_oof = np.zeros(X.shape[0]) feature_importances = pd.DataFrame() feature_importances['feature'] = columns for fold_n,(train_index, valid_index)in enumerate(splits): print('Fold:',fold_n+1) X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index] y_train, y_valid = y.iloc[train_index], y.iloc[valid_index] dtrain = lgb.Dataset(X_train, label=y_train) dvalid = lgb.Dataset(X_valid, label=y_valid) clf = lgb.train(params_lgb, dtrain, 10000, valid_sets = [dtrain, dvalid], verbose_eval=200) feature_importances[f'fold_{fold_n + 1}'] = clf.feature_importance() y_pred_valid = clf.predict(X_valid) y_oof[valid_index] = y_pred_valid y_train_lgb += clf.predict(X)/ NFOLDS y_preds_lgb += clf.predict(test_df)/ NFOLDS del X_train, X_valid, y_train, y_valid gc.collect()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
upper = corr.where(np.triu(np.ones(corr.shape), k = 1 ).astype(np.bool)) to_drop = [column for column in upper.columns if any(upper[column] > 0.90)] print(to_drop )<import_modules>
params_xgb = {'max_depth': xgb_max_depth, 'objective': 'binary:logistic', 'min_child_weight': xgb_min_child_weight, 'learning_rate': xgb_lr, 'eta' : 0.3, 'subsample': 0.8, 'lambda': 4, 'eval_metric': 'logloss', 'colsample_bytree': 0.9, 'colsample_bylevel': 1 }
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
from sklearn.linear_model import Ridge, Lasso, LinearRegression from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, StackingRegressor from sklearn.model_selection import GridSearchCV, train_test_split, cross_val_score, learning_curve from sklearn.preprocessing import StandardScaler, RobustScaler from sklearn.decomposition import PCA from sklearn.pipeline import Pipeline from sklearn.metrics import mean_squared_error from xgboost import XGBRegressor from catboost import CatBoostRegressor import lightgbm as lgb<prepare_x_and_y>
NFOLDS = 10 folds = KFold(n_splits=NFOLDS) columns = X.columns splits = folds.split(X, y) y_preds_xgb = np.zeros(test_df.shape[0]) y_train_xgb = np.zeros(X.shape[0]) y_oof_xgb = np.zeros(X.shape[0]) train_df_set = xgb.DMatrix(X) test_set = xgb.DMatrix(test_df) for fold_n,(train_index, valid_index)in enumerate(splits): print('Fold:',fold_n+1) X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index] y_train, y_valid = y.iloc[train_index], y.iloc[valid_index] train_set = xgb.DMatrix(X_train, y_train) val_set = xgb.DMatrix(X_valid, y_valid) clf = xgb.train(params_xgb, train_set, num_boost_round=xgb_num_boost_round_max, evals=[(train_set, 'train'),(val_set, 'val')], verbose_eval=100) y_train_xgb += clf.predict(train_df_set)/ NFOLDS y_preds_xgb += clf.predict(test_set)/ NFOLDS del X_train, X_valid, y_train, y_valid gc.collect()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
all_data = pd.concat([train.drop("SalePrice", axis = 1), test]) all_data = pd.get_dummies(all_data) train_dummy = all_data[0:1456] test_dummy = all_data[1456:2915] train_dummy["SalePrice"] = train["SalePrice"] X = train_dummy.drop(["SalePrice", "Id"], axis = 1 ).values Y = train_dummy["SalePrice"].values<normalization>
%%time scaler = StandardScaler() train_log = pd.DataFrame( scaler.fit_transform(X), columns=X.columns, index=X.index ) test_log = pd.DataFrame( scaler.transform(test_df), columns=test_df.columns, index=test_df.index )
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
scaler = StandardScaler() X = scaler.fit_transform(X) test_dummy = test_dummy.drop("Id", axis = 1) test_dummy = scaler.fit_transform(test_dummy )<choose_model_class>
logreg = LogisticRegression() logreg.fit(train_log, y) coeff_logreg = pd.DataFrame(train_log.columns.delete(0)) coeff_logreg.columns = ['feature'] coeff_logreg["score_logreg"] = pd.Series(logreg.coef_[0]) coeff_logreg.sort_values(by='score_logreg', ascending=False )
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
pipe_cat = Pipeline([ ("scaler", StandardScaler()), ("cat", CatBoostRegressor(verbose = 0)) ]) params_cat = { "cat__depth" : [3, 4, 5], "cat__learning_rate" : [0.001, 0.01, 0.1, 1], "cat__n_estimators" : [300, 400, 500]} model_cat = CatBoostRegressor(verbose = 0, depth = 5, learning_rate = 0.1, n_estimators = 500) model_cat.fit(X, Y) model_score = cross_val_score(model_cat, X, Y, cv = 5, n_jobs = -1) Y_preds = model_cat.predict(X) print("R2 : " + str(model_score.mean())) print("RMSE : " + str(np.sqrt(mean_squared_error(Y, Y_preds))))<choose_model_class>
eli5.show_weights(logreg )
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
pipe_gbm = Pipeline([ ("gbm", lgb.LGBMRegressor())]) params_gbm = { "gbm__n_estimators" : [300, 500, 700], "gbm__learning_rate" : [0.001, 0.01, 0.1], "gbm__max_depth" : [3, 6, 9], "gbm__max_bin" : [32, 64, 128, 256, 512]} model_gbm = lgb.LGBMRegressor(verbose = 0, learning_rate = 0.1, max_bin = 32, max_depth = 4, n_estimators = 500) model_gbm.fit(X, Y) model_score = cross_val_score(model_gbm, X, Y, cv = 5, n_jobs = -1) Y_preds = model_gbm.predict(X) print("R2 : " + str(model_score.mean())) print("RMSE : " + str(np.sqrt(mean_squared_error(Y, Y_preds))))<find_best_model_class>
y_logreg_train = logreg.predict(train_log) y_logreg_pred = logreg.predict(test_log )
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
estimators = [("CatBoost", model_cat), ("LightGBM", model_gbm)] stacker = StackingRegressor(estimators = estimators) stacker.fit(X, Y) stacker_score = cross_val_score(stacker, X, Y, cv = 5, n_jobs = -1) Y_pred_stack = stacker.predict(X) Y_preds_sub = stacker.predict(test_dummy) print("R2 : " + str(stacker_score.mean())) print("RMSE : " + str(np.sqrt(mean_squared_error(Y, Y_pred_stack))))<save_to_csv>
def plot_cm(y_true, y_pred, title, figsize=(7,6)) : y_pred = y_pred.round().astype(int) cm = confusion_matrix(y_true, y_pred, labels=np.unique(y_true)) cm_sum = np.sum(cm, axis=1, keepdims=True) cm_perc = cm / cm_sum.astype(float)* 100 annot = np.empty_like(cm ).astype(str) nrows, ncols = cm.shape for i in range(nrows): for j in range(ncols): c = cm[i, j] p = cm_perc[i, j] if i == j: s = cm_sum[i] annot[i, j] = '%.1f%% %d/%d' %(p, c, s) elif c == 0: annot[i, j] = '' else: annot[i, j] = '%.1f%% %d' %(p, c) cm = pd.DataFrame(cm, index=np.unique(y_true), columns=np.unique(y_true)) cm.index.name = 'Actual' cm.columns.name = 'Predicted' fig, ax = plt.subplots(figsize=figsize) plt.title(title) sns.heatmap(cm, cmap= "YlGnBu", annot=annot, fmt='', ax=ax )
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
submission = pd.DataFrame({"Id" : test.Id, "SalePrice" : Y_preds_sub}) submission.to_csv("submission_reg.csv", index = False) submission.head()<save_to_csv>
y_preds = w_lgb*y_preds_lgb + w_xgb*y_preds_xgb + w_logreg*y_logreg_pred
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
f = open(".. /input/submission/final_5_0.10117320406891336.csv") tem = pd.read_csv(f) tem.head(10) tem.to_csv('submission.csv', index=False )<import_modules>
sub['Pred'] = y_preds sub.head()
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
import pandas as pd import numpy as np import seaborn as sns import pylab as pl import matplotlib.pyplot as plt import math from sklearn.model_selection import train_test_split from sklearn.ensemble import GradientBoostingClassifier from sklearn.metrics import classification_report from sklearn.metrics import accuracy_score<set_options>
sub.to_csv('submission.csv', index=False )
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,053,294
<load_from_csv><EOS>
sub.to_csv('submission.csv', index=False )
Google Cloud & NCAA® ML Competition 2020-NCAAM
8,179,034
<SOS> metric: logloss Kaggle data source: google-cloud-ncaa-march-madness-2020-division-1-mens-tournament<drop_column>
pd.set_option('max_columns', None) plt.style.use('seaborn') %matplotlib inline py.init_notebook_mode(connected=True) warnings.filterwarnings('ignore')
Google Cloud & NCAA® ML Competition 2020-NCAAM