kernel_id: int64 (24.2k to 23.3M)
prompt: string (lengths 8 to 1.85M)
completion: string (lengths 1 to 182k)
comp_name: string (lengths 5 to 57)
11,614,110
test['Title'] = test['Title'].replace(['Lady.', 'Capt.', 'Col.', 'Don.', 'Dr.', 'Major.', 'Rev.', 'Jonkheer.', 'Dona.'], 'Rare.') test['Title'] = test['Title'].replace(['Countess.', 'Lady.', 'Sir.'], 'Royal.') test['Title'] = test['Title'].replace('Mlle.', 'Miss.') test['Title'] = test['Title'].replace('Ms.', 'Miss.') test['Title'] = test['Title'].replace('Mme.', 'Mrs.') <categorify>
train.id.nunique()
Conway's Reverse Game of Life 2020
11,614,110
title_mapping = {"Mr.": 1, "Miss.": 2, "Mrs.": 3, "Master.": 4, "Royal.": 5, "Rare.": 6} train['Title'] = train['Title'].map(title_mapping) train['Title'] = train['Title'].fillna(0) train.head()<categorify>
train.delta.value_counts()
Conway's Reverse Game of Life 2020
11,614,110
title_mapping = {"Mr.": 1, "Miss.": 2, "Mrs.": 3, "Master.": 4, "Royal.": 5, "Rare.": 6} test['Title'] = test['Title'].map(title_mapping) test['Title'] = test['Title'].fillna(0) <drop_column>
train.start_0.value_counts()
Conway's Reverse Game of Life 2020
11,614,110
test = test.drop(['Ticket'], axis = 1) test = test.drop(['Name'], axis = 1) test = test.drop(['Parch'], axis = 1) test = test.drop(['Fare','SibSp'], axis = 1) train.drop(['Name', 'Ticket'], axis = 1, inplace = True) train = train.drop(['Parch'], axis = 1) train = train.drop(['Fare','SibSp'], axis = 1 )<split>
train.stop_0.value_counts()
Conway's Reverse Game of Life 2020
11,614,110
X_train, X_test, y_train, y_test = train_test_split(train.drop(['Survived','PassengerId'], axis=1), train['Survived'], test_size = 0.2, random_state = 0 )<train_model>
sample_start_1 = train.loc[1, train.columns.str.startswith('start')] sample_stop_1 = train.loc[1, train.columns.str.startswith('stop')]
Conway's Reverse Game of Life 2020
11,614,110
logisticRegression = LogisticRegression(max_iter = 30000) logisticRegression.fit(X_train, y_train) <predict_on_test>
sample_start_1 = np.asarray(sample_start_1 ).reshape(25, 25) sample_stop_1 = np.asarray(sample_stop_1 ).reshape(25, 25 )
Conway's Reverse Game of Life 2020
11,614,110
predictions = logisticRegression.predict(X_test) acc_LOG = round(accuracy_score(predictions, y_test)* 100, 2) print(acc_LOG) print(predictions) <compute_test_metric>
train.sum() [1:].sort_values()
Conway's Reverse Game of Life 2020
11,614,110
print(confusion_matrix(y_test, predictions))<compute_test_metric>
gs=train[:1][[col for col in train.columns if 'start' in col]].values.reshape(-1,1 )
Conway's Reverse Game of Life 2020
11,614,110
accuracy=(( 88+50)/(88+50+22+19)) print('accuracy is: ',(round(accuracy, 2)*100))<train_model>
%%cython c @cython.cdivision(True) @cython.boundscheck(False) @cython.nonecheck(False) @cython.wraparound(False) cdef int calc_neighs(unsigned char[:, :] field, int i, int j, int n, int k): cdef: int neighs = 0; int i_min = i - 1; int i_pl = i + 1; int j_min = j - 1; int j_pl = j + 1; neighs = 0 if i_min >= 0: if j_min >= 0: neighs += field[i_min, j_min] neighs += field[i_min, j] if j_pl < k: neighs += field[i_min, j_pl] if j_min >= 0: neighs += field[i, j_min] if j_pl < k: neighs += field[i, j_pl] if i_pl < n: if j_min >= 0: neighs += field[i_pl, j_min] neighs += field[i_pl, j] if j_pl < k: neighs += field[i_pl, j_pl] return neighs @cython.cdivision(True) @cython.boundscheck(False) @cython.nonecheck(False) @cython.wraparound(False) cpdef make_move(unsigned char[:, :] field, int moves): cdef: int _, i, j, neighs; int n, k; int switch = 0; unsigned char[:, :] cur_field; unsigned char[:, :] next_field; cur_field = np.copy(field) next_field = np.zeros_like(field, 'uint8') n = field.shape[0] k = field.shape[1] for _ in range(moves): if switch == 0: for i in range(n): for j in range(k): neighs = calc_neighs(cur_field, i, j, n, k) if cur_field[i, j] and neighs == 2: next_field[i, j] = 1 elif neighs == 3: next_field[i, j] = 1 else: next_field[i, j] = 0 else: for i in range(n): for j in range(k): neighs = calc_neighs(next_field, i, j, n, k) if next_field[i, j] and neighs == 2: cur_field[i, j] = 1 elif neighs == 3: cur_field[i, j] = 1 else: cur_field[i, j] = 0 switch =(switch + 1)% 2 return np.array(next_field if switch else cur_field )
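The flattened %%cython cell above implements a bounded (non-wrapping) Game of Life forward simulator: calc_neighs counts live neighbours with explicit boundary checks and make_move advances the board a given number of steps. As a readability aid, a minimal pure-NumPy sketch of one equivalent forward step is given below; life_step is a name introduced here for illustration only and is not part of the original notebook.

import numpy as np

def life_step(field):
    # One Conway step on a non-wrapping grid, mirroring the rules in make_move:
    # a live cell with 2 neighbours survives, any cell with exactly 3 neighbours becomes live.
    padded = np.pad(field, 1, mode='constant')
    neighs = sum(padded[i:i + field.shape[0], j:j + field.shape[1]]
                 for i in range(3) for j in range(3)) - field
    return ((neighs == 3) | ((field == 1) & (neighs == 2))).astype('uint8')

board = np.random.binomial(1, 0.5, (25, 25)).astype('uint8')
print(board.sum(), life_step(board).sum())  # live-cell counts before and after one step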
Conway's Reverse Game of Life 2020
11,614,110
randomforest = RandomForestClassifier(random_state = 5, criterion = 'gini', max_depth = 10, max_features = 'auto', n_estimators = 500) randomforest.fit(X_train, y_train) pred = randomforest.predict(X_test) acc_randomforest = round(accuracy_score(pred, y_test)* 100, 2) print(acc_randomforest) <train_model>
NROW, NCOL = 25, 25 def generate_samples(delta=1, n=32): batch = np.split(np.random.binomial(1, 0.5,(NROW * n, NCOL)).astype('uint8'), n) Yy = [life.make_move(state, 5)for state in batch] Xx = [life.make_move(state, 1)for state in Yy] Y = np.array([y.ravel() for y in Yy]) X = np.array([x.ravel() for x in Xx]) return X, Y def data_generator(delta=1, batch_size=32, ravel=True): while True: batch = np.split(np.random.binomial(1, 0.5,(NROW * batch_size, NCOL)).astype('uint8'), batch_size) Yy = [make_move(state, 5)for state in batch] Xx = [make_move(state, delta)for state in Yy] if ravel: Y = np.array([y.ravel() for y in Yy]) X = np.array([x.ravel() for x in Xx]) yield X, Y else: yield np.array(Xx)[:,:, :, np.newaxis], np.array(Yy)[:, :, :, np.newaxis]
Conway's Reverse Game of Life 2020
11,614,110
gbk = GradientBoostingClassifier() gbk.fit(X_train, y_train) pred = gbk.predict(X_test) acc_gbk = round(accuracy_score(pred, y_test)* 100, 2) print(acc_gbk )<create_dataframe>
def create_model(n_hidden_convs=2, n_hidden_filters=128, kernel_size=5): nn = Sequential() nn.add(Conv2D(n_hidden_filters, kernel_size, padding='same', activation='relu', input_shape=(25, 25, 1))) nn.add(BatchNormalization()) for i in range(n_hidden_convs): nn.add(Conv2D(n_hidden_filters, kernel_size, padding='same', activation='relu')) nn.add(BatchNormalization()) nn.add(Conv2D(1, kernel_size, padding='same', activation='sigmoid')) nn.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) return nn
Conway's Reverse Game of Life 2020
11,614,110
see={'TECHNIQUE':['RANDOM FOREST','LOGISTIC REGRESSION','GRADIENT BOOSTING'],'ACCURACY':[acc_randomforest,acc_LOG,acc_gbk]} mod=pd.DataFrame(see) mod<save_to_csv>
models = [] for delta in range(1, 6): model = create_model(n_hidden_convs=6, n_hidden_filters=256) es = EarlyStopping(monitor='loss', patience=9, min_delta=0.001) model.fit_generator(data_generator(delta=delta, ravel=False), steps_per_epoch=500, epochs=50, verbose=1, callbacks=[es]) models.append(model )
Conway's Reverse Game of Life 2020
11,614,110
ids = test['PassengerId'] predictions = randomforest.predict(test.drop('PassengerId', axis=1)) output = pd.DataFrame({ 'PassengerId' : ids, 'Survived': predictions }) output.to_csv('submission.csv', index=False )<set_options>
test = pd.read_csv('../input/conways-reverse-game-of-life-2020/test.csv', index_col='id')
Conway's Reverse Game of Life 2020
11,614,110
np.random.seed(1234 )<load_from_csv>
submit_df = pd.DataFrame(index=test.index, columns=['start_' + str(i)for i in range(625)] )
Conway's Reverse Game of Life 2020
11,614,110
dataset = pd.read_csv('../input/titanic/train.csv') test = pd.read_csv('../input/titanic/test.csv') y = dataset['Survived'].values dataset.head()<data_type_conversions>
for delta in range(1, 6): mod = models[delta-1] delta_df = test[test.delta == delta].iloc[:, 1:].values.reshape(-1, 25, 25, 1) submit_df[test.delta == delta] = mod.predict(delta_df ).reshape(-1, 625 ).round(0 ).astype('uint8' )
Conway's Reverse Game of Life 2020
11,614,110
<feature_engineering><EOS>
submit_df.to_csv('submission.csv' )
Conway's Reverse Game of Life 2020
14,018,577
<SOS> metric: custom metric Kaggle data source: nfl-impact-detection<train_on_grid>
!tar xfz ../input/nfl-lib/pkgs.tgz
NFL 1st and Future - Impact Detection
14,018,577
model = KNeighborsClassifier() hyperparameters = { "n_neighbors" : range(1,20,2), 'weights' : ['uniform', 'distance'], 'p' : [1, 2] } grid = GridSearchCV(model, param_grid=hyperparameters, cv=10) grid.fit(X, y) best_params = grid.best_params_ best_score = grid.best_score_ knn = grid.best_estimator_ y_pred = knn.predict(X_test) print(grid.best_params_) print(grid.best_estimator_) print(grid.best_score_ )<train_model>
warnings.filterwarnings("ignore" )
NFL 1st and Future - Impact Detection
14,018,577
perm = PermutationImportance(knn, random_state=1 ).fit(X_test, y_test) eli5.show_weights(perm, feature_names = X_test.columns.tolist() )<compute_train_metric>
d = pd.read_csv('../input/nfl-impact-detection/test_player_tracking.csv') IS_PRIVATE = d.shape !=(19269, 12) IS_PRIVATE = True
NFL 1st and Future - Impact Detection
14,018,577
dt = DecisionTreeClassifier() dt.fit(X_train, y_train) y_pred = dt.predict(X_test) accuracy_score(y_pred, y_test) <save_to_csv>
DATA_ROOT_PATH = 'test_images'
NFL 1st and Future - Impact Detection
14,018,577
dot_data = tree.export_graphviz(dt, out_file=None) graph = graphviz.Source(dot_data) graph.render("iris") dot_data = tree.export_graphviz(dt, out_file=None, feature_names=X_train.columns, class_names=['Survived', 'Not Survived'], filled=True, rounded=True, special_characters=True) graph = graphviz.Source(dot_data) graph<train_on_grid>
import argparse from utils.datasets import * from utils.general import non_max_suppression from utils.general import * from utils import torch_utils from utils.plots import plot_one_box
NFL 1st and Future - Impact Detection
14,018,577
hyperparameters = {"criterion": ["entropy", "gini"], "max_depth": [3, 5, 7, 10], "max_features": ["log2", "sqrt", 'auto'], 'min_samples_leaf' : [2, 3, 4, 5], 'min_samples_split' : [2, 3, 4, 5] } grid = GridSearchCV(dt, param_grid=hyperparameters, cv=10) grid.fit(X, y) best_params = grid.best_params_ best_score = grid.best_score_ dt = grid.best_estimator_ y_pred = dt.predict(X_test) print(grid.best_params_) print(grid.best_score_) plt.figure(figsize=(3,3)) sns.heatmap(confusion_matrix(y_pred, y_test), annot=True, cbar=False, fmt='1d', cmap='Blues') perm = PermutationImportance(dt, random_state=1 ).fit(X_test, y_test) eli5.show_weights(perm, feature_names = X_test.columns.tolist() )<train_on_grid>
names = ['0','1','2'] def detect(save_img=False): weights, imgsz = opt.weights,opt.img_size source = 'test_images/' device = torch_utils.select_device(opt.device) half = False model = torch.load(weights, map_location=device)['model'].to(device ).float().eval() dataset = LoadImages(source, img_size=opt.img_size) t0 = time.time() img = torch.zeros(( 1, 3, imgsz, imgsz), device=device) all_path=[] all_bboxex =[] all_score =[] all_c = [] for path, img, im0s, vid_cap in tqdm(dataset): print(im0s.shape) img = torch.from_numpy(img ).to(device) img = img.float() img /= 255.0 if img.ndimension() == 3: img = img.unsqueeze(0) t1 = torch_utils.time_synchronized() bboxes_2 = [] score_2 = [] c_2 = [] if True: pred = model(img, augment=opt.augment)[0] pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=None, agnostic=False) t2 = torch_utils.time_synchronized() flag=False bboxes = [] score = [] cc = [] for i, det in enumerate(pred): p, s, im0 = path, '', im0s gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] if det is not None and len(det): det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape ).round() for c in det[:, -1].unique() : n =(det[:, -1] == c ).sum() for *xyxy, conf, cls in det: xywh = torch.tensor(xyxy ).view(-1 ).numpy() bboxes.append(xywh) score.append(conf.cpu().numpy()) cc.append(cls.cpu().numpy()) bboxes_2.append(bboxes) score_2.append(score) c_2.append(cc) all_path.append(path) all_score.append(score_2) all_bboxex.append(bboxes_2) all_c.append(c_2) return all_path,all_score,all_bboxex,all_c if __name__ == '__main__': class opt: weights = ".. /input/yolov5temp1280/best(2 ).pt" img_size = 1280 conf_thres = 0.1 iou_thres = 0.3 augment = False device = '0' classes=None agnostic_nms = True opt.img_size = check_img_size(opt.img_size) print(opt) with torch.no_grad() : res = detect()
NFL 1st and Future - Impact Detection
14,018,577
model = RandomForestClassifier() hyperparameters = {"criterion": ["entropy", "gini"], "max_depth": [5, 10], "max_features": ["log2", "sqrt"], 'min_samples_leaf' : [2, 3, 4, 5], 'min_samples_split' : [2, 3, 4, 5], "n_estimators": [6, 9] } grid = GridSearchCV(model, param_grid=hyperparameters, cv=10) grid.fit(X, y) best_params = grid.best_params_ best_score = grid.best_score_ rf = grid.best_estimator_ y_pred = rf.predict(X_test) print(grid.best_params_) print(grid.best_score_) plt.figure(figsize=(3,3)) sns.heatmap(confusion_matrix(y_pred, y_test), annot=True, cbar=False, fmt='1d', cmap='Blues') perm = PermutationImportance(rf, random_state=1 ).fit(X_test, y_test) eli5.show_weights(perm, feature_names = X_test.columns.tolist() )<save_to_csv>
%matplotlib inline all_path,all_score,all_bboxex,all_labels = res results =[] results_boxes =[] results_scores = [] results_labels = [] result_image_ids =[] for row in range(len(all_path)) : image_id = all_path[row].split("/")[-1] boxes = np.array(all_bboxex[row])[0] scores = np.array(all_score[row])[0] labels = np.array(all_labels[row])[0] if len(boxes)==0: continue boxes[:, 2] = boxes[:, 2] - boxes[:, 0] boxes[:, 3] = boxes[:, 3] - boxes[:, 1] boxes = boxes.astype(int) if len(boxes)>0: result_image_ids += [image_id]*len(boxes) results_boxes.append(boxes) results_scores.append(scores) results_labels.append(labels) idx = row idx = 0 size = 300 idx =-1 font = cv2.FONT_HERSHEY_SIMPLEX image = image = cv2.imread(all_path[idx], cv2.IMREAD_COLOR) fontScale = 1 boxes = results_boxes[idx] scores = results_scores[idx] label = results_labels[idx] color =(255, 0, 0) thickness = 2 for b,s,l in zip(boxes,scores,label): color =(255,0,0)if int(l)==2 else(0,0,255) image = cv2.rectangle(image,(b[0],b[1]),(b[0]+b[2],b[1]+b[3]), color, 1) image = cv2.putText(image, '{:.2}'.format(s),(int(b[0]),int(b[1])) , font, fontScale, color, thickness, cv2.LINE_AA) plt.figure(figsize=[20,20]) plt.imshow(image[:,:,::-1]) plt.show()
NFL 1st and Future - Impact Detection
14,018,577
holdout_ids = test["PassengerId"] holdout_features = test[features] holdout_predictions = lr.predict(holdout_features) submission = pd.DataFrame({"PassengerId": holdout_ids, "Survived": holdout_predictions}) print(submission.head()) submission.to_csv("submission.csv",index=False )<load_from_csv>
box_df = pd.DataFrame(np.concatenate(results_boxes), columns=['left', 'top', 'width', 'height']) score_df = pd.DataFrame({'scores':np.concatenate(results_scores),'labels':np.concatenate(results_labels), 'image_name':result_image_ids}) test_df = pd.concat([box_df, score_df], axis=1 )
NFL 1st and Future - Impact Detection
14,018,577
for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) train = pd.read_csv('../input/titanic/train.csv') test = pd.read_csv('../input/titanic/test.csv') print('Shape of train dataset: {}'.format(train.shape)) print('Shape of test dataset: {}'.format(test.shape))<concatenate>
test_df['gameKey'] = test_df.image_name.str.split('_' ).str[0].astype(int) test_df['playID'] = test_df.image_name.str.split('_' ).str[1].astype(int) test_df['view'] = test_df.image_name.str.split('_' ).str[2] test_df['frame'] = test_df.image_name.str.split('_' ).str[3].str.replace('.png','' ).astype(int) test_df['video'] = test_df.image_name.str.rsplit('_',1 ).str[0] + '.mp4'
NFL 1st and Future - Impact Detection
14,018,577
train['Type'] = 'train' test['Type'] = 'test' all = pd.concat([train, test], sort=False ).reset_index(drop=True) print('Shape of all dataset: {}'.format(all.shape))<count_missing_values>
test_df.to_csv('off.csv',index=False )
NFL 1st and Future - Impact Detection
14,018,577
print(all.isnull().values.any()) train.isnull().sum()<filter>
import numpy as np import pandas as pd from scipy.optimize import linear_sum_assignment
NFL 1st and Future - Impact Detection
14,018,577
all_corr[all_corr['level_0'] == 'Age']<groupby>
SEED = 42 FOLDS = 5 FOLD = 2 EFF_DET = 5 Threshold = 0.45 LOOK_BACKWARD_LIMIT = 10 IOU_THRESHOLD = 0.4 IMAGE_SIZE = 512 NUM_CLASSES = 2 BATCH_SIZE = 16 FRAME_THRESHOLD = 120 keep_columns_threshold = 20
NFL 1st and Future - Impact Detection
14,018,577
all['Age'] = all.groupby(['Pclass', 'SibSp'])['Age'].apply(lambda x: x.fillna(x.median()))<feature_engineering>
def iou(bbox1, bbox2): bbox1 = [float(x)for x in bbox1] bbox2 = [float(x)for x in bbox2] (x0_1, y0_1, x1_1, y1_1)= bbox1 (x0_2, y0_2, x1_2, y1_2)= bbox2 overlap_x0 = max(x0_1, x0_2) overlap_y0 = max(y0_1, y0_2) overlap_x1 = min(x1_1, x1_2) overlap_y1 = min(y1_1, y1_2) if overlap_x1 - overlap_x0 <= 0 or overlap_y1 - overlap_y0 <= 0: return 0 size_1 =(x1_1 - x0_1)*(y1_1 - y0_1) size_2 =(x1_2 - x0_2)*(y1_2 - y0_2) size_intersection =(overlap_x1 - overlap_x0)*(overlap_y1 - overlap_y0) size_union = size_1 + size_2 - size_intersection return size_intersection / size_union def precision_calc(gt_boxes, pred_boxes): cost_matix = np.ones(( len(gt_boxes), len(pred_boxes))) for i, box1 in enumerate(gt_boxes): for j, box2 in enumerate(pred_boxes): dist = abs(box1[0]-box2[0]) if dist > 4: continue iou_score = iou(box1[1:], box2[1:]) if iou_score < 0.35: continue else: cost_matix[i,j]=0 row_ind, col_ind = linear_sum_assignment(cost_matix) fn = len(gt_boxes)- row_ind.shape[0] fp = len(pred_boxes)- col_ind.shape[0] tp=0 for i, j in zip(row_ind, col_ind): if cost_matix[i,j]==0: tp+=1 else: fp+=1 fn+=1 return tp, fp, fn
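The cell above pairs a standard IoU with Hungarian matching (scipy's linear_sum_assignment) to count true/false positives under a tolerance of at most 4 frames and IoU >= 0.35. A small illustrative call is sketched below; the toy boxes are invented for this example, and it assumes the cell above has been run so iou and precision_calc are in scope.

# Boxes are [frame, left, top, right, bottom]; pairs more than 4 frames apart
# or with IoU below 0.35 can never be matched.
gt    = [[10, 100, 100, 150, 150], [30, 200, 200, 260, 260]]
preds = [[12, 105, 102, 155, 148], [80, 400, 400, 460, 460]]
tp, fp, fn = precision_calc(gt, preds)
print(tp, fp, fn)  # 1 matched pair, 1 spurious prediction, 1 missed ground-truth box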
NFL 1st and Future - Impact Detection
14,018,577
print(all_corr[all_corr['level_0'] == 'Fare']) all['Fare'] = all.groupby(['Pclass', 'Parch', 'SibSp'])['Fare'].apply(lambda x: x.fillna(x.median()))<count_values>
def filter_duplicates_and_threshols(row): boxes,scores,labels = row.box_predictions,row.score_list,row.label_list filtered_boxes = [] filtered_scores = [] filtered_labels = [] for box,score,label in zip(boxes,scores,labels): if score> Threshold: add_box = True for i,ref_box in enumerate(filtered_boxes): if iou(box,ref_box)>0.5: add_box = False if label==2:filtered_labels[i]=2 if add_box: filtered_boxes.append(box) filtered_scores.append(score) filtered_labels.append(label) row.box_predictions,row.score_list,row.label_list = filtered_boxes,filtered_scores,filtered_labels return row def filter_duplicates_and_low_scores(row): boxes,scores,labels = row.box_predictions,row.score_list,row.label_list ordered_sample_predictions = sorted(list(zip(boxes,scores,labels)) , key = lambda x:x[1], reverse = True) filtered_boxes = [] filtered_scores = [] filtered_labels = [] for box,score,label in ordered_sample_predictions: if score>Threshold: add_box = True for i,ref_box in enumerate(filtered_boxes): if iou(box,ref_box)>0.5: add_box = False if label==2:filtered_labels[i]=2 if add_box: filtered_boxes.append(box) filtered_scores.append(score) filtered_labels.append(label) row.box_predictions,row.score_list,row.label_list = filtered_boxes,filtered_scores,filtered_labels return row
NFL 1st and Future - Impact Detection
14,018,577
all['Cabin'].value_counts()<data_type_conversions>
video_labels = pd.read_csv('off.csv' )
NFL 1st and Future - Impact Detection
14,018,577
all['Cabin'] = all['Cabin'].fillna('N' )<count_values>
video_labels = video_labels.sort_values(by=['video','frame'] ).reset_index(drop=True) video_labels = video_labels[video_labels.frame>0] video_labels['image'] = video_labels['video'].str.replace('.mp4', '')+ '_' + video_labels['frame'].astype(str)+ '.png' video_labels['right'] = video_labels['left'] + video_labels['width'] video_labels['bottom'] = video_labels['top'] + video_labels['height'] video_labels['label_list'] = video_labels['labels'].apply(lambda x:[x]) video_labels['bbox'] = pd.Series(video_labels[['left','top','right','bottom']].values.tolist() , index=video_labels.index) video_labels['box_predictions'] = video_labels['bbox'].apply(lambda x:[x]) video_labels['score_list'] = video_labels['scores'].apply(lambda x:[x]) base_cols = ['video','image','gameKey','frame'] target_cols = ['box_predictions','label_list','score_list'] predictions = video_labels[base_cols+target_cols].groupby(base_cols ).sum().reset_index() predictions = predictions.apply(filter_duplicates_and_low_scores,axis=1) predictions['n_impact'] = predictions.label_list.apply(lambda x:(np.array(x)==2 ).sum()) predictions.groupby('video' ).n_impact.sum().describe()
NFL 1st and Future - Impact Detection
14,018,577
print(all['Embarked'].value_counts()) all['Embarked'] = all['Embarked'].fillna('S' )<count_missing_values>
def iou(bbox1, bbox2): bbox1 = [float(x)for x in bbox1] bbox2 = [float(x)for x in bbox2] (x0_1, y0_1, x1_1, y1_1)= bbox1 (x0_2, y0_2, x1_2, y1_2)= bbox2 overlap_x0 = max(x0_1, x0_2) overlap_y0 = max(y0_1, y0_2) overlap_x1 = min(x1_1, x1_2) overlap_y1 = min(y1_1, y1_2) if overlap_x1 - overlap_x0 <= 0 or overlap_y1 - overlap_y0 <= 0: return 0 size_1 =(x1_1 - x0_1)*(y1_1 - y0_1) size_2 =(x1_2 - x0_2)*(y1_2 - y0_2) size_intersection =(overlap_x1 - overlap_x0)*(overlap_y1 - overlap_y0) size_union = size_1 + size_2 - size_intersection return size_intersection / size_union def match_boxes(bbox_set1,bbox_set2,diagonal=True,iou_threshold=0.35): all_pairs = {} relation = [None for x in bbox_set2] done1 = {} done2 = {} for i,bbox1 in enumerate(bbox_set1): for j,bbox2 in enumerate(bbox_set2): if i!=j or diagonal: all_pairs[(i,j)] = iou(bbox1,bbox2) for(index1,index2),iou_score in sorted(all_pairs.items() , key=lambda item: item[1],reverse=True): if iou_score<iou_threshold: return relation if(index1 not in done1)and(index2 not in done2): relation[index2] = index1 done1[index1] = 1 done2[index2] = 1 return relation def map_classes(row): mapping = {} for box,pred in zip(row.box_predictions,row.label_list): mapping[tuple(box)] = pred return mapping def assign_players(df,video,LOOK_BACKWARD_LIMIT=LOOK_BACKWARD_LIMIT,IOU_THRESHOLD=IOU_THRESHOLD): video_df = df[df.video==video].sort_values(by='frame' ).set_index('frame') row_to_class_map = video_df['row_to_class_map'] row_to_class_map.apply(lambda x:x.update({() :-1})) player_to_bbox = pd.DataFrame() bbox_to_player = {k+1:{} for k in range(video_df.shape[0])} N_PLAYERS = 0 bboxes = video_df.loc[1,'box_predictions'] for player_id,box in enumerate(bboxes): player_to_bbox[player_id] = None player_to_bbox.at[1,player_id] = box bbox_to_player[1][tuple(box)] = player_id N_PLAYERS += 1 for frame in range(2,video_df.shape[0]+1): player_to_bbox.loc[frame] = None bboxes = video_df.loc[frame,'box_predictions'] assigned = {} look_backward = 1 while(look_backward<min(frame,LOOK_BACKWARD_LIMIT+1)and len(bboxes)>0): ref_bbox = [] for box in video_df.loc[frame-look_backward,'box_predictions']: if bbox_to_player[frame-look_backward][tuple(box)] not in assigned: ref_bbox.append(box) relation = match_boxes(ref_bbox,bboxes,iou_threshold=IOU_THRESHOLD) left_out_bboxes = [] for index,ref_index in enumerate(relation): if ref_index is not None: player_id = bbox_to_player[frame-look_backward][tuple(ref_bbox[ref_index])] box = bboxes[index] player_to_bbox.at[frame,player_id] = box bbox_to_player[frame][tuple(box)] = player_id assigned[player_id] = True else: left_out_bboxes.append(bboxes[index]) bboxes = left_out_bboxes look_backward +=1 for box in bboxes: player_id = N_PLAYERS player_to_bbox[player_id] = None player_to_bbox.at[frame,player_id] = box bbox_to_player[frame][tuple(box)] = player_id N_PLAYERS += 1 player_to_class_map = player_to_bbox.apply(lambda row: row.fillna('' ).apply(tuple ).apply( lambda x:row_to_class_map.loc[row.name][x]),axis=1) return player_to_bbox,player_to_class_map def filter_impacts(player_series): impacted_player_series = pd.DataFrame(player_series.loc[player_series==2]) impacted_player_series['indices'] = impacted_player_series.index start_indices = impacted_player_series[impacted_player_series.indices.diff(1 ).fillna(99 ).abs() >9].indices.tolist() end_indices = impacted_player_series[impacted_player_series.indices.diff(-1 ).fillna(99 ).abs() >9].indices.tolist() output_series = player_series.copy(deep=True) output_series[player_series>0] = 1 
output_series[player_series<0] = -1 for start_index,end_index in zip(start_indices,end_indices): difference = end_index-start_index+1 n_medians = math.ceil(difference/9) difference_split = difference/(n_medians+1) centres = [int(start_index+(i+1)*difference_split)for i in range(n_medians)] output_series.loc[centres] = 2 return output_series
NFL 1st and Future - Impact Detection
14,018,577
all.isnull().values.any()<string_transform>
submission_df = pd.DataFrame(columns=['gameKey','playID','view','video','frame','left','width','top','height']) predictions['row_to_class_map'] = predictions.apply(map_classes,axis=1) entries = 0 for video in predictions.video.unique() : assigned_players_boxes,assigned_players_classes = assign_players(predictions,video) keep_columns = assigned_players_classes.columns[(assigned_players_classes!=-1 ).sum() >keep_columns_threshold] assigned_players_boxes = assigned_players_boxes[keep_columns] assigned_players_classes = assigned_players_classes[keep_columns] print(assigned_players_boxes.shape) assigned_players_classes_filtered = assigned_players_classes.apply(filter_impacts) n_filtered =(assigned_players_classes_filtered==2 ).sum().sum() n_classes =(assigned_players_classes==2 ).sum().sum() print(video,n_filtered,n_classes,(~assigned_players_boxes.isna() ).sum().sum()) all_bboxes = assigned_players_boxes.fillna(method='ffill' ).values.reshape(-1) all_classes = assigned_players_classes_filtered.values.reshape(-1) all_frames = assigned_players_boxes.apply(lambda x: pd.DataFrame(x ).apply( lambda y: y.name, axis=1)).values.reshape(-1) bboxes_with_impact = all_bboxes[np.where(all_classes==2)] frames_with_impact = all_frames[np.where(all_classes==2)] gameKey,playID,view,_ = video.replace('.','_' ).split('_') for frame,box in zip(frames_with_impact,bboxes_with_impact): submission_df.loc[entries,'gameKey'] = gameKey submission_df.loc[entries,'playID'] = playID submission_df.loc[entries,'view'] = view submission_df.loc[entries,'video'] = video submission_df.loc[entries,'frame'] = frame submission_df.loc[entries,'left'] = box[0] submission_df.loc[entries,'top'] = box[1] submission_df.loc[entries,'width'] = box[2]-box[0] submission_df.loc[entries,'height'] = box[3]-box[1] entries += 1 submission_df = submission_df[(submission_df.frame<FRAME_THRESHOLD)&(submission_df.frame>20)]
NFL 1st and Future - Impact Detection
14,018,577
def extract_surname(data): families = [] for i in range(len(data)) : name = data.iloc[i] if '(' in name: name_no_bracket = name.split('(')[0] else: name_no_bracket = name family = name_no_bracket.split(',')[0] title = name_no_bracket.split(',')[1].strip().split(' ')[0] for c in string.punctuation: family = family.replace(c, '' ).strip() families.append(family) return families print(all['Name'].value_counts()) all['Title'] = all['Name'].str.split(', ', expand=True)[1].str.split('.', expand=True)[0] all['Family'] = extract_surname(all['Name']) all['IsMarried'] = np.where(all['Title'] == 'Mrs', 1, 0) all.drop(['Name'], inplace=True, axis=1 )<feature_engineering>
submission_df1 = submission_df.copy(deep=True )
NFL 1st and Future - Impact Detection
14,018,577
all['FamilySize'] = all['SibSp'] + all['Parch'] + 1<categorify>
SEED = 42 FOLDS = 5 FOLD = 2 EFF_DET = 5 Threshold = 0.65 LOOK_BACKWARD_LIMIT = 10 IOU_THRESHOLD = 0.4 IMAGE_SIZE = 512 NUM_CLASSES = 2 BATCH_SIZE = 16 FRAME_THRESHOLD = 240 keep_columns_threshold = 20 video_labels = pd.read_csv('off.csv') video_labels = video_labels.sort_values(by=['video','frame'] ).reset_index(drop=True) video_labels = video_labels[video_labels.frame>0] video_labels['image'] = video_labels['video'].str.replace('.mp4', '')+ '_' + video_labels['frame'].astype(str)+ '.png' video_labels['right'] = video_labels['left'] + video_labels['width'] video_labels['bottom'] = video_labels['top'] + video_labels['height'] video_labels['label_list'] = video_labels['labels'].apply(lambda x:[x]) video_labels['bbox'] = pd.Series(video_labels[['left','top','right','bottom']].values.tolist() , index=video_labels.index) video_labels['box_predictions'] = video_labels['bbox'].apply(lambda x:[x]) video_labels['score_list'] = video_labels['scores'].apply(lambda x:[x]) base_cols = ['video','image','gameKey','frame'] target_cols = ['box_predictions','label_list','score_list'] predictions = video_labels[base_cols+target_cols].groupby(base_cols ).sum().reset_index() predictions = predictions.apply(filter_duplicates_and_low_scores,axis=1) predictions['n_impact'] = predictions.label_list.apply(lambda x:(np.array(x)==2 ).sum()) predictions.groupby('video' ).n_impact.sum().describe() submission_df = pd.DataFrame(columns=['gameKey','playID','view','video','frame','left','width','top','height']) predictions['row_to_class_map'] = predictions.apply(map_classes,axis=1) entries = 0 for video in predictions.video.unique() : assigned_players_boxes,assigned_players_classes = assign_players(predictions,video) keep_columns = assigned_players_classes.columns[(assigned_players_classes!=-1 ).sum() >keep_columns_threshold] assigned_players_boxes = assigned_players_boxes[keep_columns] assigned_players_classes = assigned_players_classes[keep_columns] print(assigned_players_boxes.shape) assigned_players_classes_filtered = assigned_players_classes.apply(filter_impacts) n_filtered =(assigned_players_classes_filtered==2 ).sum().sum() n_classes =(assigned_players_classes==2 ).sum().sum() print(video,n_filtered,n_classes,(~assigned_players_boxes.isna() ).sum().sum()) all_bboxes = assigned_players_boxes.fillna(method='ffill' ).values.reshape(-1) all_classes = assigned_players_classes_filtered.values.reshape(-1) all_frames = assigned_players_boxes.apply(lambda x: pd.DataFrame(x ).apply( lambda y: y.name, axis=1)).values.reshape(-1) bboxes_with_impact = all_bboxes[np.where(all_classes==2)] frames_with_impact = all_frames[np.where(all_classes==2)] gameKey,playID,view,_ = video.replace('.','_' ).split('_') for frame,box in zip(frames_with_impact,bboxes_with_impact): submission_df.loc[entries,'gameKey'] = gameKey submission_df.loc[entries,'playID'] = playID submission_df.loc[entries,'view'] = view submission_df.loc[entries,'video'] = video submission_df.loc[entries,'frame'] = frame submission_df.loc[entries,'left'] = box[0] submission_df.loc[entries,'top'] = box[1] submission_df.loc[entries,'width'] = box[2]-box[0] submission_df.loc[entries,'height'] = box[3]-box[1] entries += 1 submission_df = submission_df[(submission_df.frame<FRAME_THRESHOLD)&(submission_df.frame>120)]
NFL 1st and Future - Impact Detection
14,018,577
family_map = {1: 'Alone', 2: 'Small', 3: 'Small', 4: 'Small', 5: 'Medium', 6: 'Medium', 7: 'Large', 8: 'Large', 11: 'Large'} all['FamilySizeGrouped'] = all['FamilySize'].map(family_map )<feature_engineering>
submission_df2 = submission_df.copy(deep=True )
NFL 1st and Future - Impact Detection
14,018,577
all['Ticket_count'] = all.Ticket.apply(lambda x: all[all['Ticket']==x].shape[0] )<feature_engineering>
submission_df = pd.concat([submission_df1,submission_df2] )
NFL 1st and Future - Impact Detection
14,018,577
all.loc[(all['Sex'] == 'male')&(all['Age'] > 18.0), 'Sex'] = 0 all.loc[(all['Sex'] == 'male')&(all['Age'] <= 18.0), 'Sex'] = 1 all.loc[all['Sex'] == 'female', 'Sex'] = 2<feature_engineering>
env = nflimpact.make_env()
NFL 1st and Future - Impact Detection
14,018,577
<count_values><EOS>
if IS_PRIVATE: env.predict(submission_df) else: sub = pd.read_csv('../input/nfl-impact-detection/sample_submission.csv') env.predict(sub)
NFL 1st and Future - Impact Detection
10,378,659
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<predict_on_test>
!pip install -q efficientnet
Petals to the Metal - Flower Classification on TPU
10,378,659
stack_ypred = stacking_clf.predict(dval_X) print('Stacking classifier accuracy score is.... ',accuracy_score(val_Y,stack_ypred))<predict_on_test>
print('TensorFlow version', tf.__version__) AUTO = tf.data.experimental.AUTOTUNE
Petals to the Metal - Flower Classification on TPU
10,378,659
prediction_stacking_clf = stacking_clf.predict(X_test )<save_to_csv>
start_time = datetime.now() print('Time now is', start_time) end_training_by_tdelta = timedelta(seconds=8400) this_run_file_prefix = start_time.strftime('%Y%m%d_%H%M_') print(this_run_file_prefix) IMAGE_SIZE = [224, 224] EPOCHS = 12 BATCH_SIZE = 16 * strategy.num_replicas_in_sync GCS_PATH_SELECT = { 192: GCS_DS_PATH + '/tfrecords-jpeg-192x192', 224: GCS_DS_PATH + '/tfrecords-jpeg-224x224', 331: GCS_DS_PATH + '/tfrecords-jpeg-331x331', 512: GCS_DS_PATH + '/tfrecords-jpeg-512x512' } GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]] TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec') VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec') TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec') MOREIMAGES_PATH_SELECT = { 192: '/tfrecords-jpeg-192x192', 224: '/tfrecords-jpeg-224x224', 331: '/tfrecords-jpeg-331x331', 512: '/tfrecords-jpeg-512x512' } MOREIMAGES_PATH = MOREIMAGES_PATH_SELECT[IMAGE_SIZE[0]] IMAGENET_FILES = tf.io.gfile.glob(MORE_IMAGES_GCS_DS_PATH + '/imagenet' + MOREIMAGES_PATH + '/*.tfrec') INATURELIST_FILES = tf.io.gfile.glob(MORE_IMAGES_GCS_DS_PATH + '/inaturalist' + MOREIMAGES_PATH + '/*.tfrec') OPENIMAGE_FILES = tf.io.gfile.glob(MORE_IMAGES_GCS_DS_PATH + '/openimage' + MOREIMAGES_PATH + '/*.tfrec') OXFORD_FILES = tf.io.gfile.glob(MORE_IMAGES_GCS_DS_PATH + '/oxford_102' + MOREIMAGES_PATH + '/*.tfrec') TENSORFLOW_FILES = tf.io.gfile.glob(MORE_IMAGES_GCS_DS_PATH + '/tf_flowers' + MOREIMAGES_PATH + '/*.tfrec') ADDITIONAL_TRAINING_FILENAMES = IMAGENET_FILES + INATURELIST_FILES + OPENIMAGE_FILES + OXFORD_FILES + TENSORFLOW_FILES print('----') TRAINING_FILENAMES = TRAINING_FILENAMES + ADDITIONAL_TRAINING_FILENAMES + VALIDATION_FILENAMES CLASSES = ['pink primrose', 'hard-leaved pocket orchid', 'canterbury bells', 'sweet pea', 'wild geranium', 'tiger lily', 'moon orchid', 'bird of paradise', 'monkshood', 'globe thistle', 'snapdragon', "colt's foot", 'king protea', 'spear thistle', 'yellow iris', 'globe-flower', 'purple coneflower', 'peruvian lily', 'balloon flower', 'giant white arum lily', 'fire lily', 'pincushion flower', 'fritillary', 'red ginger', 'grape hyacinth', 'corn poppy', 'prince of wales feathers', 'stemless gentian', 'artichoke', 'sweet william', 'carnation', 'garden phlox', 'love in the mist', 'cosmos', 'alpine sea holly', 'ruby-lipped cattleya', 'cape flower', 'great masterwort', 'siam tulip', 'lenten rose', 'barberton daisy', 'daffodil', 'sword lily', 'poinsettia', 'bolero deep blue', 'wallflower', 'marigold', 'buttercup', 'daisy', 'common dandelion', 'petunia', 'wild pansy', 'primula', 'sunflower', 'lilac hibiscus', 'bishop of llandaff', 'gaura', 'geranium', 'orange dahlia', 'pink-yellow dahlia', 'cautleya spicata', 'japanese anemone', 'black-eyed susan', 'silverbush', 'californian poppy', 'osteospermum', 'spring crocus', 'iris', 'windflower', 'tree poppy', 'gazania', 'azalea', 'water lily', 'rose', 'thorn apple', 'morning glory', 'passion flower', 'lotus', 'toad lily', 'anthurium', 'frangipani', 'clematis', 'hibiscus', 'columbine', 'desert-rose', 'tree mallow', 'magnolia', 'cyclamen ', 'watercress', 'canna lily', 'hippeastrum ', 'bee balm', 'pink quill', 'foxglove', 'bougainvillea', 'camellia', 'mallow', 'mexican petunia', 'bromelia', 'blanket flower', 'trumpet creeper', 'blackberry lily', 'common tulip', 'wild rose']
Petals to the Metal - Flower Classification on TPU
10,378,659
output = pd.DataFrame({"PassengerId":passengerid , "Survived" : prediction_stacking_clf}) output.to_csv("Submission_stacking_clf.csv",index = False )<load_from_csv>
LR_START = 0.00001 LR_MAX = 0.00005 * strategy.num_replicas_in_sync LR_MIN = LR_START LR_RAMPUP_EPOCHS = 5 LR_SUSTAIN_EPOCHS = 0 LR_EXP_DECAY = 0.80 def lrfn(epoch): if epoch < LR_RAMPUP_EPOCHS: lr = LR_START +(epoch *(LR_MAX - LR_START)/ LR_RAMPUP_EPOCHS) elif epoch <(LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS): lr = LR_MAX else: lr = LR_MIN +(LR_MAX - LR_MIN)* LR_EXP_DECAY **(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) return lr lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose = True) rng = [i for i in range(30)] y = [lrfn(x)for x in rng] plt.plot(rng, y) print(y )
Petals to the Metal - Flower Classification on TPU
10,378,659
df_train = pd.read_csv('../input/train.csv') df_test = pd.read_csv('../input/test.csv') df_train.head()<prepare_output>
np.set_printoptions(threshold=15, linewidth=80) def batch_to_numpy_images_and_labels(data): images, labels = data numpy_images = images.numpy() numpy_labels = labels.numpy() if numpy_labels.dtype == object: numpy_labels = [None for _ in enumerate(numpy_images)] return numpy_images, numpy_labels def title_from_label_and_target(label, correct_label): if correct_label is None: return CLASSES[label], True correct =(label == correct_label) return "{} [{}{}{}]".format(CLASSES[label], 'OK' if correct else 'NO', u"\u2192" if not correct else '', CLASSES[correct_label] if not correct else ''), correct def display_one_flower(image, title, subplot, red=False, titlesize=16): plt.subplot(*subplot) plt.axis('off') plt.imshow(image) if len(title)> 0: plt.title(title, fontsize=int(titlesize)if not red else int(titlesize/1.2), color='red' if red else 'black', fontdict={'verticalalignment':'center'}, pad=int(titlesize/1.5)) return(subplot[0], subplot[1], subplot[2]+1) def display_batch_of_images(databatch, predictions=None): images, labels = batch_to_numpy_images_and_labels(databatch) if labels is None: labels = [None for _ in enumerate(images)] rows = int(math.sqrt(len(images))) cols = len(images)//rows FIGSIZE = 13.0 SPACING = 0.1 subplot=(rows,cols,1) if rows < cols: plt.figure(figsize=(FIGSIZE,FIGSIZE/cols*rows)) else: plt.figure(figsize=(FIGSIZE/rows*cols,FIGSIZE)) for i,(image, label)in enumerate(zip(images[:rows*cols], labels[:rows*cols])) : title = '' if label is None else CLASSES[label] correct = True if predictions is not None: title, correct = title_from_label_and_target(predictions[i], label) dynamic_titlesize = FIGSIZE*SPACING/max(rows,cols)*40+3 subplot = display_one_flower(image, title, subplot, not correct, titlesize=dynamic_titlesize) plt.tight_layout() if label is None and predictions is None: plt.subplots_adjust(wspace=0, hspace=0) else: plt.subplots_adjust(wspace=SPACING, hspace=SPACING) plt.show() def display_confusion_matrix(cmat, score, precision, recall): plt.figure(figsize=(15,15)) ax = plt.gca() ax.matshow(cmat, cmap='Reds') ax.set_xticks(range(len(CLASSES))) ax.set_xticklabels(CLASSES, fontdict={'fontsize': 7}) plt.setp(ax.get_xticklabels() , rotation=45, ha="left", rotation_mode="anchor") ax.set_yticks(range(len(CLASSES))) ax.set_yticklabels(CLASSES, fontdict={'fontsize': 7}) plt.setp(ax.get_yticklabels() , rotation=45, ha="right", rotation_mode="anchor") titlestring = "" if score is not None: titlestring += 'f1 = {:.3f} '.format(score) if precision is not None: titlestring += ' precision = {:.3f} '.format(precision) if recall is not None: titlestring += ' recall = {:.3f} '.format(recall) if len(titlestring)> 0: ax.text(101, 1, titlestring, fontdict={'fontsize': 18, 'horizontalalignment':'right', 'verticalalignment':'top', 'color':' plt.show() def display_training_curves(training, validation, title, subplot): if subplot%10==1: plt.subplots(figsize=(10,10), facecolor=' plt.tight_layout() ax = plt.subplot(subplot) ax.set_facecolor(' ax.plot(training) ax.plot(validation) ax.set_title('model '+ title) ax.set_ylabel(title) ax.set_xlabel('epoch') ax.legend(['train', 'valid.'] )
Petals to the Metal - Flower Classification on TPU
10,378,659
df_train.set_index('Id', inplace=True) df_test.set_index('Id', inplace=True )<drop_column>
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels = 3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { 'image': tf.io.FixedLenFeature([], tf.string), 'class': tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { 'image': tf.io.FixedLenFeature([], tf.string), 'id': tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum def load_dataset(filenames, labeled = True, ordered = False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads = AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls = AUTO) return dataset def data_augment(image, label): image = tf.image.random_flip_left_right(image) image = tf.image.random_saturation(image, 0, 2) return image, label def get_training_dataset() : dataset = load_dataset(TRAINING_FILENAMES, labeled = True) dataset = dataset.map(data_augment, num_parallel_calls = AUTO) dataset = dataset.repeat() dataset = dataset.shuffle(2048) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def get_validation_dataset(ordered = False): dataset = load_dataset(VALIDATION_FILENAMES, labeled = True, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset def get_test_dataset(ordered = False): dataset = load_dataset(TEST_FILENAMES, labeled = False, ordered = ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n) NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES) NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES) NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE print('Dataset: {} training images, {} validation images, {} unlabeled test images'.format(NUM_TRAINING_IMAGES, NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
Petals to the Metal - Flower Classification on TPU
10,378,659
df_train.drop(['GarageArea','1stFlrSF','TotRmsAbvGrd','2ndFlrSF'], axis=1, inplace=True) df_test.drop(['GarageArea','1stFlrSF','TotRmsAbvGrd','2ndFlrSF'], axis=1, inplace=True )<filter>
training_dataset = get_training_dataset() training_dataset = training_dataset.unbatch().batch(20) train_batch = iter(training_dataset)
Petals to the Metal - Flower Classification on TPU
10,378,659
df_train = df_train[df_train['GrLivArea']<4500]<count_missing_values>
test_dataset = get_test_dataset() test_dataset = test_dataset.unbatch().batch(20) test_batch = iter(test_dataset)
Petals to the Metal - Flower Classification on TPU
10,378,659
def check_nulls(df): percent_missing =(df.isnull().sum() * 100 / len(df)).sort_values() return round(percent_missing,2 )<count_missing_values>
test_dataset = get_test_dataset() test_dataset = test_dataset.unbatch().batch(20) test_batch = iter(test_dataset )
Petals to the Metal - Flower Classification on TPU
10,378,659
check_nulls(df_train )<count_missing_values>
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=20, restore_best_weights = True )
Petals to the Metal - Flower Classification on TPU
10,378,659
check_nulls(df_test )<define_variables>
def create_VGG16_model() : pretrained_model = tf.keras.applications.VGG16(weights = 'imagenet', include_top = False, input_shape = [*IMAGE_SIZE, 3]) pretrained_model.trainable = True model = tf.keras.Sequential([ pretrained_model, tf.keras.layers.GlobalAveragePooling2D() , tf.keras.layers.Dense(len(CLASSES), activation = 'softmax') ]) model.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics = ['sparse_categorical_accuracy']) return model
Petals to the Metal - Flower Classification on TPU
10,378,659
categorical_list = [col for col in df_train.columns if df_train[col].dtypes == object] numerical_list = [col for col in df_train.columns if df_train[col].dtypes != object] print('Categories:', categorical_list) print('Numbers:', numerical_list )<categorify>
def create_Xception_model() : pretrained_model = tf.keras.applications.Xception(include_top = False, input_shape = [*IMAGE_SIZE, 3]) pretrained_model.trainable = True model = tf.keras.Sequential([ pretrained_model, tf.keras.layers.GlobalAveragePooling2D() , tf.keras.layers.Dense(len(CLASSES), activation = 'softmax') ]) model.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics = ['sparse_categorical_accuracy']) return model
Petals to the Metal - Flower Classification on TPU
10,378,659
def fill_missing_values(df): lst = ["Alley","BsmtQual","BsmtCond","BsmtExposure","BsmtFinType1", "BsmtFinType2","Fence","FireplaceQu","GarageType","GarageFinish", "GarageQual","GarageCond","Electrical","GarageFinish","MiscFeature","MasVnrType","PoolQC"] for col in lst: df[col] = df[col].fillna("Not present") lst = ['GarageYrBlt','MasVnrArea','BsmtFinSF1','BsmtFinSF2','TotalBsmtSF', 'BsmtUnfSF','BsmtFullBath','BsmtHalfBath','MasVnrArea','GarageCars'] for col in lst: df[col] = df[col].fillna(0) lst = ['Utilities','MSZoning','Exterior1st','Exterior2nd','Electrical','KitchenQual'] for col in lst: df[col] = df[col].fillna(df[col].mode() [0]) df['Functional'] = df['Functional'].fillna('Typ') df['SaleType'] = df['SaleType'].fillna('Normal') df['LotFrontage'] = df['LotFrontage'].fillna(df.LotFrontage.mean()) df.drop('PoolQC', axis=1, inplace=True) <count_missing_values>
def create_DenseNet_model() : pretrained_model = tf.keras.applications.DenseNet201(weights = 'imagenet', include_top = False, input_shape = [*IMAGE_SIZE, 3]) pretrained_model.trainable = True model = tf.keras.Sequential([ pretrained_model, tf.keras.layers.GlobalAveragePooling2D() , tf.keras.layers.Dense(len(CLASSES), activation = 'softmax') ]) model.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics = ['sparse_categorical_accuracy']) return model
Petals to the Metal - Flower Classification on TPU
10,378,659
fill_missing_values(df_train) fill_missing_values(df_test )<count_missing_values>
def create_EfficientNet_model() : pretrained_model = efficientnet.EfficientNetB7(weights = 'noisy-student', include_top = False, input_shape = [*IMAGE_SIZE, 3]) pretrained_model.trainable = True model = tf.keras.Sequential([ pretrained_model, tf.keras.layers.GlobalAveragePooling2D() , tf.keras.layers.Dense(len(CLASSES), activation = 'softmax') ]) model.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics = ['sparse_categorical_accuracy']) return model
Petals to the Metal - Flower Classification on TPU
10,378,659
print(df_train.isnull().sum().sum()) print(df_test.isnull().sum().sum() )<categorify>
def create_InceptionV3_model() : pretrained_model = tf.keras.applications.InceptionV3(weights = 'imagenet', include_top = False, input_shape = [*IMAGE_SIZE, 3]) pretrained_model.trainable = True model = tf.keras.Sequential([ pretrained_model, tf.keras.layers.GlobalAveragePooling2D() , tf.keras.layers.Dense(len(CLASSES), activation = 'softmax') ]) model.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics = ['sparse_categorical_accuracy']) return model
Petals to the Metal - Flower Classification on TPU
10,378,659
def label_encode(df): df['MSSubClass'] = df['MSSubClass'].astype(object) df = df.replace({"Alley" : {"Not present" : 0, "Grvl" : 1, "Pave" : 2}, "BsmtCond" : {"Not present" : 0, "Po" : 1, "Fa" : 2, "TA" : 3, "Gd" : 4, "Ex" : 5}, "BsmtExposure" : {"Not present" : 0, "No" : 0, "Mn" : 1, "Av": 2, "Gd" : 3}, "BsmtFinType1" : {"Not present" : 0, "Unf" : 1, "LwQ": 2, "Rec" : 3, "BLQ" : 4, "ALQ" : 5, "GLQ" : 6}, "BsmtFinType2" : {"Not present" : 0, "Unf" : 1, "LwQ": 2, "Rec" : 3, "BLQ" : 4, "ALQ" : 5, "GLQ" : 6}, "BsmtQual" : {"Not present" : 0, "Po" : 1, "Fa" : 2, "TA": 3, "Gd" : 4, "Ex" : 5}, "CentralAir" : {"N" : 0, "Y" : 1}, "ExterCond" : {"Po" : 1, "Fa" : 2, "TA": 3, "Gd": 4, "Ex" : 5}, "ExterQual" : {"Po" : 1, "Fa" : 2, "TA": 3, "Gd": 4, "Ex" : 5}, "FireplaceQu" : {"Not present" : 0, "Po" : 1, "Fa" : 2, "TA" : 3, "Gd" : 4, "Ex" : 5}, "Functional" : {"Sal" : 1, "Sev" : 2, "Maj2" : 3, "Maj1" : 4, "Mod": 5, "Min2" : 6, "Min1" : 7, "Typ" : 8}, "GarageCond" : {"Not present" : 0, "Po" : 1, "Fa" : 2, "TA" : 3, "Gd" : 4, "Ex" : 5}, "GarageQual" : {"Not present" : 0, "Po" : 1, "Fa" : 2, "TA" : 3, "Gd" : 4, "Ex" : 5}, "GarageFinish" :{"Not present" : 0, "Unf" : 1, "RFn" : 2, "Fin" : 3}, "HeatingQC" : {"Po" : 1, "Fa" : 2, "TA" : 3, "Gd" : 4, "Ex" : 5}, "KitchenQual" : {"Po" : 1, "Fa" : 2, "TA" : 3, "Gd" : 4, "Ex" : 5}, "LandSlope" : {"Sev" : 1, "Mod" : 2, "Gtl" : 3}, "LotShape" : {"IR3" : 1, "IR2" : 2, "IR1" : 3, "Reg" : 4}, "PavedDrive" : {"N" : 0, "P" : 1, "Y" : 2}, "PoolQC" : {"Not present" : 0, "Fa" : 1, "TA" : 2, "Gd" : 3, "Ex" : 4}, "Street" : {"Grvl" : 1, "Pave" : 2}, "Utilities" : {"ELO" : 1, "NoSeWa" : 2, "NoSewr" : 3, "AllPub" : 4}, "Fence": {"Not present" : 0, "MnWw" : 1, "GdWo" : 2, "MnPrv" : 3, "GdPrv" : 4 }}, ) return df <categorify>
def create_ResNet152_model() : pretrained_model = tf.keras.applications.ResNet152V2(weights = 'imagenet', include_top = False, input_shape = [*IMAGE_SIZE, 3]) pretrained_model.trainable = True model = tf.keras.Sequential([ pretrained_model, tf.keras.layers.GlobalAveragePooling2D() , tf.keras.layers.Dense(len(CLASSES), activation = 'softmax') ]) model.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics = ['sparse_categorical_accuracy']) return model
Petals to the Metal - Flower Classification on TPU
10,378,659
df_train = label_encode(df_train) df_test = label_encode(df_test )<feature_engineering>
def create_MobileNetV2_model() : pretrained_model = tf.keras.applications.MobileNetV2(weights = 'imagenet', include_top = False, input_shape = [*IMAGE_SIZE, 3]) pretrained_model.trainable = True model = tf.keras.Sequential([ pretrained_model, tf.keras.layers.GlobalAveragePooling2D() , tf.keras.layers.Dense(len(CLASSES), activation = 'softmax') ]) model.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics = ['sparse_categorical_accuracy']) return model
Petals to the Metal - Flower Classification on TPU
10,378,659
df_train['SalePrice']=np.log(df_train['SalePrice']) _ = sns.distplot(df_train["SalePrice"] )<define_variables>
def create_InceptionResNetV2_model() : pretrained_model = tf.keras.applications.InceptionResNetV2(weights = 'imagenet', include_top = False, input_shape = [*IMAGE_SIZE, 3]) pretrained_model.trainable = True model = tf.keras.Sequential([ pretrained_model, tf.keras.layers.GlobalAveragePooling2D() , tf.keras.layers.Dense(len(CLASSES), activation = 'softmax') ]) model.compile(optimizer = 'adam', loss = 'sparse_categorical_crossentropy', metrics = ['sparse_categorical_accuracy']) return model
Petals to the Metal - Flower Classification on TPU
10,378,659
cat_list = [col for col in df_train.columns if df_train[col].dtypes == object] num_list = [col for col in df_train.columns if df_train[col].dtypes != object]<merge>
no_of_models = 2 models = [0] * no_of_models start_model = 0 end_model = 2 model_indx_0 = start_model model_indx_1 = start_model + 1
Petals to the Metal - Flower Classification on TPU
10,378,659
categorical_data = df_train[cat_list] numerical_data = df_train[num_list] df_train = categorical_data.join(numerical_data )<merge>
val_probabilities = [0] * no_of_models test_probabilities = [0] * no_of_models all_probabilities = [0] * no_of_models
Petals to the Metal - Flower Classification on TPU
10,378,659
num_list.remove('SalePrice') cat_test = df_test[cat_list] num_test = df_test[num_list] df_test = cat_test.join(num_test )<prepare_x_and_y>
with strategy.scope() : for j in range(no_of_models): models[j] = create_EfficientNet_model() models[0].summary()
Petals to the Metal - Flower Classification on TPU
10,378,659
X = df_train.drop('SalePrice', axis=1 ).values y = df_train['SalePrice'].values df_test_values = df_test.values<categorify>
def write_history(j): history_dict = [0] * no_of_models for i in range(j + 1): if(historys[i] != 0): history_dict[i] = historys[i].history filename = './' + this_run_file_prefix + 'model_history_' + str(j)+ '.pkl' pklfile = open(filename, 'ab') pickle.dump(history_dict, pklfile) pklfile.close()
Petals to the Metal - Flower Classification on TPU
10,378,659
def encode(X): labelencoder = LabelEncoder() for i in range(len(cat_list)) : X[:,i] = labelencoder.fit_transform(X[:,i]) for i in range(len(cat_list)) : onehotencoder = OneHotEncoder(categorical_features=[i]) X = onehotencoder.fit_transform(X ).toarray() X = X[:,i:] <categorify>
EPOCHS = 20 historys = [0] * no_of_models finished_models = 0 for j in range(start_model, end_model): start_training = datetime.now() print(start_training) time_from_start_program_tdelta = start_training - start_time if time_from_start_program_tdelta > end_training_by_tdelta: print(j, 'time limit for doing training over, get out') break print('LR_EXP_DECAY:', LR_EXP_DECAY, '.LR_MAX:', LR_MAX) historys[j] = models[j].fit(get_training_dataset() , steps_per_epoch = STEPS_PER_EPOCH, epochs = EPOCHS, validation_data = get_validation_dataset() , callbacks = [lr_callback, early_stop]) write_history(j) filename = this_run_file_prefix + 'models_' + str(j)+ '.h5' models[j].save(filename) gc.collect() finished_models = j + 1 print(datetime.now())
Petals to the Metal - Flower Classification on TPU
10,378,659
encode(X) encode(df_test_values )<normalization>
cmdataset = get_validation_dataset(ordered = True) images_ds = cmdataset.map(lambda image, label: image) labels_ds = cmdataset.map(lambda image, label: label ).unbatch() cm_correct_labels = next(iter(labels_ds.batch(NUM_VALIDATION_IMAGES)) ).numpy()
Petals to the Metal - Flower Classification on TPU
10,378,659
sc = StandardScaler() X = sc.fit_transform(X) df_test_values = sc.transform(df_test_values )<split>
test_ds = get_test_dataset(ordered = True) test_images_ds = test_ds.map(lambda image, idnum: image)
Petals to the Metal - Flower Classification on TPU
10,378,659
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2 )<compute_test_metric>
dataset = get_validation_dataset() dataset = dataset.unbatch().batch(20) batch = iter(dataset) images, labels = next(batch )
Petals to the Metal - Flower Classification on TPU
10,378,659
def rmsle(y, y_pred): assert len(y)== len(y_pred) terms_to_sum = [(math.log(y_pred[i] + 1)- math.log(y[i] + 1)) ** 2.0 for i,pred in enumerate(y_pred)] return(sum(terms_to_sum)*(1.0/len(y)))** 0.5<train_model>
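A quick sanity check of the rmsle helper above on toy values is sketched below; the numbers are invented for illustration, and it assumes the defining cell has been run with math imported.

y_true = [100.0, 200.0, 300.0]
y_hat = [110.0, 190.0, 330.0]
print(rmsle(y_true, y_hat))  # ~0.08: small, since the toy predictions are close to the targets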
print(datetime.now()) for j in range(start_model, end_model): val_probabilities[j] = models[j].predict(images_ds) test_probabilities[j] = models[j].predict(test_images_ds) all_probabilities[j] = models[j].predict(images) print(datetime.now())
Petals to the Metal - Flower Classification on TPU
10,378,659
lm = LinearRegression() lm.fit(X_train,y_train) y_pred_reg = lm.predict(X_test )<feature_engineering>
for j in range(start_model, finished_models): display_training_curves(historys[j].history['loss'], historys[j].history['val_loss'], 'loss', 211) display_training_curves(historys[j].history['sparse_categorical_accuracy'], historys[j].history['val_sparse_categorical_accuracy'], 'accuracy', 212) for j in range(start_model, finished_models): print('model number:', j, ', Train Accuracy:', max(historys[j].history['sparse_categorical_accuracy']), ', Validation Accuracy:', max(historys[j].history['val_sparse_categorical_accuracy'])) for j in range(start_model, finished_models): print('model number:', j, ', Train Loss:', min(historys[j].history['loss']), ', Validation Loss:', min(historys[j].history['val_loss']))
Petals to the Metal - Flower Classification on TPU
10,378,659
lm.intercept_<compute_test_metric>
def getFitPrecisionRecall(correct_labels, predictions): score = f1_score(correct_labels, predictions, labels = range(len(CLASSES)) , average = 'macro') precision = precision_score(correct_labels, predictions, labels = range(len(CLASSES)) , average = 'macro') recall = recall_score(correct_labels, predictions, labels = range(len(CLASSES)) , average = 'macro') return score, precision, recall
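getFitPrecisionRecall above macro-averages F1, precision, and recall over all the flower classes, matching the competition's macro F-score metric. A minimal illustration of the underlying sklearn call on toy labels is sketched below; it assumes f1_score is imported as in the notebook.

y_true = [0, 1, 2, 2]
y_pred = [0, 2, 2, 2]
print(f1_score(y_true, y_pred, average='macro'))  # 0.6: each class weighted equally regardless of support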
Petals to the Metal - Flower Classification on TPU
10,378,659
rmsle(np.exp(y_test), np.exp(y_pred_reg))<train_model>
cmat = confusion_matrix(cm_correct_labels, cm_predictions, labels = range(len(CLASSES))) score, precision, recall = getFitPrecisionRecall(cm_correct_labels, cm_predictions) cmat =(cmat.T / cmat.sum(axis = -1)).T display_confusion_matrix(cmat, score, precision, recall) print('f1 score: {:.3f}, precision: {:.3f}, recall: {:.3f}'.format(score, precision, recall))
Petals to the Metal - Flower Classification on TPU
10,378,659
lasso_model = Lasso()
lasso_model.fit(X_train, y_train)<predict_on_test>
def create_submission_file(filename, probabilities):
    predictions = np.argmax(probabilities, axis = -1)
    print('Generating submission file...', filename)
    test_ids_ds = test_ds.map(lambda image, idnum: idnum).unbatch()
    test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES))).numpy().astype('U')
    np.savetxt(filename, np.rec.fromarrays([test_ids, predictions]), fmt = ['%s', '%d'],
               delimiter = ',', header = 'id,label', comments = '')
Petals to the Metal - Flower Classification on TPU
10,378,659
y_pred_lasso = lasso_model.predict(X_test )<compute_test_metric>
probabilities = np.zeros(test_probabilities[0].shape)
for j in range(no_of_models):
    probabilities = probabilities + test_probabilities[j]
filename = this_run_file_prefix + 'submission.csv'
create_submission_file(filename, probabilities)
create_submission_file('submission.csv', probabilities)
Petals to the Metal - Flower Classification on TPU
10,378,659
rmsle(np.exp(y_test), np.exp(y_pred_lasso))<train_on_grid>
def combine_two(correct_labels, probability_0, probability_1):
    print('Start.', datetime.now())
    alphas0_to_try = np.linspace(0, 1, 101)
    best_score = -1
    best_alpha0 = -1
    best_alpha1 = -1
    best_precision = -1
    best_recall = -1
    best_val_predictions = None
    for alpha0 in alphas0_to_try:
        alpha1 = 1.0 - alpha0
        probabilities = alpha0 * probability_0 + alpha1 * probability_1
        predictions = np.argmax(probabilities, axis = -1)
        score, precision, recall = getFitPrecisionRecall(correct_labels, predictions)
        if score > best_score:
            best_alpha0 = alpha0
            best_alpha1 = alpha1
            best_score = score
            best_precision = precision
            best_recall = recall
            best_val_predictions = predictions
    return best_alpha0, best_alpha1, best_val_predictions, best_score, best_precision, best_recall
Petals to the Metal - Flower Classification on TPU
10,378,659
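Illustrative aside on the combine_two helper above: the grid search amounts to picking the convex blend alpha0 * p0 + (1 - alpha0) * p1 that maximises macro F1 on the validation labels. A minimal sketch on random stand-in probabilities, not the kernel's real ones:

import numpy as np
from sklearn.metrics import f1_score

rng = np.random.default_rng(0)
labels = rng.integers(0, 3, size=50)                      # toy validation labels
p0 = rng.random((50, 3)); p0 /= p0.sum(axis=1, keepdims=True)
p1 = rng.random((50, 3)); p1 /= p1.sum(axis=1, keepdims=True)

best_alpha0, best_f1 = 0.0, -1.0
for alpha0 in np.linspace(0, 1, 101):
    preds = np.argmax(alpha0 * p0 + (1 - alpha0) * p1, axis=-1)
    f1 = f1_score(labels, preds, average='macro')
    if f1 > best_f1:
        best_alpha0, best_f1 = alpha0, f1
print('best alpha0 {:.2f} (alpha1 {:.2f}) -> macro F1 {:.3f}'.format(best_alpha0, 1 - best_alpha0, best_f1))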
lcv = LassoCV()
lcv.fit(X_train, y_train)<set_options>
def combine_three(correct_labels, probability_0, probability_1, probability_2): print('Start.', datetime.now()) alphas0_to_try = np.linspace(0, 1, 101) alphas1_to_try = np.linspace(0, 1, 101) best_score = -1 best_alpha0 = -1 best_alpha1 = -1 best_alpha2 = -1 best_precision = -1 best_recall = -1 best_val_predictions = None for alpha0 in alphas0_to_try: for alpha1 in alphas1_to_try: if(alpha0 + alpha1)> 1.0: break alpha2 = 1.0 - alpha0 - alpha1 probabilities = alpha0 * probability_0 + alpha1 * probability_1 + alpha2 * probability_2 predictions = np.argmax(probabilities, axis = -1) score, precision, recall = getFitPrecisionRecall(correct_labels, predictions) if score > best_score: best_alpha0 = alpha0 best_alpha1 = alpha1 best_alpha2 = alpha2 best_score = score best_precision = precision best_recall = recall best_val_predictions = predictions return best_alpha0, best_alpha1, best_alpha2, best_val_predictions, best_score, best_precision, best_recall
Petals to the Metal - Flower Classification on TPU
10,378,659
lcv.alpha_<predict_on_test>
def get_best_combination(no_models, cm_correct_labels, val_probabilities, test_probabilities): best_fit_score = -10000.0 best_predictions = 0 choose_filename = '' curr_predictions = np.argmax(val_probabilities[0], axis = -1) score, precision, recall = getFitPrecisionRecall(cm_correct_labels, curr_predictions) print('f1 score: {:.3f}, precision: {:.3f}, recall: {:.3f}'.format(score, precision, recall)) filename = this_run_file_prefix + 'submission_0.csv' if best_fit_score < score: best_fit_score = score best_predictions = curr_predictions choose_filename = filename create_submission_file('./submission.csv', test_probabilities[0]) create_submission_file(filename, test_probabilities[0]) if no_models > 1: curr_predictions = np.argmax(val_probabilities[1], axis = -1) score, precision, recall = getFitPrecisionRecall(cm_correct_labels, curr_predictions) print('f1 score: {:.3f}, precision: {:.3f}, recall: {:.3f}'.format(score, precision, recall)) filename = this_run_file_prefix + 'submission_1.csv' if best_fit_score < score: best_fit_score = score best_predictions = curr_predictions choose_filename = filename create_submission_file('./submission.csv', test_probabilities[1]) create_submission_file(filename, test_probabilities[1]) if no_models > 2: curr_predictions = np.argmax(val_probabilities[2], axis = -1) score, precision, recall = getFitPrecisionRecall(cm_correct_labels, curr_predictions) print('f1 score: {:.3f}, precision: {:.3f}, recall: {:.3f}'.format(score, precision, recall)) filename = this_run_file_prefix + 'submission_2.csv' if best_fit_score < score: best_fit_score = score best_predictions = curr_predictions choose_filename = filename create_submission_file('./submission.csv', test_probabilities[2]) create_submission_file(filename, test_probabilities[2]) if no_models > 1: best_alpha0, best_alpha1, best_val_predictions, best_score, best_precision, best_recall = combine_two(cm_correct_labels, val_probabilities[0], val_probabilities[1]) print('For indx', [0, 1], 'best_alpha0:', best_alpha0, 'best_alpha1:', best_alpha1, '.', datetime.now()) print('f1 score: {:.3f}, precision: {:.3f}, recall: {:.3f}'.format(best_score, best_precision, best_recall)) combined_probabilities = best_alpha0 * test_probabilities[0] + best_alpha1 * test_probabilities[1] filename = this_run_file_prefix + 'submission_01.csv' if best_fit_score < best_score: best_fit_score = best_score best_predictions = best_val_predictions choose_filename = filename create_submission_file('./submission.csv', combined_probabilities) create_submission_file(filename, combined_probabilities) if no_models > 2: best_alpha0, best_alpha1, best_val_predictions, best_score, best_precision, best_recall = combine_two(cm_correct_labels, val_probabilities[0], val_probabilities[2]) print('For indx', [0, 2], 'best_alpha0:', best_alpha0, 'best_alpha1:', best_alpha1, '.', datetime.now()) print('f1 score: {:.3f}, precision: {:.3f}, recall: {:.3f}'.format(best_score, best_precision, best_recall)) combined_probabilities = best_alpha0 * test_probabilities[0] + best_alpha1 * test_probabilities[2] filename = this_run_file_prefix + 'submission_02.csv' if best_fit_score < best_score: best_fit_score = best_score best_predictions = best_val_predictions choose_filename = filename create_submission_file('./submission.csv', combined_probabilities) create_submission_file(filename, combined_probabilities) best_alpha0, best_alpha1, best_val_predictions, best_score, best_precision, best_recall = combine_two(cm_correct_labels, val_probabilities[1], 
val_probabilities[2]) print('For indx', [1, 2], 'best_alpha0:', best_alpha0, 'best_alpha1:', best_alpha1, '.', datetime.now()) print('f1 score: {:.3f}, precision: {:.3f}, recall: {:.3f}'.format(best_score, best_precision, best_recall)) combined_probabilities = best_alpha0 * test_probabilities[1] + best_alpha1 * test_probabilities[2] filename = this_run_file_prefix + 'submission_12.csv' if best_fit_score < best_score: best_fit_score = best_score best_predictions = best_val_predictions choose_filename = filename create_submission_file('./submission.csv', combined_probabilities) create_submission_file(filename, combined_probabilities) best_alpha0, best_alpha1, best_alpha2, best_val_predictions, best_score, best_precision, best_recall = combine_three(cm_correct_labels, val_probabilities[0], val_probabilities[1], val_probabilities[2]) print('For indx', [0, 1, 2], 'best_alpha0:', best_alpha0, 'best_alpha1:', best_alpha1, 'best_alpha2:', best_alpha2, '.', datetime.now()) print('f1 score: {:.3f}, precision: {:.3f}, recall: {:.3f}'.format(best_score, best_precision, best_recall)) combined_probabilities = best_alpha0 * test_probabilities[0] + best_alpha1 * test_probabilities[1] + best_alpha2 * test_probabilities[2] filename = this_run_file_prefix + 'submission_012.csv' if best_fit_score < best_score: best_fit_score = best_score best_predictions = best_val_predictions choose_filename = filename create_submission_file('./submission.csv', combined_probabilities) create_submission_file(filename, combined_probabilities) cmat = confusion_matrix(cm_correct_labels, best_predictions, labels = range(len(CLASSES))) cmat =(cmat.T / cmat.sum(axis = -1)).T display_confusion_matrix(cmat, best_fit_score, precision, recall) print('Best score from all combination was', best_fit_score, '.For submission file used is', choose_filename) return best_predictions
Petals to the Metal - Flower Classification on TPU
10,378,659
y_pred_lassocv = lcv.predict(X_test )<compute_test_metric>
best_predictions = cm_predictions
if no_of_models > 1:
    bp = get_best_combination(no_of_models, cm_correct_labels, val_probabilities, test_probabilities)
    best_predictions = bp
Petals to the Metal - Flower Classification on TPU
10,378,659
rmsle(np.exp(y_test),np.exp(y_pred_lassocv))<choose_model_class>
probabilities = np.zeros(all_probabilities[0].shape)
for j in range(no_of_models):
    probabilities = probabilities + all_probabilities[j]
predictions = np.argmax(probabilities, axis = -1)
display_batch_of_images((images, labels), predictions)
Petals to the Metal - Flower Classification on TPU
10,378,659
model = Lasso(lcv.alpha_)
model.fit(X, y)
y_pred = model.predict(df_test_values)
predictions = np.exp(y_pred)<save_to_csv>
val_probs = [cm_correct_labels, cm_predictions, val_probabilities[0], val_probabilities[1],
             test_probabilities[0], test_probabilities[1]]
filename = this_run_file_prefix + 'tests_vals_01.pkl'
pklfile = open(filename, 'ab')
pickle.dump(val_probs, pklfile)
pklfile.close()
Petals to the Metal - Flower Classification on TPU
10,378,659
result = pd.DataFrame({'Id': df_test.index, 'SalePrice': predictions})
result.to_csv('submission.csv', index=False)<set_options>
use_correct_labels = cm_correct_labels
use_val_predictions = best_predictions
Petals to the Metal - Flower Classification on TPU
10,378,659
warnings.filterwarnings('ignore' )<compute_test_metric>
correct_labels_cnt = 0
incorrect_labels_cnt = 0
correct_labels = []
incorrect_labels = []
vals_actual_true = {}
vals_tp = {}
vals_fn = {}
vals_fp = {}
for i in range(len(CLASSES)):
    vals_actual_true[i] = 0
    vals_tp[i] = 0
    vals_fn[i] = 0
    vals_fp[i] = 0
for i in range(len(use_correct_labels)):
    correct_label = use_correct_labels[i]
    predict_label = use_val_predictions[i]
    vals_actual_true[correct_label] = vals_actual_true[correct_label] + 1
    if use_val_predictions[i] != use_correct_labels[i]:
        incorrect_labels_cnt = incorrect_labels_cnt + 1
        incorrect_labels.append(i)
        vals_fn[correct_label] = vals_fn[correct_label] + 1
        vals_fp[predict_label] = vals_fp[predict_label] + 1
    else:
        correct_labels_cnt = correct_labels_cnt + 1
        correct_labels.append(i)
        vals_tp[correct_label] = vals_tp[correct_label] + 1
print('Number of correct_labels is {}, incorrect_labels is {}'.format(correct_labels_cnt, incorrect_labels_cnt))
print('Incorrect labels', incorrect_labels)
Petals to the Metal - Flower Classification on TPU
10,378,659
def rmse_cv(model, X, y):
    rmse = np.sqrt(-cross_val_score(model, X, y, scoring="neg_mean_squared_error", cv=kfolds))
    return rmse

def rmsle(y, y_pred):
    return np.sqrt(metrics.mean_squared_error(y, y_pred))

def CVscore(model, X, y):
    result = cross_val_score(model, X, y, cv=kfolds)
    return result.mean()

def blend_models_predict(X, models):
    blend = []
    for model in models:
        blend.append(1 / len(models) * model.predict(X))
    return sum(blend)<load_from_csv>
def display_my_batch_of_images(databatch, rows = 0, cols = 0, predictions=None): images, labels = databatch if labels is None: labels = [None for _ in enumerate(images)] if rows == 0 or cols == 0: rows = int(math.sqrt(len(images))) cols =(len(images)+ rows - 1)//rows print('Total number of images is {}, rows is {}, cols is {}'.format(len(images), rows, cols)) FIGSIZE = 20.0 SPACING = 0.1 subplot=(rows,cols,1) if rows < cols: plt.figure(figsize=(FIGSIZE,FIGSIZE/cols*rows)) else: plt.figure(figsize=(FIGSIZE/rows*cols,FIGSIZE)) for i,(image, label)in enumerate(zip(images[:rows*cols], labels[:rows*cols])) : title = '' if label is None else CLASSES[label] correct = True if predictions is not None: title, correct = title_from_label_and_target(predictions[i], label) dynamic_titlesize = FIGSIZE*SPACING/max(rows,cols)*40+3 subplot = display_one_flower(image, title, subplot, not correct, titlesize=dynamic_titlesize) plt.tight_layout() if label is None and predictions is None: plt.subplots_adjust(wspace=0, hspace=0) else: plt.subplots_adjust(wspace=SPACING, hspace=SPACING) plt.show()
Petals to the Metal - Flower Classification on TPU
10,378,659
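Illustrative aside on the blend_models_predict helper defined a few cells above: it is an equal-weight average of each fitted model's predictions. A toy regression sketch with made-up data; the names X_toy and y_toy are hypothetical and not from the kernel.

import numpy as np
from sklearn.linear_model import LinearRegression, Ridge

X_toy = np.arange(20, dtype=float).reshape(-1, 1)
y_toy = 3.0 * X_toy.ravel() + np.random.RandomState(0).normal(0, 1, 20)

fitted = [LinearRegression().fit(X_toy, y_toy), Ridge(alpha=1.0).fit(X_toy, y_toy)]
blend = sum((1 / len(fitted)) * m.predict(X_toy) for m in fitted)   # equal-weight average
print(blend[:5])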
df_train = pd.read_csv('../input/train.csv', index_col='Id')
df_test = pd.read_csv('../input/test.csv', index_col='Id')<drop_column>
disp_labels = []
disp_predictions = []
for i in range(54):
    if i >= incorrect_labels_cnt:
        break
    id = incorrect_labels[i]
    disp_labels.append(use_correct_labels[id])
    disp_predictions.append(use_val_predictions[id])
print(disp_labels)
print(disp_predictions)
Petals to the Metal - Flower Classification on TPU
10,378,659
<define_variables><EOS>
val_ids = list(range(len(use_correct_labels)))
filename = this_run_file_prefix + 'validation_results.csv'
np.savetxt(filename, np.rec.fromarrays([val_ids, use_correct_labels, use_val_predictions]),
           fmt = ['%d', '%d', '%d'], delimiter = ',', header = 'id,correct_label,predicted_label', comments = '')
cls_ids = list(range(len(CLASSES)))
filename = this_run_file_prefix + 'validation_statistics.csv'
np.savetxt(filename, np.rec.fromarrays([cls_ids, list(vals_actual_true.values()), list(vals_tp.values()),
                                        list(vals_fn.values()), list(vals_fp.values())]),
           fmt = ['%d', '%d', '%d', '%d', '%d'], delimiter = ',',
           header = 'cls_id,actual_true,true_positive,false_negative,false_positive', comments = '')
Petals to the Metal - Flower Classification on TPU
10,218,760
<SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<prepare_x_and_y>
print("TF version " + tf.__version__)
AUTO = tf.data.experimental.AUTOTUNE
Petals to the Metal - Flower Classification on TPU
10,218,760
y = df_train['SalePrice']<concatenate>
try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    print('Running on TPU ', tpu.master())
except ValueError:
    tpu = None
if tpu:
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
    strategy = tf.distribute.get_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync)
Petals to the Metal - Flower Classification on TPU
10,218,760
df_all = pd.concat([df_train, df_test], sort=False )<categorify>
IMAGE_SIZE = [512, 512]
EPOCHS = 40
BATCH_SIZE = 16 * strategy.num_replicas_in_sync
SEED = 752
SKIP_VALIDATION = False
TTA_NUM = 5
random.seed(SEED)
np.random.seed(SEED)
tf.random.set_seed(SEED)
Petals to the Metal - Flower Classification on TPU
10,218,760
Cat_toNone = ('PoolQC', 'Fence', 'MiscFeature', 'Alley', 'FireplaceQu', 'GarageType', 'GarageFinish',
              'GarageQual', 'GarageCond', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1',
              'BsmtFinType2', 'MasVnrType', 'MSSubClass')
for c in Cat_toNone:
    df_all[c] = df_all[c].fillna('None')<categorify>
np.set_printoptions(threshold=15, linewidth=80) def batch_to_numpy_images_and_labels(data): images, labels = data numpy_images = images.numpy() numpy_labels = labels.numpy() if numpy_labels.dtype == object: numpy_labels = [None for _ in enumerate(numpy_images)] return numpy_images, numpy_labels def title_from_label_and_target(label, correct_label): if correct_label is None: return CLASSES[label], True correct =(label == correct_label) return "{} [{}{}{}]".format(CLASSES[label], 'OK' if correct else 'NO', u"\u2192" if not correct else '', CLASSES[correct_label] if not correct else ''), correct def display_one_flower(image, title, subplot, red=False, titlesize=16): plt.subplot(*subplot) plt.axis('off') plt.imshow(image) if len(title)> 0: plt.title(title, fontsize=int(titlesize)if not red else int(titlesize/1.2), color='red' if red else 'black', fontdict={'verticalalignment':'center'}, pad=int(titlesize/1.5)) return(subplot[0], subplot[1], subplot[2]+1) def display_batch_of_images(databatch, predictions=None): images, labels = batch_to_numpy_images_and_labels(databatch) if labels is None: labels = [None for _ in enumerate(images)] rows = int(math.sqrt(len(images))) cols = len(images)//rows FIGSIZE = 13.0 SPACING = 0.1 subplot=(rows,cols,1) if rows < cols: plt.figure(figsize=(FIGSIZE,FIGSIZE/cols*rows)) else: plt.figure(figsize=(FIGSIZE/rows*cols,FIGSIZE)) for i,(image, label)in enumerate(zip(images[:rows*cols], labels[:rows*cols])) : title = '' if label is None else CLASSES[label] correct = True if predictions is not None: title, correct = title_from_label_and_target(predictions[i], label) dynamic_titlesize = FIGSIZE*SPACING/max(rows,cols)*40+3 subplot = display_one_flower(image, title, subplot, not correct, titlesize=dynamic_titlesize) plt.tight_layout() if label is None and predictions is None: plt.subplots_adjust(wspace=0, hspace=0) else: plt.subplots_adjust(wspace=SPACING, hspace=SPACING) plt.show() def display_confusion_matrix(cmat, score, precision, recall): plt.figure(figsize=(15,15)) ax = plt.gca() ax.matshow(cmat, cmap='Reds') ax.set_xticks(range(len(CLASSES))) ax.set_xticklabels(CLASSES, fontdict={'fontsize': 7}) plt.setp(ax.get_xticklabels() , rotation=45, ha="left", rotation_mode="anchor") ax.set_yticks(range(len(CLASSES))) ax.set_yticklabels(CLASSES, fontdict={'fontsize': 7}) plt.setp(ax.get_yticklabels() , rotation=45, ha="right", rotation_mode="anchor") titlestring = "" if score is not None: titlestring += 'f1 = {:.3f} '.format(score) if precision is not None: titlestring += ' precision = {:.3f} '.format(precision) if recall is not None: titlestring += ' recall = {:.3f} '.format(recall) if len(titlestring)> 0: ax.text(101, 1, titlestring, fontdict={'fontsize': 18, 'horizontalalignment':'right', 'verticalalignment':'top', 'color':' plt.show() def display_training_curves(training, validation, title, subplot): if subplot%10==1: plt.subplots(figsize=(10,10), facecolor=' plt.tight_layout() ax = plt.subplot(subplot) ax.set_facecolor(' ax.plot(training) ax.plot(validation) ax.set_title('model '+ title) ax.set_ylabel(title) ax.set_xlabel('epoch') ax.legend(['train', 'valid.'] )
Petals to the Metal - Flower Classification on TPU
10,218,760
Cat_toMode = ('MSZoning', 'Electrical', 'KitchenQual', 'Exterior1st', 'Exterior2nd',
              'Utilities', 'SaleType', 'Functional')
for c in Cat_toMode:
    df_all[c] = df_all[c].fillna(df_all[c].mode()[0])<categorify>
def random_blockout(img, sl=0.1, sh=0.2, rl=0.4): p=random.random() if p>=0.25: w, h, c = IMAGE_SIZE[0], IMAGE_SIZE[1], 3 origin_area = tf.cast(h*w, tf.float32) e_size_l = tf.cast(tf.round(tf.sqrt(origin_area * sl * rl)) , tf.int32) e_size_h = tf.cast(tf.round(tf.sqrt(origin_area * sh / rl)) , tf.int32) e_height_h = tf.minimum(e_size_h, h) e_width_h = tf.minimum(e_size_h, w) erase_height = tf.random.uniform(shape=[], minval=e_size_l, maxval=e_height_h, dtype=tf.int32) erase_width = tf.random.uniform(shape=[], minval=e_size_l, maxval=e_width_h, dtype=tf.int32) erase_area = tf.zeros(shape=[erase_height, erase_width, c]) erase_area = tf.cast(erase_area, tf.uint8) pad_h = h - erase_height pad_top = tf.random.uniform(shape=[], minval=0, maxval=pad_h, dtype=tf.int32) pad_bottom = pad_h - pad_top pad_w = w - erase_width pad_left = tf.random.uniform(shape=[], minval=0, maxval=pad_w, dtype=tf.int32) pad_right = pad_w - pad_left erase_mask = tf.pad([erase_area], [[0,0],[pad_top, pad_bottom], [pad_left, pad_right], [0,0]], constant_values=1) erase_mask = tf.squeeze(erase_mask, axis=0) erased_img = tf.multiply(tf.cast(img,tf.float32), tf.cast(erase_mask, tf.float32)) return tf.cast(erased_img, img.dtype) else: return tf.cast(img, img.dtype )
Petals to the Metal - Flower Classification on TPU
10,218,760
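Illustrative aside on random_blockout above: its core trick is to build a mask of ones containing a zero rectangle via tf.pad and multiply it into the image. A fixed-size sketch, assuming TensorFlow is installed; the sizes are hard-coded here instead of the random draws used in the kernel.

import tensorflow as tf

h, w, c = 8, 8, 3
img = tf.ones([h, w, c], tf.float32)                 # stand-in for a decoded image
erase_h, erase_w, top, left = 3, 4, 2, 1             # hard-coded instead of random
mask = tf.pad(tf.zeros([erase_h, erase_w, c]),
              [[top, h - erase_h - top], [left, w - erase_w - left], [0, 0]],
              constant_values=1.0)
erased = img * mask                                  # zeroes out the rectangle
print(int(tf.reduce_sum(1.0 - erased).numpy() / c))  # 12 == erase_h * erase_w pixels erased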
Num_toZero = ('GarageArea', 'GarageCars', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF',
              'BsmtFullBath', 'BsmtHalfBath', 'GarageYrBlt', 'MasVnrArea')
for c in Num_toZero:
    df_all[c] = df_all[c].fillna(0)<feature_engineering>
def decode_image(image_data): image = tf.image.decode_jpeg(image_data, channels=3) image = tf.cast(image, tf.float32)/ 255.0 image = tf.reshape(image, [*IMAGE_SIZE, 3]) return image def read_labeled_tfrecord(example): LABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "class": tf.io.FixedLenFeature([], tf.int64), } example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT) image = decode_image(example['image']) label = tf.cast(example['class'], tf.int32) return image, label def read_unlabeled_tfrecord(example): UNLABELED_TFREC_FORMAT = { "image": tf.io.FixedLenFeature([], tf.string), "id": tf.io.FixedLenFeature([], tf.string), } example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT) image = decode_image(example['image']) idnum = example['id'] return image, idnum def load_dataset(filenames, labeled=True, ordered=False): ignore_order = tf.data.Options() if not ordered: ignore_order.experimental_deterministic = False dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO) dataset = dataset.with_options(ignore_order) dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO) return dataset def data_augment(image, label): image = tf.image.random_flip_left_right(image, seed=SEED) image = random_blockout(image) return image, label def get_training_dataset() : dataset = load_dataset(TRAINING_FILENAMES, labeled=True) dataset = dataset.map(data_augment, num_parallel_calls=AUTO) dataset = dataset.repeat() dataset = dataset.shuffle(2048) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def get_validation_dataset(ordered=False): dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.cache() dataset = dataset.prefetch(AUTO) return dataset def get_test_dataset(ordered=False): dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered) dataset = dataset.batch(BATCH_SIZE) dataset = dataset.prefetch(AUTO) return dataset def count_data_items(filenames): n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames] return np.sum(n) NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES) NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES) NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES) STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE print('Dataset: {} training images, {} validation images, {} unlabeled test images'.format(NUM_TRAINING_IMAGES, NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
Petals to the Metal - Flower Classification on TPU
10,218,760
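Illustrative aside on count_data_items above: the regex pulls out the image count that the TFRecord naming convention embeds just before the extension. A quick check on fake filenames that follow the same pattern:

import re
import numpy as np

fake_filenames = ['flowers-00-230.tfrec', 'flowers-01-230.tfrec', 'flowers-02-73.tfrec']
counts = [int(re.compile(r"-([0-9]*)\.").search(f).group(1)) for f in fake_filenames]
print(counts, '->', int(np.sum(counts)))   # [230, 230, 73] -> 533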
df_all["LotFrontage"] = df_all.groupby("Neighborhood")["LotFrontage"].transform(lambda x: x.fillna(x.median()))<data_type_conversions>
if SKIP_VALIDATION: TRAINING_FILENAMES = TRAINING_FILENAMES + VALIDATION_FILENAMES
Petals to the Metal - Flower Classification on TPU
10,218,760
df_all['MSSubClass'] = df_all['MSSubClass'].astype(str )<feature_engineering>
print("Training data shapes:")
for image, label in get_training_dataset().take(3):
    print(image.numpy().shape, label.numpy().shape)
print("Training data label examples:", label.numpy())
print("Validation data shapes:")
for image, label in get_validation_dataset().take(3):
    print(image.numpy().shape, label.numpy().shape)
print("Validation data label examples:", label.numpy())
print("Test data shapes:")
for image, idnum in get_test_dataset().take(3):
    print(image.numpy().shape, idnum.numpy().shape)
print("Test data IDs:", idnum.numpy().astype('U'))
Petals to the Metal - Flower Classification on TPU
10,218,760
skewed_feats = df_all[numeric].apply(lambda x: skew(x.dropna()))
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
df_all[skewed_feats] = np.log1p(df_all[skewed_feats])<categorify>
training_dataset = get_training_dataset()
training_dataset = training_dataset.unbatch().batch(20)
train_batch = iter(training_dataset)
Petals to the Metal - Flower Classification on TPU
10,218,760
df_all = df_all.replace({"Alley" : {"None" : 0, "Grvl" : 1, "Pave" : 2}, "BsmtCond" : {"None" : 0, "Po" : 1, "Fa" : 2, "TA" : 3, "Gd" : 4, "Ex" : 5}, "BsmtExposure" : {"None" : 0, "Mn" : 1, "Av": 2, "Gd" : 3}, "BsmtFinType1" : {"None" : 0, "Unf" : 1, "LwQ": 2, "Rec" : 3, "BLQ" : 4, "ALQ" : 5, "GLQ" : 6}, "BsmtFinType2" : {"None" : 0, "Unf" : 1, "LwQ": 2, "Rec" : 3, "BLQ" : 4, "ALQ" : 5, "GLQ" : 6}, "BsmtQual" : {"None" : 0, "Po" : 1, "Fa" : 2, "TA": 3, "Gd" : 4, "Ex" : 5}, "ExterCond" : {"Po" : 1, "Fa" : 2, "TA": 3, "Gd": 4, "Ex" : 5}, "ExterQual" : {"Po" : 1, "Fa" : 2, "TA": 3, "Gd": 4, "Ex" : 5}, "FireplaceQu" : {"None" : 0, "Po" : 1, "Fa" : 2, "TA" : 3, "Gd" : 4, "Ex" : 5}, "Functional" : {"Sal" : 1, "Sev" : 2, "Maj2" : 3, "Maj1" : 4, "Mod": 5, "Min2" : 6, "Min1" : 7, "Typ" : 8}, "GarageCond" : {"None" : 0, "Po" : 1, "Fa" : 2, "TA" : 3, "Gd" : 4, "Ex" : 5}, "GarageQual" : {"None" : 0, "Po" : 1, "Fa" : 2, "TA" : 3, "Gd" : 4, "Ex" : 5}, "HeatingQC" : {"Po" : 1, "Fa" : 2, "TA" : 3, "Gd" : 4, "Ex" : 5}, "KitchenQual" : {"Po" : 1, "Fa" : 2, "TA" : 3, "Gd" : 4, "Ex" : 5}, "LandSlope" : {"Sev" : 1, "Mod" : 2, "Gtl" : 3}, "LotShape" : {"IR3" : 1, "IR2" : 2, "IR1" : 3, "Reg" : 4}, "PavedDrive" : {"N" : 0, "P" : 1, "Y" : 2}, "PoolQC" : {"None" : 0, "Fa" : 1, "TA" : 2, "Gd" : 3, "Ex" : 4}, "Street" : {"Grvl" : 1, "Pave" : 2}, "Utilities" : {"ELO" : 1, "NoSeWa" : 2, "NoSewr" : 3, "AllPub" : 4}} )<feature_engineering>
display_batch_of_images(next(train_batch))
Petals to the Metal - Flower Classification on TPU
10,218,760
df_all['Bathrooms'] = df_all['FullBath'] + (df_all['HalfBath'] * 0.5) + df_all['BsmtFullBath'] + (df_all['BsmtHalfBath'] * 0.5)
df_all['FireplaceScore'] = df_all['Fireplaces'] * df_all['FireplaceQu']
df_all['GarageScore'] = df_all['GarageCars'] * df_all['GarageQual'] * df_all['GarageArea']
df_all['HouseAge'] = df_all['YrSold'] - df_all['YearBuilt']
df_all['TotalLivingSF'] = df_all['GrLivArea'] + df_all['TotalBsmtSF'] - df_all['LowQualFinSF']<drop_column>
test_dataset = get_test_dataset()
test_dataset = test_dataset.unbatch().batch(20)
test_batch = iter(test_dataset)
Petals to the Metal - Flower Classification on TPU
10,218,760
df_all = df_all.drop(['FullBath', 'HalfBath', 'BsmtFullBath', 'BsmtHalfBath', 'GarageCars','GarageArea','GarageQual','GarageCond', 'FireplaceQu','Fireplaces', 'PoolArea', 'Utilities', 'Street', 'GarageYrBlt' ], axis = 1 )<categorify>
display_batch_of_images(next(test_batch))
Petals to the Metal - Flower Classification on TPU
10,218,760
df_all = pd.get_dummies(df_all, drop_first=True )<drop_column>
LR_START = 0.00001
LR_MAX = 0.00005 * strategy.num_replicas_in_sync
LR_MIN = 0.00001
LR_RAMPUP_EPOCHS = 5
LR_SUSTAIN_EPOCHS = 0
LR_EXP_DECAY = .8

def lrfn(epoch):
    if epoch < LR_RAMPUP_EPOCHS:
        lr = (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
    elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
        lr = LR_MAX
    else:
        lr = (LR_MAX - LR_MIN) * LR_EXP_DECAY ** (epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN
    return lr

lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose = True)

rng = [i for i in range(25 if EPOCHS < 25 else EPOCHS)]
y = [lrfn(x) for x in rng]
plt.plot(rng, y)
print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
Petals to the Metal - Flower Classification on TPU
10,218,760
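Illustrative aside on the lrfn schedule above: a worked example of the ramp-up and exponential-decay shape, assuming 8 TPU replicas so that LR_MAX = 0.00005 * 8 = 0.0004 (the actual replica count depends on the runtime).

LR_START, LR_MAX, LR_MIN = 0.00001, 0.00005 * 8, 0.00001
LR_RAMPUP_EPOCHS, LR_SUSTAIN_EPOCHS, LR_EXP_DECAY = 5, 0, 0.8

def lrfn_demo(epoch):
    if epoch < LR_RAMPUP_EPOCHS:                        # linear warm-up from LR_START to LR_MAX
        return (LR_MAX - LR_START) / LR_RAMPUP_EPOCHS * epoch + LR_START
    if epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:    # hold at the peak
        return LR_MAX
    return (LR_MAX - LR_MIN) * LR_EXP_DECAY ** (epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS) + LR_MIN

for e in (0, 4, 5, 10, 20):
    print(e, round(lrfn_demo(e), 6))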
df_all = df_all.drop(['SalePrice'], axis = 1)
train = df_all[:len_train]
test = df_all[len_train:]<split>
!pip install -q efficientnet
Petals to the Metal - Flower Classification on TPU
10,218,760
X_train, X_test, y_train, y_test = train_test_split(train, y, test_size = 0.3, random_state = 0 )<choose_model_class>
with strategy.scope():
    enet = efn.EfficientNetB7(input_shape=[*IMAGE_SIZE, 3], weights='noisy-student', include_top=False)
    enet.trainable = True
    model1 = tf.keras.Sequential([
        enet,
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(len(CLASSES), activation='softmax')
    ])
    model1.compile(
        optimizer=tf.keras.optimizers.Adam(),
        loss = 'sparse_categorical_crossentropy',
        metrics=['sparse_categorical_accuracy']
    )
model1.summary()
Petals to the Metal - Flower Classification on TPU
10,218,760
regressor = LinearRegression()
regressor.fit(X_train, y_train)
train_lm = regressor.predict(X_train)
test_lm = regressor.predict(X_test)
ridge = Ridge(alpha = 10)
ridge.fit(X_train, y_train)
ridge_train_lm = ridge.predict(X_train)
ridge_test_lm = ridge.predict(X_test)
lasso = Lasso(max_iter=500, alpha = 0.001)
lasso.fit(X_train, y_train)
lasso_train_lm = lasso.predict(X_train)
lasso_test_lm = lasso.predict(X_test)
en = ElasticNet(max_iter=500, alpha = 0.001)
en.fit(X_train, y_train)
en_train_lm = en.predict(X_train)
en_test_lm = en.predict(X_test)
dtr = DecisionTreeRegressor(max_depth=5)
dtr.fit(X_train, y_train)
train_dtr = dtr.predict(X_train)
test_dtr = dtr.predict(X_test)
rf = RandomForestRegressor(random_state = 0)
rf.fit(X_train, y_train)
train_rf = rf.predict(X_train)
test_rf = rf.predict(X_test)<compute_train_metric>
if not SKIP_VALIDATION:
    history1 = model1.fit(get_training_dataset(), steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS,
                          validation_data=get_validation_dataset(), callbacks = [lr_callback])
else:
    history1 = model1.fit(get_training_dataset(), steps_per_epoch=STEPS_PER_EPOCH, epochs=EPOCHS,
                          callbacks = [lr_callback])
Petals to the Metal - Flower Classification on TPU
10,218,760
scorer = metrics.make_scorer(metrics.mean_squared_error, greater_is_better = False)
kfolds = KFold(n_splits=10, shuffle=True, random_state=42)<compute_train_metric>
with strategy.scope():
    densenet = tf.keras.applications.DenseNet201(input_shape=[*IMAGE_SIZE, 3], weights='imagenet', include_top=False)
    densenet.trainable = True
    model2 = tf.keras.Sequential([
        densenet,
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(len(CLASSES), activation='softmax')
    ])
    model2.compile(
        optimizer=tf.keras.optimizers.Adam(),
        loss = 'sparse_categorical_crossentropy',
        metrics=['sparse_categorical_accuracy']
    )
model2.summary()
Petals to the Metal - Flower Classification on TPU