kernel_id (int64, 24.2k–23.3M) | prompt (string, 8–1.85M chars) | completion (string, 1–182k chars) | comp_name (string, 5–57 chars)
---|---|---|---|
10,683,657 | grouped=dftrain.groupby(['Country_Region','Province_State'] ).tail(daybasecount*2)
grouped=grouped.groupby(['Country_Region','Province_State'] ).head(daybasecount)
grouped.drop(['FatalityBasis'],axis=1,inplace=True)
to_sum = ['NewCases','NewFatalities']
grouped1 = grouped.groupby(['Country_Region'])[to_sum].sum()
grouped1.rename(columns={'NewCases':'NewCases1','NewFatalities':'NewFatalities1'}, inplace=True)
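grouped2 is merged in below but built in a cell that is not part of this dump; a minimal sketch of the presumed definition (the most recent daybasecount-day window, mirroring grouped1) would be:
# Hypothetical reconstruction -- the original cell is not shown in this dump.
grouped2 = dftrain.groupby(['Country_Region','Province_State']).tail(daybasecount)
grouped2 = grouped2.groupby(['Country_Region'])[to_sum].sum()
grouped2.rename(columns={'NewCases':'NewCases2','NewFatalities':'NewFatalities2'}, inplace=True)
# FatalityPct2 (used further down) is presumably derived in the same unshown cell,
# e.g. as 100*NewFatalities2/NewCases2; the exact formula is not recoverable here.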
grouped = pd.merge(grouped1, grouped2, on=['Country_Region'])
grouped['CasesIncreasePct'] = 100*(grouped['NewCases2']/grouped['NewCases1']-1)
mask = grouped['CasesIncreasePct'] > maxincrease
grouped.loc[mask,'CasesIncreasePct'] = maxincrease
mask = grouped['CasesIncreasePct'] < 0
grouped.loc[mask,'CasesIncreasePct'] = 0
mask = grouped['CasesIncreasePct'].isnull()
grouped.loc[mask,'CasesIncreasePct'] = 0
grouped['Factor'] =(grouped['CasesIncreasePct']/100+1)**exponent
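For intuition about Factor (a worked example with hypothetical values, not from the notebook): if new cases rose 21% between the two windows and exponent = 0.5, then Factor = (21/100 + 1)**0.5 = 1.1, i.e. roughly 10% projected growth per step; Factor is later raised to a per-day Expo power when extrapolating NewCases.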
grouped = pd.merge(grouped, ppp_tabel, on=['Country_Region'])
grouped['ppp'] = grouped['ppp']/10000.
if False:
mask =(grouped['FatalityPct2'] > 9)&(grouped['ppp'] <= 1)
grouped.loc[mask,'FatalityPct2'] = 5
mask =(grouped['FatalityPct2'] < 5)&(grouped['ppp'] <= 1)
grouped.loc[mask,'FatalityPct2'] = 5
mask =(grouped['FatalityPct2'] > 6)&(grouped['ppp'] >= 7)
grouped.loc[mask,'FatalityPct2'] = 6
mask =(grouped['FatalityPct2'] < 1.5)&(grouped['ppp'] >= 7)
grouped.loc[mask,'FatalityPct2'] = 1.5
mask =(grouped['FatalityPct2'] >(9.5 - 0.43*grouped['ppp'])) &(grouped['ppp'] > 1)&(grouped['ppp'] < 7)
grouped.loc[mask,'FatalityPct2'] =(9.5 - 0.43*grouped['ppp'])
mask =(grouped['FatalityPct2'] <(5.6 - 0.5*grouped['ppp'])) &(grouped['ppp'] > 1)&(grouped['ppp'] < 7)
grouped.loc[mask,'FatalityPct2'] =(5.6 - 0.5*grouped['ppp'])
mask =(grouped['FatalityPct2'].isnull())&(grouped['ppp'] <= 1)
grouped.loc[mask,'FatalityPct2'] = 7
mask =(grouped['FatalityPct2'].isnull())&(grouped['ppp'] >= 7)
grouped.loc[mask,'FatalityPct2'] = 4
mask =(grouped['FatalityPct2'].isnull())&(grouped['ppp'] > 1)&(grouped['ppp'] < 7)
grouped.loc[mask,'FatalityPct2'] =(7.5 - 0.5*grouped['ppp'])
else:
mask =(grouped['FatalityPct2'] > 4)&(grouped['ppp'] <= 1)
grouped.loc[mask,'FatalityPct2'] = 3
mask =(grouped['FatalityPct2'] < 1)&(grouped['ppp'] <= 1)
grouped.loc[mask,'FatalityPct2'] = 2
mask =(grouped['FatalityPct2'] > 1.5)&(grouped['ppp'] >= 7)
grouped.loc[mask,'FatalityPct2'] = 1.5
mask =(grouped['FatalityPct2'] < 0.5)&(grouped['ppp'] >= 7)
grouped.loc[mask,'FatalityPct2'] = 0.5
mask =(grouped['FatalityPct2'] >(4.5 - 0.43*grouped['ppp'])) &(grouped['ppp'] > 1)&(grouped['ppp'] < 7)
grouped.loc[mask,'FatalityPct2'] =(4.5 - 0.43*grouped['ppp'])
mask =(grouped['FatalityPct2'] <(1.1 - 0.1*grouped['ppp'])) &(grouped['ppp'] > 1)&(grouped['ppp'] < 7)
grouped.loc[mask,'FatalityPct2'] =(1.1 - 0.1*grouped['ppp'])
mask =(grouped['FatalityPct2'].isnull())&(grouped['ppp'] <= 1)
grouped.loc[mask,'FatalityPct2'] = 3
mask =(grouped['FatalityPct2'].isnull())&(grouped['ppp'] >= 7)
grouped.loc[mask,'FatalityPct2'] = 1
mask =(grouped['FatalityPct2'].isnull())&(grouped['ppp'] > 1)&(grouped['ppp'] < 7)
grouped.loc[mask,'FatalityPct2'] =(2.6 - 0.23*grouped['ppp'] )<merge> | test_data = tf.data.TFRecordDataset(
tf.io.gfile.glob(GCS_DS_PATH + '/tfrecords-jpeg-' + str(IMAGE_SIZE)+ 'x' + str(IMAGE_SIZE)+ '/test/*.tfrec'),
num_parallel_reads = tf.data.experimental.AUTOTUNE
)
test_data = test_data.with_options(tf.data.Options())
test_data = test_data.map(read_unlabeled_tfrecord, num_parallel_calls = tf.data.experimental.AUTOTUNE)
test_data = test_data.batch(BATCH_SIZE)
test_data = test_data.prefetch(tf.data.experimental.AUTOTUNE ) | Petals to the Metal - Flower Classification on TPU |
10,683,657 | dftest.drop('_merge',axis=1,inplace= True)
dftest = dftest.merge(grouped[['Country_Region','FatalityPct2','Factor']], on=['Country_Region'], how='left')
dftest = dftest.merge(grouped_gem[['Province_State','Country_Region','ConfirmedCases_base','ConfirmedCases_init','NewCases_base','Fatalities_init','FatalityBasis']], on=['Province_State','Country_Region'], how='left')
<feature_engineering> | test_images = test_data.map(lambda image, idnum: image)
probabilities = model.predict(test_images)
predictions = np.argmax(probabilities, axis=-1 ) | Petals to the Metal - Flower Classification on TPU |
10,683,657 | dftest['ConfirmedCases_shift'] = dftest.groupby(['Country_Region', 'Province_State'])[['ConfirmedCases']].transform(lambda x: x.shift(1))
mask = dftest['ConfirmedCases'].isnull()
dftest.loc[mask,'NewCases'] = dftest.loc[mask,'NewCases_base']*(dftest.loc[mask,'Factor']**dftest.loc[mask,'Expo'])
dftest['NewCases_cum'] = dftest.groupby(['Country_Region', 'Province_State'])[['NewCases']].cumsum()
dftest.loc[mask,'ConfirmedCases'] = dftest.loc[mask,'ConfirmedCases_init'] + dftest.loc[mask,'NewCases_cum']
mask3 = dftest['ConfirmedCases'] > 400000
dftest.loc[mask3,'FatalityPct2'] = dftest.loc[mask3,'FatalityPct2']*0.7
mask4 = dftest['ConfirmedCases'] > 800000
dftest.loc[mask4,'FatalityPct2'] = dftest.loc[mask4,'FatalityPct2']*0.7
dftest['FatalityBasis'] = dftest.groupby(['Country_Region', 'Province_State'])[
['ConfirmedCases']].transform(lambda x: x.shift(10))
dftest.loc[mask,'NewFatalities'] = dftest.loc[mask,'FatalityBasis'] * dftest.loc[mask,'FatalityPct2']/100
mask2 = dftest['NewFatalities'] > maxDeadPrDay
dftest.loc[mask2,'NewFatalities'] = maxDeadPrDay
dftest['NewFatalities_cum'] = dftest.groupby(['Country_Region', 'Province_State'])[['NewFatalities']].cumsum()
dftest.loc[mask,'Fatalities'] = dftest.loc[mask,'Fatalities_init'] + dftest.loc[mask,'NewFatalities_cum']
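The mortality model above lags deaths behind cases: FatalityBasis is confirmed cases shifted 10 days, so (a worked example with hypothetical numbers) with FatalityPct2 = 2 and 5,000 confirmed cases ten days earlier, projected NewFatalities = 5000*2/100 = 100 per day, capped at maxDeadPrDay.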
<drop_column> | ids = []
for image, image_ids in test_data.take(NUM_TEST_IMAGES):  # take() counts batches here, so it simply caps at the dataset length
ids.append(image_ids.numpy())
ids = np.concatenate(ids, axis=None ).astype(str ) | Petals to the Metal - Flower Classification on TPU |
10,683,657 | dftest.drop(['Dayofyear',
'Expo','FatalityPct2', 'Factor',
'ConfirmedCases_base', 'ConfirmedCases_init',
'NewCases_base', 'Fatalities_init', 'FatalityBasis',
'ConfirmedCases_shift',
'NewCases', 'NewCases_cum', 'NewFatalities','NewFatalities_cum'],axis=1,inplace=True)
final = dftest.groupby(['Country_Region','Province_State'] ).tail(1)
dftest.drop(['Province_State'],axis=1,inplace=True)
dftest.rename(columns={'Province_State_orig':'Province_State'},inplace=True )<count_missing_values> | submission = pd.DataFrame(data={'id': ids, 'label': predictions})
submission.to_csv('submission.csv', index=False ) | Petals to the Metal - Flower Classification on TPU |
10,683,657 | mask = dftest["ConfirmedCases"].isnull()
print(mask.sum())
errors = dftest.loc[mask]
print(errors)
mask = dftest["Fatalities"].isnull()
print(mask.sum())
errors = dftest.loc[mask]
print(errors)
dftest.drop(['Province_State','Country_Region','Date'],axis=1,inplace=True)
print("dftest columns =",dftest.columns)
<save_to_csv> | model.save('model.h5' ) | Petals to the Metal - Flower Classification on TPU |
14,332,861 | dftest.ForecastId = dftest.ForecastId.astype('int')
dftest['ConfirmedCases'] = dftest['ConfirmedCases'].round().astype(int)
dftest['Fatalities'] = dftest['Fatalities'].round().astype(int)
dftest.to_csv('submission.csv', index=False)
<import_modules> | import math, re, os
import numpy as np
import tensorflow as tf | Petals to the Metal - Flower Classification on TPU |
14,332,861 | from sklearn.preprocessing import StandardScaler, MinMaxScaler, OneHotEncoder
from sklearn.model_selection import train_test_split, KFold, cross_val_score, GridSearchCV
from sklearn import preprocessing
import lightgbm as lgb
import optuna
import glob<define_variables> | print(tf.__version__ ) | Petals to the Metal - Flower Classification on TPU |
14,332,861 | path = '../input/google-cloud-ncaa-march-madness-2020-division-1-womens-tournament/'
Files = 'WDataFiles_Stage1/'<load_from_csv> | try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print('Running on TPU ', tpu.master())
except ValueError:
tpu = None
if tpu:
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
strategy = tf.distribute.get_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync ) | Petals to the Metal - Flower Classification on TPU |
14,332,861 | TourneyCompactResults = pd.read_csv(path + Files + 'WNCAATourneyCompactResults.csv')
GameCities = pd.read_csv(path+Files+'WGameCities.csv')
Seasons = pd.read_csv(path+Files+'WSeasons.csv')
TourneySeeds = pd.read_csv(path+Files+'WNCAATourneySeeds.csv')
RegularSeasonCompactResults = pd.read_csv(path+Files+'WRegularSeasonCompactResults.csv' )<load_from_csv> | GCS_DS_PATH=KaggleDatasets().get_gcs_path('tpu-getting-started')
print(GCS_DS_PATH ) | Petals to the Metal - Flower Classification on TPU |
14,332,861 | test= pd.read_csv(path +'WSampleSubmissionStage1_2020.csv' )<feature_engineering> | IMAGE_SIZE=[512,512]
GCS_PATH=GCS_DS_PATH+'/tfrecords-jpeg-512x512'
AUTO=tf.data.experimental.AUTOTUNE
TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec')
VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec')
TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec')
CLASSES = ['pink primrose', 'hard-leaved pocket orchid', 'canterbury bells', 'sweet pea', 'wild geranium', 'tiger lily', 'moon orchid', 'bird of paradise', 'monkshood', 'globe thistle',
'snapdragon', "colt's foot", 'king protea', 'spear thistle', 'yellow iris', 'globe-flower', 'purple coneflower', 'peruvian lily', 'balloon flower', 'giant white arum lily',
'fire lily', 'pincushion flower', 'fritillary', 'red ginger', 'grape hyacinth', 'corn poppy', 'prince of wales feathers', 'stemless gentian', 'artichoke', 'sweet william',
'carnation', 'garden phlox', 'love in the mist', 'cosmos', 'alpine sea holly', 'ruby-lipped cattleya', 'cape flower', 'great masterwort', 'siam tulip', 'lenten rose',
'barberton daisy', 'daffodil', 'sword lily', 'poinsettia', 'bolero deep blue', 'wallflower', 'marigold', 'buttercup', 'daisy', 'common dandelion',
'petunia', 'wild pansy', 'primula', 'sunflower', 'lilac hibiscus', 'bishop of llandaff', 'gaura', 'geranium', 'orange dahlia', 'pink-yellow dahlia',
'cautleya spicata', 'japanese anemone', 'black-eyed susan', 'silverbush', 'californian poppy', 'osteospermum', 'spring crocus', 'iris', 'windflower', 'tree poppy',
'gazania', 'azalea', 'water lily', 'rose', 'thorn apple', 'morning glory', 'passion flower', 'lotus', 'toad lily', 'anthurium',
'frangipani', 'clematis', 'hibiscus', 'columbine', 'desert-rose', 'tree mallow', 'magnolia', 'cyclamen ', 'watercress', 'canna lily',
'hippeastrum ', 'bee balm', 'pink quill', 'foxglove', 'bougainvillea', 'camellia', 'mallow', 'mexican petunia', 'bromelia', 'blanket flower',
'trumpet creeper', 'blackberry lily', 'common tulip', 'wild rose']
def decode_image(image_data):
image = tf.image.decode_jpeg(image_data,channels=3)
image = tf.cast(image,tf.float32)/255.0
image=tf.reshape(image,[*IMAGE_SIZE,3])
return image
def read_labeled_tfrecord(example):
LABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([],tf.string),
"class": tf.io.FixedLenFeature([],tf.int64)
}
example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
image = decode_image(example['image'])
label = tf.cast(example['class'], tf.int32)
return image,label
def read_unlabeled_tfrecord(example):
UNLABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([],tf.string),
"id": tf.io.FixedLenFeature([],tf.string)
}
example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
image = decode_image(example['image'])
idnum = example['id']
return image, idnum
def load_dataset(filenames, labeled=True, ordered=False):
ignore_order=tf.data.Options()
if not ordered:
ignore_order.experimental_deterministic =False
dataset=tf.data.TFRecordDataset(filenames,num_parallel_reads=AUTO)
dataset=dataset.with_options(ignore_order)
dataset=dataset.map(read_labeled_tfrecord if labeled else
read_unlabeled_tfrecord, num_parallel_calls=AUTO)
return dataset | Petals to the Metal - Flower Classification on TPU |
14,332,861 | TourneySeeds['Seed'] = TourneySeeds['Seed'].apply(lambda x: int(x[1:3]))
print(TourneySeeds.shape)
TourneySeeds.head()<categorify> | BATCH_SIZE = 16*strategy.num_replicas_in_sync
ds_train = get_training_dataset()
ds_valid = get_validation_dataset()
ds_test = get_test_dataset()
print("Training: ",ds_train)
print("Validation: ",ds_valid)
print("Test: ",ds_test ) | Petals to the Metal - Flower Classification on TPU |
14,332,861 | train = train.replace({'H':0,'A':1,'N':2})
train.head()<categorify> | np.set_printoptions(threshold=15, linewidth=80)
print("Training data shapes:")
for image,label in ds_train.take(3):
print(image.numpy().shape,label.numpy().shape)
print("Training data label examples:",label.numpy() ) | Petals to the Metal - Flower Classification on TPU |
14,332,861 | le = preprocessing.LabelEncoder()
for column in ['CRType']:
le.fit(GameCities[column])
GameCities[column] = le.transform(GameCities[column])
GameCities.head()<categorify> | print("Test data shapes:")
for image,idnum in ds_test.take(3):
print(image.numpy().shape,idnum.numpy().shape)
print("Test data IDs:",idnum.numpy().astype('U')) | Petals to the Metal - Flower Classification on TPU |
14,332,861 | le = preprocessing.LabelEncoder()
for column in ['RegionW','RegionX','RegionY','RegionZ']:
le.fit(Seasons[column])
Seasons[column] = le.transform(Seasons[column])
Seasons['ZeroMonth'] = Seasons['DayZero'].apply(lambda x: x.split('/')[0])
Seasons['ZeroDay'] = Seasons['DayZero'].apply(lambda x: x.split('/')[1])
Seasons['ZeroYear'] = Seasons['DayZero'].apply(lambda x: x.split('/')[2])
Seasons = Seasons.drop('DayZero',axis=1)
Seasons['ZeroMonth'] = Seasons['ZeroMonth'].astype(int)
Seasons['ZeroDay'] = Seasons['ZeroDay'].astype(int)
Seasons['ZeroYear'] = Seasons['ZeroYear'].astype(int)
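A sturdier alternative to the split/astype block above (a sketch, not in the original notebook; assumes the usual M/D/YYYY DayZero strings and runs before DayZero is dropped):
dz = pd.to_datetime(Seasons['DayZero'], format='%m/%d/%Y')  # parse once
Seasons['ZeroMonth'], Seasons['ZeroDay'], Seasons['ZeroYear'] = dz.dt.month, dz.dt.day, dz.dt.year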
Seasons.head()<merge> | def batch_to_numpy_images_and_labels(data):
images, labels = data
numpy_images = images.numpy()
numpy_labels = labels.numpy()
if numpy_labels.dtype ==object:
numpy_labels = [None for _ in enumerate(numpy_images)]
return numpy_images, numpy_labels
def title_from_label_and_target(label, correct_label):
if correct_label is None:
return CLASSES[label], True
correct =(label == correct_label)
return "{} [{}{}{}]".format(CLASSES[label], 'OK' if correct else
'NO', u"\u2192" if not correct else '',
CLASSES[correct_label] if not correct else ''), correct
def display_one_flower(image, title, subplot, red=False, titlesize=16):
plt.subplot(*subplot)
plt.axis('off')
plt.imshow(image)
if len(title)>0:
plt.title(title, fontsize=int(titlesize)if not red
else int(titlesize/1.2), color ='red' if red else
'black',fontdict={'verticalalignment':'center'},
pad = int(titlesize/1.5))
return(subplot[0],subplot[1],subplot[2]+1)
def display_batch_of_images(databatch, predictions=None):
images,labels = batch_to_numpy_images_and_labels(databatch)
if labels is None:
labels = [None for _ in enumerate(images)]
rows=int(math.sqrt(len(images)))
cols =len(images)//rows
FIGSIZE = 13.0
SPACING = 0.1
subplot =(rows,cols,1)
if rows < cols:
plt.figure(figsize=(FIGSIZE,FIGSIZE/cols*rows))
else:
plt.figure(figsize=(FIGSIZE/rows*cols,FIGSIZE))
for i,(image, label)in enumerate(zip(images[:rows*cols],labels[:rows*cols])) :
title = '' if label is None else CLASSES[label]
correct = True
if predictions is not None:
title, correct =title_from_label_and_target(predictions[i],label)
dynamic_titlesize=FIGSIZE*SPACING/max(rows,cols)*40+3
subplot=display_one_flower(image, title, subplot,
not correct, titlesize=dynamic_titlesize)
plt.tight_layout()
if label is None and predictions is None:
plt.subplots_adjust(wspace=0, hspace=0)
else:
plt.subplots_adjust(wspace=0, hspace=SPACING)
plt.show()
def display_training_curves(training, validation, title, subplot):
if subplot%10==1:
plt.subplots(figsize=(10,10),facecolor='#F8F8F8')
plt.tight_layout()
ax=plt.subplot(subplot)
ax.set_facecolor('#F8F8F8')
ax.plot(training)
ax.plot(validation)
ax.set_title('model' + title)
ax.set_ylabel(title)
ax.set_xlabel('epoch')
ax.legend(['train','valid'] ) | Petals to the Metal - Flower Classification on TPU |
14,332,861 | train = train.merge(Seasons, how='left',on=['Season'])
train.head()<merge> | ds_iter = iter(ds_train.unbatch().batch(20)) | Petals to the Metal - Flower Classification on TPU |
14,332,861 | train = train.merge(TourneySeeds, how='left', left_on=['Season', 'WTeamID'], right_on=['Season','TeamID'])
train = train.drop('TeamID',axis=1)
train = train.rename(columns={'Seed': 'WSeed'})
train = train.merge(TourneySeeds, how='left', left_on=['Season', 'LTeamID'], right_on=['Season','TeamID'])
train = train.drop('TeamID',axis=1)
train = train.rename(columns={'Seed': 'LSeed'})
train.head()<feature_engineering> | dataset = load_dataset(TRAINING_FILENAMES, labeled=True)
one_element = tf.data.Dataset.from_tensors(next(iter(dataset)))
augmented_element = one_element.repeat().map(data_augment ).batch(25)
display_batch_of_images(next(iter(augmented_element)) ) | Petals to the Metal - Flower Classification on TPU |
14,332,861 | test = test.drop(['Pred'], axis=1)
test['Season'] = test['ID'].apply(lambda x: int(x.split('_')[0]))
test['WTeamID'] = test['ID'].apply(lambda x: int(x.split('_')[1]))
test['LTeamID'] = test['ID'].apply(lambda x: int(x.split('_')[2]))
test.head()<merge> | EPOCHS=20
def exponential_lr(epoch,
start_lr=0.00001,min_lr=0.00001,max_lr=0.00005,
rampup_epochs = 5, sustain_epochs = 0,
exp_decay = 0.8):
def lr(epoch, start_lr, min_lr,max_lr,rampup_epochs,sustain_epochs,
exp_decay):
if epoch < rampup_epochs:
lr=(( max_lr-start_lr)/
rampup_epochs * epoch + start_lr)
elif epoch < rampup_epochs + sustain_epochs:
lr = max_lr
else:
lr =(( max_lr - min_lr)* exp_decay **(epoch-rampup_epochs-sustain_epochs)
+ min_lr)
return lr
return lr(epoch,start_lr,min_lr,max_lr,rampup_epochs,sustain_epochs,exp_decay)
lr_callback = tf.keras.callbacks.LearningRateScheduler(exponential_lr,verbose=True)
rng = [i for i in range(EPOCHS)]
y = [exponential_lr(x)for x in rng]
plt.plot(rng,y)
print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0],
max(y),
y[-1])) | Petals to the Metal - Flower Classification on TPU |
14,332,861 | test = test.merge(TourneyCompactResults,how='left',on=['Season','WTeamID','LTeamID'] )<merge> | EPOCHS = 20
with strategy.scope() :
pretrained_model=tf.keras.applications.EfficientNetB7(
weights='imagenet',
include_top=False,
input_shape=[*IMAGE_SIZE,3]
)
pretrained_model.trainable = True
model = tf.keras.Sequential([
pretrained_model,
tf.keras.layers.GlobalAveragePooling2D() ,
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(len(CLASSES),activation='softmax')] ) | Petals to the Metal - Flower Classification on TPU |
14,332,861 | test = test.replace({'H':0,'A':1,'N':2})
test = test.merge(Seasons, how='left',on=['Season'])
test = test.merge(TourneySeeds, how='left', left_on=['Season', 'WTeamID'], right_on=['Season','TeamID'])
test = test.drop('TeamID',axis=1)
test = test.rename(columns={'Seed': 'WSeed'})
test = test.merge(TourneySeeds, how='left', left_on=['Season', 'LTeamID'], right_on=['Season','TeamID'])
test = test.drop('TeamID',axis=1)
test = test.rename(columns={'Seed': 'LSeed'})
<merge> | model.compile(loss='sparse_categorical_crossentropy',optimizer='adam',metrics=['sparse_categorical_accuracy'])
model.summary() | Petals to the Metal - Flower Classification on TPU |
14,332,861 | test.merge(test,how='left',on=['ID','Season','WTeamID','LTeamID'] )<drop_column> | earlystopping=EarlyStopping(monitor='val_loss',patience=2 ) | Petals to the Metal - Flower Classification on TPU |
14,332,861 | not_exist_in_test = [c for c in train.columns.values.tolist() if c not in test.columns.values.tolist() ]
print(not_exist_in_test)
train = train.drop(not_exist_in_test, axis=1)
train.head()<groupby> | EPOCHS = 20
STEPS_PER_EPOCH = NUM_TRAINING_IMAGES//BATCH_SIZE
history = model.fit(ds_train,validation_data=ds_valid,
epochs=EPOCHS,
steps_per_epoch=STEPS_PER_EPOCH,callbacks=[lr_callback,earlystopping]
) | Petals to the Metal - Flower Classification on TPU |
14,332,861 | team_win_score = RegularSeasonCompactResults.groupby(['Season', 'WTeamID'] ).agg({'WScore':['sum', 'count', 'var']} ).reset_index()
team_win_score.columns = [' '.join(col ).strip() for col in team_win_score.columns.values]
team_loss_score = RegularSeasonCompactResults.groupby(['Season', 'LTeamID'] ).agg({'LScore':['sum', 'count', 'var']} ).reset_index()
team_loss_score.columns = [' '.join(col ).strip() for col in team_loss_score.columns.values]
print(team_win_score.shape)
team_win_score.head()<merge> | cmdataset=get_validation_dataset(ordered=True)
image_ds=cmdataset.map(lambda image, label:image)
labels_ds=cmdataset.map(lambda image, label:label ).unbatch()
cm_correct_labels=next(iter(labels_ds.batch(NUM_VALIDATION_IMAGES)) ).numpy()
cm_probabilities=model.predict(image_ds)
cm_predictions = np.argmax(cm_probabilities,axis=-1)
labels = range(len(CLASSES))
cmat = confusion_matrix(
cm_correct_labels,
cm_predictions,
labels=labels,)
cmat=(cmat.T/cmat.sum(axis=1)).T | Petals to the Metal - Flower Classification on TPU |
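The transpose dance above row-normalizes the confusion matrix; an equivalent and arguably clearer form (a sketch) is:
cmat = cmat / cmat.sum(axis=1, keepdims=True)  # each true-label row now sums to 1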
14,332,861 | train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID'])
train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID'])
train = pd.merge(train, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID'])
train = pd.merge(train, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID'])
train.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True)
train.head()<merge> | score=f1_score(cm_correct_labels,cm_predictions,labels=labels,average='macro')
precision=precision_score(cm_correct_labels,cm_predictions,labels=labels,average='macro')
recall=recall_score(cm_correct_labels,cm_predictions,labels=labels,average='macro')
display_confusion_matrix(cmat,score,precision,recall ) | Petals to the Metal - Flower Classification on TPU |
14,332,861 | test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'WTeamID'])
test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'LTeamID'], right_on=['Season', 'LTeamID'])
test = pd.merge(test, team_loss_score, how='left', left_on=['Season', 'WTeamID'], right_on=['Season', 'LTeamID'])
test = pd.merge(test, team_win_score, how='left', left_on=['Season', 'LTeamID_x'], right_on=['Season', 'WTeamID'])
test.drop(['LTeamID_y', 'WTeamID_y'], axis=1, inplace=True)
test.head()
<feature_engineering> | dataset=get_validation_dataset()
dataset=dataset.unbatch().batch(20)
batch=iter(dataset ) | Petals to the Metal - Flower Classification on TPU |
14,332,861 | def preprocess(df):
df['x_score'] = df['WScore sum_x'] + df['LScore sum_y']
df['y_score'] = df['WScore sum_y'] + df['LScore sum_x']
df['x_count'] = df['WScore count_x'] + df['LScore count_y']
df['y_count'] = df['WScore count_y'] + df['LScore count_x']
df['x_var'] = df['WScore var_x'] + df['LScore var_y']
df['y_var'] = df['WScore var_y'] + df['LScore var_x']
return df
train = preprocess(train)
test = preprocess(test)
test.shape<drop_column> | images, labels=next(batch)
probabilities= model.predict(images)
predictions=np.argmax(probabilities,axis=-1)
display_batch_of_images(( images,labels),predictions ) | Petals to the Metal - Flower Classification on TPU |
14,332,861 | train_win = train.copy()
train_los = train.copy()
train_win = train_win[['WSeed', 'LSeed',
'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']]
train_los = train_los[['LSeed', 'WSeed',
'y_score', 'x_score', 'x_count', 'y_count', 'x_var', 'y_var']]
train_win.columns = ['Seed_1', 'Seed_2',
'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
train_los.columns = ['Seed_1', 'Seed_2',
'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']
test = test[['ID', 'WSeed', 'LSeed',
'x_score', 'y_score', 'x_count', 'y_count', 'x_var', 'y_var']]
test.columns = ['ID', 'Seed_1', 'Seed_2',
'Score_1', 'Score_2', 'Count_1', 'Count_2', 'Var_1', 'Var_2']<feature_engineering> | test_ds = get_test_dataset(ordered=True)
print('Computing predictions...')
test_images_ds=test_ds.map(lambda image, idnum: image)
probabilities = model.predict(test_images_ds)
predictions=np.argmax(probabilities,axis=-1)
print(predictions ) | Petals to the Metal - Flower Classification on TPU |
14,332,861 | def feature_engineering(df):
df['Seed_diff'] = df['Seed_1'] - df['Seed_2']
df['Score_diff'] = df['Score_1'] - df['Score_2']
df['Count_diff'] = df['Count_1'] - df['Count_2']
df['Var_diff'] = df['Var_1'] - df['Var_2']
df['Mean_score1'] = df['Score_1'] / df['Count_1']
df['Mean_score2'] = df['Score_2'] / df['Count_2']
df['Mean_score_diff'] = df['Mean_score1'] - df['Mean_score2']
df['FanoFactor_1'] = df['Var_1'] / df['Mean_score1']
df['FanoFactor_2'] = df['Var_2'] / df['Mean_score2']
return df
train_win = feature_engineering(train_win)
train_los = feature_engineering(train_los)
test = feature_engineering(test)
test.shape<feature_engineering> | print('Generating submission.csv file...')
test_ids_ds =test_ds.map(lambda image,idnum:idnum ).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U')
np.savetxt('submission.csv',np.rec.fromarrays([test_ids,predictions]),
fmt=['%s', '%d'],
delimiter=',',
header='id,label',
comments='',)
!head submission.csv | Petals to the Metal - Flower Classification on TPU |
14,332,861 | <concatenate><EOS> | save_model = model.save('Efficienet_B7_model.h5' ) | Petals to the Metal - Flower Classification on TPU |
14,160,233 | <SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<drop_column> | print("Tensorflow version " + tf.__version__)
| Petals to the Metal - Flower Classification on TPU |
14,160,233 | test = test.drop(['ID'],axis=1)
test.head()<import_modules> | try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print('Running on TPU ', tpu.master())
except ValueError:
tpu = None
if tpu:
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
strategy = tf.distribute.get_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync ) | Petals to the Metal - Flower Classification on TPU |
14,160,233 | from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
import lightgbm as lgb
import xgboost as xgb
from xgboost import XGBClassifier<prepare_x_and_y> | GCS_DS_PATH = KaggleDatasets().get_gcs_path('tpu-getting-started')
print(GCS_DS_PATH ) | Petals to the Metal - Flower Classification on TPU |
14,160,233 | y_train=data['result']
X_train=data.drop(columns='result' )<init_hyperparams> | IMAGE_SIZE = [512, 512]
GCS_PATH = GCS_DS_PATH + '/tfrecords-jpeg-512x512'
AUTO = tf.data.experimental.AUTOTUNE
TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec')
VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec')
TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec')
CLASSES = ['pink primrose', 'hard-leaved pocket orchid', 'canterbury bells', 'sweet pea', 'wild geranium', 'tiger lily', 'moon orchid', 'bird of paradise', 'monkshood', 'globe thistle',
'snapdragon', "colt's foot", 'king protea', 'spear thistle', 'yellow iris', 'globe-flower', 'purple coneflower', 'peruvian lily', 'balloon flower', 'giant white arum lily',
'fire lily', 'pincushion flower', 'fritillary', 'red ginger', 'grape hyacinth', 'corn poppy', 'prince of wales feathers', 'stemless gentian', 'artichoke', 'sweet william',
'carnation', 'garden phlox', 'love in the mist', 'cosmos', 'alpine sea holly', 'ruby-lipped cattleya', 'cape flower', 'great masterwort', 'siam tulip', 'lenten rose',
'barberton daisy', 'daffodil', 'sword lily', 'poinsettia', 'bolero deep blue', 'wallflower', 'marigold', 'buttercup', 'daisy', 'common dandelion',
'petunia', 'wild pansy', 'primula', 'sunflower', 'lilac hibiscus', 'bishop of llandaff', 'gaura', 'geranium', 'orange dahlia', 'pink-yellow dahlia',
'cautleya spicata', 'japanese anemone', 'black-eyed susan', 'silverbush', 'californian poppy', 'osteospermum', 'spring crocus', 'iris', 'windflower', 'tree poppy',
'gazania', 'azalea', 'water lily', 'rose', 'thorn apple', 'morning glory', 'passion flower', 'lotus', 'toad lily', 'anthurium',
'frangipani', 'clematis', 'hibiscus', 'columbine', 'desert-rose', 'tree mallow', 'magnolia', 'cyclamen ', 'watercress', 'canna lily',
'hippeastrum ', 'bee balm', 'pink quill', 'foxglove', 'bougainvillea', 'camellia', 'mallow', 'mexican petunia', 'bromelia', 'blanket flower',
'trumpet creeper', 'blackberry lily', 'common tulip', 'wild rose']
def decode_image(image_data):
image = tf.image.decode_jpeg(image_data, channels=3)
image = tf.cast(image, tf.float32)/ 255.0
image = tf.reshape(image, [*IMAGE_SIZE, 3])
return image
def read_labeled_tfrecord(example):
LABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"class": tf.io.FixedLenFeature([], tf.int64),
}
example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
image = decode_image(example['image'])
label = tf.cast(example['class'], tf.int32)
return image, label
def read_unlabeled_tfrecord(example):
UNLABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"id": tf.io.FixedLenFeature([], tf.string),
}
example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
image = decode_image(example['image'])
idnum = example['id']
return image, idnum
def load_dataset(filenames, labeled=True, ordered=False):
ignore_order = tf.data.Options()
if not ordered:
ignore_order.experimental_deterministic = False
dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO)
dataset = dataset.with_options(ignore_order)
dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO)
return dataset | Petals to the Metal - Flower Classification on TPU |
14,160,233 | params_lgb = {'num_leaves': 400,
'min_child_weight': 0.034,
'feature_fraction': 0.379,
'bagging_fraction': 0.418,
'min_data_in_leaf': 106,
'objective': 'binary',
'max_depth': 50,
'learning_rate': 0.0068,
"boosting_type": "gbdt",
"bagging_seed": 11,
"metric": 'logloss',
"verbosity": -1,
'reg_alpha': 0.3899,
'reg_lambda': 0.648,
'random_state': 47,
}
params_xgb = {'colsample_bytree': 0.8,
'learning_rate': 0.0004,
'max_depth': 31,
'subsample': 1,
'objective':'binary:logistic',
'eval_metric':'logloss',
'min_child_weight':3,
'gamma':0.25,
'n_estimators':5000
}<split> | def data_augment(image, label):
image = tf.image.random_flip_left_right(image)
image = tf.image.random_saturation(image, 1, 3)
image = tf.image.random_contrast(image, 0.4, 0.6)
return image, label
def count_data_items(filenames):
n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames]
return np.sum(n)
num_training_images = count_data_items(TRAINING_FILENAMES)
weight_per_class = {}
NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES)
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
print('Dataset: {} training images, {} validation images, {} unlabeled test images'.format(num_training_images, NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
def get_training_dataset() :
global num_training_images, weight_per_class  # write back to the module-level names consumed later by model.fit
dataset = load_dataset(TRAINING_FILENAMES, labeled=True)
dataset = dataset.map(data_augment, num_parallel_calls=AUTO ).concatenate(dataset)
label_counter = Counter()
for images, labels in dataset:
label_counter.update([labels.numpy() ])
total = sum(label_counter.values())
print("Nr of labels: ", total)
num_training_images = total
TARGET_NUM_PER_CLASS = total / len(CLASSES)
def get_weight_for_class(class_id):
counting = label_counter[class_id]
weight = TARGET_NUM_PER_CLASS / counting
return weight
weight_per_class = {class_id: get_weight_for_class(class_id)for class_id in range(104)}
dataset = dataset.shuffle(total, reshuffle_each_iteration=True)
dataset = dataset.repeat()
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
def get_validation_dataset(ordered=False):
dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=ordered)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.cache()
dataset = dataset.prefetch(AUTO)
return dataset
def get_test_dataset(ordered=False):
dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset | Petals to the Metal - Flower Classification on TPU |
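Why ordered=True for the validation and test readers (a note, not from the notebook): predictions are later matched to labels and ids purely by position, so those TFRecords must be read deterministically; training reads ignore order to maximize streaming throughput.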
14,160,233 | NFOLDS = 10
folds = KFold(n_splits=NFOLDS)
columns = X_train.columns
splits = folds.split(X_train, y_train )<prepare_x_and_y> | BATCH_SIZE = 16 * strategy.num_replicas_in_sync
ds_train = get_training_dataset()
ds_valid = get_validation_dataset()
ds_test = get_test_dataset()
print("Training:", ds_train)
print("Validation:", ds_valid)
print("Test:", ds_test ) | Petals to the Metal - Flower Classification on TPU |
14,160,233 | y_preds_lgb = np.zeros(test.shape[0])
y_oof_lgb = np.zeros(X_train.shape[0])
<train_model> | np.set_printoptions(threshold=15, linewidth=80)
print("Training data shapes:")
for image, label in ds_train.take(3):
print(image.numpy().shape, label.numpy().shape)
print("Training data label examples:", label.numpy() ) | Petals to the Metal - Flower Classification on TPU |
14,160,233 | for fold_n,(train_index, valid_index)in enumerate(splits):
print('Fold:',fold_n+1)
X_train1, X_valid1 = X_train[columns].iloc[train_index], X_train[columns].iloc[valid_index]
y_train1, y_valid1 = y_train.iloc[train_index], y_train.iloc[valid_index]
dtrain = lgb.Dataset(X_train1, label=y_train1)
dvalid = lgb.Dataset(X_valid1, label=y_valid1)
clf = lgb.train(params_lgb, dtrain, 10000, valid_sets = [dtrain, dvalid], verbose_eval=200)
y_pred_valid = clf.predict(X_valid1)
y_oof_lgb[valid_index] = y_pred_valid
y_preds_lgb += clf.predict(test)/ NFOLDS<load_from_csv> | print("Test data shapes:")
for image, idnum in ds_test.take(3):
print(image.numpy().shape, idnum.numpy().shape)
print("Test data IDs:", idnum.numpy().astype('U')) | Petals to the Metal - Flower Classification on TPU |
14,160,233 | submission_df = pd.read_csv(path + 'WSampleSubmissionStage1_2020.csv')
submission_df['Pred'] = y_preds_lgb
submission_df<load_from_csv> | def batch_to_numpy_images_and_labels(data):
images, labels = data
numpy_images = images.numpy()
numpy_labels = labels.numpy()
if numpy_labels.dtype == object:
numpy_labels = [None for _ in enumerate(numpy_images)]
return numpy_images, numpy_labels
def title_from_label_and_target(label, correct_label):
if correct_label is None:
return CLASSES[label], True
correct =(label == correct_label)
return "{} [{}{}{}]".format(CLASSES[label], 'OK' if correct else 'NO', u"\u2192" if not correct else '',
CLASSES[correct_label] if not correct else ''), correct
def display_one_flower(image, title, subplot, red=False, titlesize=16):
plt.subplot(*subplot)
plt.axis('off')
plt.imshow(image)
if len(title)> 0:
plt.title(title, fontsize=int(titlesize)if not red else int(titlesize/1.2), color='red' if red else 'black', fontdict={'verticalalignment':'center'}, pad=int(titlesize/1.5))
return(subplot[0], subplot[1], subplot[2]+1)
def display_batch_of_images(databatch, predictions=None):
images, labels = batch_to_numpy_images_and_labels(databatch)
if labels is None:
labels = [None for _ in enumerate(images)]
rows = int(math.sqrt(len(images)))
cols = len(images)//rows
FIGSIZE = 13.0
SPACING = 0.1
subplot=(rows,cols,1)
if rows < cols:
plt.figure(figsize=(FIGSIZE,FIGSIZE/cols*rows))
else:
plt.figure(figsize=(FIGSIZE/rows*cols,FIGSIZE))
for i,(image, label)in enumerate(zip(images[:rows*cols], labels[:rows*cols])) :
title = '' if label is None else CLASSES[label]
correct = True
if predictions is not None:
title, correct = title_from_label_and_target(predictions[i], label)
dynamic_titlesize = FIGSIZE*SPACING/max(rows,cols)*40+3
subplot = display_one_flower(image, title, subplot, not correct, titlesize=dynamic_titlesize)
plt.tight_layout()
if label is None and predictions is None:
plt.subplots_adjust(wspace=0, hspace=0)
else:
plt.subplots_adjust(wspace=SPACING, hspace=SPACING)
plt.show()
def display_training_curves(training, validation, title, subplot):
if subplot%10==1:
plt.subplots(figsize=(10,10), facecolor='#F8F8F8')
plt.tight_layout()
ax = plt.subplot(subplot)
ax.set_facecolor('#F8F8F8')
ax.plot(training)
ax.plot(validation)
ax.set_title('model '+ title)
ax.set_ylabel(title)
ax.set_xlabel('epoch')
ax.legend(['train', 'valid.'])
def plot_class_distribution() :
dataset = load_dataset(VALIDATION_FILENAMES, labeled=True)
label_counter = Counter()
for images, labels in dataset:
label_counter.update([labels.numpy() ])
ordered_labels = label_counter.most_common()
print("Most common: ", ordered_labels[0])
print("Least common: ", ordered_labels[len(ordered_labels)-1])
plt.bar(label_counter.keys() , label_counter.values() ) | Petals to the Metal - Flower Classification on TPU |
14,160,233 | test= pd.read_csv(path +'WSampleSubmissionStage1_2020.csv')
test.shape<save_to_csv> | ds_iter = iter(ds_train.unbatch().batch(20)) | Petals to the Metal - Flower Classification on TPU |
14,160,233 | submission_df.to_csv('submission.csv', index=False )<load_from_csv> | one_batch = next(ds_iter)
display_batch_of_images(one_batch ) | Petals to the Metal - Flower Classification on TPU |
14,160,233 | train = pd.read_csv('/kaggle/input/titanic/train.csv',index_col=None)
print(train.shape)
test = pd.read_csv('/kaggle/input/titanic/test.csv',index_col=None)
print(test.shape)
subm = pd.read_csv('/kaggle/input/titanic/gender_submission.csv',index_col=None)
print(subm.shape )<install_modules> | with strategy.scope() :
pretrained_model = tf.keras.applications.DenseNet201(
weights='imagenet',
include_top=False ,
input_shape=[*IMAGE_SIZE, 3]
)
pretrained_model.trainable = False
model = tf.keras.Sequential([
pretrained_model,
tf.keras.layers.GlobalAveragePooling2D() ,
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(len(CLASSES), activation='softmax')
] ) | Petals to the Metal - Flower Classification on TPU |
14,160,233 | !pip install autoviml<import_modules> | model.compile(
optimizer='adam',
loss = 'sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'],
)
model.summary() | Petals to the Metal - Flower Classification on TPU |
14,160,233 | from autoviml.Auto_ViML import Auto_ViML<define_variables> | EPOCHS = 18
STEPS_PER_EPOCH = num_training_images // BATCH_SIZE
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor='val_loss', patience=2, restore_best_weights=True)
history = model.fit(
ds_train,
validation_data=ds_valid,
epochs=EPOCHS,
steps_per_epoch=STEPS_PER_EPOCH,
callbacks=[early_stopping],
class_weight=weight_per_class
) | Petals to the Metal - Flower Classification on TPU |
14,160,233 | target = 'Survived'<choose_model_class> | for layer in model.layers:
layer.trainable = True
opt = tf.keras.optimizers.Adam(learning_rate=0.0001)
model.compile(
optimizer=opt,
loss = 'sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'],
)
history = model.fit(
ds_train,
validation_data=ds_valid,
epochs=12,
steps_per_epoch=STEPS_PER_EPOCH,
callbacks=[early_stopping]
) | Petals to the Metal - Flower Classification on TPU |
14,160,233 | m, feats, trainm, testm = Auto_ViML(train, target, test,
sample_submission=subm,
scoring_parameter='balanced-accuracy', KMeans_Featurizer=True,
hyper_param='GS',feature_reduction=True,
Boosting_Flag=None,Binning_Flag=True,
Add_Poly=0, Stacking_Flag=True,Imbalanced_Flag=False,
verbose=0 )<data_type_conversions> | cmdataset = get_validation_dataset(ordered=True)
images_ds = cmdataset.map(lambda image, label: image)
labels_ds = cmdataset.map(lambda image, label: label ).unbatch()
cm_correct_labels = next(iter(labels_ds.batch(NUM_VALIDATION_IMAGES)) ).numpy()
cm_probabilities = model.predict(images_ds)
cm_predictions = np.argmax(cm_probabilities, axis=-1)
labels = range(len(CLASSES))
cmat = confusion_matrix(
cm_correct_labels,
cm_predictions,
labels=labels,
)
cmat =(cmat.T / cmat.sum(axis=1)).T | Petals to the Metal - Flower Classification on TPU |
14,160,233 | subm[target] = testm[target+'_predictions'].astype(int ).values<define_variables> | score = f1_score(
cm_correct_labels,
cm_predictions,
labels=labels,
average='macro',
)
precision = precision_score(
cm_correct_labels,
cm_predictions,
labels=labels,
average='macro',
)
recall = recall_score(
cm_correct_labels,
cm_predictions,
labels=labels,
average='macro',
)
display_confusion_matrix(cmat, score, precision, recall ) | Petals to the Metal - Flower Classification on TPU |
14,160,233 | filename='sample_submission_log.csv'
savefile = '/kaggle/working/Survived/sample_submission_log.csv'
savefile<save_to_csv> | dataset = get_validation_dataset()
dataset = dataset.unbatch().batch(20)
batch = iter(dataset ) | Petals to the Metal - Flower Classification on TPU |
14,160,233 | subm.to_csv(savefile,index=False )<import_modules> | images, labels = next(batch)
probabilities = model.predict(images)
predictions = np.argmax(probabilities, axis=-1)
display_batch_of_images(( images, labels), predictions ) | Petals to the Metal - Flower Classification on TPU |
14,160,233 | from autoviml.Auto_NLP import Auto_NLP<define_variables> | test_ds = get_test_dataset(ordered=True)
print('Computing predictions...')
test_images_ds = test_ds.map(lambda image, idnum: image)
probabilities = model.predict(test_images_ds)
predictions = np.argmax(probabilities, axis=-1)
print(predictions ) | Petals to the Metal - Flower Classification on TPU |
14,160,233 | <train_on_grid><EOS> | print('Generating submission.csv file...')
test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U')
np.savetxt(
'submission.csv',
np.rec.fromarrays([test_ids, predictions]),
fmt=['%s', '%d'],
delimiter=',',
header='id,label',
comments='',
)
!head submission.csv | Petals to the Metal - Flower Classification on TPU |
13,618,247 | <SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<compute_train_metric> | tf.data.experimental.AUTOTUNE
print("TF version " + tf.__version__)
AUTO = tf.data.experimental.AUTOTUNE
| Petals to the Metal - Flower Classification on TPU |
13,618,247 | m4, feats4, trainm4, testm4 = Auto_ViML(train_nlp, target, test_nlp,
sample_submission=subm,
scoring_parameter='balanced-accuracy',
hyper_param='GS',feature_reduction=True,
Boosting_Flag="CatBoost",Binning_Flag=False,
Add_Poly=0, Stacking_Flag=False,Imbalanced_Flag=False,
verbose=2 )<data_type_conversions> | try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print('Running on TPU ', tpu.master())
except ValueError:
tpu = None
if tpu:
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
strategy = tf.distribute.get_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync ) | Petals to the Metal - Flower Classification on TPU |
13,618,247 | subm[target] = testm4['Survived_CatBoost_predictions'].values.astype(int)
subm.head()<save_to_csv> | IMAGE_SIZE = [512, 512]
EPOCHS = 8
BATCH_SIZE = 16 * strategy.num_replicas_in_sync
SEED = 752
SKIP_VALIDATION = False
TTA_NUM = 5
random.seed(SEED)
np.random.seed(SEED)
tf.random.set_seed(SEED ) | Petals to the Metal - Flower Classification on TPU |
13,618,247 | subm.to_csv('sample_submission4.csv',index=False )<compute_train_metric> | np.set_printoptions(threshold=15, linewidth=80)
def batch_to_numpy_images_and_labels(data):
images, labels = data
numpy_images = images.numpy()
numpy_labels = labels.numpy()
if numpy_labels.dtype == object:
numpy_labels = [None for _ in enumerate(numpy_images)]
return numpy_images, numpy_labels
def title_from_label_and_target(label, correct_label):
if correct_label is None:
return CLASSES[label], True
correct =(label == correct_label)
return "{} [{}{}{}]".format(CLASSES[label], 'OK' if correct else 'NO', u"\u2192" if not correct else '',
CLASSES[correct_label] if not correct else ''), correct
def display_one_flower(image, title, subplot, red=False, titlesize=16):
plt.subplot(*subplot)
plt.axis('off')
plt.imshow(image)
if len(title)> 0:
plt.title(title, fontsize=int(titlesize)if not red else int(titlesize/1.2), color='red' if red else 'black', fontdict={'verticalalignment':'center'}, pad=int(titlesize/1.5))
return(subplot[0], subplot[1], subplot[2]+1)
def display_batch_of_images(databatch, predictions=None):
images, labels = batch_to_numpy_images_and_labels(databatch)
if labels is None:
labels = [None for _ in enumerate(images)]
rows = int(math.sqrt(len(images)))
cols = len(images)//rows
FIGSIZE = 13.0
SPACING = 0.1
subplot=(rows,cols,1)
if rows < cols:
plt.figure(figsize=(FIGSIZE,FIGSIZE/cols*rows))
else:
plt.figure(figsize=(FIGSIZE/rows*cols,FIGSIZE))
for i,(image, label)in enumerate(zip(images[:rows*cols], labels[:rows*cols])) :
title = '' if label is None else CLASSES[label]
correct = True
if predictions is not None:
title, correct = title_from_label_and_target(predictions[i], label)
dynamic_titlesize = FIGSIZE*SPACING/max(rows,cols)*40+3
subplot = display_one_flower(image, title, subplot, not correct, titlesize=dynamic_titlesize)
plt.tight_layout()
if label is None and predictions is None:
plt.subplots_adjust(wspace=0, hspace=0)
else:
plt.subplots_adjust(wspace=SPACING, hspace=SPACING)
plt.show()
def display_confusion_matrix(cmat, score, precision, recall):
plt.figure(figsize=(15,15))
ax = plt.gca()
ax.matshow(cmat, cmap='Reds')
ax.set_xticks(range(len(CLASSES)))
ax.set_xticklabels(CLASSES, fontdict={'fontsize': 7})
plt.setp(ax.get_xticklabels() , rotation=45, ha="left", rotation_mode="anchor")
ax.set_yticks(range(len(CLASSES)))
ax.set_yticklabels(CLASSES, fontdict={'fontsize': 7})
plt.setp(ax.get_yticklabels() , rotation=45, ha="right", rotation_mode="anchor")
titlestring = ""
if score is not None:
titlestring += 'f1 = {:.3f} '.format(score)
if precision is not None:
titlestring += '\nprecision = {:.3f} '.format(precision)
if recall is not None:
titlestring += '\nrecall = {:.3f} '.format(recall)
if len(titlestring)> 0:
ax.text(101, 1, titlestring, fontdict={'fontsize': 18, 'horizontalalignment':'right', 'verticalalignment':'top', 'color':'#804040'})
plt.show()
def display_training_curves(training, validation, title, subplot):
if subplot%10==1:
plt.subplots(figsize=(10,10), facecolor='#F8F8F8')
plt.tight_layout()
ax = plt.subplot(subplot)
ax.set_facecolor('#F8F8F8')
ax.plot(training)
ax.plot(validation)
ax.set_title('model '+ title)
ax.set_ylabel(title)
ax.set_xlabel('epoch')
ax.legend(['train', 'valid.'] ) | Petals to the Metal - Flower Classification on TPU |
13,618,247 | m2, feats2, trainm2, testm2 = Auto_ViML(train_nlp, target, test_nlp,
sample_submission=subm,
scoring_parameter='balanced-accuracy',
hyper_param='GS',feature_reduction=True,
Boosting_Flag=None,Binning_Flag=False,
Add_Poly=0, Stacking_Flag=False,
Imbalanced_Flag=False,
verbose=2 )<data_type_conversions> | def random_blockout(img, sl=0.1, sh=0.2, rl=0.4):
p=random.random()
if p>=0.25:
w, h, c = IMAGE_SIZE[0], IMAGE_SIZE[1], 3
origin_area = tf.cast(h*w, tf.float32)
e_size_l = tf.cast(tf.round(tf.sqrt(origin_area * sl * rl)) , tf.int32)
e_size_h = tf.cast(tf.round(tf.sqrt(origin_area * sh / rl)) , tf.int32)
e_height_h = tf.minimum(e_size_h, h)
e_width_h = tf.minimum(e_size_h, w)
erase_height = tf.random.uniform(shape=[], minval=e_size_l, maxval=e_height_h, dtype=tf.int32)
erase_width = tf.random.uniform(shape=[], minval=e_size_l, maxval=e_width_h, dtype=tf.int32)
erase_area = tf.zeros(shape=[erase_height, erase_width, c])
erase_area = tf.cast(erase_area, tf.uint8)
pad_h = h - erase_height
pad_top = tf.random.uniform(shape=[], minval=0, maxval=pad_h, dtype=tf.int32)
pad_bottom = pad_h - pad_top
pad_w = w - erase_width
pad_left = tf.random.uniform(shape=[], minval=0, maxval=pad_w, dtype=tf.int32)
pad_right = pad_w - pad_left
erase_mask = tf.pad([erase_area], [[0,0],[pad_top, pad_bottom], [pad_left, pad_right], [0,0]], constant_values=1)
erase_mask = tf.squeeze(erase_mask, axis=0)
erased_img = tf.multiply(tf.cast(img,tf.float32), tf.cast(erase_mask, tf.float32))
return tf.cast(erased_img, img.dtype)
else:
return tf.cast(img, img.dtype ) | Petals to the Metal - Flower Classification on TPU |
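random_blockout above implements random-erasing augmentation: with probability 0.75 it multiplies the image by a mask that zeroes one randomly sized, randomly placed rectangle. A quick visual check (a sketch; img stands for any decoded [512, 512, 3] float image tensor):
import matplotlib.pyplot as plt
plt.imshow(random_blockout(img).numpy()); plt.axis('off'); plt.show()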
13,618,247 | subm[target] =(testm2['Survived_proba_1']>0.5 ).astype(int ).values
subm.head()<save_to_csv> | def decode_image(image_data):
image = tf.image.decode_jpeg(image_data, channels=3)
image = tf.cast(image, tf.float32)/ 255.0
image = tf.reshape(image, [*IMAGE_SIZE, 3])
return image
def read_labeled_tfrecord(example):
LABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"class": tf.io.FixedLenFeature([], tf.int64),
}
example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
image = decode_image(example['image'])
label = tf.cast(example['class'], tf.int32)
return image, label
def read_unlabeled_tfrecord(example):
UNLABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"id": tf.io.FixedLenFeature([], tf.string),
}
example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
image = decode_image(example['image'])
idnum = example['id']
return image, idnum
def load_dataset(filenames, labeled=True, ordered=False):
ignore_order = tf.data.Options()
if not ordered:
ignore_order.experimental_deterministic = False
dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO)
dataset = dataset.with_options(ignore_order)
dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO)
return dataset
def data_augment(image, label):
image = tf.image.random_flip_left_right(image, seed=SEED)
image = random_blockout(image)
return image, label
def get_training_dataset() :
dataset = load_dataset(TRAINING_FILENAMES, labeled=True)
dataset = dataset.map(data_augment, num_parallel_calls=AUTO)
dataset = dataset.repeat()
dataset = dataset.shuffle(2048)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
def get_validation_dataset(ordered=False):
dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=ordered)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.cache()
dataset = dataset.prefetch(AUTO)
return dataset
def get_test_dataset(ordered=False):
dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
def count_data_items(filenames):
n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames]
return np.sum(n)
NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES)
NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES)
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE
print('Dataset: {} training images, {} validation images, {} unlabeled test images'.format(NUM_TRAINING_IMAGES, NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
| Petals to the Metal - Flower Classification on TPU |
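count_data_items relies on the example count that the Kaggle TFRecord filenames embed just before the extension (e.g. a file named flowers00-230.tfrec holds 230 examples); a quick check of the regex (a sketch):
assert count_data_items(['flowers00-230.tfrec', 'flowers01-17.tfrec']) == 247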
13,618,247 | subm.to_csv('sample_submission2.csv',index=False )<compute_train_metric> | if SKIP_VALIDATION:
TRAINING_FILENAMES = TRAINING_FILENAMES + VALIDATION_FILENAMES | Petals to the Metal - Flower Classification on TPU |
13,618,247 | m3, feats3, trainm3, testm3 = Auto_ViML(train_nlp, target, test_nlp,
sample_submission=subm,
scoring_parameter='balanced-accuracy',
hyper_param='RS',feature_reduction=True,
Boosting_Flag=True,Binning_Flag=False,
Add_Poly=0, Stacking_Flag=False,
Imbalanced_Flag=False,
verbose=2 )<data_type_conversions> | training_dataset = get_training_dataset()
validation_dataset = get_validation_dataset()
training_dataset,validation_dataset | Petals to the Metal - Flower Classification on TPU |
13,618,247 | subm[target] =(testm3['Survived_proba_1']>0.5 ).astype(int ).values
subm.head()<save_to_csv> | print("Number of classes: {}".format(len(CLASSES)))
print("First five classes, sorted alphabetically:")
for name in sorted(CLASSES)[:5]:
print(name)
print("Number of training images: {}".format(NUM_TRAINING_IMAGES)) | Petals to the Metal - Flower Classification on TPU |
13,618,247 | subm.to_csv('sample_submission3.csv',index=False )<set_options> | print("Training data shapes:")
for image, label in get_training_dataset().take(1):
print(image.numpy().shape, label.numpy().shape)
print("Training data label examples:", label.numpy())
print("Validation data shapes:")
for image, label in get_validation_dataset().take(1):
print(image.numpy().shape, label.numpy().shape)
print("Validation data label examples:", label.numpy())
print("Test data shapes:")
for image, idnum in get_test_dataset().take(1):
print(image.numpy().shape, idnum.numpy().shape)
print("Test data IDs:", idnum.numpy().astype('U')) | Petals to the Metal - Flower Classification on TPU |
13,618,247 | warnings.filterwarnings('ignore' )<load_from_csv> | training_dataset = get_training_dataset()
training_dataset = training_dataset.unbatch().batch(20)
train_batch = iter(training_dataset ) | Petals to the Metal - Flower Classification on TPU |
df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv',index_col='Id')
df_val = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv',index_col='Id')
y=np.log1p(df.SalePrice)
df=df.drop(['SalePrice'],axis=1)
print('shape of df:', df.shape)
print('shape of df_val:', df_val.shape )<filter> | display_batch_of_images(next(train_batch)) | Petals to the Metal - Flower Classification on TPU |
13,618,247 | def remove_nanish(nan_thresh,df,df_val):
X=df
mask1=X.isnull().sum() /len(X)<nan_thresh
mask2=df_val.isnull().sum() /len(df_val)<nan_thresh
keeps=X.columns[mask1 & mask2]
X=X[keeps]
X_valid=df_val[keeps]
return X, X_valid<rename_columns> | test_dataset = get_test_dataset()
test_dataset = test_dataset.unbatch().batch(20)
test_batch = iter(test_dataset ) | Petals to the Metal - Flower Classification on TPU |
13,618,247 | def impute_nums(num_X_train,num_X_test,num_X_valid):
SI=SimpleImputer(strategy='mean')
num_X_train = pd.DataFrame(SI.fit_transform(num_X_train),index=num_X_train.index, columns=num_X_train.columns)
num_X_test = pd.DataFrame(SI.transform(num_X_test),index=num_X_test.index, columns=num_X_test.columns)
num_X_valid = pd.DataFrame(SI.transform(num_X_valid),index=num_X_valid.index, columns=num_X_valid.columns)
return num_X_train, num_X_test, num_X_valid<normalization> | display_batch_of_images(next(test_batch)) | Petals to the Metal - Flower Classification on TPU |
13,618,247 | def scale(num_X_train,num_X_test,num_X_valid):
SS=StandardScaler()
num_X_train = pd.DataFrame(SS.fit_transform(num_X_train),index=num_X_train.index, columns=num_X_train.columns)
num_X_test = pd.DataFrame(SS.transform(num_X_test),index=num_X_test.index, columns=num_X_test.columns)
num_X_valid = pd.DataFrame(SS.transform(num_X_valid),index=num_X_valid.index, columns=num_X_valid.columns)
return num_X_train, num_X_test, num_X_valid<categorify> | LR_START = 0.00001
LR_MAX = 0.00006 * strategy.num_replicas_in_sync
LR_MIN = 0.00001
LR_RAMPUP_EPOCHS = 5
LR_SUSTAIN_EPOCHS = 0
LR_EXP_DECAY =.8
def lrfn(epoch):
if epoch < LR_RAMPUP_EPOCHS:
lr =(LR_MAX - LR_START)/ LR_RAMPUP_EPOCHS * epoch + LR_START
elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
lr = LR_MAX
else:
lr =(LR_MAX - LR_MIN)* LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS)+ LR_MIN
return lr
lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose = True)
rng = [i for i in range(25 if EPOCHS<25 else EPOCHS)]
y = [lrfn(x)for x in rng]
plt.plot(rng, y)
print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1])) | Petals to the Metal - Flower Classification on TPU |
13,618,247 | def impute_encode(cat_X_train,cat_X_test,cat_X_valid):
im=SimpleImputer(strategy='most_frequent')
cat_X_train = pd.DataFrame(im.fit_transform(cat_X_train),index=cat_X_train.index, columns=cat_X_train.columns)
cat_X_test = pd.DataFrame(im.transform(cat_X_test),index=cat_X_test.index, columns=cat_X_test.columns)
cat_X_valid = pd.DataFrame(im.transform(cat_X_valid),index=cat_X_valid.index, columns=cat_X_valid.columns)
OH = OneHotEncoder(handle_unknown='ignore',sparse=False)
cat_X_train = pd.DataFrame(OH.fit_transform(cat_X_train),index=cat_X_train.index)
cat_X_test = pd.DataFrame(OH.transform(cat_X_test),index=cat_X_test.index)
cat_X_valid = pd.DataFrame(OH.transform(cat_X_valid),index=cat_X_valid.index)
return cat_X_train, cat_X_test, cat_X_valid<categorify> | !pip install -q efficientnet
| Petals to the Metal - Flower Classification on TPU |
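The pip cell above installs the standalone EfficientNet package; the model cell below uses it as efn, which assumes an unshown import cell, presumably:
import efficientnet.tfkeras as efn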
13,618,247 | def preprocess(X,df_val):
nan_thresh=0.0450
skew_thresh=40
target_corr=0.01
X, X_valid=remove_nanish(nan_thresh,X,df_val)
categorical_cols = [cname for cname in X.columns if X[cname].dtype == "object" ]
numerical_cols = [cname for cname in X.columns if X[cname].dtype in ['int64', 'float64']]
cols=categorical_cols+numerical_cols
X = X[cols]
X_valid= X_valid[cols]
X_train, X_test, y_train, y_test =train_test_split(X,y,test_size=0.2)
num_X_train = X_train.drop(categorical_cols, axis=1)
num_X_test = X_test.drop(categorical_cols, axis=1)
num_X_valid = X_valid.drop(categorical_cols, axis=1)
num_X_train, num_X_test, num_X_valid= impute_nums(num_X_train, num_X_test, num_X_valid)
skewed=[col for col in num_X_train.columns if abs(num_X_train[col].skew(axis=0)) >skew_thresh]
for col in skewed:
num_X_train[col]=np.log1p(num_X_train[col])
num_X_test[col]=np.log1p(num_X_test[col])
num_X_valid[col]=np.log1p(num_X_valid[col])
num_X_train, num_X_test, num_X_valid= scale(num_X_train, num_X_test, num_X_valid)
for col in numerical_cols:
if abs(y.corr(num_X_train[col])) < target_corr:
num_X_train.drop([col],axis='columns',inplace=True)
num_X_test.drop([col],axis='columns',inplace=True)
num_X_valid.drop([col],axis='columns',inplace=True)
numerical_cols = [cname for cname in num_X_train.columns if num_X_train[cname].dtype in ['int64', 'float64']]
cat_X_train = X_train[categorical_cols]
cat_X_test = X_test[categorical_cols]
cat_X_valid = X_valid[categorical_cols]
cat_X_train,cat_X_test,cat_X_valid = impute_encode(cat_X_train,cat_X_test,cat_X_valid)
X_train = pd.concat([num_X_train, cat_X_train], axis=1,join='inner')
X_test = pd.concat([num_X_test, cat_X_test ], axis=1,join='inner')
X_valid = pd.concat([num_X_valid, cat_X_valid ], axis=1,join='inner')
return X_train, X_test, X_valid, y_train, y_test, numerical_cols<split> | with strategy.scope() :
enet = efn.EfficientNetB7(input_shape=[*IMAGE_SIZE, 3], weights='imagenet', include_top=False)
enet.trainable = True
model1 = tf.keras.Sequential([
enet,
tf.keras.layers.GlobalAveragePooling2D() ,
tf.keras.layers.Dense(len(CLASSES), activation='softmax')
])
model1.compile(
optimizer=tf.keras.optimizers.Adam() ,
loss = 'sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy']
)
model1.summary() | Petals to the Metal - Flower Classification on TPU |
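A quick shape sketch for the classification head above. Assuming EfficientNetB7 with 512x512 inputs, the backbone's final feature map is about 16x16x2560 (these numbers are an assumption about B7, not stated in the source); GlobalAveragePooling2D collapses the spatial axes before the softmax:

import tensorflow as tf

feat = tf.zeros([1, 16, 16, 2560])                        # assumed B7 feature map
pooled = tf.keras.layers.GlobalAveragePooling2D()(feat)   # -> (1, 2560)
print(pooled.shape)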
13,618,247 | X_train, X_test, X_valid, y_train, y_test, numerical_cols = preprocess(df,df_val )<compute_train_metric> | weight_path_save1 = 'Effnet_best_model.hdf5'
checkpoint = ModelCheckpoint(weight_path_save1,
monitor= 'val_loss',
verbose=1,
save_best_only=True,
mode= 'min',
save_weights_only = False,
period = 1)
early = EarlyStopping(monitor= 'val_loss',
mode= 'min',
patience=2)
callbacks_list = [checkpoint, early, lr_callback] | Petals to the Metal - Flower Classification on TPU |
13,618,247 | best_lasso=Lasso(alpha=0.00045)
best_lasso.fit(X_train,y_train)
test_preds=best_lasso.predict(X_test)
test_rmse=np.sqrt(mean_squared_error(y_test,test_preds))
train_preds=best_lasso.predict(X_train)
train_rmse=np.sqrt(mean_squared_error(y_train,train_preds))
print(f'train error {train_rmse}')
print(f'test error {test_rmse}' )<save_to_csv> | if not SKIP_VALIDATION:
history1 = model1.fit(get_training_dataset() ,
steps_per_epoch=STEPS_PER_EPOCH,
epochs=EPOCHS,
validation_data=get_validation_dataset() ,
callbacks = callbacks_list)
else:
history1 = model1.fit(get_training_dataset() ,
steps_per_epoch=STEPS_PER_EPOCH,
epochs=EPOCHS,
callbacks = callbacks_list ) | Petals to the Metal - Flower Classification on TPU |
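The alpha=0.00045 above is a fixed choice; one way such a value could be found (a hedged sketch, not shown in the source) is a cross-validated search with LassoCV over a log-spaced grid:

import numpy as np
from sklearn.linear_model import LassoCV

alphas = np.logspace(-4, -2, 30)                  # illustrative grid around 0.00045
lasso_cv = LassoCV(alphas=alphas, cv=5, max_iter=10000)
lasso_cv.fit(X_train, y_train)                    # frames from the preprocess step above
print('best alpha:', lasso_cv.alpha_)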
13,618,247 | valid_preds=np.expm1(pd.Series(best_lasso.predict(X_valid),index=X_valid.index))
example=pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv',index_col='Id')
example['SalePrice']=valid_preds
example.to_csv('submission.csv', index=True)<import_modules> | with strategy.scope() :
densenet = tf.keras.applications.DenseNet201(input_shape=[*IMAGE_SIZE, 3], weights='imagenet', include_top=False)
densenet.trainable = True
model2 = tf.keras.Sequential([
densenet,
tf.keras.layers.GlobalAveragePooling2D() ,
tf.keras.layers.Dense(len(CLASSES), activation='softmax')
])
model2.compile(
optimizer=tf.keras.optimizers.Adam() ,
loss = 'sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy']
)
model2.summary() | Petals to the Metal - Flower Classification on TPU |
13,618,247 | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from lightgbm import LGBMClassifier
from xgboost import XGBClassifier
from tpot import TPOTClassifier<load_from_csv> | weight_path_save2 = 'Dense_best_model.hdf5'
checkpoint = ModelCheckpoint(weight_path_save2,
monitor= 'val_loss',
verbose=1,
save_best_only=True,
mode= 'min',
save_weights_only = False)
early = EarlyStopping(monitor= 'val_loss',
mode= 'min',
patience=2)
callbacks_list = [checkpoint, early, lr_callback] | Petals to the Metal - Flower Classification on TPU |
13,618,247 | train_df = pd.read_csv('../input/titanic/train.csv')
test_df = pd.read_csv('../input/titanic/test.csv')
print('train_df.shape:', train_df.shape)
print('test_df.shape:', test_df.shape )<create_dataframe> | if not SKIP_VALIDATION:
history2 = model2.fit(get_training_dataset() ,
steps_per_epoch=STEPS_PER_EPOCH,
epochs=EPOCHS,
validation_data=get_validation_dataset() ,
callbacks = callbacks_list)
else:
history2 = model2.fit(get_training_dataset() ,
steps_per_epoch=STEPS_PER_EPOCH,
epochs=EPOCHS,
callbacks = [lr_callback] ) | Petals to the Metal - Flower Classification on TPU |
13,618,247 | train_test_data = [train_df, test_df]<data_type_conversions> | model1.load_weights(weight_path_save1)
model2.load_weights(weight_path_save2 ) | Petals to the Metal - Flower Classification on TPU |
13,618,247 | print(train_df.columns.to_list())
print(test_df.columns.to_list() )<count_missing_values> | if not SKIP_VALIDATION:
cmdataset = get_validation_dataset(ordered=True)
images_ds = cmdataset.map(lambda image, label: image)
labels_ds = cmdataset.map(lambda image, label: label ).unbatch()
cm_correct_labels = next(iter(labels_ds.batch(NUM_VALIDATION_IMAGES)) ).numpy()
m1 = model1.predict(images_ds)
m2 = model2.predict(images_ds)
scores = []
for alpha in np.linspace(0,1,100):
cm_probabilities = alpha*m1+(1-alpha)*m2
cm_predictions = np.argmax(cm_probabilities, axis=-1)
scores.append(f1_score(cm_correct_labels, cm_predictions, labels=range(len(CLASSES)) , average='macro'))
best_alpha = np.linspace(0,1,100)[np.argmax(scores)]
else:
best_alpha = 0.51
print('Best alpha: ' + str(best_alpha))
| Petals to the Metal - Flower Classification on TPU |
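A standalone illustration of the blend-weight search above, on synthetic data: scan alpha over [0, 1] and keep the mixture of the two probability matrices with the best macro F1.

import numpy as np
from sklearn.metrics import f1_score

gen = np.random.default_rng(0)
true = gen.integers(0, 4, size=200)
p1 = gen.dirichlet(np.ones(4), size=200)   # stand-ins for the two models' outputs
p2 = gen.dirichlet(np.ones(4), size=200)
grid = np.linspace(0, 1, 100)
scores = [f1_score(true, np.argmax(a * p1 + (1 - a) * p2, axis=-1), average='macro') for a in grid]
print('best alpha:', grid[int(np.argmax(scores))])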
13,618,247 | train_null_s = train_df.isnull().sum()
print(train_null_s[train_null_s != 0])
print('-'*80)
test_null_s = test_df.isnull().sum()
print(test_null_s[test_null_s != 0] )<define_variables> | if not SKIP_VALIDATION:
cmat = confusion_matrix(cm_correct_labels, cm_predictions, labels=range(len(CLASSES)))
score = f1_score(cm_correct_labels, cm_predictions, labels=range(len(CLASSES)) , average='macro')
precision = precision_score(cm_correct_labels, cm_predictions, labels=range(len(CLASSES)) , average='macro')
recall = recall_score(cm_correct_labels, cm_predictions, labels=range(len(CLASSES)) , average='macro')
display_confusion_matrix(cmat, score, precision, recall)
print('f1 score: {:.3f}, precision: {:.3f}, recall: {:.3f}'.format(score, precision, recall)) | Petals to the Metal - Flower Classification on TPU |
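A tiny worked example of the macro-averaged F1 reported above: per-class F1 values are averaged with equal weight, so rare classes count as much as common ones.

from sklearn.metrics import f1_score

y_true = [0, 0, 0, 1, 2]
y_pred = [0, 0, 1, 1, 1]
# class 0: F1 = 0.8, class 1: F1 = 0.5, class 2: F1 = 0.0 -> macro = 0.433...
print(f1_score(y_true, y_pred, average='macro'))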
13,618,247 | outlier_detection_field = ['Age', 'Fare']
weight = 2
outlier_indices = []
for col in outlier_detection_field:
q1 = np.nanpercentile(train_df[col], 25)
q3 = np.nanpercentile(train_df[col], 75)
iqr = q3-q1
iqr_weight = iqr * weight
lowest_val = q1 - iqr_weight
highest_val = q3 + iqr_weight
outlier_index = train_df[(train_df[col]<lowest_val)|(highest_val<train_df[col])].index
outlier_indices.extend(outlier_index)
print('{}: {} / {}(record size:{})'.format(col, lowest_val, highest_val, outlier_index.shape[0]))<drop_column> | def predict_tta(model, n_iter):
probs = []
for i in range(n_iter):
test_ds = get_test_dataset(ordered=True)
test_images_ds = test_ds.map(lambda image, idnum: image)
probs.append(model.predict(test_images_ds,verbose=0))
return probs | Petals to the Metal - Flower Classification on TPU |
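One caveat about predict_tta as written: get_test_dataset() applies no augmentation, so all n_iter passes see identical images and averaging changes nothing. A minimal augmented test pipeline (an assumed variant, not in the source; it reuses load_dataset, TEST_FILENAMES, AUTO and BATCH_SIZE from earlier cells) would randomize each pass while keeping the deterministic order the ids rely on:

def get_test_dataset_tta(ordered=True):
    # same reader as get_test_dataset, plus a per-pass random horizontal flip
    dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered)
    dataset = dataset.map(lambda image, idnum: (tf.image.random_flip_left_right(image), idnum),
                          num_parallel_calls=AUTO)
    return dataset.batch(BATCH_SIZE).prefetch(AUTO)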
13,618,247 | <drop_column><EOS> | test_ds = get_test_dataset(ordered=True)
print('Calculating predictions...')
test_images_ds = test_ds.map(lambda image, idnum: image)
probs1 = np.mean(predict_tta(model1, TTA_NUM), axis=0)
probs2 = np.mean(predict_tta(model2, TTA_NUM), axis=0)
probabilities = best_alpha*probs1 +(1-best_alpha)*probs2
predictions = np.argmax(probabilities, axis=-1)
print('Generating submission file...')
test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U')
np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='' ) | Petals to the Metal - Flower Classification on TPU |
13,858,092 | <SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<data_type_conversions> | Image(filename="../input/images/Petals to the Metal 31.png", width=1200, height=1000 ) | Petals to the Metal - Flower Classification on TPU
13,858,092 | print(train_df.columns.to_list())
print(test_df.columns.to_list() )<normalization> | Image(filename="../input/images/Petals to the Metal 32.png", width=1200, height=1000 ) | Petals to the Metal - Flower Classification on TPU
13,858,092 | minMaxScaler = MinMaxScaler()
for data in train_test_data:
data['Pclass'] = minMaxScaler.fit_transform(data[['Pclass']] )<count_values> | Image(filename="../input/images/Petals to the Metal 28.png", width=1200, height=1000 ) | Petals to the Metal - Flower Classification on TPU
13,858,092 | train_df['Pclass'].value_counts()<feature_engineering> | Image(filename="../input/images/3d_plot3.png", width=1200, height=1000 ) | Petals to the Metal - Flower Classification on TPU
13,858,092 | for data in train_test_data:
data['Title'] = data['Name'].str.extract(r'([a-zA-Z]+)\.', expand=False )<drop_column> | Image(filename="../input/images/3d_plot4.png", width=1200, height=1000 ) | Petals to the Metal - Flower Classification on TPU
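A toy check of the extraction above (names are illustrative): the pattern captures the word immediately before a period, i.e. the honorific.

import pandas as pd

names = pd.Series(['Braund, Mr. Owen Harris', 'Heikkinen, Miss. Laina', 'Smith, Dr. John'])
print(names.str.extract(r'([a-zA-Z]+)\.', expand=False).tolist())   # ['Mr', 'Miss', 'Dr']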
13,858,092 | for data in train_test_data:
data.drop('Name', axis=1, inplace=True )<count_values> | Image(filename="../input/images/3d_plot5.png", width=1200, height=1000 ) | Petals to the Metal - Flower Classification on TPU
13,858,092 | print(train_df['Title'].value_counts())
print('-'*50)
print(test_df['Title'].value_counts() )<categorify> | Image(filename="../input/images/Petals to the Metal 10.png", width=1200, height=1000 ) | Petals to the Metal - Flower Classification on TPU
13,858,092 | title_mapping = {
'Mr':0,
'Miss':1,
'Mrs':2,
'Master':3,
'Dr':4, 'Rev':4, 'Major':4, 'Mlle':4, 'Col':4, 'Ms':4, 'Countess':4, 'Mme':4, 'Lady':4, 'Sir':4, 'Don':4, 'Jonkheer':4, 'Capt':4, 'Dona':4
}
for data in train_test_data:
data['Title'] = data['Title'].map(title_mapping )<count_values> | Image(filename="../input/images/Petals to the Metal 25.png", width=1200, height=1000 ) | Petals to the Metal - Flower Classification on TPU
13,858,092 | train_df['Title'].value_counts()<normalization> | Image(filename="../input/images/Petals to the Metal 26.png", width=1200, height=1000 ) | Petals to the Metal - Flower Classification on TPU
13,858,092 | minMaxScaler = MinMaxScaler()
for data in train_test_data:
data['Title'] = minMaxScaler.fit_transform(data[['Title']] )<count_values> | Image(filename="../input/images/Petals to the Metal 22.png", width=1200, height=1000 ) | Petals to the Metal - Flower Classification on TPU
13,858,092 | train_df['Title'].value_counts()<data_type_conversions> | Image(filename="../input/images/Deep Learning Adventures.png", width=1200, height=1000 ) | Petals to the Metal - Flower Classification on TPU
13,858,092 | for data in train_test_data:
data['Sex'] = data['Sex'].astype('category' ).cat.codes<count_values> | !pip install seaborn --upgrade
print("Tensorflow version " + tf.__version__ ) | Petals to the Metal - Flower Classification on TPU |
13,858,092 | train_df['Sex'].value_counts()<count_missing_values> | GCS_DS_PATH = KaggleDatasets().get_gcs_path('tpu-getting-started')
print(GCS_DS_PATH ) | Petals to the Metal - Flower Classification on TPU |
13,858,092 | train_df['Age'].isnull().sum()<categorify> | IMAGE_SIZE = [512, 512]
GCS_PATH = GCS_DS_PATH + '/tfrecords-jpeg-512x512'
AUTO = tf.data.experimental.AUTOTUNE
TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec')
VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec')
TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec')
CLASSES = ['pink primrose', 'hard-leaved pocket orchid', 'canterbury bells', 'sweet pea', 'wild geranium', 'tiger lily', 'moon orchid', 'bird of paradise', 'monkshood', 'globe thistle',
'snapdragon', "colt's foot", 'king protea', 'spear thistle', 'yellow iris', 'globe-flower', 'purple coneflower', 'peruvian lily', 'balloon flower', 'giant white arum lily',
'fire lily', 'pincushion flower', 'fritillary', 'red ginger', 'grape hyacinth', 'corn poppy', 'prince of wales feathers', 'stemless gentian', 'artichoke', 'sweet william',
'carnation', 'garden phlox', 'love in the mist', 'cosmos', 'alpine sea holly', 'ruby-lipped cattleya', 'cape flower', 'great masterwort', 'siam tulip', 'lenten rose',
'barberton daisy', 'daffodil', 'sword lily', 'poinsettia', 'bolero deep blue', 'wallflower', 'marigold', 'buttercup', 'daisy', 'common dandelion',
'petunia', 'wild pansy', 'primula', 'sunflower', 'lilac hibiscus', 'bishop of llandaff', 'gaura', 'geranium', 'orange dahlia', 'pink-yellow dahlia',
'cautleya spicata', 'japanese anemone', 'black-eyed susan', 'silverbush', 'californian poppy', 'osteospermum', 'spring crocus', 'iris', 'windflower', 'tree poppy',
'gazania', 'azalea', 'water lily', 'rose', 'thorn apple', 'morning glory', 'passion flower', 'lotus', 'toad lily', 'anthurium',
'frangipani', 'clematis', 'hibiscus', 'columbine', 'desert-rose', 'tree mallow', 'magnolia', 'cyclamen ', 'watercress', 'canna lily',
'hippeastrum ', 'bee balm', 'pink quill', 'foxglove', 'bougainvillea', 'camellia', 'mallow', 'mexican petunia', 'bromelia', 'blanket flower',
'trumpet creeper', 'blackberry lily', 'common tulip', 'wild rose']
def decode_image(image_data):
image = tf.image.decode_jpeg(image_data, channels=3)
image = tf.cast(image, tf.float32)/ 255.0
image = tf.reshape(image, [*IMAGE_SIZE, 3])
return image
def read_labeled_tfrecord(example):
LABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"class": tf.io.FixedLenFeature([], tf.int64),
}
example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
image = decode_image(example['image'])
label = tf.cast(example['class'], tf.int32)
return image, label
def read_unlabeled_tfrecord(example):
UNLABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"id": tf.io.FixedLenFeature([], tf.string),
}
example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
image = decode_image(example['image'])
idnum = example['id']
return image, idnum
def load_dataset(filenames, labeled=True, ordered=False):
ignore_order = tf.data.Options()
if not ordered:
ignore_order.experimental_deterministic = False
dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO)
dataset = dataset.with_options(ignore_order)
dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO)
return dataset | Petals to the Metal - Flower Classification on TPU |
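A quick sanity check for the pipeline above (a sketch; it assumes the TFRecords are reachable from the session): pull one parsed pair and confirm the decoded shape matches IMAGE_SIZE.

image, label = next(iter(load_dataset(TRAINING_FILENAMES, labeled=True)))
print(image.shape, label.numpy())   # (512, 512, 3) and an integer class id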
13,858,092 | for data in train_test_data:
data['Age'].fillna(train_df.groupby('Title')['Age'].transform('median'), inplace=True )<count_missing_values> | GCS_DS_PATH_EXT = KaggleDatasets().get_gcs_path('tf-flower-photo-tfrec')
GCS_PATH_SELECT_EXT = {
192: '/tfrecords-jpeg-192x192',
224: '/tfrecords-jpeg-224x224',
331: '/tfrecords-jpeg-331x331',
512: '/tfrecords-jpeg-512x512'
}
GCS_PATH_EXT = GCS_PATH_SELECT_EXT[IMAGE_SIZE[0]]
IMAGENET_FILES = tf.io.gfile.glob(GCS_DS_PATH_EXT + '/imagenet' + GCS_PATH_EXT + '/*.tfrec')
INATURELIST_FILES = tf.io.gfile.glob(GCS_DS_PATH_EXT + '/inaturalist' + GCS_PATH_EXT + '/*.tfrec')
OPENIMAGE_FILES = tf.io.gfile.glob(GCS_DS_PATH_EXT + '/openimage' + GCS_PATH_EXT + '/*.tfrec')
OXFORD_FILES = tf.io.gfile.glob(GCS_DS_PATH_EXT + '/oxford_102' + GCS_PATH_EXT + '/*.tfrec')
TENSORFLOW_FILES = tf.io.gfile.glob(GCS_DS_PATH_EXT + '/tf_flowers' + GCS_PATH_EXT + '/*.tfrec')
ADDITIONAL_TRAINING_FILENAMES = IMAGENET_FILES + INATURELIST_FILES + OPENIMAGE_FILES + OXFORD_FILES + TENSORFLOW_FILES
TRAINING_FILENAMES = TRAINING_FILENAMES + ADDITIONAL_TRAINING_FILENAMES | Petals to the Metal - Flower Classification on TPU |
13,858,092 | train_df['Age'].isnull().sum()<feature_engineering> | SEED = 2020
def random_blockout(img, sl=0.1, sh=0.2, rl=0.4):
p=random.random()
if p>=0.25:
w, h, c = IMAGE_SIZE[0], IMAGE_SIZE[1], 3
origin_area = tf.cast(h*w, tf.float32)
e_size_l = tf.cast(tf.round(tf.sqrt(origin_area * sl * rl)) , tf.int32)
e_size_h = tf.cast(tf.round(tf.sqrt(origin_area * sh / rl)) , tf.int32)
e_height_h = tf.minimum(e_size_h, h)
e_width_h = tf.minimum(e_size_h, w)
erase_height = tf.random.uniform(shape=[], minval=e_size_l, maxval=e_height_h, dtype=tf.int32)
erase_width = tf.random.uniform(shape=[], minval=e_size_l, maxval=e_width_h, dtype=tf.int32)
erase_area = tf.zeros(shape=[erase_height, erase_width, c])
erase_area = tf.cast(erase_area, tf.uint8)
pad_h = h - erase_height
pad_top = tf.random.uniform(shape=[], minval=0, maxval=pad_h, dtype=tf.int32)
pad_bottom = pad_h - pad_top
pad_w = w - erase_width
pad_left = tf.random.uniform(shape=[], minval=0, maxval=pad_w, dtype=tf.int32)
pad_right = pad_w - pad_left
erase_mask = tf.pad([erase_area], [[0,0],[pad_top, pad_bottom], [pad_left, pad_right], [0,0]], constant_values=1)
erase_mask = tf.squeeze(erase_mask, axis=0)
erased_img = tf.multiply(tf.cast(img,tf.float32), tf.cast(erase_mask, tf.float32))
return tf.cast(erased_img, img.dtype)
else:
return tf.cast(img, img.dtype)
def data_augment_v2(image, label):
flag = random.randint(1,3)
coef_1 = random.randint(70, 90)* 0.01
coef_2 = random.randint(70, 90)* 0.01
if flag == 1:
image = tf.image.random_flip_left_right(image, seed=SEED)
elif flag == 2:
image = tf.image.random_flip_up_down(image, seed=SEED)
else:
image = tf.image.random_crop(image, [int(IMAGE_SIZE[0]*coef_1), int(IMAGE_SIZE[0]*coef_2), 3],seed=SEED)
image = random_blockout(image)
return image, label | Petals to the Metal - Flower Classification on TPU |
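Note that random.randint/random.random in data_augment_v2 run once, when tf.data traces the Python function, so every element of a traced pipeline takes the same branch and crop coefficients. A per-element alternative (a sketch, not from the source) draws inside the graph with tf.random:

def data_augment_v3(image, label):
    flag = tf.random.uniform([], 0, 2, dtype=tf.int32)   # re-drawn for every element
    image = tf.cond(flag == 0,
                    lambda: tf.image.random_flip_left_right(image),
                    lambda: tf.image.random_flip_up_down(image))
    return image, label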
13,858,092 | for data in train_test_data:
data['Age'] = pd.qcut(data['Age'], 9 ).cat.codes<normalization> | def data_augment(image, label):
image = tf.image.random_flip_left_right(image)
return image, label
def get_training_dataset() :
dataset = load_dataset(TRAINING_FILENAMES, labeled=True)
dataset = dataset.map(data_augment, num_parallel_calls=AUTO)
dataset = dataset.repeat()
dataset = dataset.shuffle(2048)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
def get_validation_dataset(ordered=False):
dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=ordered)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.cache()
dataset = dataset.prefetch(AUTO)
return dataset
def get_test_dataset(ordered=False):
dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
def count_data_items(filenames):
n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames]
return np.sum(n)
NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES)
NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES)
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
print('Dataset: {} training images, {} validation images, {} unlabeled test images'.format(NUM_TRAINING_IMAGES, NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES))
| Petals to the Metal - Flower Classification on TPU |
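Worked example of count_data_items above; the file names are illustrative but follow the competition's naming, which embeds each file's image count just before the extension.

import re
import numpy as np

files = ['train/00-512x512-798.tfrec', 'train/01-512x512-798.tfrec']
counts = [int(re.search(r'-([0-9]*)\.', f).group(1)) for f in files]
print(np.sum(counts))   # 1596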
13,858,092 | minMaxScaler = MinMaxScaler()
for data in train_test_data:
data['Age'] = minMaxScaler.fit_transform(data[['Age']] )<feature_engineering> | strategy.num_replicas_in_sync | Petals to the Metal - Flower Classification on TPU |
13,858,092 | for data in train_test_data:
data['FamilySize'] = data['Parch'] + data['SibSp']<drop_column> | 16 * strategy.num_replicas_in_sync | Petals to the Metal - Flower Classification on TPU |
13,858,092 | for data in train_test_data:
data.drop(['SibSp', 'Parch'], axis=1, inplace=True )<feature_engineering> | BATCH_SIZE = 16 * strategy.num_replicas_in_sync
ds_train = get_training_dataset()
ds_valid = get_validation_dataset()
ds_test = get_test_dataset()
print("Training:", ds_train)
print("Validation:", ds_valid)
print("Test:", ds_test ) | Petals to the Metal - Flower Classification on TPU |
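Batch-size arithmetic behind BATCH_SIZE above, assuming a TPU v3-8 (8 replicas) and the stock 12753-image training split before the external TFRecords are appended; both numbers are assumptions about this setup.

replicas = 8                       # stand-in for strategy.num_replicas_in_sync
global_batch = 16 * replicas       # 128 images per step across all replicas
steps_per_epoch = 12753 // global_batch
print(global_batch, steps_per_epoch)   # 128, 99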
13,858,092 |
<normalization> | np.set_printoptions(threshold=15, linewidth=80)
print("Training data shapes:")
for image, label in ds_train.take(3):
print(image.numpy().shape, label.numpy().shape)
print("Training data label examples:", label.numpy() ) | Petals to the Metal - Flower Classification on TPU |
13,858,092 | minMaxScaler = MinMaxScaler()
for data in train_test_data:
data['FamilySize'] = minMaxScaler.fit_transform(data[['FamilySize']] )<count_values> | print("Test data shapes:")
for image, idnum in ds_test.take(3):
print(image.numpy().shape, idnum.numpy().shape)
print("Test data IDs:", idnum.numpy().astype('U')) | Petals to the Metal - Flower Classification on TPU |
13,858,092 | print(train_df['Ticket'].value_counts())
print('-'*80)
print(train_df['Ticket'].unique().shape )<drop_column> | def batch_to_numpy_images_and_labels(data):
images, labels = data
numpy_images = images.numpy()
numpy_labels = labels.numpy()
if numpy_labels.dtype == object:
numpy_labels = [None for _ in enumerate(numpy_images)]
return numpy_images, numpy_labels
def title_from_label_and_target(label, correct_label):
if correct_label is None:
return CLASSES[label], True
correct =(label == correct_label)
return "{} [{}{}{}]".format(CLASSES[label],
'OK' if correct else 'NO',
u"\u2192" if not correct else '',
CLASSES[correct_label] if not correct else ''), correct
def display_one_flower(image, title, subplot, red=False, titlesize=16):
plt.subplot(*subplot)
plt.axis('off')
plt.imshow(image)
if len(title)> 0:
plt.title(title, fontsize=int(titlesize)if not red else int(titlesize/1.2), color='red' if red else 'black', fontdict={'verticalalignment':'center'}, pad=int(titlesize/1.5))
return(subplot[0], subplot[1], subplot[2]+1)
def display_batch_of_images(databatch, predictions=None, display_mismatches_only=False):
images, labels = batch_to_numpy_images_and_labels(databatch)
if labels is None:
labels = [None for _ in enumerate(images)]
rows = int(math.sqrt(len(images)))
cols = len(images)//rows
FIGSIZE = 13.0
SPACING = 0.1
subplot=(rows,cols,1)
if rows < cols:
plt.figure(figsize=(FIGSIZE,FIGSIZE/cols*rows))
else:
plt.figure(figsize=(FIGSIZE/rows*cols,FIGSIZE))
for i,(image, label)in enumerate(zip(images[:rows*cols], labels[:rows*cols])) :
title = '' if label is None else CLASSES[label]
correct = True
if predictions is not None:
title, correct = title_from_label_and_target(predictions[i], label)
dynamic_titlesize = FIGSIZE*SPACING/max(rows,cols)*40+3
if display_mismatches_only:
if predictions[i] != label:
subplot = display_one_flower(image, title, subplot, not correct, titlesize=dynamic_titlesize)
else:
subplot = display_one_flower(image, title, subplot, not correct, titlesize=dynamic_titlesize)
plt.tight_layout()
if label is None and predictions is None:
plt.subplots_adjust(wspace=0, hspace=0)
else:
plt.subplots_adjust(wspace=SPACING, hspace=SPACING)
plt.show()
def display_training_curves(training, validation, title, subplot):
if subplot%10==1:
plt.subplots(figsize=(10,10), facecolor='#F8F8F8')
plt.tight_layout()
ax = plt.subplot(subplot)
ax.set_facecolor('#F8F8F8')
ax.plot(training)
ax.plot(validation)
ax.set_title('model '+ title)
ax.set_ylabel(title)
ax.set_xlabel('epoch')
ax.legend(['train', 'valid.'])
def display_training_curves_v2(training, validation, learning_rate_list, title, subplot):
if subplot%10==1:
plt.subplots(figsize=(10,10), facecolor='#F8F8F8')
plt.tight_layout()
ax = plt.subplot(subplot)
ax.set_facecolor('#F8F8F8')
ax.plot(training)
ax.plot(validation)
ax.set_title('model '+ title)
ax.set_ylabel(title, color='b')
ax.set_xlabel('epoch')
ax.legend(['train', 'valid.', 'learning rate'])
ax2 = ax.twinx()
ax2.plot(learning_rate_list, 'g-')
ax2.set_ylabel('learning rate', color='g' ) | Petals to the Metal - Flower Classification on TPU |
13,858,092 | for data in train_test_data:
data.drop('Ticket', axis=1, inplace=True )<count_missing_values> | ds_iter = iter(ds_train.unbatch().batch(20)) | Petals to the Metal - Flower Classification on TPU |