kernel_id (int64, 24.2k–23.3M) | prompt (string, lengths 8–1.85M) | completion (string, lengths 1–182k) | comp_name (string, lengths 5–57) |
---|---|---|---|
12,686,034 | def root_mean_squared_error(y_true, y_pred):
return K.sqrt(K.mean(K.square(y_pred - y_true)))
model = Sequential()
model.add(Conv2D(512,(3, 3), input_shape=(IMG_SHAPE[0], IMG_SHAPE[1], 3)))
model.add(Conv2D(256,(3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256,(3, 3)))
model.add(Conv2D(128,(3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128,(3, 3)))
model.add(Conv2D(128,(3, 3)))
model.add(Activation('relu'))
model.add(GlobalMaxPooling2D())
model.add(Dropout(0.25))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(Dense(37))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adamax', metrics=[root_mean_squared_error])
model.summary()<train_model> | def decode_image(image_data):
image = tf.image.decode_jpeg(image_data, channels=3)
image = tf.cast(image, tf.float32)/ 255.0
image = tf.reshape(image, [*IMAGE_SIZE, 3])
return image
def read_labeled_tfrecord(example):
LABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"class": tf.io.FixedLenFeature([], tf.int64),
}
example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
image = decode_image(example['image'])
label = tf.cast(example['class'], tf.int32)
return image, label
def read_unlabeled_tfrecord(example):
UNLABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"id": tf.io.FixedLenFeature([], tf.string),
}
example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
image = decode_image(example['image'])
idnum = example['id']
return image, idnum
def load_dataset(filenames, labeled=True, ordered=False):
ignore_order = tf.data.Options()
if not ordered:
ignore_order.experimental_deterministic = False
dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO)
dataset = dataset.with_options(ignore_order)
dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO)
return dataset
def data_augment(image, label):
flag = random.randint(1,3)
coef_1 = random.randint(70, 90)* 0.01
coef_2 = random.randint(70, 90)* 0.01
if flag == 1:
image = tf.image.random_flip_left_right(image, seed=SEED)
elif flag == 2:
image = tf.image.random_flip_up_down(image, seed=SEED)
else:
image = tf.image.random_crop(image, [int(IMAGE_SIZE[0]*coef_1), int(IMAGE_SIZE[0]*coef_2), 3],seed=SEED)
return image, label
def get_training_dataset() :
dataset = load_dataset(TRAINING_FILENAMES, labeled=True)
dataset = dataset.map(data_augment, num_parallel_calls=AUTO)
dataset = dataset.repeat()
dataset = dataset.shuffle(2048)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
def get_validation_dataset() :
dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=False)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.cache()
dataset = dataset.prefetch(AUTO)
return dataset
def get_test_dataset(ordered=False):
dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
def count_data_items(filenames):
n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames]
return np.sum(n)
NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES)
NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES)
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE
print('Dataset: {} training images, {} validation images, {} unlabeled test images'.format(NUM_TRAINING_IMAGES, NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES)) | Petals to the Metal - Flower Classification on TPU |
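A note on the shard-name convention count_data_items relies on: each .tfrec shard encodes its record count after the final '-' in the filename, which the regex extracts. A minimal sketch with hypothetical shard names:
import re
import numpy as np
shards = ['train/flowers-00-230.tfrec', 'train/flowers-01-230.tfrec']  # hypothetical names
print(np.sum([int(re.compile(r"-([0-9]*)\.").search(s).group(1)) for s in shards]))  # 460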
12,686,034 | batch_size = 128
model.fit(X_train, y_train, epochs=30, validation_data=(X_test, y_test))<define_variables> | LR_START = 0.00001
LR_MAX = 0.00005 * strategy.num_replicas_in_sync
LR_MIN = 0.00001
LR_RAMPUP_EPOCHS = 8
LR_SUSTAIN_EPOCHS = 0
LR_EXP_DECAY =.8
def lrfn(epoch):
if epoch < LR_RAMPUP_EPOCHS:
lr =(LR_MAX - LR_START)/ LR_RAMPUP_EPOCHS * epoch + LR_START
elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
lr = LR_MAX
else:
lr =(LR_MAX - LR_MIN)* LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS)+ LR_MIN
return lr
lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=True)
rng = [i for i in range(EPOCHS)]
y = [lrfn(x)for x in rng]
plt.plot(rng, y)
print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1])) | Petals to the Metal - Flower Classification on TPU |
12,686,034 | def test_image_generator(ids, shape=IMG_SHAPE):
x1 =(ORIG_SHAPE[0]-CROP_SIZE[0])//2
y1 =(ORIG_SHAPE[1]-CROP_SIZE[1])//2
x_batch = []
for i in ids:
x = get_image('../input/44352/images_test_rev1/'+i, x1, y1, shape=IMG_SHAPE, crop_size=CROP_SIZE)
x_batch.append(x)
x_batch = np.array(x_batch)
return x_batch
val_files = os.listdir('../input/44352/images_test_rev1/')
val_predictions = []
N_val = len(val_files)
for i in tqdm(np.arange(0, N_val, batch_size)) :
if i+batch_size > N_val:
upper = N_val
else:
upper = i+batch_size
X = test_image_generator(val_files[i:upper])
y_pred = model.predict(X)
val_predictions.append(y_pred)
Y_pred = np.vstack(val_predictions)  # vstack the list directly; the final batch can be smaller, so np.array would be ragged
ids = np.array([v.split('.')[0] for v in val_files] ).reshape(len(val_files),1)
submission_df = pd.DataFrame(np.hstack(( ids, Y_pred)) , columns=df.columns)
submission_df = submission_df.sort_values(by=['GalaxyID'])
submission_df.to_csv('sample_submission.csv', index=False )<import_modules> | def get_model(use_model):
base_model = use_model(weights='imagenet',
include_top=False, pooling='avg',
input_shape=(*IMAGE_SIZE, 3))
x = base_model.output
predictions = Dense(104, activation='softmax' )(x)
return Model(inputs=base_model.input, outputs=predictions)
with strategy.scope() :
model = get_model(DenseNet201)
model.compile(
optimizer='nadam',
loss = 'sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy']
)
| Petals to the Metal - Flower Classification on TPU |
12,686,034 | print("Read in libraries")
<load_from_csv> | history = model.fit(get_training_dataset() ,
steps_per_epoch=STEPS_PER_EPOCH,
epochs=EPOCHS,
callbacks=[lr_callback, ModelCheckpoint(filepath='my_densenet_201.h5', monitor='val_loss',
save_best_only=True)],
validation_data=get_validation_dataset() ,
workers = 3 ) | Petals to the Metal - Flower Classification on TPU |
12,686,034 | PATH_WEEK2='/kaggle/input/covid19-global-forecasting-week-2'
df_train = pd.read_csv(f'{PATH_WEEK2}/train.csv')
df_test = pd.read_csv(f'{PATH_WEEK2}/test.csv')
df_train.head()
df_test.head()
df_train.rename(columns={'Country_Region':'Country'}, inplace=True)
df_test.rename(columns={'Country_Region':'Country'}, inplace=True)
df_train.rename(columns={'Province_State':'State'}, inplace=True)
df_test.rename(columns={'Province_State':'State'}, inplace=True)
df_train['Date'] = pd.to_datetime(df_train['Date'], infer_datetime_format=True)
df_test['Date'] = pd.to_datetime(df_test['Date'], infer_datetime_format=True)
df_train.info()
df_test.info()
y1_Train = df_train.iloc[:, -2]
y1_Train.head()
y2_Train = df_train.iloc[:, -1]
y2_Train.head()
EMPTY_VAL = "EMPTY_VAL"
def fillState(state, country):
if state == EMPTY_VAL: return country
return state<data_type_conversions> | model = tf.keras.models.load_model('my_densenet_201.h5' ) | Petals to the Metal - Flower Classification on TPU |
12,686,034 | <categorify><EOS> | test_ds = get_test_dataset(ordered=True)
print('Computing predictions...')
test_images_ds = test_ds.map(lambda image, idnum: image)
probabilities = model.predict(test_images_ds)
predictions = np.argmax(probabilities, axis=-1)
print(predictions)
print('Generating submission.csv file...')
test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U')
np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='' ) | Petals to the Metal - Flower Classification on TPU |
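A hedged sanity check on the file just written (the column names follow the header passed to np.savetxt above):
import pandas as pd
sub = pd.read_csv('submission.csv')
assert list(sub.columns) == ['id', 'label']
print(len(sub) == NUM_TEST_IMAGES)  # True if every test image got a prediction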
12,552,158 | <SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<categorify> | !pip install -q efficientnet | Petals to the Metal - Flower Classification on TPU |
12,552,158 | filterwarnings('ignore')
le = preprocessing.LabelEncoder()
countries = X_Train.Country.unique()
df_out = pd.DataFrame({'ForecastId': [], 'ConfirmedCases': [], 'Fatalities': []})
for country in countries:
states = X_Train.loc[X_Train.Country == country, :].State.unique()
for state in states:
X_Train_CS = X_Train.loc[(X_Train.Country == country)&(X_Train.State == state), ['State', 'Country', 'Date', 'ConfirmedCases', 'Fatalities']]
y1_Train_CS = X_Train_CS.loc[:, 'ConfirmedCases']
y2_Train_CS = X_Train_CS.loc[:, 'Fatalities']
X_Train_CS = X_Train_CS.loc[:, ['State', 'Country', 'Date']]
X_Train_CS.Country = le.fit_transform(X_Train_CS.Country)
X_Train_CS['State'] = le.fit_transform(X_Train_CS['State'])
X_Test_CS = X_Test.loc[(X_Test.Country == country)&(X_Test.State == state), ['State', 'Country', 'Date', 'ForecastId']]
X_Test_CS_Id = X_Test_CS.loc[:, 'ForecastId']
X_Test_CS = X_Test_CS.loc[:, ['State', 'Country', 'Date']]
X_Test_CS.Country = le.fit_transform(X_Test_CS.Country)
X_Test_CS['State'] = le.fit_transform(X_Test_CS['State'])
model1 = XGBRegressor(n_estimators=1000)
model1.fit(X_Train_CS, y1_Train_CS)
y1_pred = model1.predict(X_Test_CS)
model2 = XGBRegressor(n_estimators=1000)
model2.fit(X_Train_CS, y2_Train_CS)
y2_pred = model2.predict(X_Test_CS)
df = pd.DataFrame({'ForecastId': X_Test_CS_Id, 'ConfirmedCases': y1_pred, 'Fatalities': y2_pred})
df_out = pd.concat([df_out, df], axis=0)
<save_to_csv> | from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from tensorflow.keras.applications.xception import Xception
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet_v2 import ResNet50V2, ResNet101V2, ResNet152V2
from tensorflow.keras.applications.nasnet import NASNetLarge
from efficientnet.tfkeras import EfficientNetB7, EfficientNetL2, EfficientNetB0 | Petals to the Metal - Flower Classification on TPU |
12,552,158 | df_out.ForecastId = df_out.ForecastId.astype('int')
df_out.tail()
df_out.to_csv('submission.csv', index=False )<load_from_csv> | %matplotlib inline
print("Tensorflow version " + tf.__version__ ) | Petals to the Metal - Flower Classification on TPU |
12,552,158 | train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-2/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-2/test.csv')
submission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-2/test.csv' )<count_unique_values> | GCS_DS_PATH = KaggleDatasets().get_gcs_path() | Petals to the Metal - Flower Classification on TPU |
12,552,158 | print("Number of Country_Region: ", train['Country_Region'].nunique())
print("Dates go from day", max(train['Date']), "to day", min(train['Date']), ", a total of", train['Date'].nunique() , "days")
print("Countries with Province/State informed: ", train[train['Province_State'].isna() ==False]['Country_Region'].unique() )<feature_engineering> | IMAGE_SIZE = [512, 512]
EPOCHS = 30
BATCH_SIZE = 16 * strategy.num_replicas_in_sync
SEED = 2020
NUM_TRAINING_IMAGES = 12753
NUM_TEST_IMAGES = 7382
STEPS_PER_EPOCH =(NUM_TRAINING_IMAGES // BATCH_SIZE)* 2 | Petals to the Metal - Flower Classification on TPU |
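For concreteness, on a Kaggle TPU v3-8 (8 replicas — an assumption here) the constants above resolve as follows; the * 2 makes each "epoch" traverse the repeated training stream roughly twice:
replicas = 8                 # assumption: TPU v3-8
batch = 16 * replicas        # 128
print((12753 // batch) * 2)  # 198 steps per epoch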
12,552,158 | EMPTY_VAL = "EMPTY_VAL"
def fillState(state, country):
if state == EMPTY_VAL: return country
return state
train['Province_State'].fillna(EMPTY_VAL, inplace=True)
train['Province_State'] = train.loc[:, ['Province_State', 'Country_Region']].apply(lambda x : fillState(x['Province_State'], x['Country_Region']), axis=1)
test['Province_State'].fillna(EMPTY_VAL, inplace=True)
test['Province_State'] = test.loc[:, ['Province_State', 'Country_Region']].apply(lambda x : fillState(x['Province_State'], x['Country_Region']), axis=1)
test.head()<prepare_x_and_y> | def decode_image(image_data):
image = tf.image.decode_jpeg(image_data, channels=3)
image = tf.cast(image, tf.float32)/ 255.0
image = tf.reshape(image, [*IMAGE_SIZE, 3])
return image
def read_labeled_tfrecord(example):
LABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"class": tf.io.FixedLenFeature([], tf.int64),
}
example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
image = decode_image(example['image'])
label = tf.cast(example['class'], tf.int32)
return image, label
def read_unlabeled_tfrecord(example):
UNLABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"id": tf.io.FixedLenFeature([], tf.string),
}
example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
image = decode_image(example['image'])
idnum = example['id']
return image, idnum
def load_dataset(filenames, labeled=True, ordered=False):
ignore_order = tf.data.Options()
if not ordered:
ignore_order.experimental_deterministic = False
dataset = tf.data.TFRecordDataset(filenames)
dataset = dataset.with_options(ignore_order)
dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord)
return dataset
def data_augment(image, label):
flag = random.randint(1,3)
coef_1 = random.randint(75, 95)* 0.01
coef_2 = random.randint(75, 95)* 0.01
if flag == 1:
image = tf.image.random_flip_left_right(image, seed=SEED)
elif flag == 2:
image = tf.image.random_flip_up_down(image, seed=SEED)
else:
image = tf.image.random_crop(image, [int(IMAGE_SIZE[0]*coef_1), int(IMAGE_SIZE[0]*coef_2), 3],seed=SEED)
return image, label
def get_training_dataset() :
dataset = load_dataset(tf.io.gfile.glob(GCS_DS_PATH + f'/tfrecords-jpeg-{IMAGE_SIZE[0]}x{IMAGE_SIZE[0]}/train/*.tfrec'), labeled=True)
dataset = dataset.repeat()
dataset = dataset.shuffle(2048)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(-1)
return dataset
def get_validation_dataset() :
dataset = load_dataset(tf.io.gfile.glob(GCS_DS_PATH + f'/tfrecords-jpeg-{IMAGE_SIZE[0]}x{IMAGE_SIZE[0]}/val/*.tfrec'), labeled=True, ordered=False)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.cache()
dataset = dataset.prefetch(-1)
return dataset
def get_test_dataset(ordered=False):
dataset = load_dataset(tf.io.gfile.glob(GCS_DS_PATH + f'/tfrecords-jpeg-{IMAGE_SIZE[0]}x{IMAGE_SIZE[0]}/test/*.tfrec'), labeled=False, ordered=ordered)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(-1)
return dataset
training_dataset = get_training_dataset()
validation_dataset = get_validation_dataset() | Petals to the Metal - Flower Classification on TPU |
12,552,158 | train['row_number'] = train.groupby(['Country_Region', 'Province_State'] ).cumcount()
hubei = train[(train["Country_Region"] == 'China')&(train["Province_State"] == 'Hubei')]
x = hubei['row_number']
y = hubei['ConfirmedCases']
y_ = hubei['Fatalities']
def f(x, L, b, k, x_0):
return L /(1.+ np.exp(-k *(x - x_0)))+ b
def logistic(xs, L, k, x_0):
result = []
for x in xs:
xp = k*(x-x_0)
if xp >= 0:
result.append(L /(1.+ np.exp(-xp)))
else:
result.append(L * np.exp(xp)/(1.+ np.exp(xp)))
return result
p0 = [max(y), 0.0,max(x)]
p0_ = [max(y_), 0.0,max(x)]
x_ = np.arange(0, 100, 1 ).tolist()
try:
popt, pcov = opt.curve_fit(logistic, x, y,p0)
yfit = logistic(x_, *popt)
popt_, pcov_ = opt.curve_fit(logistic, x, y_,p0_)
yfit_ = logistic(x_, *popt_)
except:
popt, pcov = opt.curve_fit(f, x, y, method="lm", maxfev=5000)
yfit = f(x_, *popt)
popt_, pcov_ = opt.curve_fit(f, x, y_, method="lm", maxfev=5000)
yfit_ = f(x_, *popt_)
fig, ax = plt.subplots(1, 1, figsize=(10, 8))
ax.plot(x, y, 'o', label ='Actual Cases')
ax.plot(x_, yfit, '-', label ='Fitted Cases')
ax.plot(x, y_, 'o', label ='Actual Fatalities')
ax.plot(x_, yfit_, '-', label ='Fitted fatalities')
ax.title.set_text('China - Hubei province')
plt.legend(loc="center right")
plt.show()<prepare_output> | LR_START = 0.00001
LR_MAX = 0.0001
LR_MIN = 0.00001
LR_RAMPUP_EPOCHS = 8
LR_SUSTAIN_EPOCHS = 0
LR_EXP_DECAY =.8
def lrfn(epoch):
if epoch < LR_RAMPUP_EPOCHS:
lr =(LR_MAX - LR_START)/ LR_RAMPUP_EPOCHS * epoch + LR_START
elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
lr = LR_MAX
else:
lr =(LR_MAX - LR_MIN)* LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS)+ LR_MIN
return lr
lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=True)
rng = [i for i in range(EPOCHS)]
y = [lrfn(x)for x in rng]
plt.plot(rng, y)
print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1])) | Petals to the Metal - Flower Classification on TPU |
12,552,158 | unique = pd.DataFrame(train.groupby(['Country_Region', 'Province_State'],as_index=False ).count())
unique.head()<prepare_x_and_y> | def get_model(use_model):
base_model = use_model(weights='noisy-student',
include_top=False, pooling='avg',
input_shape=(*IMAGE_SIZE, 3))
x = base_model.output
predictions = Dense(104, activation='softmax' )(x)
return Model(inputs=base_model.input, outputs=predictions)
with strategy.scope() :
model = get_model(EfficientNetB0)
model.compile(
optimizer='nadam',
loss = 'sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy']
)
tf.keras.utils.plot_model(
model, to_file='model.png', show_shapes=True, show_layer_names=True,
) | Petals to the Metal - Flower Classification on TPU |
12,552,158 | def date_day_diff(d1, d2):
delta = dt.datetime.strptime(d1, "%Y-%m-%d")- dt.datetime.strptime(d2, "%Y-%m-%d")
return delta.days
log_regions = []
for index, region in unique.iterrows() :
st = region['Province_State']
co = region['Country_Region']
rdata = train[(train['Province_State']==st)&(train['Country_Region']==co)]
t = rdata['Date'].values
t = [float(date_day_diff(d, t[0])) for d in t]
y = rdata['ConfirmedCases'].values
y_ = rdata['Fatalities'].values
p0 = [max(y), 0.0, max(t)]
p0_ = [max(y_), 0.0, max(t)]
try:
popt, pcov = opt.curve_fit(logistic, t, y, p0, maxfev=10000)
try:
popt_, pcov_ = opt.curve_fit(logistic, t, y_, p0_, maxfev=10000)
except:
popt_, pcov_ = opt.curve_fit(f, t, y_,method="trf", maxfev=10000)
log_regions.append(( co,st,popt,popt_))
except:
popt, pcov = opt.curve_fit(f, t, y,method="trf", maxfev=10000)
popt_, pcov_ = opt.curve_fit(f, t, y_,method="trf", maxfev=10000)
log_regions.append(( co,st,popt,popt_))<prepare_output> | history = model.fit(training_dataset,
steps_per_epoch=STEPS_PER_EPOCH,
epochs=EPOCHS,
callbacks=[lr_callback, ModelCheckpoint(filepath='my_ef_net_b7.h5', monitor='val_loss',
save_best_only=True)],
validation_data=validation_dataset ) | Petals to the Metal - Flower Classification on TPU |
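date_day_diff above returns whole days between two ISO-formatted date strings; a quick worked check (2020 is a leap year, so February contributes 29 days):
import datetime as dt
d = dt.datetime.strptime("2020-03-22", "%Y-%m-%d") - dt.datetime.strptime("2020-01-22", "%Y-%m-%d")
print(d.days)  # 60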
12,552,158 | log_regions = pd.DataFrame(log_regions)
log_regions.head()<rename_columns> | model = tf.keras.models.load_model('my_ef_net_b7.h5' ) | Petals to the Metal - Flower Classification on TPU |
12,552,158 | <define_variables><EOS> | test_ds = get_test_dataset(ordered=True)
print('Computing predictions...')
test_images_ds = test_ds.map(lambda image, idnum: image)
probabilities = model.predict(test_images_ds)
predictions = np.argmax(probabilities, axis=-1)
print(predictions)
print('Generating submission.csv file...')
test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U')
np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='' ) | Petals to the Metal - Flower Classification on TPU |
10,232,699 | <SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<count_duplicates> | !pip install -q efficientnet | Petals to the Metal - Flower Classification on TPU |
10,232,699 | for index, rt in log_regions.iterrows() :
st = rt['Province_State']
co = rt['Country_Region']
popt = list(rt['ConfirmedCases'])
popt_ = list(rt['Fatalities'])
if popt_ == [0.0,0.0,69.0]:
popt_ = np.multiply(fp,popt)
print(co,st,popt,popt_ )<save_to_csv> | print("Tensorflow version " + tf.__version__ ) | Petals to the Metal - Flower Classification on TPU |
10,232,699 | submission = pd.DataFrame(submission)
submission.columns = ['ForecastId','ConfirmedCases','Fatalities']
submission.to_csv('./submission.csv', index = False )<load_from_csv> | AUTO = tf.data.experimental.AUTOTUNE
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
GCS_DS_PATH = KaggleDatasets().get_gcs_path()
IMAGE_SIZE = [512, 512]
EPOCHS = 20
BATCH_SIZE = 16 * strategy.num_replicas_in_sync | Petals to the Metal - Flower Classification on TPU |
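TPUClusterResolver raises when no TPU is attached to the session; a common hedged fallback (not part of the original kernel) keeps the same code path working on CPU/GPU:
try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
except ValueError:                            # no TPU found
    strategy = tf.distribute.get_strategy()   # default CPU/GPU strategy
print('REPLICAS:', strategy.num_replicas_in_sync)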
10,232,699 | data= pd.read_csv("/kaggle/input/covid19-global-forecasting-week-3/train.csv")
test = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-3/test.csv" )<feature_engineering> | GCS_PATH_SELECT = {
192: GCS_DS_PATH + '/tfrecords-jpeg-192x192',
224: GCS_DS_PATH + '/tfrecords-jpeg-224x224',
331: GCS_DS_PATH + '/tfrecords-jpeg-331x331',
512: GCS_DS_PATH + '/tfrecords-jpeg-512x512'
}
GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]]
TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec')
VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec')
TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec' ) | Petals to the Metal - Flower Classification on TPU |
10,232,699 | data['Province_State']=data['Province_State'].fillna('')
test['Province_State']=test['Province_State'].fillna('' )<import_modules> | CLASSES = ['pink primrose', 'hard-leaved pocket orchid', 'canterbury bells', 'sweet pea', 'wild geranium', 'tiger lily', 'moon orchid', 'bird of paradise', 'monkshood', 'globe thistle',
'snapdragon', "colt's foot", 'king protea', 'spear thistle', 'yellow iris', 'globe-flower', 'purple coneflower', 'peruvian lily', 'balloon flower', 'giant white arum lily',
'fire lily', 'pincushion flower', 'fritillary', 'red ginger', 'grape hyacinth', 'corn poppy', 'prince of wales feathers', 'stemless gentian', 'artichoke', 'sweet william',
'carnation', 'garden phlox', 'love in the mist', 'cosmos', 'alpine sea holly', 'ruby-lipped cattleya', 'cape flower', 'great masterwort', 'siam tulip', 'lenten rose',
'barberton daisy', 'daffodil', 'sword lily', 'poinsettia', 'bolero deep blue', 'wallflower', 'marigold', 'buttercup', 'daisy', 'common dandelion',
'petunia', 'wild pansy', 'primula', 'sunflower', 'lilac hibiscus', 'bishop of llandaff', 'gaura', 'geranium', 'orange dahlia', 'pink-yellow dahlia',
'cautleya spicata', 'japanese anemone', 'black-eyed susan', 'silverbush', 'californian poppy', 'osteospermum', 'spring crocus', 'iris', 'windflower', 'tree poppy',
'gazania', 'azalea', 'water lily', 'rose', 'thorn apple', 'morning glory', 'passion flower', 'lotus', 'toad lily', 'anthurium',
'frangipani', 'clematis', 'hibiscus', 'columbine', 'desert-rose', 'tree mallow', 'magnolia', 'cyclamen ', 'watercress', 'canna lily',
'hippeastrum ', 'bee balm', 'pink quill', 'foxglove', 'bougainvillea', 'camellia', 'mallow', 'mexican petunia', 'bromelia', 'blanket flower',
'trumpet creeper', 'blackberry lily', 'common tulip', 'wild rose'] | Petals to the Metal - Flower Classification on TPU |
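The models in this competition emit sparse integer class ids, so mapping predictions back to names is a plain index into this list:
sample_preds = [0, 73, 103]                # hypothetical argmax outputs
print([CLASSES[p] for p in sample_preds])  # ['pink primrose', 'rose', 'wild rose']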
10,232,699 | import matplotlib.pyplot as plt<data_type_conversions> | def decode_image(image_data):
image = tf.image.decode_jpeg(image_data, channels=3)
image = tf.cast(image, tf.float32)/ 255.0
image = tf.reshape(image, [*IMAGE_SIZE, 3])
return image
def read_labeled_tfrecord(example):
LABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"class": tf.io.FixedLenFeature([], tf.int64),
}
example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
image = decode_image(example['image'])
label = tf.cast(example['class'], tf.int32)
return image, label
def read_unlabeled_tfrecord(example):
UNLABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"id": tf.io.FixedLenFeature([], tf.string),
}
example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
image = decode_image(example['image'])
idnum = example['id']
return image, idnum
def load_dataset(filenames, labeled=True, ordered=False):
ignore_order = tf.data.Options()
if not ordered:
ignore_order.experimental_deterministic = False
dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO)
dataset = dataset.with_options(ignore_order)
dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO)
return dataset
def data_augment(image, label, seed=2020):
image = tf.image.random_flip_left_right(image, seed=seed)
return image, label
def get_training_dataset() :
dataset = load_dataset(TRAINING_FILENAMES, labeled=True)
dataset = dataset.map(data_augment, num_parallel_calls=AUTO)
dataset = dataset.repeat()
dataset = dataset.shuffle(2048)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
def get_validation_dataset(ordered=False):
dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=ordered)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.cache()
dataset = dataset.prefetch(AUTO)
return dataset
def get_train_valid_datasets() :
dataset = load_dataset(TRAINING_FILENAMES + VALIDATION_FILENAMES, labeled=True)
dataset = dataset.map(data_augment, num_parallel_calls=AUTO)
dataset = dataset.repeat()
dataset = dataset.shuffle(2048)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
def get_test_dataset(ordered=False):
dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
def count_data_items(filenames):
n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames]
return np.sum(n ) | Petals to the Metal - Flower Classification on TPU |
10,232,699 | datetime_str = '01/22/20 00:00:00'
datetime_object = datetime.strptime(datetime_str, '%m/%d/%y %H:%M:%S')
data['days']=pd.to_datetime(data['Date'] ).sub(datetime_object)/np.timedelta64(1, 'D')
test['days']=pd.to_datetime(test['Date'] ).sub(datetime_object)/np.timedelta64(1, 'D' )<sort_values> | def lrfn(epoch):
LR_START = 0.00001
LR_MAX = 0.00005 * strategy.num_replicas_in_sync
LR_MIN = 0.00001
LR_RAMPUP_EPOCHS = 5
LR_SUSTAIN_EPOCHS = 0
LR_EXP_DECAY =.8
if epoch < LR_RAMPUP_EPOCHS:
lr =(LR_MAX - LR_START)/ LR_RAMPUP_EPOCHS * epoch + LR_START
elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
lr = LR_MAX
else:
lr =(LR_MAX - LR_MIN)* LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS)+ LR_MIN
return lr | Petals to the Metal - Flower Classification on TPU |
10,232,699 | data.loc[(data['Province_State']=='')&(data['Country_Region']=='India'),:].sort_values(by="Date" )<save_to_csv> | with strategy.scope() :
enet = efn.EfficientNetB7(
input_shape=(512, 512, 3),
weights='imagenet',
include_top=False
)
model = tf.keras.Sequential([
enet,
tf.keras.layers.GlobalAveragePooling2D() ,
tf.keras.layers.Dense(len(CLASSES), activation='softmax')
])
model.compile(
optimizer=tf.keras.optimizers.Adam() ,
loss = 'sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy']
)
model.summary() | Petals to the Metal - Flower Classification on TPU |
10,232,699 | data.to_csv("train_1.csv")
test.to_csv("test_1.csv")
<import_modules> | def freeze(model):
for layer in model.layers:
layer.trainable = False
def unfreeze(model):
for layer in model.layers:
layer.trainable = True | Petals to the Metal - Flower Classification on TPU |
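A sketch of how these helpers fit a two-stage fine-tuning loop; the cells shown never call them, so the recompile steps and learning rates below are assumptions (Keras only picks up trainable changes after a recompile):
freeze(enet)                                 # stage 1: train only the new head
model.compile(optimizer=tf.keras.optimizers.Adam(1e-3),
              loss='sparse_categorical_crossentropy',
              metrics=['sparse_categorical_accuracy'])
# ... model.fit(...) for a few epochs ...
unfreeze(enet)                               # stage 2: fine-tune end to end
model.compile(optimizer=tf.keras.optimizers.Adam(1e-5),  # lower LR for fine-tuning
              loss='sparse_categorical_crossentropy',
              metrics=['sparse_categorical_accuracy'])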
10,232,699 | from statsmodels.tsa.arima_model import ARIMA
<data_type_conversions> | NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES)
NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES)
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE
print('Dataset: {} training images, {} validation images, {} unlabeled test images'.format(NUM_TRAINING_IMAGES, NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES)) | Petals to the Metal - Flower Classification on TPU |
10,232,699 | data['Date']=pd.to_datetime(data['Date'])
test['Date']=pd.to_datetime(test['Date'] )<create_dataframe> | lr_schedule = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=1)
history = model.fit(
get_train_valid_datasets() ,
steps_per_epoch=STEPS_PER_EPOCH,
epochs=EPOCHS,
callbacks=[lr_schedule]
)
| Petals to the Metal - Flower Classification on TPU |
10,232,699 | <count_missing_values><EOS> | test_ds = get_test_dataset(ordered=True)
print('Computing predictions...')
test_images_ds = test_ds.map(lambda image, idnum: image)
probabilities = model.predict(test_images_ds)
predictions = np.argmax(probabilities, axis=-1)
print(predictions)
print('Generating submission.csv file...')
test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U')
np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='' ) | Petals to the Metal - Flower Classification on TPU |
13,987,233 | <SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<filter> | !pip install -q efficientnet | Petals to the Metal - Flower Classification on TPU |
13,987,233 | data['ConfirmedCases'][data['Country_Region']==''][51:]<count_values> | %matplotlib inline
print("Tensorflow version " + tf.__version__ ) | Petals to the Metal - Flower Classification on TPU |
13,987,233 | data['ConfirmedCases'][data['Country_Region']=='India'].value_counts()<create_dataframe> | GCS_DS_PATH = KaggleDatasets().get_gcs_path("tpu-getting-started" ) | Petals to the Metal - Flower Classification on TPU |
13,987,233 | pd.DataFrame(data.loc[data['Country_Region']=='India',['ConfirmedCases']] )<data_type_conversions> | IMAGE_SIZE = [512, 512]
BATCH_SIZE = 16 * strategy.num_replicas_in_sync
GCS_PATH_SELECT = {
192: GCS_DS_PATH + '/tfrecords-jpeg-192x192',
224: GCS_DS_PATH + '/tfrecords-jpeg-224x224',
331: GCS_DS_PATH + '/tfrecords-jpeg-331x331',
512: GCS_DS_PATH + '/tfrecords-jpeg-512x512'
}
GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]]
VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec')
TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec')
SEED = 2020 | Petals to the Metal - Flower Classification on TPU |
13,987,233 | datetime_str = '03/22/20 00:00:00'
datetime_object = datetime.strptime(datetime_str, '%m/%d/%y %H:%M:%S' )<import_modules> | def decode_image(image_data):
image = tf.image.decode_jpeg(image_data, channels=3)
image = tf.cast(image, tf.float32)/ 255.0
image = tf.reshape(image, [*IMAGE_SIZE, 3])
return image
def read_labeled_tfrecord(example):
LABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"class": tf.io.FixedLenFeature([], tf.int64),
}
example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
image = decode_image(example['image'])
label = tf.cast(example['class'], tf.int32)
return image, label
def read_unlabeled_tfrecord(example):
UNLABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"id": tf.io.FixedLenFeature([], tf.string),
}
example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
image = decode_image(example['image'])
idnum = example['id']
return image, idnum
def load_dataset(filenames, labeled=True, ordered=False):
ignore_order = tf.data.Options()
if not ordered:
ignore_order.experimental_deterministic = False
dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO)
dataset = dataset.with_options(ignore_order)
dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO)
return dataset
def get_validation_dataset() :
dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=True)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.cache()
dataset = dataset.prefetch(AUTO)
return dataset
def get_test_dataset(ordered=False):
dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
def count_data_items(filenames):
n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames]
return np.sum(n)
NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES)
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
print('Dataset: {} validation images, {} unlabeled test images'.format(NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES)) | Petals to the Metal - Flower Classification on TPU |
13,987,233 | from datetime import timedelta<import_modules> | def get_model(use_model):
base_model = use_model(weights='imagenet',
include_top=False, pooling='avg',
input_shape=(*IMAGE_SIZE, 3))
x = base_model.output
predictions = Dense(104, activation='softmax' )(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(
optimizer='nadam',
loss = 'sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy']
)
return model
with strategy.scope() :
model1 = get_model(DenseNet201)
model1.load_weights("/kaggle/input/start-with-densenet201/my_densenet_201.h5" ) | Petals to the Metal - Flower Classification on TPU |
13,987,233 | import math<compute_test_metric> | def get_model(use_model):
base_model = use_model(weights='noisy-student',
include_top=False, pooling='avg',
input_shape=(*IMAGE_SIZE, 3))
x = base_model.output
predictions = Dense(104, activation='softmax' )(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(
optimizer='nadam',
loss = 'sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy']
)
return model
with strategy.scope() :
model2 = get_model(EfficientNetB7)
model2.load_weights("/kaggle/input/start-with-pre-train/my_ef_net_b7.h5" ) | Petals to the Metal - Flower Classification on TPU |
13,987,233 | def rmsle(y, y_pred):
assert len(y)== len(y_pred)
terms_to_sum = [(math.log(y_pred[i] + 1)- math.log(y[i] + 1)) ** 2.0 for i,pred in enumerate(y_pred)]
return(sum(terms_to_sum)*(1.0/len(y)))** 0.5<train_model> | val_dataset = get_validation_dataset()
images_ds = val_dataset.map(lambda image, label: image)
labels_ds = val_dataset.map(lambda image, label: label ).unbatch()
val_labels = next(iter(labels_ds.batch(NUM_VALIDATION_IMAGES)) ).numpy()
m1 = model1.predict(images_ds)
m2 = model2.predict(images_ds)
scores = []
for alpha in np.linspace(0,1,100):
val_probabilities = alpha*m1+(1-alpha)*m2
val_predictions = np.argmax(val_probabilities, axis=-1)
scores.append(f1_score(val_labels, val_predictions, labels=range(104), average='macro'))
best_alpha = np.linspace(0, 1, 100)[np.argmax(scores)]  # grid step is 1/99, so index back into the grid
print('Best alpha: ' + str(best_alpha)) | Petals to the Metal - Flower Classification on TPU |
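The ensemble is a convex blend of the two models' probability matrices, p = alpha*p1 + (1-alpha)*p2, searched over a 100-point grid; a toy check with hypothetical two-class outputs:
import numpy as np
p1 = np.array([[0.9, 0.1]])
p2 = np.array([[0.4, 0.6]])
print(0.3 * p1 + (1 - 0.3) * p2)  # [[0.55 0.45]]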
13,987,233 | <find_best_params><EOS> | test_ds = get_test_dataset(ordered=True)
print('Computing predictions...')
test_images_ds = test_ds.map(lambda image, idnum: image)
probabilities1 = model1.predict(test_images_ds)
probabilities2 = model2.predict(test_images_ds)
probabilities = best_alpha * probabilities1 +(1 - best_alpha)* probabilities2
predictions = np.argmax(probabilities, axis=-1)
print(predictions)
print('Generating submission.csv file...')
test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U')
np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='' ) | Petals to the Metal - Flower Classification on TPU |
13,558,595 | <SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<set_options> | %matplotlib inline
print("Tensorflow version " + tf.__version__ ) | Petals to the Metal - Flower Classification on TPU |
13,558,595 | warnings.filterwarnings('ignore' )<feature_engineering> | GCS_DS_PATH = KaggleDatasets().get_gcs_path("tpu-getting-started" ) | Petals to the Metal - Flower Classification on TPU |
13,558,595 | test['ConfirmedCases']=0
test['Fatalities']=0<filter> | IMAGE_SIZE = [512, 512]
BATCH_SIZE = 16 * strategy.num_replicas_in_sync
GCS_PATH_SELECT = {
192: GCS_DS_PATH + '/tfrecords-jpeg-192x192',
224: GCS_DS_PATH + '/tfrecords-jpeg-224x224',
331: GCS_DS_PATH + '/tfrecords-jpeg-331x331',
512: GCS_DS_PATH + '/tfrecords-jpeg-512x512'
}
GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]]
VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec')
TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec')
SEED = 2020 | Petals to the Metal - Flower Classification on TPU |
13,558,595 | sliced_data=data.loc[(data['Province_State']=='')&(data['Country_Region']=='India'),:]<import_modules> | def decode_image(image_data):
image = tf.image.decode_jpeg(image_data, channels=3)
image = tf.cast(image, tf.float32)/ 255.0
image = tf.reshape(image, [*IMAGE_SIZE, 3])
return image
def read_labeled_tfrecord(example):
LABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"class": tf.io.FixedLenFeature([], tf.int64),
}
example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
image = decode_image(example['image'])
label = tf.cast(example['class'], tf.int32)
return image, label
def read_unlabeled_tfrecord(example):
UNLABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"id": tf.io.FixedLenFeature([], tf.string),
}
example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
image = decode_image(example['image'])
idnum = example['id']
return image, idnum
def load_dataset(filenames, labeled=True, ordered=False):
ignore_order = tf.data.Options()
if not ordered:
ignore_order.experimental_deterministic = False
dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO)
dataset = dataset.with_options(ignore_order)
dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO)
return dataset
def get_validation_dataset() :
dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=True)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.cache()
dataset = dataset.prefetch(AUTO)
return dataset
def get_test_dataset(ordered=False):
dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
def count_data_items(filenames):
n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames]
return np.sum(n)
NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES)
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
print('Dataset: {} validation images, {} unlabeled test images'.format(NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES)) | Petals to the Metal - Flower Classification on TPU |
13,558,595 | from pandas import read_csv
from datetime import datetime
from matplotlib import pyplot
from statsmodels.tsa.arima_model import ARIMA
from sklearn.metrics import mean_squared_error
from math import sqrt
from time import time
from sklearn.metrics import mean_squared_error<remove_duplicates> | def get_model(use_model):
base_model = use_model(weights='imagenet',
include_top=False, pooling='avg',
input_shape=(*IMAGE_SIZE, 3))
x = base_model.output
predictions = Dense(104, activation='softmax' )(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(
optimizer='nadam',
loss = 'sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy']
)
return model
with strategy.scope() :
model1 = get_model(DenseNet201)
model1.load_weights("/kaggle/input/start-with-densenet201/my_densenet_201.h5" ) | Petals to the Metal - Flower Classification on TPU |
13,558,595 | country='India'
state=''
sliced_data=data.loc[(data['Province_State']==state)&(data['Country_Region']==country),:]
test_sliced=test.loc[(test['Province_State']==state)&(test['Country_Region']==country),:]
print(sliced_data)
sliced_data=sliced_data.drop_duplicates()
sliced_data=sliced_data.reset_index(drop=True)
sliced_data=sliced_data.sort_values(by='Date')
if sliced_data.loc[sliced_data['ConfirmedCases']>0,:].shape[0]>0:
sliced_data=sliced_data.loc[sliced_data['ConfirmedCases']>0,:]
sliced_data=sliced_data.reset_index(drop=True)
max_date_train=sliced_data['Date'].max()
max_date_test=test_sliced['Date'].max()
forcast_days=int(( max_date_test-max_date_train)/np.timedelta64(1, 'D'))
history=sliced_data['ConfirmedCases'].to_list()
print('history')
print(history)
if len(history)==1:
history.append(history[0])
best_cfg,best_score=evaluate_models(history,forcast_days,range(10),range(7),range(7))
preds=[]
model = ARIMA(history, order=best_cfg)
model_fit = model.fit(disp=0)
preds=model_fit.forecast(steps=forcast_days)[0]
preds=[round(p)if p>0 else 0 for p in preds]
dates=[max_date_train+timedelta(days=day+1)for day in range(forcast_days)]
predictions=pd.DataFrame()
predictions['Date']=dates
predictions['ConfirmedCases']=preds
test_sliced=test_sliced.merge(sliced_data[['Date','ConfirmedCases']], on='Date',how='left')
test_sliced['ConfirmedCases']=test_sliced['ConfirmedCases_y']
del test_sliced['ConfirmedCases_y']
del test_sliced['ConfirmedCases_x']
test_sliced=test_sliced.merge(predictions, on='Date',how='left')
test_sliced['ConfirmedCases_x'][test_sliced['ConfirmedCases_x'].isna() ]=test_sliced['ConfirmedCases_y'][test_sliced['ConfirmedCases_x'].isna() ]
test_sliced['ConfirmedCases']=test_sliced['ConfirmedCases_x']
del test_sliced['ConfirmedCases_y']
del test_sliced['ConfirmedCases_x']
sliced_data_bck=sliced_data.copy()
if sliced_data.loc[sliced_data['Fatalities']>0,:].shape[0]>0:
sliced_data=sliced_data.loc[sliced_data['Fatalities']>0,:]
sliced_data=sliced_data.reset_index(drop=True)
max_date_train=sliced_data['Date'].max()
max_date_test=test_sliced['Date'].max()
forcast_days=int(( max_date_test-max_date_train)/np.timedelta64(1, 'D'))
history=sliced_data['Fatalities'].to_list()
if len(history)==1:
history.append(history[0])
best_cfg,best_score=evaluate_models(history,forcast_days,range(5),range(5),range(5))
preds=[]
model=None
model = ARIMA(history, order=best_cfg)
model_fit = model.fit(disp=0)
preds=model_fit.forecast(steps=forcast_days)[0]
preds=[round(p)if p>0 else 0 for p in preds]
dates=[max_date_train+timedelta(days=day+1)for day in range(forcast_days)]
predictions_f=pd.DataFrame()
predictions_f['Date']=dates
predictions_f['Fatalities']=preds
test_sliced=test_sliced.merge(sliced_data_bck[['Date','Fatalities']], on='Date',how='left')
test_sliced['Fatalities']=test_sliced['Fatalities_y']
del test_sliced['Fatalities_y']
del test_sliced['Fatalities_x']
test_sliced=test_sliced.merge(predictions_f, on='Date',how='left')
test_sliced['Fatalities_x'][test_sliced['Fatalities_x'].isna() ]=test_sliced['Fatalities_y'][test_sliced['Fatalities_x'].isna() ]
test_sliced['Fatalities']=test_sliced['Fatalities_x']
del test_sliced['Fatalities_y']
del test_sliced['Fatalities_x']
test=test.merge(test_sliced,on='ForecastId',how='left')
test['ConfirmedCases_x'][test['ConfirmedCases_y'].notna() ]=test['ConfirmedCases_y'][test['ConfirmedCases_y'].notna() ]
test['Fatalities_x'][test['Fatalities_y'].notna() ]=test['Fatalities_y'][test['Fatalities_y'].notna() ]
new_cols=[]
for col in test.columns:
if col[-2:]=='_y':
del test[col]
elif col[-2:]=='_x':
new_cols.append(col[:-2])
else:
new_cols.append(col)
test.columns=new_cols
test.loc[(test['Province_State']==state)&(test['Country_Region']==country),:].head()
plt.plot('Date', 'ConfirmedCases', data=sliced_data, color='blue', linewidth=2)
plt.plot('Date','ConfirmedCases',data=test_sliced,color='orange',linewidth=2)
plt.plot('Date', 'Fatalities', data=sliced_data, color='purple', linewidth=2)
plt.plot('Date','Fatalities',data=test_sliced,color='red',linewidth=2)
plt.show()<filter> | def get_model(use_model):
base_model = use_model(weights='imagenet',
include_top=False, pooling='avg',
input_shape=(*IMAGE_SIZE, 3))
x = base_model.output
predictions = Dense(104, activation='softmax' )(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(
optimizer='nadam',
loss = 'sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy']
)
return model
with strategy.scope() :
model2 = get_model(EfficientNetB7)  # the checkpoint loaded below is an EfficientNetB7 snapshot, not ResNet101V2
model2.load_weights("/kaggle/input/start-with-pre-train/my_ef_net_b7.h5" ) | Petals to the Metal - Flower Classification on TPU |
13,558,595 | test.loc[(test['Province_State']==state)&(test['Country_Region']==country),['Country_Region','Date','ConfirmedCases','Fatalities']]<feature_engineering> | val_dataset = get_validation_dataset()
images_ds = val_dataset.map(lambda image, label: image)
labels_ds = val_dataset.map(lambda image, label: label ).unbatch()
val_labels = next(iter(labels_ds.batch(NUM_VALIDATION_IMAGES)) ).numpy()
m1 = model1.predict(images_ds)
m2 = model2.predict(images_ds)
scores = []
for alpha in np.linspace(0,1,100):
val_probabilities = alpha*m1+(1-alpha)*m2
val_predictions = np.argmax(val_probabilities, axis=-1)
scores.append(f1_score(val_labels, val_predictions, labels=range(104), average='macro'))
best_alpha = np.linspace(0, 1, 100)[np.argmax(scores)]  # grid step is 1/99, so index back into the grid
print('Best alpha: ' + str(best_alpha)) | Petals to the Metal - Flower Classification on TPU |
13,558,595 | <remove_duplicates><EOS> | test_ds = get_test_dataset(ordered=True)
print('Computing predictions...')
test_images_ds = test_ds.map(lambda image, idnum: image)
probabilities1 = model1.predict(test_images_ds)
probabilities2 = model2.predict(test_images_ds)
probabilities = best_alpha * probabilities1 +(1 - best_alpha)* probabilities2
predictions = np.argmax(probabilities, axis=-1)
print(predictions)
print('Generating submission.csv file...')
test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U')
np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='' ) | Petals to the Metal - Flower Classification on TPU |
13,366,635 | <SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<save_to_csv> | !pip install -q efficientnet | Petals to the Metal - Flower Classification on TPU |
13,366,635 | test.to_csv("test_2.csv" )<save_to_csv> | %matplotlib inline
print("Tensorflow version " + tf.__version__ ) | Petals to the Metal - Flower Classification on TPU |
13,366,635 | sumb=pd.read_csv('/kaggle/input/covid19-global-forecasting-week-3/submission.csv')
output=pd.DataFrame()
output['ForecastId']=test['ForecastId'].astype(int)
output['ConfirmedCases']=test['ConfirmedCases'].astype(int)
output['Fatalities']=test['Fatalities'].astype(int)
output.to_csv('submission.csv',index=False)
<load_from_csv> | GCS_DS_PATH = KaggleDatasets().get_gcs_path("tpu-getting-started" ) | Petals to the Metal - Flower Classification on TPU |
13,366,635 | titanic_data=pd.read_csv("/kaggle/input/titanic/train.csv")
titanic_data.head()<load_from_csv> | IMAGE_SIZE = [512, 512]
BATCH_SIZE = 16 * strategy.num_replicas_in_sync
GCS_PATH_SELECT = {
192: GCS_DS_PATH + '/tfrecords-jpeg-192x192',
224: GCS_DS_PATH + '/tfrecords-jpeg-224x224',
331: GCS_DS_PATH + '/tfrecords-jpeg-331x331',
512: GCS_DS_PATH + '/tfrecords-jpeg-512x512'
}
GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]]
VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec')
TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec')
SEED = 2020 | Petals to the Metal - Flower Classification on TPU |
13,366,635 | test_data=pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head()<feature_engineering> | def decode_image(image_data):
image = tf.image.decode_jpeg(image_data, channels=3)
image = tf.cast(image, tf.float32)/ 255.0
image = tf.reshape(image, [*IMAGE_SIZE, 3])
return image
def read_labeled_tfrecord(example):
LABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"class": tf.io.FixedLenFeature([], tf.int64),
}
example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
image = decode_image(example['image'])
label = tf.cast(example['class'], tf.int32)
return image, label
def read_unlabeled_tfrecord(example):
UNLABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"id": tf.io.FixedLenFeature([], tf.string),
}
example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
image = decode_image(example['image'])
idnum = example['id']
return image, idnum
def load_dataset(filenames, labeled=True, ordered=False):
ignore_order = tf.data.Options()
if not ordered:
ignore_order.experimental_deterministic = False
dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO)
dataset = dataset.with_options(ignore_order)
dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO)
return dataset
def get_validation_dataset() :
dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=True)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.cache()
dataset = dataset.prefetch(AUTO)
return dataset
def get_test_dataset(ordered=False):
dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
def count_data_items(filenames):
n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames]
return np.sum(n)
NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES)
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
print('Dataset: {} validation images, {} unlabeled test images'.format(NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES)) | Petals to the Metal - Flower Classification on TPU |
13,366,635 | !pip install --upgrade pip
!pip install nameparser
from nameparser import HumanName
titanic_data['NameTitle']=titanic_data['Name'].apply(lambda x: HumanName(x ).title)
test_data['NameTitle']=test_data['Name'].apply(lambda x: HumanName(x ).title)
<groupby> | def get_model(use_model):
base_model = use_model(weights='imagenet',
include_top=False, pooling='avg',
input_shape=(*IMAGE_SIZE, 3))
x = base_model.output
predictions = Dense(104, activation='softmax' )(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(
optimizer='nadam',
loss = 'sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy']
)
return model
with strategy.scope() :
model1 = get_model(DenseNet201)
model1.load_weights("/kaggle/input/start-with-pre-train-densenet201/my_densenet201.h5" ) | Petals to the Metal - Flower Classification on TPU |
13,366,635 | titanic_data.groupby(['NameTitle'] ).agg('count')
<create_dataframe> | def get_model(use_model):
base_model = use_model(weights='imagenet',
include_top=False, pooling='avg',
input_shape=(*IMAGE_SIZE, 3))
x = base_model.output
predictions = Dense(104, activation='softmax' )(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(
optimizer='nadam',
loss = 'sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy']
)
return model
with strategy.scope() :
model2 = get_model(Xception)
model2.load_weights("/kaggle/input/start-with-pre-train-xception/my_xception.h5" ) | Petals to the Metal - Flower Classification on TPU |
13,366,635 | feature_list=["Pclass","Sex","Age","SibSp","Parch","Fare","NameTitle", "Survived"]
feature_list_minus_y=["Pclass","Sex","Age","SibSp","Parch","Fare","NameTitle"]
titanic_data=titanic_data[feature_list]
titanic_test=test_data
test_data=test_data[feature_list_minus_y]<feature_engineering> | val_dataset = get_validation_dataset()
images_ds = val_dataset.map(lambda image, label: image)
labels_ds = val_dataset.map(lambda image, label: label ).unbatch()
val_labels = next(iter(labels_ds.batch(NUM_VALIDATION_IMAGES)) ).numpy()
m1 = model1.predict(images_ds)
m2 = model2.predict(images_ds)
scores = []
for alpha in np.linspace(0,1,100):
val_probabilities = alpha*m1+(1-alpha)*m2
val_predictions = np.argmax(val_probabilities, axis=-1)
scores.append(f1_score(val_labels, val_predictions, labels=range(104), average='macro'))
best_alpha = np.linspace(0, 1, 100)[np.argmax(scores)]  # grid step is 1/99, so index back into the grid
print('Best alpha: ' + str(best_alpha)) | Petals to the Metal - Flower Classification on TPU |
13,366,635 | <normalization><EOS> | test_ds = get_test_dataset(ordered=True)
print('Computing predictions...')
test_images_ds = test_ds.map(lambda image, idnum: image)
probabilities1 = model1.predict(test_images_ds)
probabilities2 = model2.predict(test_images_ds)
probabilities = best_alpha * probabilities1 +(1 - best_alpha)* probabilities2
predictions = np.argmax(probabilities, axis=-1)
print(predictions)
print('Generating submission.csv file...')
test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U')
np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='' ) | Petals to the Metal - Flower Classification on TPU |
13,170,796 | <SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<categorify> | !pip install -q efficientnet | Petals to the Metal - Flower Classification on TPU |
13,170,796 | titanic_data=standardizeData(titanic_data)
test_data=standardizeData(test_data )<split> | from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2
from tensorflow.keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from tensorflow.keras.applications.xception import Xception
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet_v2 import ResNet50V2, ResNet101V2, ResNet152V2
from tensorflow.keras.applications.nasnet import NASNetLarge
from efficientnet.tfkeras import EfficientNetB7, EfficientNetL2, EfficientNetB0, EfficientNetB1 | Petals to the Metal - Flower Classification on TPU |
13,170,796 | train_data, mytest_data = train_test_split(titanic_data, test_size=0.2 )<prepare_x_and_y> | %matplotlib inline
print("Tensorflow version " + tf.__version__ ) | Petals to the Metal - Flower Classification on TPU |
13,170,796 | y_train=train_data["Survived"]
y_test=mytest_data["Survived"]
X_train=train_data.drop("Survived",axis=1)
X_test=mytest_data.drop("Survived",axis=1 )<train_model> | GCS_DS_PATH = KaggleDatasets().get_gcs_path() | Petals to the Metal - Flower Classification on TPU |
13,170,796 | model=GradientBoostingClassifier(n_estimators=100,max_depth=5,random_state=1)
model.fit(X_train,y_train )<compute_test_metric> | IMAGE_SIZE = [331, 331]
EPOCHS = 30
BATCH_SIZE = 16 * strategy.num_replicas_in_sync
GCS_PATH_SELECT = {
192: GCS_DS_PATH + '/tfrecords-jpeg-192x192',
224: GCS_DS_PATH + '/tfrecords-jpeg-224x224',
331: GCS_DS_PATH + '/tfrecords-jpeg-331x331',
512: GCS_DS_PATH + '/tfrecords-jpeg-512x512'
}
GCS_PATH = GCS_PATH_SELECT[IMAGE_SIZE[0]]
TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec')
VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec')
TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec')
SEED = 2020 | Petals to the Metal - Flower Classification on TPU |
13,170,796 | mypredictions=model.predict(X_test)
print(accuracy_score(y_test, mypredictions))
print('F1 score:',f1_score(y_test, mypredictions))<choose_model_class> | def decode_image(image_data):
image = tf.image.decode_jpeg(image_data, channels=3)
image = tf.cast(image, tf.float32)/ 255.0
image = tf.reshape(image, [*IMAGE_SIZE, 3])
return image
def read_labeled_tfrecord(example):
LABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"class": tf.io.FixedLenFeature([], tf.int64),
}
example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
image = decode_image(example['image'])
label = tf.cast(example['class'], tf.int32)
return image, label
def read_unlabeled_tfrecord(example):
UNLABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"id": tf.io.FixedLenFeature([], tf.string),
}
example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
image = decode_image(example['image'])
idnum = example['id']
return image, idnum
def load_dataset(filenames, labeled=True, ordered=False):
ignore_order = tf.data.Options()
if not ordered:
ignore_order.experimental_deterministic = False
dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO)
dataset = dataset.with_options(ignore_order)
dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO)
return dataset
def data_augment(image, label):
flag = 3
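# flag is fixed at 3 here, so only the random-crop branch below is ever applied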
coef_1 = random.randint(70, 90)* 0.01
coef_2 = random.randint(70, 90)* 0.01
if flag == 1:
image = tf.image.random_flip_left_right(image, seed=SEED)
elif flag == 2:
image = tf.image.random_flip_up_down(image, seed=SEED)
else:
image = tf.image.random_crop(image, [int(IMAGE_SIZE[0]*coef_1), int(IMAGE_SIZE[0]*coef_2), 3],seed=SEED)
return image, label
def get_training_dataset() :
dataset = load_dataset(TRAINING_FILENAMES, labeled=True)
dataset = dataset.map(data_augment, num_parallel_calls=AUTO)
dataset = dataset.repeat()
dataset = dataset.shuffle(2048)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
def get_validation_dataset() :
dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=False)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.cache()
dataset = dataset.prefetch(AUTO)
return dataset
def get_test_dataset(ordered=False):
dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
def count_data_items(filenames):
n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames]
return np.sum(n)
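# the trailing digits in each TFRecord filename encode its record count, so counting needs no pass over the data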
NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES)
NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES)
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE
print('Dataset: {} training images, {} validation images, {} unlabeled test images'.format(NUM_TRAINING_IMAGES, NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES)) | Petals to the Metal - Flower Classification on TPU |
13,170,796 | tf.random.set_seed(seed_value)
nmodel=tf.keras.models.Sequential([tf.keras.layers.InputLayer(input_shape=(16,)) ,
tf.keras.layers.Dense(8,activation=tf.nn.relu),
tf.keras.layers.Dropout(0.4),
tf.keras.layers.Dense(8,activation=tf.nn.relu),
tf.keras.layers.Dropout(0.4),
tf.keras.layers.Dense(8,activation=tf.nn.relu),
tf.keras.layers.Dropout(0.4),
tf.keras.layers.Dense(1,activation='sigmoid')])
<train_model> | LR_START = 0.00001
LR_MAX = 0.00005 * strategy.num_replicas_in_sync
LR_MIN = 0.00001
LR_RAMPUP_EPOCHS = 5
LR_SUSTAIN_EPOCHS = 0
LR_EXP_DECAY =.75
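# schedule: linear warm-up for LR_RAMPUP_EPOCHS, optional plateau at LR_MAX, then exponential decay toward LR_MIN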
def lrfn(epoch):
if epoch < LR_RAMPUP_EPOCHS:
lr =(LR_MAX - LR_START)/ LR_RAMPUP_EPOCHS * epoch + LR_START
elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
lr = LR_MAX
else:
lr =(LR_MAX - LR_MIN)* LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS)+ LR_MIN
return lr
lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=True)
rng = [i for i in range(EPOCHS)]
y = [lrfn(x)for x in rng]
plt.plot(rng, y)
print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1])) | Petals to the Metal - Flower Classification on TPU |
13,170,796 | nmodel.compile(optimizer='Adam',
loss='mean_squared_error',metrics=['accuracy'])
nmodel.fit(X_train,y_train,batch_size=32,epochs=50 )<compute_test_metric> | def get_model(use_model):
base_model = use_model(weights='imagenet',
include_top=False, pooling='avg',
input_shape=(*IMAGE_SIZE, 3))
x = base_model.output
predictions = Dense(104, activation='softmax' )(x)
return Model(inputs=base_model.input, outputs=predictions)
with strategy.scope() :
model = get_model(ResNet50)
model.compile(
optimizer='nadam',
loss = 'sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy']
)
| Petals to the Metal - Flower Classification on TPU |
13,170,796 | y_pred=nmodel.predict(X_test)
y_pred = [np.round(x)for x in y_pred]
print('Accuracy:',accuracy_score(y_test, y_pred))
score=nmodel.evaluate(X_test,y_test,verbose=1)
<prepare_x_and_y> | history = model.fit(get_training_dataset() ,
steps_per_epoch=STEPS_PER_EPOCH,
epochs=EPOCHS,
callbacks=[lr_callback, ModelCheckpoint(filepath='my_ef_net_b7.h5', monitor='val_loss',
save_best_only=True)],
validation_data=get_validation_dataset() ,
workers = 3 ) | Petals to the Metal - Flower Classification on TPU |
13,170,796 | y_train=titanic_data["Survived"]
X_train=titanic_data
X_train=X_train.drop("Survived",axis=1 )<predict_on_test> | model = tf.keras.models.load_model('my_ef_net_b7.h5' ) | Petals to the Metal - Flower Classification on TPU |
13,170,796 | <save_to_csv><EOS> | test_ds = get_test_dataset(ordered=True)
print('Computing predictions...')

test_images_ds = test_ds.map(lambda image, idnum: image)
probabilities = model.predict(test_images_ds)
predictions = np.argmax(probabilities, axis=-1)
print(predictions)
print('Creating submission.csv...')
test_ids_ds = test_ds.map(lambda image, idnum: idnum ).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U')
np.savetxt('submission.csv', np.rec.fromarrays([test_ids, predictions]), fmt=['%s', '%d'], delimiter=',', header='id,label', comments='' ) | Petals to the Metal - Flower Classification on TPU |
12,363,766 | <SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<set_options> | !pip install --quiet efficientnet
def seed_everything(seed=0):
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
seed = 0
seed_everything(seed)
warnings.filterwarnings('ignore' ) | Petals to the Metal - Flower Classification on TPU |
12,363,766 | def get_available_gpus() :
local_device_protos = device_lib.list_local_devices()
print(local_device_protos)
return [x.name for x in local_device_protos if x.device_type == 'GPU']
get_available_gpus()<define_variables> | try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print(f'Running on TPU {tpu.master() }')
except ValueError:
tpu = None
if tpu:
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
strategy = tf.distribute.get_strategy()
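# fall back to the default strategy, which runs on CPU or a single GPU when no TPU is available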
AUTO = tf.data.experimental.AUTOTUNE
REPLICAS = strategy.num_replicas_in_sync
print(f'REPLICAS: {REPLICAS}' ) | Petals to the Metal - Flower Classification on TPU |
12,363,766 | print(os.listdir("../input"))
directory_train = "../input/train.csv"
directory_test = "../input/test.csv"<load_from_csv> | BATCH_SIZE = 16 * REPLICAS
LEARNING_RATE = 3e-5 * REPLICAS
EPOCHS = 20
HEIGHT = 331
WIDTH = 331
CHANNELS = 3
N_CLASSES = 104
ES_PATIENCE = 5
N_FOLDS = 5
FOLDS_USED = 5 | Petals to the Metal - Flower Classification on TPU |
12,363,766 | test_data = pd.read_csv(directory_test)
print(test_data.shape)
test_data.head(5 )<load_from_csv> | GCS_PATH = KaggleDatasets().get_gcs_path() + '/tfrecords-jpeg-%sx%s' %(HEIGHT, WIDTH)
TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec')+ tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec')
TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec')
CLASSES = [
'pink primrose', 'hard-leaved pocket orchid', 'canterbury bells', 'sweet pea',
'wild geranium', 'tiger lily', 'moon orchid', 'bird of paradise', 'monkshood',
'globe thistle', 'snapdragon', "colt's foot", 'king protea', 'spear thistle',
'yellow iris', 'globe-flower', 'purple coneflower', 'peruvian lily',
'balloon flower', 'giant white arum lily', 'fire lily', 'pincushion flower',
'fritillary', 'red ginger', 'grape hyacinth', 'corn poppy',
'prince of wales feathers', 'stemless gentian', 'artichoke', 'sweet william',
'carnation', 'garden phlox', 'love in the mist', 'cosmos', 'alpine sea holly',
'ruby-lipped cattleya', 'cape flower', 'great masterwort', 'siam tulip',
'lenten rose', 'barberton daisy', 'daffodil', 'sword lily', 'poinsettia',
'bolero deep blue', 'wallflower', 'marigold', 'buttercup', 'daisy',
'common dandelion', 'petunia', 'wild pansy', 'primula', 'sunflower',
'lilac hibiscus', 'bishop of llandaff', 'gaura', 'geranium', 'orange dahlia',
'pink-yellow dahlia', 'cautleya spicata', 'japanese anemone',
'black-eyed susan', 'silverbush', 'californian poppy', 'osteospermum',
'spring crocus', 'iris', 'windflower', 'tree poppy', 'gazania', 'azalea',
'water lily', 'rose', 'thorn apple', 'morning glory', 'passion flower',
'lotus', 'toad lily', 'anthurium', 'frangipani', 'clematis', 'hibiscus',
'columbine', 'desert-rose', 'tree mallow', 'magnolia', 'cyclamen ',
'watercress', 'canna lily', 'hippeastrum ', 'bee balm', 'pink quill',
'foxglove', 'bougainvillea', 'camellia', 'mallow', 'mexican petunia',
'bromelia', 'blanket flower', 'trumpet creeper', 'blackberry lily',
'common tulip', 'wild rose'] | Petals to the Metal - Flower Classification on TPU |
12,363,766 | train_data = pd.read_csv(directory_train)
print(train_data.shape)
train_data.head(5 )<categorify> | def batch_to_numpy_images_and_labels(data):
images, labels = data
numpy_images = images.numpy()
numpy_labels = labels.numpy()
if numpy_labels.dtype == object:
numpy_labels = [None for _ in enumerate(numpy_images)]
return numpy_images, numpy_labels
def title_from_label_and_target_(label, correct_label):
if correct_label is None:
return CLASSES[label], True
correct =(label == correct_label)
return "{} [{}{}{}]".format(CLASSES[label], 'OK' if correct else 'NO', u"\u2192" if not correct else '',
CLASSES[correct_label] if not correct else ''), correct
def display_one_flower(image, title, subplot, red=False, titlesize=16):
plt.subplot(*subplot)
plt.axis('off')
plt.imshow(image)
if len(title)> 0:
plt.title(title, fontsize=int(titlesize)if not red else int(titlesize/1.2), color='red' if red else 'black', fontdict={'verticalalignment':'center'}, pad=int(titlesize/1.5))
return(subplot[0], subplot[1], subplot[2]+1)
def display_batch_of_images(databatch, predictions=None):
images, labels = batch_to_numpy_images_and_labels(databatch)
if labels is None:
labels = [None for _ in enumerate(images)]
rows = int(math.sqrt(len(images)))
cols = len(images)//rows
FIGSIZE = 13.0
SPACING = 0.1
subplot=(rows,cols,1)
if rows < cols:
plt.figure(figsize=(FIGSIZE,FIGSIZE/cols*rows))
else:
plt.figure(figsize=(FIGSIZE/rows*cols,FIGSIZE))
for i,(image, label)in enumerate(zip(images[:rows*cols], labels[:rows*cols])) :
title = '' if label is None else CLASSES[label]
correct = True
if predictions is not None:
title, correct = title_from_label_and_target_(predictions[i], label)
dynamic_titlesize = FIGSIZE*SPACING/max(rows,cols)*40+3
subplot = display_one_flower(image, title, subplot, not correct, titlesize=dynamic_titlesize)
plt.tight_layout()
if label is None and predictions is None:
plt.subplots_adjust(wspace=0, hspace=0)
else:
plt.subplots_adjust(wspace=SPACING, hspace=SPACING)
plt.show() | Petals to the Metal - Flower Classification on TPU |
12,363,766 | def format_label() :
return [train_data["label"],
train_data.drop(labels = ["label"],axis = 1)
]
y_train, x_train = format_label()
print(x_train.head(5), y_train.head(5))<categorify> | def decode_image(image_data):
image = tf.image.decode_jpeg(image_data, channels=CHANNELS)
image = tf.cast(image, tf.float32)/ 255.0
image = tf.reshape(image, [HEIGHT, WIDTH, CHANNELS])
return image
def read_labeled_tfrecord(example):
LABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"class": tf.io.FixedLenFeature([], tf.int64),
}
example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
image = decode_image(example['image'])
label = tf.cast(example['class'], tf.int32)
return image, label
def read_unlabeled_tfrecord(example):
UNLABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"id": tf.io.FixedLenFeature([], tf.string),
}
example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
image = decode_image(example['image'])
idnum = example['id']
return image, idnum
def load_dataset(filenames, labeled=True, ordered=False):
ignore_order = tf.data.Options()
if not ordered:
ignore_order.experimental_deterministic = False
dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO)
dataset = dataset.with_options(ignore_order)
dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO)
return dataset
def get_dataset(filenames, labeled=True, ordered=True, repeated=False, shuffled=False, augmented=False):
dataset = load_dataset(filenames, labeled=labeled, ordered=ordered)
if augmented:
dataset = dataset.map(data_augment, num_parallel_calls=AUTO)
if repeated:
dataset = dataset.repeat()
if shuffled:
dataset = dataset.shuffle(2048)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
def get_training_dataset_preview(ordered=True):
dataset = load_dataset(TRAINING_FILENAMES, labeled=True, ordered=ordered)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.cache()
dataset = dataset.prefetch(AUTO)
return dataset
def get_test_dataset(ordered=False):
dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset | Petals to the Metal - Flower Classification on TPU |
12,363,766 | def normalization() :
return [
x_train / 255.0,
test_data / 255.0
]
def reshape() :
return [
x_train.values.reshape(-1,28, 28,1),
test_data.values.reshape(-1,28,28,1)
]
def tensorflow_encoding() :
return to_categorical(y_train, num_classes = 10)
x_train, test_data = normalization()
print('x_train:',x_train.shape)
print('test:',test_data.shape)
print("------ Reshape ---------------")
x_train, test_data = reshape()
print('x_train:',x_train.shape)
print('test:',test_data.shape)
y_train = tensorflow_encoding()
print(y_train )<split> | def count_data_items(filenames):
n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames]
return np.sum(n)
NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES)
print(f'Number of training images {NUM_TRAINING_IMAGES}')
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
print(f'Number of test images {NUM_TEST_IMAGES}' ) | Petals to the Metal - Flower Classification on TPU |
12,363,766 | def separating_data() :
return train_test_split(x_train,
y_train,
test_size = 0.1,
random_state = 42
)
x_train, x_validation, y_train, y_validation = separating_data()<choose_model_class> | train_dataset = get_training_dataset_preview(ordered=True)
display_batch_of_images(next(iter(train_dataset.unbatch().batch(20)))) | Petals to the Metal - Flower Classification on TPU |
12,363,766 | activations = ['relu', 'softmax']
max_epochs = 2
model = Sequential()
model.add(Conv2D(filters = 32,
kernel_size =(5,5),
padding = 'Same',
activation = activations[0],
input_shape =(28,28,1)))
model.add(Conv2D(filters = 32,
kernel_size =(5,5),
padding = 'Same',
activation = activations[0]))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64,
kernel_size =(3,3),
padding = 'Same',
activation = activations[0]))
model.add(Conv2D(filters = 64,
kernel_size =(3,3),
padding = 'Same',
activation = activations[0]))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = activations[0]))
model.add(Dropout(0.5))
model.add(Dense(10, activation = activations[1]))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=["accuracy"])
generator = ImageDataGenerator()
batches = generator.flow(x_train,
y_train,
batch_size=64)
batches_value = generator.flow(x_validation,
y_validation,
batch_size=64)
history = model.fit_generator(generator=batches,
steps_per_epoch=batches.n,
epochs=max_epochs,
validation_data=batches_value,
validation_steps=batches_value.n )<save_to_csv> | test_dataset = get_test_dataset(ordered=True)
display_batch_of_images(next(iter(test_dataset.unbatch().batch(20)))) | Petals to the Metal - Flower Classification on TPU |
12,363,766 | results = model.predict(test_data)
results = np.argmax(results,axis = 1)
results = pd.Series(results,name="Label")
submission = pd.concat([pd.Series(range(1,28001),
name = "ImageId"),
results],axis = 1)
submission.to_csv("submission.csv",index=False )<set_options> | def random_dropout(img, sl=0.1, sh=0.2, rl=0.4):
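# random erasing: with probability 0.75, replace one randomly sized and placed rectangle of the image with zeros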
p=random.random()
if p>=0.25:
w, h, c = HEIGHT, WIDTH, 3
origin_area = tf.cast(h*w, tf.float32)
e_size_l = tf.cast(tf.round(tf.sqrt(origin_area * sl * rl)) , tf.int32)
e_size_h = tf.cast(tf.round(tf.sqrt(origin_area * sh / rl)) , tf.int32)
e_height_h = tf.minimum(e_size_h, h)
e_width_h = tf.minimum(e_size_h, w)
erase_height = tf.random.uniform(shape=[], minval=e_size_l, maxval=e_height_h, dtype=tf.int32)
erase_width = tf.random.uniform(shape=[], minval=e_size_l, maxval=e_width_h, dtype=tf.int32)
erase_area = tf.zeros(shape=[erase_height, erase_width, c])
erase_area = tf.cast(erase_area, tf.uint8)
pad_h = h - erase_height
pad_top = tf.random.uniform(shape=[], minval=0, maxval=pad_h, dtype=tf.int32)
pad_bottom = pad_h - pad_top
pad_w = w - erase_width
pad_left = tf.random.uniform(shape=[], minval=0, maxval=pad_w, dtype=tf.int32)
pad_right = pad_w - pad_left
erase_mask = tf.pad([erase_area], [[0,0],[pad_top, pad_bottom], [pad_left, pad_right], [0,0]], constant_values=1)
erase_mask = tf.squeeze(erase_mask, axis=0)
erased_img = tf.multiply(tf.cast(img,tf.float32), tf.cast(erase_mask, tf.float32))
return tf.cast(erased_img, img.dtype)
else:
return tf.cast(img, img.dtype ) | Petals to the Metal - Flower Classification on TPU |
12,363,766 | %matplotlib inline
init_notebook_mode(connected=True)
warnings.filterwarnings("ignore", category=ConvergenceWarning)
print(os.listdir(".. /input"))
<define_variables> | def blockout(image, DIM = HEIGHT, PROBABILITY = 1, CT = 8, SZ = 0.2):
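# cutout augmentation: zero out up to CT square patches of side SZ*DIM at random positions in the image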
prob = tf.cast(tf.random.uniform([],0,1)<PROBABILITY, tf.int32)
if(prob==0)|(CT==0)|(SZ==0): return image
for k in range(CT):
x = tf.cast(tf.random.uniform([],0,DIM),tf.int32)
y = tf.cast(tf.random.uniform([],0,DIM),tf.int32)
WIDTH = tf.cast(SZ*DIM,tf.int32)* prob
ya = tf.math.maximum(0,y-WIDTH//2)
yb = tf.math.minimum(DIM,y+WIDTH//2)
xa = tf.math.maximum(0,x-WIDTH//2)
xb = tf.math.minimum(DIM,x+WIDTH//2)
one = image[ya:yb,0:xa,:]
two = tf.zeros([yb-ya,xb-xa,3])
three = image[ya:yb,xb:DIM,:]
middle = tf.concat([one,two,three],axis=1)
image = tf.concat([image[0:ya,:,:],middle,image[yb:DIM,:,:]],axis=0)
image = tf.reshape(image,[DIM,DIM,3])
return image | Petals to the Metal - Flower Classification on TPU |
12,363,766 | img_rows, img_cols = 28, 28
np.random.seed(5)
<find_best_params> | def data_augment(image, label):
p_rotation = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
p_spatial = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
p_rotate = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
p_pixel = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
p_shear = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
p_shift = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
p_crop = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
if p_spatial >=.2:
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
if p_rotate >.75:
image = tf.image.rot90(image, k=3)
elif p_rotate >.5:
image = tf.image.rot90(image, k=2)
elif p_rotate >.25:
image = tf.image.rot90(image, k=1)
if p_rotation >=.3:
image = transform_rotation(image, height=HEIGHT, rotation=45.)
if p_shift >=.3:
image = transform_shift(image, height=HEIGHT, h_shift=15., w_shift=15.)
if p_shear >=.3:
image = transform_shear(image, height=HEIGHT, shear=20.)
# central crop for the higher p_crop range, random crop for the mid range
if p_crop >.7:
if p_crop >.9:
image = tf.image.central_crop(image, central_fraction=.7)
elif p_crop >.8:
image = tf.image.central_crop(image, central_fraction=.8)
else:
image = tf.image.central_crop(image, central_fraction=.9)
elif p_crop >.4:
crop_size = tf.random.uniform([], int(HEIGHT*.7), HEIGHT, dtype=tf.int32)
image = tf.image.random_crop(image, size=[crop_size, crop_size, CHANNELS])
image = tf.image.resize(image, size=[HEIGHT, WIDTH])
if p_pixel >=.2:
if p_pixel >=.8:
image = tf.image.random_saturation(image, lower=0, upper=2)
elif p_pixel >=.6:
image = tf.image.random_contrast(image, lower=.8, upper=2)
elif p_pixel >=.4:
image = tf.image.random_brightness(image, max_delta=.2)
else:
image = tf.image.adjust_gamma(image, gamma=.6)
image = blockout(image)
return image, label | Petals to the Metal - Flower Classification on TPU |
12,363,766 | def get_best_score(model):
print(model.best_score_)
print(model.best_params_)
print(model.best_estimator_)
return model.best_score_<compute_test_metric> | def transform_rotation(image, height, rotation):
DIM = height
XDIM = DIM%2
rotation = rotation * tf.random.uniform([1],dtype='float32')
rotation = math.pi * rotation / 180.
c1 = tf.math.cos(rotation)
s1 = tf.math.sin(rotation)
one = tf.constant([1],dtype='float32')
zero = tf.constant([0],dtype='float32')
rotation_matrix = tf.reshape(tf.concat([c1,s1,zero, -s1,c1,zero, zero,zero,one],axis=0),[3,3])
x = tf.repeat(tf.range(DIM//2,-DIM//2,-1), DIM)
y = tf.tile(tf.range(-DIM//2,DIM//2),[DIM])
z = tf.ones([DIM*DIM],dtype='int32')
idx = tf.stack([x,y,z])
idx2 = K.dot(rotation_matrix,tf.cast(idx,dtype='float32'))
idx2 = K.cast(idx2,dtype='int32')
idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2)
idx3 = tf.stack([DIM//2-idx2[0,], DIM//2-1+idx2[1,]])
d = tf.gather_nd(image, tf.transpose(idx3))
return tf.reshape(d,[DIM,DIM,3])
def transform_shear(image, height, shear):
DIM = height
XDIM = DIM%2
shear = shear * tf.random.uniform([1],dtype='float32')
shear = math.pi * shear / 180.
one = tf.constant([1],dtype='float32')
zero = tf.constant([0],dtype='float32')
c2 = tf.math.cos(shear)
s2 = tf.math.sin(shear)
shear_matrix = tf.reshape(tf.concat([one,s2,zero, zero,c2,zero, zero,zero,one],axis=0),[3,3])
x = tf.repeat(tf.range(DIM//2,-DIM//2,-1), DIM)
y = tf.tile(tf.range(-DIM//2,DIM//2),[DIM])
z = tf.ones([DIM*DIM],dtype='int32')
idx = tf.stack([x,y,z])
idx2 = K.dot(shear_matrix,tf.cast(idx,dtype='float32'))
idx2 = K.cast(idx2,dtype='int32')
idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2)
idx3 = tf.stack([DIM//2-idx2[0,], DIM//2-1+idx2[1,]])
d = tf.gather_nd(image, tf.transpose(idx3))
return tf.reshape(d,[DIM,DIM,3])
def transform_shift(image, height, h_shift, w_shift):
DIM = height
XDIM = DIM%2
height_shift = h_shift * tf.random.uniform([1],dtype='float32')
width_shift = w_shift * tf.random.uniform([1],dtype='float32')
one = tf.constant([1],dtype='float32')
zero = tf.constant([0],dtype='float32')
shift_matrix = tf.reshape(tf.concat([one,zero,height_shift, zero,one,width_shift, zero,zero,one],axis=0),[3,3])
x = tf.repeat(tf.range(DIM//2,-DIM//2,-1), DIM)
y = tf.tile(tf.range(-DIM//2,DIM//2),[DIM])
z = tf.ones([DIM*DIM],dtype='int32')
idx = tf.stack([x,y,z])
idx2 = K.dot(shift_matrix,tf.cast(idx,dtype='float32'))
idx2 = K.cast(idx2,dtype='int32')
idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2)
idx3 = tf.stack([DIM//2-idx2[0,], DIM//2-1+idx2[1,]])
d = tf.gather_nd(image, tf.transpose(idx3))
return tf.reshape(d,[DIM,DIM,3] ) | Petals to the Metal - Flower Classification on TPU |
12,363,766 | def print_validation_report(y_true, y_pred):
print("Classification Report")
print(classification_report(y_true, y_pred))
acc_sc = accuracy_score(y_true, y_pred)
print("Accuracy : "+ str(acc_sc))
return acc_sc<load_from_csv> | train_dataset_aug = get_dataset(TRAINING_FILENAMES, labeled=True, ordered=False, repeated=True, shufled=True, augmented=True)
display_batch_of_images(next(iter(train_dataset_aug.unbatch().batch(20))))
display_batch_of_images(next(iter(train_dataset_aug.unbatch().batch(20))))
display_batch_of_images(next(iter(train_dataset_aug.unbatch().batch(20)))) | Petals to the Metal - Flower Classification on TPU |
12,363,766 | train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv" )<load_from_csv> | def create_model(input_shape, N_CLASSES):
base_model = efn.EfficientNetB4(weights='noisy-student',
include_top=False,
input_shape=input_shape)
model = tf.keras.Sequential([
base_model,
L.GlobalAveragePooling2D() ,
L.Dense(N_CLASSES, activation='softmax')
])
optimizer = optimizers.Adam(lr=LEARNING_RATE)
model.compile(optimizer=optimizer,
loss=losses.SparseCategoricalCrossentropy() ,
metrics=['sparse_categorical_accuracy'])
return model | Petals to the Metal - Flower Classification on TPU |
12,363,766 | train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv" )<prepare_x_and_y> | kfold = KFold(N_FOLDS, shuffle=True, random_state=seed)
history_list = []
complete_dataset = get_dataset(TRAINING_FILENAMES, labeled=True, ordered=True)
test_dataset = get_dataset(TEST_FILENAMES, labeled=False, ordered=True)
x_complete = complete_dataset.map(lambda image, label: image)
y_complete = next(iter(complete_dataset.unbatch().map(lambda image, label: label ).batch(NUM_TRAINING_IMAGES)) ).numpy()
x_test = test_dataset.map(lambda image, idnum: image)
complete_preds = np.zeros(( NUM_TRAINING_IMAGES, N_CLASSES))
test_preds = np.zeros(( NUM_TEST_IMAGES, N_CLASSES))
for n_fold,(trn_ind, val_ind)in enumerate(kfold.split(TRAINING_FILENAMES)) :
if n_fold >= FOLDS_USED:
break
print(f'\nFOLD: {n_fold+1}')
tf.tpu.experimental.initialize_tpu_system(tpu)
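# re-initializing the TPU system above clears accelerator memory between folds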
fold_train_filenames = np.asarray(TRAINING_FILENAMES)[trn_ind]
fold_valid_filenames = np.asarray(TRAINING_FILENAMES)[val_ind]
train_size = count_data_items(fold_train_filenames)
validation_size = count_data_items(fold_valid_filenames)
STEPS_PER_EPOCH = train_size // BATCH_SIZE
K.clear_session()
model_path = f'model_{HEIGHT}x{WIDTH}_fold_{n_fold+1}.h5'
with strategy.scope() :
model = create_model(( None, None, CHANNELS), N_CLASSES)
es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1)
checkpoint = ModelCheckpoint(model_path, monitor='val_loss', mode='min', save_best_only=True)
lr_callback = LearningRateScheduler(exponential_schedule_with_warmup, verbose=0)
history = model.fit(x=get_dataset(fold_train_filenames, labeled=True, ordered=False, repeated=True, shuffled=True, augmented=True),
validation_data=get_dataset(fold_valid_filenames, labeled=True, ordered=True),
callbacks=[checkpoint, es, lr_callback],
steps_per_epoch=STEPS_PER_EPOCH,
epochs=EPOCHS,
verbose=2 ).history
history_list.append(history)
complete_preds += model.predict(x_complete)/ FOLDS_USED
test_preds += model.predict(x_test)/ FOLDS_USED
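# predictions are probability-averaged over the trained folds, then converted to class ids below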
complete_preds = np.argmax(complete_preds, axis=-1)
test_preds = np.argmax(test_preds, axis=-1 ) | Petals to the Metal - Flower Classification on TPU |
12,363,766 | y = train["label"]
X = train.drop(["label"],axis = 1)
X_test = test<feature_engineering> | print(classification_report(y_complete, complete_preds, target_names=CLASSES)) | Petals to the Metal - Flower Classification on TPU |
12,363,766 | X = X/255.0
X_test = X_test/255.0<prepare_x_and_y> | x_samp, y_samp = dataset_to_numpy_util(complete_dataset, 9)
samp_preds = model.predict(x_samp, batch_size=9)
display_9_images_with_predictions(x_samp, samp_preds, y_samp ) | Petals to the Metal - Flower Classification on TPU |
12,363,766 | <count_values><EOS> | test_ids_ds = test_dataset.map(lambda image, idnum: idnum ).unbatch()
test_ids = next(iter(test_ids_ds.batch(NUM_TEST_IMAGES)) ).numpy().astype('U')
submission = pd.DataFrame(test_ids, columns=['id'])
submission['label'] = test_preds
submission.to_csv('submission.csv', index=False)
display(submission.head(10)) | Petals to the Metal - Flower Classification on TPU |
11,757,999 | <SOS> metric: macrofscore Kaggle data source: petals-to-the-metal-flower-classification-on-tpu<define_variables> | def seed_everything(seed=0):
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
seed = 0
seed_everything(seed)
warnings.filterwarnings("ignore" ) | Petals to the Metal - Flower Classification on TPU |
11,757,999 | li_idxs = []
for i in range(10):
for nr in range(10):
ix = y[y==nr].index[i]
li_idxs.append(ix )<train_on_grid> | try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print('Running on TPU', tpu.master())
except ValueError:
tpu = None
if tpu :
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
else :
strategy = tf.distribute.get_strategy()
print('REPLICAS:', strategy.num_replicas_in_sync )
11,757,999 | clf_Perceptron = Perceptron(random_state=0)
param_grid = { 'penalty': ['l1','l2'], 'tol': [0.05, 0.1] }
GridCV_Perceptron = GridSearchCV(clf_Perceptron, param_grid, verbose=1, cv=5)
GridCV_Perceptron.fit(X_train,y_train)
score_grid_Perceptron = get_best_score(GridCV_Perceptron )<predict_on_test> | BATCH_SIZE = 16 * strategy.num_replicas_in_sync
WARMUP_EPOCHS = 3
WARMUP_LEARNING_RATE = 1e-4 * strategy.num_replicas_in_sync
EPOCHS = 20
LEARNING_RATE = 3e-5 * strategy.num_replicas_in_sync
HEIGHT = 512
WIDTH = 512
CHANNELS = 3
N_CLASSES = 104
ES_PATIENCE = 6
RLROP_PATIENCE = 3
DECAY_DROP = 0.3
model_path = 'DenseNet201_%sx%s.h5'%(HEIGHT, WIDTH)
GCS_PATH = KaggleDatasets().get_gcs_path() + '/tfrecords-jpeg-%sx%s'%(HEIGHT, WIDTH)
TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train/*.tfrec')
VALIDATION_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec')
TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test/*.tfrec')
CLASSES = [
'pink primrose', 'hard-leaved pocket orchid', 'canterbury bells', 'sweet pea',
'wild geranium', 'tiger lily', 'moon orchid', 'bird of paradise', 'monkshood',
'globe thistle', 'snapdragon', "colt's foot", 'king protea', 'spear thistle',
'yellow iris', 'globe-flower', 'purple coneflower', 'peruvian lily',
'balloon flower', 'giant white arum lily', 'fire lily', 'pincushion flower',
'fritillary', 'red ginger', 'grape hyacinth', 'corn poppy',
'prince of wales feathers', 'stemless gentian', 'artichoke', 'sweet william',
'carnation', 'garden phlox', 'love in the mist', 'cosmos', 'alpine sea holly',
'ruby-lipped cattleya', 'cape flower', 'great masterwort', 'siam tulip',
'lenten rose', 'barberton daisy', 'daffodil', 'sword lily', 'poinsettia',
'bolero deep blue', 'wallflower', 'marigold', 'buttercup', 'daisy',
'common dandelion', 'petunia', 'wild pansy', 'primula', 'sunflower',
'lilac hibiscus', 'bishop of llandaff', 'gaura', 'geranium', 'orange dahlia',
'pink-yellow dahlia', 'cautleya spicata', 'japanese anemone', 'black-eyed susan',
'silverbush', 'californian poppy', 'osteospermum', 'spring crocus', 'iris',
'windflower', 'tree poppy', 'gazania', 'azalea', 'water lily', 'rose',
'thorn apple', 'morning glory', 'passion flower', 'lotus', 'toad lily',
'anthurium', 'frangipani', 'clematis', 'hibiscus', 'columbine', 'desert-rose',
'tree mallow', 'magnolia', 'cyclamen ', 'watercress', 'canna lily',
'hippeastrum ', 'bee balm', 'pink quill', 'foxglove', 'bougainvillea',
'camellia', 'mallow', 'mexican petunia', 'bromelia', 'blanket flower',
'trumpet creeper', 'blackberry lily', 'common tulip', 'wild rose'] | Petals to the Metal - Flower Classification on TPU |
11,757,999 | pred_val_perc = GridCV_Perceptron.predict(X_val )<compute_test_metric> | AUTO = tf.data.experimental.AUTOTUNE
def decode_image(image_data):
image = tf.image.decode_jpeg(image_data, channels=3)
image = tf.cast(image, tf.float32)/ 255.0
image = tf.reshape(image, [HEIGHT, WIDTH, 3])
return image
def read_labeled_tfrecord(example):
LABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"class": tf.io.FixedLenFeature([], tf.int64),
}
example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
image = decode_image(example['image'])
label = tf.cast(example['class'], tf.int32)
return image, label
def read_unlabeled_tfrecord(example):
UNLABELED_TFREC_FORMAT = {
"image": tf.io.FixedLenFeature([], tf.string),
"id": tf.io.FixedLenFeature([], tf.string),
}
example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
image = decode_image(example['image'])
idnum = example['id']
return image, idnum
def load_dataset(filenames, labeled=True, ordered=False):
ignore_order = tf.data.Options()
if not ordered:
ignore_order.experimental_deterministic = False
dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO)
dataset = dataset.with_options(ignore_order)
dataset = dataset.map(read_labeled_tfrecord if labeled else read_unlabeled_tfrecord, num_parallel_calls=AUTO)
return dataset
def data_augment(image, label):
image = tf.image.random_flip_left_right(image, seed=seed)
image = tf.image.random_flip_up_down(image, seed=seed)
image = tf.image.random_saturation(image, lower=0, upper=2, seed=seed)
image = tf.image.random_crop(image, size=[int(HEIGHT*.8), int(WIDTH*.8), CHANNELS], seed=seed)
return image, label
def get_training_dataset() :
dataset = load_dataset(TRAINING_FILENAMES, labeled=True)
dataset = dataset.map(data_augment, num_parallel_calls=AUTO)
dataset = dataset.repeat()
dataset = dataset.shuffle(2048)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
def get_training_dataset_preview(ordered=True):
dataset = load_dataset(TRAINING_FILENAMES, labeled=True, ordered=ordered)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.cache()
dataset = dataset.prefetch(AUTO)
return dataset
def get_validation_dataset(ordered=False):
dataset = load_dataset(VALIDATION_FILENAMES, labeled=True, ordered=ordered)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.cache()
dataset = dataset.prefetch(AUTO)
return dataset
def get_test_dataset(ordered=False):
dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTO)
return dataset
def count_data_items(filenames):
n = [int(re.compile(r"-([0-9]*)\." ).search(filename ).group(1)) for filename in filenames]
return np.sum(n ) | Petals to the Metal - Flower Classification on TPU |
11,757,999 | acc_perc = print_validation_report(y_val, pred_val_perc )<train_on_grid> | np.set_printoptions(threshold=15, linewidth=80)
def batch_to_numpy_images_and_labels(data):
images, labels = data
numpy_images = images.numpy()
numpy_labels = labels.numpy()
if numpy_labels.dtype == object:
numpy_labels = [None for _ in enumerate(numpy_images)]
return numpy_images, numpy_labels
def title_from_label_and_target(label, correct_label):
if correct_label is None:
return CLASSES[label], True
correct =(label == correct_label)
return "{} [{}{}{}]".format(CLASSES[label], 'OK' if correct else 'NO', u"\u2192" if not correct else '',
CLASSES[correct_label] if not correct else ''), correct
def display_one_flower(image, title, subplot, red=False, titlesize=16):
plt.subplot(*subplot)
plt.axis('off')
plt.imshow(image)
if len(title)> 0:
plt.title(title, fontsize=int(titlesize)if not red else int(titlesize/1.2), color='red' if red else 'black', fontdict={'verticalalignment':'center'}, pad=int(titlesize/1.5))
return(subplot[0], subplot[1], subplot[2]+1)
def display_batch_of_images(databatch, predictions=None):
images, labels = batch_to_numpy_images_and_labels(databatch)
if labels is None:
labels = [None for _ in enumerate(images)]
rows = int(math.sqrt(len(images)))
cols = len(images)//rows
FIGSIZE = 13.0
SPACING = 0.1
subplot=(rows,cols,1)
if rows < cols:
plt.figure(figsize=(FIGSIZE,FIGSIZE/cols*rows))
else:
plt.figure(figsize=(FIGSIZE/rows*cols,FIGSIZE))
for i,(image, label)in enumerate(zip(images[:rows*cols], labels[:rows*cols])) :
title = '' if label is None else CLASSES[label]
correct = True
if predictions is not None:
title, correct = title_from_label_and_target(predictions[i], label)
dynamic_titlesize = FIGSIZE*SPACING/max(rows,cols)*40+3
subplot = display_one_flower(image, title, subplot, not correct, titlesize=dynamic_titlesize)
plt.tight_layout()
if label is None and predictions is None:
plt.subplots_adjust(wspace=0, hspace=0)
else:
plt.subplots_adjust(wspace=SPACING, hspace=SPACING)
plt.show()
def dataset_to_numpy_util(dataset, N):
dataset = dataset.unbatch().batch(N)
for images, labels in dataset:
numpy_images = images.numpy()
numpy_labels = labels.numpy()
break
return numpy_images, numpy_labels
def title_from_label_and_target(label, correct_label):
label = np.argmax(label, axis=-1)
correct =(label == correct_label)
return "{} [{}{}{}]".format(CLASSES[label], str(correct), ', shoud be ' if not correct else '',
CLASSES[correct_label] if not correct else ''), correct
def display_one_flower_eval(image, title, subplot, red=False):
plt.subplot(subplot)
plt.axis('off')
plt.imshow(image)
plt.title(title, fontsize=14, color='red' if red else 'black')
return subplot+1
def display_9_images_with_predictions(images, predictions, labels):
subplot=331
plt.figure(figsize=(13,13))
for i, image in enumerate(images):
title, correct = title_from_label_and_target(predictions[i], labels[i])
subplot = display_one_flower_eval(image, title, subplot, not correct)
if i >= 8:
break
plt.tight_layout()
plt.subplots_adjust(wspace=0.1, hspace=0.1)
plt.show() | Petals to the Metal - Flower Classification on TPU |
11,757,999 | clf_LR = LogisticRegression(random_state=0)
param_grid = {'C': [0.014,0.012], 'multi_class': ['multinomial'],
'penalty': ['l1'],'solver': ['saga'], 'tol': [0.1] }
GridCV_LR = GridSearchCV(clf_LR, param_grid, verbose=1, cv=5)
GridCV_LR.fit(X_train,y_train)
score_grid_LR = get_best_score(GridCV_LR )<predict_on_test> | NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES)
train_dataset = get_training_dataset_preview(ordered=True)
y_train = next(iter(train_dataset.unbatch().map(lambda image, label: label ).batch(NUM_TRAINING_IMAGES)) ).numpy()
print('Number of training images %d' % NUM_TRAINING_IMAGES)
NUM_VALIDATION_IMAGES = count_data_items(VALIDATION_FILENAMES)
valid_dataset = get_validation_dataset(ordered=True)
y_valid = next(iter(valid_dataset.unbatch().map(lambda image, label: label ).batch(NUM_VALIDATION_IMAGES)) ).numpy()
print('Number of validation images %d' % NUM_VALIDATION_IMAGES)
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
print('Number of test images %d' % NUM_TEST_IMAGES)
test_dataset = get_test_dataset(ordered=True ) | Petals to the Metal - Flower Classification on TPU |
11,757,999 | pred_val_lr = GridCV_LR.predict(X_val)
acc_lr = print_validation_report(y_val, pred_val_lr )<train_model> | display_batch_of_images(next(iter(train_dataset.unbatch().batch(20)))) | Petals to the Metal - Flower Classification on TPU |
11,757,999 | clf_knn = KNeighborsClassifier(n_neighbors=10)
clf_knn.fit(X_train,y_train )<predict_on_test> | display_batch_of_images(next(iter(valid_dataset.unbatch().batch(20)))) | Petals to the Metal - Flower Classification on TPU |
11,757,999 | pred_val_knn = clf_knn.predict(X_val)
acc_knn = print_validation_report(y_val, pred_val_knn )<train_on_grid> | display_batch_of_images(next(iter(test_dataset.unbatch().batch(20)))) | Petals to the Metal - Flower Classification on TPU |
11,757,999 | clf_RF = RandomForestClassifier(random_state=0)
param_grid = {'max_depth': [15], 'max_features': [100],
'min_samples_split': [5],'n_estimators' : [50] }
GridCV_RF = GridSearchCV(clf_RF, param_grid, verbose=1, cv=5)
GridCV_RF.fit(X_train,y_train)
score_grid_RF = get_best_score(GridCV_RF )<predict_on_test> | def create_model(input_shape, N_CLASSES):
base_model = applications.DenseNet201(weights='imagenet',
include_top=False,
input_shape=input_shape)
base_model.trainable = False
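# the backbone stays frozen for the warm-up epochs; every layer is unfrozen later before fine-tuning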
model = tf.keras.Sequential([
base_model,
layers.GlobalAveragePooling2D() ,
layers.Dense(N_CLASSES, activation='softmax')
])
return model | Petals to the Metal - Flower Classification on TPU |
11,757,999 | pred_val_rf = GridCV_RF.predict(X_val )<compute_test_metric> | with strategy.scope() :
model = create_model(( None, None, CHANNELS), N_CLASSES)
metric_list = ['sparse_categorical_accuracy']
optimizer = optimizers.Adam(lr=WARMUP_LEARNING_RATE)
model.compile(optimizer=optimizer,
loss = 'sparse_categorical_crossentropy',
metrics=metric_list)
model.summary() | Petals to the Metal - Flower Classification on TPU |
11,757,999 | acc_rf = print_validation_report(y_val, pred_val_rf )<train_model> | STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE
warmup_history = model.fit(x=get_training_dataset() ,
steps_per_epoch = STEPS_PER_EPOCH,
validation_data = get_validation_dataset() ,
epochs=WARMUP_EPOCHS,
verbose=2 ).history | Petals to the Metal - Flower Classification on TPU |
11,757,999 | clf_svm = SVC(C=5, gamma=0.05, kernel='rbf', random_state=0)
clf_svm.fit(X_train,y_train )<predict_on_test> | LR_START = 0.00000001
LR_MIN = 0.000001
LR_MAX = LEARNING_RATE
LR_RAMPUP_EPOCHS = 3
LR_SUSTAIN_EPOCHS = 0
LR_EXP_DECAY =.8
def lrfn(epoch):
if epoch < LR_RAMPUP_EPOCHS:
lr =(LR_MAX - LR_START)/ LR_RAMPUP_EPOCHS * epoch + LR_START
elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
lr = LR_MAX
else:
lr =(LR_MAX - LR_MIN)* LR_EXP_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS)+ LR_MIN
return lr
rng = [i for i in range(EPOCHS)]
y = [lrfn(x)for x in rng]
sns.set(style='whitegrid')
fig, ax = plt.subplots(figsize=(20, 6))
plt.plot(rng, y)
print("Learning rate shcedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1])) | Petals to the Metal - Flower Classification on TPU |
11,757,999 | pred_val_svm = clf_svm.predict(X_val)
acc_svm = print_validation_report(y_val, pred_val_svm )<define_variables> | for layer in model.layers:
layer.trainable = True
checkpoint = ModelCheckpoint(model_path, monitor='val_loss', mode='min', save_best_only=True)
es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE,
restore_best_weights=True, verbose=1)
lr_callback = LearningRateScheduler(lrfn, verbose=1)
callback_list = [checkpoint, es, lr_callback]
optimizer = optimizers.Adam(lr=LEARNING_RATE)
model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=metric_list)
model.summary() | Petals to the Metal - Flower Classification on TPU |
11,757,999 | batchsize = int(nr_samples/15 )<train_on_grid> | history = model.fit(x=get_training_dataset() ,
steps_per_epoch=STEPS_PER_EPOCH,
validation_data=get_validation_dataset() ,
callbacks=callback_list,
epochs=EPOCHS,
verbose=2 ).history | Petals to the Metal - Flower Classification on TPU |