kernel_id (int64, 24.2k-23.3M) | prompt (string, lengths 8-1.85M) | completion (string, lengths 1-182k) | comp_name (string, lengths 5-57) |
---|---|---|---|
21,648,756 | input_word_ids = layers.Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids")
input_mask = layers.Input(shape=(max_len,), dtype=tf.int32, name="input_mask")
segment_ids = layers.Input(shape=(max_len,), dtype=tf.int32, name="segment_ids")
_, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids])
clf_output = sequence_output[:, 0, :]
out = layers.Dense(1, activation='sigmoid' )(clf_output)
model = models.Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=out)
model.compile(optimizers.Adam(lr=1e-5), loss='binary_crossentropy', metrics=['accuracy'])
model.summary()<train_model> | df_train=pd.read_csv('../input/digit-recognizer/train.csv')
df_test=pd.read_csv('../input/digit-recognizer/test.csv')
print([df_train.shape,df_test.shape])
print(df_train.iloc[265,1:].values.reshape(28,28)[:,10] ) | Digit Recognizer |
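A short hedged aside on the digit CSVs loaded above: each row of train.csv stores the label followed by 784 flattened pixel columns (pixel0 to pixel783, row-major), so the reshape recovers a 28x28 image. A minimal sketch reusing the names already loaded:
# sketch only: column 0 is the digit label, columns 1..784 are the pixels
img_266 = df_train.iloc[265, 1:].values.reshape(28, 28)   # one 28x28 grayscale image, values 0-255
print(img_266[:, 10])                                      # the single pixel column printed above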
21,648,756 | checkpoint = ModelCheckpoint('model.h5', monitor='val_loss', save_best_only=True)
train_history = model.fit(
train_input, train_labels,
validation_split=0.2,
epochs=3,
callbacks=[checkpoint],
batch_size=16
)<predict_on_test> | images=['%s%s'%('pixel',pixel_no)for pixel_no in range(0,784)]
x_train=df_train[images].values/255.
x_train=x_train.reshape(-1,28,28,1)
y_train=df_train['label'].values
x_test_out=df_test[images].values/255.
x_test_out=x_test_out.reshape(-1,28,28,1 ) | Digit Recognizer |
21,648,756 | model.load_weights('model.h5')
test_pred = model.predict(test_input )<save_to_csv> | num_classes=10; img_size,img_size2=28,96
N=df_train.shape[0]; n=int (.1*N)
shuffle_ids=np.arange(N)
np.random.RandomState(123 ).shuffle(shuffle_ids)
x_train=x_train[shuffle_ids]; y_train=y_train[shuffle_ids]
x_test,x_valid,x_train=\
x_train[:n],x_train[n:2*n],x_train[2*n:]
y_test,y_valid,y_train=\
y_train[:n],y_train[n:2*n],y_train[2*n:]
df=pd.DataFrame(
[[x_train.shape,x_valid.shape,x_test.shape],
[x_train.dtype,x_valid.dtype,x_test.dtype],
[y_train.shape,y_valid.shape,y_test.shape],
[y_train.dtype,y_valid.dtype,y_test.dtype]],
columns=['train','valid','test'],
index=['image shape','image type','label shape','label type'])
df | Digit Recognizer |
21,648,756 | submission['target'] = test_pred.round().astype(int)
submission.to_csv('submission.csv', index=False )<import_modules> | def model() :
model=tf.keras.Sequential()
model.add(tkl.Input(shape=(28,28,1)))
model.add(tkl.BatchNormalization())
model.add(tkl.Conv2D(28,(5,5),padding='same'))
model.add(tkl.LeakyReLU(alpha=.02))
model.add(tkl.MaxPooling2D(pool_size=(2,2)))
model.add(tkl.Dropout (.2))
model.add(tkl.Conv2D(96,(5,5),padding='same'))
model.add(tkl.LeakyReLU(alpha=.02))
model.add(tkl.MaxPooling2D(strides=(2,2)))
model.add(tkl.Dropout (.2))
model.add(tkl.Conv2D(128,(5,5)))
model.add(tkl.LeakyReLU(alpha=.02))
model.add(tkl.MaxPooling2D(strides=(2,2)))
model.add(tkl.Dropout (.2))
model.add(tkl.GlobalMaxPooling2D())
model.add(tkl.Dense(1024))
model.add(tkl.LeakyReLU(alpha=.02))
model.add(tkl.Dropout (.5))
model.add(tkl.Dense(num_classes,activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy',
optimizer='nadam',
metrics=['sparse_categorical_accuracy'])
return model | Digit Recognizer |
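A small hedged note on the compile call above: sparse_categorical_crossentropy consumes integer class labels directly, so y_train can stay as the raw digits 0-9; one-hot labels would instead pair with plain categorical_crossentropy. Sketch:
# sketch only: the two equivalent label formats for this 10-class model
# sparse:  y_train = np.array([3, 0, 7, ...])                      # used as-is with the loss above
# one-hot: y_onehot = tf.keras.utils.to_categorical(y_train, 10)   # would need loss='categorical_crossentropy'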
21,648,756 | import pandas as pd<import_modules> | cnn_model=model()
checkpointer=tkc.ModelCheckpoint(
filepath='/tmp/checkpoint',verbose=2,save_weights_only=True,
monitor='val_sparse_categorical_accuracy',mode='max',save_best_only=True)
lr_reduction=tkc.ReduceLROnPlateau(
monitor='val_loss',patience=15,verbose=2,factor=.8)
early_stopping=tkc.EarlyStopping(
monitor='val_loss',patience=75,verbose=2)
history=cnn_model.fit(
x_train,y_train,epochs=120,batch_size=128,
verbose=2,validation_data=(x_valid,y_valid),
callbacks=[checkpointer,lr_reduction,early_stopping] ) | Digit Recognizer |
21,648,756 | import pandas as pd<load_from_csv> | cnn_model.load_weights('/tmp/checkpoint')
scores=cnn_model.evaluate(x_test,y_test,verbose=0 ) | Digit Recognizer |
train_df = pd.read_csv("../input/nlp-getting-started/train.csv")
test_df = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
gt_df = pd.read_csv("../input/disasters-on-social-media/socialmedia-disaster-tweets-DFE.csv" )<data_type_conversions> | steps,epochs=int(len(x_train)/128),10
datagen=ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
zoom_range=.2,shear_range=.2,rotation_range=30,
height_shift_range=.2,width_shift_range=.2)
datagen.fit(x_train)
history=cnn_model.\
fit(datagen.flow(x_train,y_train,batch_size=128),
steps_per_epoch=steps,epochs=epochs,verbose=2,
validation_data=datagen.flow(x_valid,y_valid,batch_size=16),
callbacks=[checkpointer,lr_reduction,early_stopping] ) | Digit Recognizer |
21,648,756 | gt_df = gt_df[['choose_one', 'text']]
gt_df['target'] =(gt_df['choose_one']=='Relevant' ).astype(int)
gt_df['id'] = gt_df.index
gt_df<merge> | cnn_model.load_weights('/tmp/checkpoint')
scores=cnn_model.evaluate(x_test,y_test,verbose=0 ) | Digit Recognizer |
21,648,756 | merged_df = pd.merge(test_df, gt_df, on='id')
merged_df<save_to_csv> | predict_y_test_out=cnn_model.predict(x_test_out)
predict_y_test_out=predict_y_test_out.argmax(axis=-1 ) | Digit Recognizer |
21,648,756 | subm_df.to_csv('submission.csv', index=False )<install_modules> | submission=pd.DataFrame(
{'ImageId':range(1,len(predict_y_test_out)+1),
'Label':predict_y_test_out})
print(submission[0:15].T)
submission.to_csv('kaggle_digits_cnn.csv',index=False ) | Digit Recognizer |
21,648,756 | ! pip install tf-models-official==2.4.0 -q
! pip install tensorflow-gpu==2.4.1 -q
! pip install tensorflow-text==2.4.1 -q
! python -m spacy download en_core_web_sm -q
! pip install dataprep | grep -v 'already satisfied'<set_options> | os.environ['TFHUB_MODEL_LOAD_FORMAT']='COMPRESSED'
model=th.load('https://tfhub.dev/captain-pool/esrgan-tf2/1')
func=model.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
func.inputs[0].set_shape([1,img_size2//4,img_size2//4,3])
converter=tf.lite.TFLiteConverter.from_concrete_functions([func])
converter.optimizations=[tf.lite.Optimize.DEFAULT]
tflite_model=converter.convert()
with tf.io.gfile.GFile('ESRGAN.tflite','wb')as f:
f.write(tflite_model)
esrgan_model_path='./ESRGAN.tflite' | Digit Recognizer |
21,648,756 | np.set_printoptions(precision=4)
warnings.filterwarnings('ignore' )<import_modules> | N3=10000; n3=int (.1*N3)
x_train3=x_train[:N3]; y_train3=y_train[:N3]
x_valid3=x_valid[:n3]; y_valid3=y_valid[:n3]
x_test3=x_test[:n3]; y_test3=y_test[:n3]
x_train3=tf.repeat(x_train3,3,axis=3 ).numpy()
x_valid3=tf.repeat(x_valid3,3,axis=3 ).numpy()
x_test3=tf.repeat(x_test3,3,axis=3 ).numpy()
x_test3.shape,x_test3.mean() | Digit Recognizer |
21,648,756 | tf.__version__<define_variables> | def bicubic_resize(imgs,img_size=img_size2):
bicubic=tf.image.resize(
imgs*255,[img_size,img_size],tf.image.ResizeMethod.BICUBIC)
bicubic_contrast=tf.image.adjust_contrast(bicubic,.8)
bicubic_contrast=tf.cast(bicubic_contrast,tf.uint8)
return bicubic_contrast.numpy() /255 | Digit Recognizer |
21,648,756 | random.seed(319)
np.random.seed(319)
tf.random.set_seed(319 )<load_from_csv> | x_train3=bicubic_resize(x_train3)
x_valid3=bicubic_resize(x_valid3)
x_test3=bicubic_resize(x_test3)
x_test3.shape,x_test3.mean() | Digit Recognizer |
21,648,756 | train_full = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_full = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
print('Training Set Shape = {}'.format(train_full.shape))
print('Training Set Memory Usage = {:.2f}MB'.format(train_full.memory_usage().sum() /2**20))
print('Test Set Shape = {}'.format(test_full.shape))
print('Test Set Memory Usage = {:.2f}MB'.format(test_full.memory_usage().sum() /2**20))<load_from_csv> | def esrgantf2_superresolution(
img,super_size=img_size2,model_path=esrgan_model_path):
if img.mean() <1.: img=img*255.
lr=tf.image.resize(img,[super_size//4,super_size//4])
lr=tf.expand_dims(lr.numpy() [:,:,:3],axis=0)
lr=tf.cast(lr,tf.float32)
interpreter=tf.lite.Interpreter(model_path=model_path)
interpreter.allocate_tensors()
input_details=interpreter.get_input_details()
output_details=interpreter.get_output_details()
interpreter.set_tensor(input_details[0]['index'],lr)
interpreter.invoke()
output_data=interpreter.get_tensor(output_details[0]['index'])
sr=tf.squeeze(output_data,axis=0)
sr=tf.clip_by_value(sr,0,255)
sr=tf.round(sr); sr=tf.cast(sr,tf.uint8)
lr=tf.cast(tf.squeeze(lr,axis=0),tf.uint8)
return lr,sr | Digit Recognizer |
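A brief hedged note on the helper above: the TF Hub ESRGAN model used here upscales by a factor of 4, which is why the input is first shrunk to super_size//4 (24x24 when img_size2 is 96) and the returned sr image comes back at the full super_size. A rough usage sketch, assuming the ESRGAN.tflite file written earlier:
lr, sr = esrgantf2_superresolution(tf.repeat(x_train[0], 3, axis=2).numpy())
print(lr.shape, sr.shape)   # expected roughly (24, 24, 3) and (96, 96, 3)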
21,648,756 | df_train = pd.read_csv("/kaggle/input/disastertweet-prepared2/train_prepared.csv")
df_test = pd.read_csv("/kaggle/input/disastertweet-prepared2/test_prepared.csv" )<drop_column> | for i in range(5):
lr,sr=esrgantf2_superresolution(
tf.repeat(x_train[i],3,axis=2 ).numpy())
low2superbicubic_imgs(
tf.repeat(x_train[i],3,axis=2 ).numpy() ,lr,sr ) | Digit Recognizer |
21,648,756 | train_full = clean_text(train_full,'keyword')
test_full = clean_text(test_full, 'keyword' )<feature_engineering> | def premodel(pix,den,mh,lbl,activ,loss):
model=tf.keras.Sequential([
tkl.Input(( pix,pix,3),name='input'),
th.KerasLayer(mh,trainable=True),
tkl.Flatten() ,
tkl.Dense(den,activation='relu'),
tkl.Dropout(rate=.5),
tkl.Dense(lbl,activation=activ)])
model.compile(optimizer='nadam',loss=loss,
metrics=['sparse_categorical_accuracy'])
return model
def cb(fw):
early_stopping=tkc.EarlyStopping(
monitor='val_loss',patience=10,verbose=2)
checkpointer=tkc.ModelCheckpoint(
filepath=fw,verbose=2,save_weights_only=True,
monitor='val_sparse_categorical_accuracy',
mode='max',save_best_only=True)
lr_reduction=tkc.ReduceLROnPlateau(
monitor='val_loss',verbose=2,patience=5,factor=.8)
return [checkpointer,early_stopping,lr_reduction] | Digit Recognizer |
21,648,756 | df_train['keyword'] = train_full['keyword']
df_test['keyword'] = test_full['keyword']<load_pretrained> | fw='/tmp/checkpoint'
handle_base='mobilenet_v2_100_%d'%img_size2
mhandle='https://tfhub.dev/google/imagenet/{}/classification/4'\
.format(handle_base)
hub_model=premodel(img_size2,1024,mhandle,num_classes,
'softmax','sparse_categorical_crossentropy')
history=hub_model.fit(x=x_train3,y=y_train3,batch_size=128,
epochs=20,callbacks=cb(fw),verbose=0,
validation_data=(x_valid3,y_valid3)) | Digit Recognizer |
21,648,756 | nlp_spacy = spacy.load('en_core_web_sm')
sentence_enc = hub.load('https://tfhub.dev/google/universal-sentence-encoder/4' )<categorify> | hub_model.load_weights('/tmp/checkpoint')
hub_model.evaluate(x_test3,y_test3,verbose=0 ) | Digit Recognizer |
21,052,731 | def extract_keywords(text):
potential_keywords = []
TOP_KEYWORD = -1
pos_tag = ['ADJ', 'NOUN', 'PROPN']
doc = nlp_spacy(text)
for i in doc:
if i.pos_ in pos_tag:
potential_keywords.append(i.text)
document_embed = sentence_enc([text])
potential_embed = sentence_enc(potential_keywords)
vector_distances = cosine_similarity(document_embed, potential_embed)
keyword = [potential_keywords[i] for i in vector_distances.argsort() [0][TOP_KEYWORD:]]
return keyword
def keyword_filler(keyword, text):
if pd.isnull(keyword):
try:
keyword = extract_keywords(text)[0]
except:
keyword = ''
return keyword<data_type_conversions> | %matplotlib inline
sns.set(style='white', context='notebook', palette='deep' ) | Digit Recognizer |
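A hedged reading of extract_keywords above: with TOP_KEYWORD = -1, the argsort keeps only the noun/adjective/proper-noun candidate whose Universal Sentence Encoder embedding is closest to the whole tweet, so the function returns a one-element list and keyword_filler takes element [0]. Sketch, assuming nlp_spacy and sentence_enc are loaded as in the earlier cell:
kw = extract_keywords("Forest fire near La Ronge Sask. Canada")   # hypothetical input tweet
print(kw)   # a one-element list: the candidate token most similar to the full text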
21,052,731 | df_train.keyword = pd.DataFrame(list(map(keyword_filler, df_train.keyword, df_train.text)) ).astype(str)
df_test.keyword = pd.DataFrame(list(map(keyword_filler, df_test.keyword, df_test.text)) ).astype(str)
print('Null Training Keywords => ', df_train['keyword'].isnull().any())
print('Null Test Keywords => ', df_test['keyword'].isnull().any() )<split> | df_train = pd.read_csv("../input/digit-recognizer/train.csv")
df_test = pd.read_csv("../input/digit-recognizer/test.csv" ) | Digit Recognizer |
21,052,731 | X_train, X_val, y_train, y_val = train_test_split(df_train[['text','keyword']],
df_train.target,
test_size=0.2,
random_state=42)
X_train.shape, X_val.shape<create_dataframe> | X_train = np.array(df_train.drop(['label'], axis=1), dtype="float32")/ 255.0
X_train = X_train.reshape(-1, 28, 28, 1)
Y_train = to_categorical(df_train['label'], num_classes = 10)
X_test = np.array(df_test, dtype="float32")/ 255.0
X_test = X_test.reshape(-1, 28, 28, 1 ) | Digit Recognizer |
21,052,731 | train_ds = tf.data.Dataset.from_tensor_slices(( dict(X_train), y_train))
val_ds = tf.data.Dataset.from_tensor_slices(( dict(X_val), y_val))
test_ds = tf.data.Dataset.from_tensor_slices(dict(df_test[['text','keyword']]))<categorify> | X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.05, random_state=34 ) | Digit Recognizer |
21,052,731 | AUTOTUNE = tf.data.experimental.AUTOTUNE
BUFFER_SIZE = 1000
BATCH_SIZE = 32
RANDOM_SEED = 319
def configure_dataset(dataset, shuffle=False, test=False):
if shuffle:
dataset = dataset.cache() \
.shuffle(BUFFER_SIZE, seed=RANDOM_SEED, reshuffle_each_iteration=True)\
.batch(BATCH_SIZE, drop_remainder=True)\
.prefetch(AUTOTUNE)
elif test:
dataset = dataset.cache() \
.batch(BATCH_SIZE, drop_remainder=False)\
.prefetch(AUTOTUNE)
else:
dataset = dataset.cache() \
.batch(BATCH_SIZE, drop_remainder=True)\
.prefetch(AUTOTUNE)
return dataset<create_dataframe> | model = Sequential()
model.add(Conv2D(filters = 64, kernel_size =(5,5),padding = 'Same', activation ='relu', input_shape =(28,28,1)))
model.add(BatchNormalization())
model.add(Conv2D(filters = 64, kernel_size =(5,5),padding = 'Same', activation ='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu'))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size =(3,3), padding = 'Same', activation ='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Dense(10, activation = "softmax"))
| Digit Recognizer |
21,052,731 | a3 = configure_dataset(train_ds, shuffle=True)
dict3 = []
for elem in a3:
dict3.append(elem[0]['text'][0])
dict3[:10]<prepare_x_and_y> | datagen = ImageDataGenerator(featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
datagen.fit(X_train ) | Digit Recognizer |
21,052,731 | train_ds = configure_dataset(train_ds, shuffle=True)
val_ds = configure_dataset(val_ds)
test_ds = configure_dataset(test_ds, test=True )<train_model> | optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(optimizer=optimizer,
loss="categorical_crossentropy",
metrics=["accuracy"])
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc',
patience=3,
verbose=1,
factor=0.5,
min_lr=0.00001 ) | Digit Recognizer |
21,052,731 | del X_train, X_val, y_train, y_val, df_train, df_test, train_full, test_full<define_variables> | epochs=50
batch_size=128
history = model.fit(datagen.flow(X_train,Y_train, batch_size=batch_size),
epochs = epochs,
validation_data =(X_val,Y_val),
verbose = 2,
steps_per_epoch=X_train.shape[0] // batch_size,
callbacks=[learning_rate_reduction] ) | Digit Recognizer |
21,052,731 | bert_encoder_path = "https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/4"
bert_preprocessor_path = "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
keyword_embedding_path = "https://tfhub.dev/google/nnlm-en-dim128-with-normalization/2"<load_pretrained> | Y_pred = model.predict(X_val)
Y_pred_classes = np.argmax(Y_pred,axis = 1)
Y_true = np.argmax(Y_val,axis = 1 ) | Digit Recognizer |
21,052,731 | bert_encoder = hub.KerasLayer(bert_encoder_path, trainable=True, name="BERT_Encoder")
bert_preprocessor = hub.KerasLayer(bert_preprocessor_path, name="BERT_Preprocessor")
nnlm_embed = hub.KerasLayer(keyword_embedding_path, name="NNLM_Embedding" )<categorify> | errors =(Y_pred_classes - Y_true != 0)
Y_pred_classes_errors = Y_pred_classes[errors]
Y_pred_errors = Y_pred[errors]
Y_true_errors = Y_true[errors]
X_val_errors = X_val[errors] | Digit Recognizer |
21,052,731 | kernel_initializer = tf.keras.initializers.GlorotNormal(seed=319)
def create_model() :
text_input = Input(shape=() , dtype=tf.string, name="text")
encoder_inputs = bert_preprocessor(text_input)
encoder_outputs = bert_encoder(encoder_inputs)
pooled_output = encoder_outputs["pooled_output"]
bert_branch = Dropout(0.1,
seed=319,
name="BERT_Dropout" )(pooled_output)
keyword_input = Input(shape=() , dtype=tf.string, name='keyword')
keyword_embed = nnlm_embed(keyword_input)
keyword_flat = Flatten(name="Keyword_Flatten" )(keyword_embed)
keyword_dense1 = Dense(128,
activation='relu',
kernel_initializer=kernel_initializer,
kernel_regularizer=regularizers.l2(1e-4),
name="Keyword_Dense1"
)(keyword_flat)
keyword_branch1 = Dropout(0.5,
seed=319,
name='Keyword_dropout1'
)(keyword_dense1)
keyword_dense2 = Dense(128,
activation='relu',
kernel_initializer=kernel_initializer,
kernel_regularizer=regularizers.l2(1e-4),
name="Keyword_Dense2"
)(keyword_branch1)
keyword_branch2 = Dropout(0.5,
seed=319,
name='Keyword_dropout2'
)(keyword_dense2)
keyword_dense3 = Dense(128,
activation='relu',
kernel_initializer=kernel_initializer,
kernel_regularizer=regularizers.l2(1e-4),
name="Keyword_Dense3"
)(keyword_branch2)
keyword_branch3 = Dropout(0.5,
seed=319,
name='Keyword_dropout3'
)(keyword_dense3)
merge = concatenate([bert_branch, keyword_branch3], name="Concatenate")
dense = Dense(128,
activation='relu',
kernel_initializer=kernel_initializer,
kernel_regularizer=regularizers.l2(1e-4),
name="Merged_Dense" )(merge)
dropout = Dropout(0.5,
seed=319,
name="Merged_Dropout"
)(dense)
clf = Dense(1,
activation="sigmoid",
kernel_initializer=kernel_initializer,
name="Classifier"
)(dropout)
return Model([text_input, keyword_input],
clf,
name="BERT_Classifier" )<init_hyperparams> | Y_pred_errors_prob = np.max(Y_pred_errors,axis = 1)
true_prob_errors = np.diagonal(np.take(Y_pred_errors, Y_true_errors, axis=1))
delta_pred_true_errors = Y_pred_errors_prob - true_prob_errors
sorted_dela_errors = np.argsort(delta_pred_true_errors)
most_important_errors = sorted_dela_errors[-6:]
display_errors(most_important_errors, X_val_errors, Y_pred_classes_errors, Y_true_errors ) | Digit Recognizer |
21,052,731 | EPOCHS = 3
LEARNING_RATE = 5e-5
STEPS_PER_EPOCH = int(train_ds.unbatch().cardinality().numpy() / BATCH_SIZE)
VAL_STEPS = int(val_ds.unbatch().cardinality().numpy() / BATCH_SIZE)
TRAIN_STEPS = STEPS_PER_EPOCH * EPOCHS
WARMUP_STEPS = int(TRAIN_STEPS * 0.1)
adamw_optimizer = create_optimizer(
init_lr=LEARNING_RATE,
num_train_steps=TRAIN_STEPS,
num_warmup_steps=WARMUP_STEPS,
optimizer_type='adamw'
)<train_model> | results = model.predict(X_test)
results = np.argmax(results, axis = 1)
results = pd.Series(results, name="Label" ) | Digit Recognizer |
21,052,731 | <save_to_csv><EOS> | submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"), results],axis = 1)
submission.to_csv("submission.csv",index=False ) | Digit Recognizer |
12,297,004 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<compute_test_metric> | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.models import Sequential, load_model
from keras.layers import Conv2D, Dense, Flatten, MaxPool2D, Dropout
from keras.optimizers import Adam
from keras.callbacks import LearningRateScheduler, EarlyStopping, ModelCheckpoint
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import RandomizedSearchCV | Digit Recognizer |
submission(bert_classifier, test_ds )<load_from_url> | train = pd.read_csv('../input/digit-recognizer/train.csv')
print(train.head())
test = pd.read_csv('../input/digit-recognizer/test.csv')
print(test.head() ) | Digit Recognizer |
12,297,004 | !wget --quiet https://raw.githubusercontent.com/tensorflow/models/master/official/nlp/bert/tokenization.py
<define_variables> | X_train = train.drop(labels = ['label'], axis = 1)
y_train = train.label
del train | Digit Recognizer |
12,297,004 | SEED = 1002
def seed_everything(seed):
np.random.seed(seed)
tf.random.set_seed(seed)
seed_everything(SEED )<load_from_csv> | X_train = X_train / 255.0
test = test / 255.0 | Digit Recognizer |
12,297,004 | train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
submission = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv")
for i, val in enumerate(train.iloc[:2]["text"].to_list()):
print("Tweet {}: {}".format(i+1, val))<categorify> | y_train = to_categorical(y_train, num_classes = 10 ) | Digit Recognizer |
12,297,004 | def bert_encode(texts, tokenizer, max_len=512):
all_tokens = []
all_masks = []
all_segments = []
for text in texts:
text = tokenizer.tokenize(text)
text = text[:max_len-2]
input_sequence = ["[CLS]"] + text + ["[SEP]"]
pad_len = max_len - len(input_sequence)
tokens = tokenizer.convert_tokens_to_ids(input_sequence)
tokens += [0] * pad_len
pad_masks = [1] * len(input_sequence)+ [0] * pad_len
segment_ids = [0] * max_len
all_tokens.append(tokens)
all_masks.append(pad_masks)
all_segments.append(segment_ids)
return np.array(all_tokens), np.array(all_masks), np.array(all_segments )<choose_model_class> | X_train, X_val, y_train, y_val = train_test_split(X_train, y_train,
test_size = 0.1,
random_state = 10,
stratify = y_train ) | Digit Recognizer |
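For orientation, a hedged sketch of what bert_encode above returns (shapes only, assuming the FullTokenizer built later from the hub layer's vocab):
# tokens, masks, segments = bert_encode(train.text.values, tokenizer, max_len=160)
# each is a numpy array of shape (num_texts, 160):
#   tokens   -> [CLS] + WordPiece ids + [SEP], zero-padded on the right
#   masks    -> 1 over real tokens, 0 over padding
#   segments -> all zeros (single-sentence inputs)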
12,297,004 | def build_model(bert_layer, max_len=512):
input_word_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids")
input_mask = Input(shape=(max_len,), dtype=tf.int32, name="input_mask")
segment_ids = Input(shape=(max_len,), dtype=tf.int32, name="segment_ids")
_, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids])
clf_output = sequence_output[:, 0, :]
out = Dense(1, activation='sigmoid' )(clf_output)
model = Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=out)
model.compile(Adam(learning_rate=1e-5), loss='binary_crossentropy', metrics=['accuracy'])
return model<choose_model_class> | annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x, verbose = 0)
nets = 3
model = [0] * nets
for i in range(nets):
model[i] = Sequential()
model[i].add(Conv2D(filters = 32, kernel_size = 5, padding = 'same',
activation = 'relu',
input_shape =(28, 28, 1)))
model[i].add(MaxPool2D())
if i > 0:
model[i].add(Conv2D(filters = 48, kernel_size = 5, padding = 'same',
activation = 'relu'))
model[i].add(MaxPool2D())
if i > 1:
model[i].add(Conv2D(filters = 64, kernel_size = 5, padding = 'same',
activation = 'relu'))
model[i].add(MaxPool2D(padding = 'same'))
model[i].add(Flatten())
model[i].add(Dense(256, activation = 'relu'))
model[i].add(Dense(10, activation = 'softmax'))
model[i].compile(optimizer = 'adam', loss = 'categorical_crossentropy',
metrics = ['accuracy'])
history = [0] * nets
names = ['CNN-1', 'CNN-2', 'CNN-3']
epochs = 20
for i in range(nets):
history[i] = model[i].fit(X_train, y_train, batch_size = 100,
epochs = epochs,
validation_data =(X_val, y_val),
callbacks = [annealer], verbose = 0)
print('{0}: Epochs = {1: d}, Train accuracy = {2:.5f}, Validation accuracy = {3:.5f}'.format(names[i],
epochs, max(history[i].history['accuracy']),
max(history[i].history['val_accuracy'])))
| Digit Recognizer |
12,297,004 | module_url = "https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1"
bert_layer = hub.KerasLayer(module_url, trainable=True )<categorify> | nets = 7
model = [0] * nets
for i in range(nets):
model[i] = Sequential()
model[i].add(Conv2D(i*8+8, kernel_size = 5,
padding = 'same',
activation = 'relu',
input_shape =(28, 28, 1)))
model[i].add(MaxPool2D())
model[i].add(Conv2D(i*16+16, kernel_size = 5,
padding = 'same',
activation = 'relu'))
model[i].add(MaxPool2D())
model[i].add(Flatten())
model[i].add(Dense(256, activation = 'relu'))
model[i].add(Dense(10, activation = 'softmax'))
model[i].compile(optimizer = 'adam',
loss = 'categorical_crossentropy',
metrics = ['accuracy'])
history = [0] * nets
names = []
for i in range(nets):
names.append(i*8+8)
for i in range(nets):
history[i] = model[i].fit(X_train, y_train,
batch_size = 100,
epochs = epochs,
validation_data =(X_val, y_val),
callbacks = [annealer], verbose = 0)
print('CNN {0: d} maps: Epochs = {1: d}, Train accuracy = {2:.5f}, Validation accuracy = {3:.5f}'.format(names[i],
epochs, max(history[i].history['accuracy']), max(history[i].history['val_accuracy'])) ) | Digit Recognizer |
12,297,004 | vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case)
train_input = bert_encode(train.text.values, tokenizer, max_len=160)
test_input = bert_encode(test.text.values, tokenizer, max_len=160)
train_labels = train.target.values<train_model> | nets = 8
model = [0] * nets
for i in range(nets):
model[i] = Sequential()
model[i].add(Conv2D(48, kernel_size = 5,
padding = 'same',
activation = 'relu',
input_shape =(28, 28, 1)))
model[i].add(MaxPool2D())
model[i].add(Conv2D(96, kernel_size = 5,
activation = 'relu'))
model[i].add(MaxPool2D())
model[i].add(Flatten())
if i>0:
model[i].add(Dense(2**(i+4), activation = 'relu'))
model[i].add(Dense(10, activation = 'softmax'))
model[i].compile(optimizer = 'adam',
loss = 'categorical_crossentropy',
metrics = ['accuracy'])
history = [0] * nets
names = [0]
for i in range(nets - 1):
names.append(2**(i+5))
for i in range(nets):
history[i] = model[i].fit(X_train, y_train,
batch_size = 100,
epochs = epochs,
validation_data =(X_val, y_val),
callbacks = [annealer], verbose = 0)
print('CNN {0: d}N: Epochs {1: d}, Training accuracy {2:.5f}, Validation accuracy {3:.5f}'.format(names[i],
epochs, max(history[i].history['accuracy']), max(history[i].history['val_accuracy'])) ) | Digit Recognizer |
12,297,004 | checkpoint = ModelCheckpoint('model.h5', monitor='val_accuracy', save_best_only=True)
train_history = model.fit(
train_input, train_labels,
validation_split=0.1,
epochs=3,
callbacks=[checkpoint],
batch_size=16
)<save_to_csv> | nets = 8
model = [0] * nets
names = []
for i, n in enumerate(range(8)) :
names.append(f'{n*10}%')
for i in range(nets):
model[i] = Sequential()
model[i].add(Conv2D(48, kernel_size = 5,
padding = 'same',
activation = 'relu',
input_shape =(28, 28, 1)))
model[i].add(MaxPool2D())
model[i].add(Dropout(i*0.1))
model[i].add(Conv2D(96, kernel_size = 5,
activation = 'relu'))
model[i].add(MaxPool2D())
model[i].add(Dropout(i*0.1))
model[i].add(Flatten())
model[i].add(Dense(256, activation = 'relu'))
model[i].add(Dropout(i*0.1))
model[i].add(Dense(10, activation = 'sigmoid'))
model[i].compile(optimizer = 'adam',
loss = 'categorical_crossentropy',
metrics = ['accuracy'])
history = [0] * nets
for i in range(nets):
history[i] = model[i].fit(X_train, y_train,
batch_size = 100,
epochs = epochs,
validation_data =(X_val, y_val),
callbacks = [annealer],
verbose = 0)
print('CNN Dropouts = {0}: Epochs = {1: d}, Training accuracy = {2:.5f}, Validation accuracy = {3:.5f}'.format(names[i],
epochs, max(history[i].history['accuracy']), max(history[i].history['val_accuracy'])) ) | Digit Recognizer |
12,297,004 | test_pred = model.predict(test_input)
submission['target'] = test_pred.round().astype(int)
submission.to_csv('submission.csv', index=False )<load_from_csv> | def create_model(optimizer = 'adam', activation = 'relu'):
model = Sequential()
model.add(Conv2D(48, kernel_size = 5,
padding = 'same',
activation = activation,
input_shape =(28, 28, 1)))
model.add(MaxPool2D())
model.add(Dropout(0.4))
model.add(Conv2D(96, kernel_size = 5,
activation = activation))
model.add(MaxPool2D())
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(256, activation = activation))
model.add(Dropout(0.4))
model.add(Dense(10, activation = 'sigmoid'))
model.compile(optimizer = optimizer,
loss = 'categorical_crossentropy',
metrics = ['accuracy'])
return model
model = KerasClassifier(build_fn = create_model)
params = dict(optimizer = ['sgd', 'adam'], activation = ['relu', 'tanh'],
batch_size = [50, 100, 150, 200], epochs = [10, 20, 30, 50])
random_search = RandomizedSearchCV(model, param_distributions = params, cv = 3,
n_iter = 10)
random_search_results = random_search.fit(X_train, y_train,
validation_data =(X_val, y_val)) | Digit Recognizer |
items = pd.read_csv('../input/items.csv')
shops = pd.read_csv('../input/shops.csv')
cats = pd.read_csv('../input/item_categories.csv')
train = pd.read_csv('../input/sales_train.csv')
test = pd.read_csv('../input/test.csv' ).set_index('ID' )<filter> | print('Best: {0: 5f} using {1}'.format(random_search_results.best_score_,
random_search_results.best_params_)) | Digit Recognizer |
12,297,004 | train = train[train.item_price<100000]
train = train[train.item_cnt_day<1001]<feature_engineering> | early_stopping = EarlyStopping(monitor = 'val_loss', patience = 5,
verbose = 0,
restore_best_weights = True)
annealer = LearningRateScheduler(lambda x: 1e-3 * 0.95 ** x, verbose = 0)
model_checkpoint = ModelCheckpoint('Digit_Recognizer.hdf5', monitor='val_accuracy',
verbose=1, save_best_only=True,
mode='max')
cnn = create_model(optimizer = 'adam', activation = 'relu')
results = cnn.fit(X_train, y_train, validation_data =(X_val, y_val),
batch_size = 50, epochs = 30,
callbacks = [early_stopping, model_checkpoint, annealer] ) | Digit Recognizer |
12,297,004 | <feature_engineering><EOS> | model = load_model('Digit_Recognizer.hdf5')
model.evaluate(X_val, y_val)
pred = model.predict(test)
pred = np.argmax(pred, axis=1)
pred = pd.Series(pred,name="Label")
result = pd.concat([pd.Series(range(1,28001),name = "ImageId"),pred],axis = 1)
result.to_csv("./digit_recognizer.csv",index=False ) | Digit Recognizer |
12,275,253 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<categorify> | %run ../input/python-recipes/dhtml.py
%run ../input/python-recipes/load_kaggle_digits.py
%run ../input/python-recipes/classify_kaggle_digits.py
dhtml('Data Processing' ) | Digit Recognizer |
12,275,253 | shops.loc[shops.shop_name == 'Сергиев Посад ТЦ "7Я"', 'shop_name'] = 'СергиевПосад ТЦ "7Я"'
shops['city'] = shops['shop_name'].str.split(' ' ).map(lambda x: x[0])
shops.loc[shops.city == '!Якутск', 'city'] = 'Якутск'
shops['city_code'] = LabelEncoder().fit_transform(shops['city'])
shops = shops[['shop_id','city_code']]
cats['split'] = cats['item_category_name'].str.split('-')
cats['type'] = cats['split'].map(lambda x: x[0].strip())
cats['type_code'] = LabelEncoder().fit_transform(cats['type'])
cats['subtype'] = cats['split'].map(lambda x: x[1].strip() if len(x)> 1 else x[0].strip())
cats['subtype_code'] = LabelEncoder().fit_transform(cats['subtype'])
cats = cats[['item_category_id','type_code', 'subtype_code']]
items.drop(['item_name'], axis=1, inplace=True )<concatenate> | k=.75; cmap='Pastel1_r'
x_train,y_train,x_valid,y_valid,x_test,y_test,\
test_images,num_classes=\
load_kaggle_digits(k,cmap ) | Digit Recognizer |
12,275,253 | len(list(set(test.item_id)- set(test.item_id ).intersection(set(train.item_id)))) , len(list(set(test.item_id))), len(test )<data_type_conversions> | num_test=100
model_evaluation(cnn_model,x_test,y_test,
weights,color,num_test ) | Digit Recognizer |
12,275,253 | <feature_engineering><EOS> | cnn_model.load_weights(weights)
predict_test_labels=\
cnn_model.predict_classes(test_images)
submission=pd.DataFrame(
{'ImageId':range(1,len(predict_test_labels)+1),
'Label':predict_test_labels})
submission.to_csv('kaggle_digits.csv',index=False)
fig=pl.figure(figsize=(10,6))
for i in range(15):
ax=fig.add_subplot(3,5,i+1,xticks=[],yticks=[])
ax.imshow(test_images[i].reshape(28,28),
cmap=pl.get_cmap('Pastel1'))
ax.set_title('%d'%submission['Label'][i],
fontsize=10)
pl.show() | Digit Recognizer |
12,120,426 | <SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<merge> | random_seed = 2
%matplotlib inline
np.random.seed(2)
sns.set(style='white', context='notebook', palette='deep' ) | Digit Recognizer |
12,120,426 | ts = time.time()
group = train.groupby(['date_block_num','shop_id','item_id'] ).agg({'item_cnt_day': ['sum']})
group.columns = ['item_cnt_month']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=cols, how='left')
matrix['item_cnt_month'] =(matrix['item_cnt_month']
.fillna(0)
.clip(0,20)
.astype(np.float16))
time.time() - ts<data_type_conversions> | X = pd.read_csv(".. /input/train.csv")
test = pd.read_csv(".. /input/test.csv")
Y = X["label"]
X.drop("label",axis = 1, inplace = True)
| Digit Recognizer |
12,120,426 | test['date_block_num'] = 34
test['date_block_num'] = test['date_block_num'].astype(np.int8)
test['shop_id'] = test['shop_id'].astype(np.int8)
test['item_id'] = test['item_id'].astype(np.int16 )<concatenate> | np.sum(X.isnull().any() ) | Digit Recognizer |
12,120,426 | ts = time.time()
matrix = pd.concat([matrix, test], ignore_index=True, sort=False, keys=cols)
matrix.fillna(0, inplace=True)
time.time() - ts<data_type_conversions> | sum(test.isnull().any() ) | Digit Recognizer |
12,120,426 | ts = time.time()
matrix = pd.merge(matrix, shops, on=['shop_id'], how='left')
matrix = pd.merge(matrix, items, on=['item_id'], how='left')
matrix = pd.merge(matrix, cats, on=['item_category_id'], how='left')
matrix['city_code'] = matrix['city_code'].astype(np.int8)
matrix['item_category_id'] = matrix['item_category_id'].astype(np.int8)
matrix['type_code'] = matrix['type_code'].astype(np.int8)
matrix['subtype_code'] = matrix['subtype_code'].astype(np.int8)
time.time() - ts<merge> | def get_model(optim, loss):
model = Sequential()
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu', input_shape =(28,28,1)))
model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same',
activation ='relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation = "softmax"))
model.compile(optimizer= optim, loss=loss, metrics=['accuracy'])
return model
def normalize(X, Y = None):
X = X / 255.0
X = X.values.reshape(-1,28,28,1)
if Y is not None:
Y = Y.values.reshape(-1,1)
return X,Y
def get_samples(X, Y, encoder, val_size = 0.9):
X_train, X_val, y_train, y_val = train_test_split(X, Y, test_size = val_size, random_state=42, stratify= Y)
X_train = np.vstack([X_train,X_train,X_train])
y_train = np.concatenate([y_train, y_train, y_train])
idx = np.arange(0, X_train.shape[0])
np.random.shuffle(idx)
X_train = X_train[idx,:]
y_train = y_train[idx]
X_train = X_train.reshape(-1,28,28,1)
X_val = X_val.reshape(-1,28,28,1)
y_train = encoder.transform(y_train ).todense()
y_val = encoder.transform(y_val ).todense()
return X_train,X_val,y_train,y_val | Digit Recognizer |
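A short hedged note on get_samples above: with the default val_size = 0.9 the stratified split keeps only 10% of the rows for training, and those rows are then stacked three times and shuffled before one-hot encoding. Roughly, assuming the 42,000 labelled Digit Recognizer rows:
# train_test_split(test_size=0.9) -> ~4,200 training rows, ~37,800 validation rows
# np.vstack([X_train]*3)          -> ~12,600 training rows fed to the augmentation generator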
12,120,426 | def lag_feature(df, lags, col):
tmp = df[['date_block_num','shop_id','item_id',col]]
for i in lags:
shifted = tmp.copy()
shifted.columns = ['date_block_num','shop_id','item_id', col+'_lag_'+str(i)]
shifted['date_block_num'] += i
df = pd.merge(df, shifted, on=['date_block_num','shop_id','item_id'], how='left')
return df<merge> | X_train,Y_train = normalize(X,Y)
X_test,_ = normalize(test)
encoder = OneHotEncoder()
encoder.fit(Y_train ) | Digit Recognizer |
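A hedged sketch of what lag_feature above produces for a single lag, to make the shift direction explicit:
# lag_feature(matrix, [1], 'item_cnt_month') adds 'item_cnt_month_lag_1':
#   shifted['date_block_num'] += 1 means the value recorded in month t-1 is merged
#   onto the row for month t (per shop_id/item_id); pairs with no matching earlier
#   row get NaN, which the later fill_na step replaces with 0.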
12,120,426 | ts = time.time()
group = matrix.groupby(['date_block_num'] ).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_avg_item_cnt' ]
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num'], how='left')
matrix['date_avg_item_cnt'] = matrix['date_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], 'date_avg_item_cnt')
matrix.drop(['date_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts<merge> | data_generator = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=10,
zoom_range = 0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=False,
vertical_flip=False)
| Digit Recognizer |
12,120,426 | ts = time.time()
group = matrix.groupby(['date_block_num', 'item_id'] ).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_item_avg_item_cnt' ]
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num','item_id'], how='left')
matrix['date_item_avg_item_cnt'] = matrix['date_item_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1,2,3,6,12], 'date_item_avg_item_cnt')
matrix.drop(['date_item_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts<merge> | EPOCHS = 50
BATCH_SIZE = 20
ENSEMBLES = 7
results = np.zeros(( test.shape[0],10))
histories = []
models = []
callback_list = [
ReduceLROnPlateau(monitor='val_loss', factor=0.25, min_lr=0.00001, patience=2, verbose=1),
EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=3, verbose=1)
]
optimizer = Adam(learning_rate=0.001)
loss_fn = CategoricalCrossentropy() | Digit Recognizer |
12,120,426 | ts = time.time()
group = matrix.groupby(['date_block_num', 'shop_id'] ).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_shop_avg_item_cnt' ]
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num','shop_id'], how='left')
matrix['date_shop_avg_item_cnt'] = matrix['date_shop_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1,2,3,6,12], 'date_shop_avg_item_cnt')
matrix.drop(['date_shop_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts<merge> | for i in range(ENSEMBLES):
X_train_tmp, X_val, y_train_tmp, y_val = get_samples(X_train, Y_train, encoder)
data_generator.fit(X_train_tmp)
models.append(get_model(optimizer, loss_fn))
history = models[i].fit_generator(data_generator.flow(X_train_tmp, y_train_tmp, batch_size=BATCH_SIZE),
epochs=EPOCHS,
callbacks=[callback_list],
validation_data=(X_val, y_val),
steps_per_epoch=X_train_tmp.shape[0] // BATCH_SIZE,
use_multiprocessing=True,
verbose = 2)
histories.append(history ) | Digit Recognizer |
12,120,426 | ts = time.time()
group = matrix.groupby(['date_block_num', 'item_category_id'] ).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_cat_avg_item_cnt' ]
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num','item_category_id'], how='left')
matrix['date_cat_avg_item_cnt'] = matrix['date_cat_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], 'date_cat_avg_item_cnt')
matrix.drop(['date_cat_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts<merge> | results = np.zeros(( X_test.shape[0],10))
for i in range(ENSEMBLES):
results = results + models[i].predict(X_test)
results = np.argmax(results, axis = 1 ) | Digit Recognizer |
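A quick hedged note on the ensembling above: summing the seven softmax outputs before argmax is equivalent to averaging them, since dividing by the constant ENSEMBLES cannot change which class is largest:
# np.argmax(sum_of_probs, axis=1) == np.argmax(sum_of_probs / ENSEMBLES, axis=1)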
12,120,426 | ts = time.time()
group = matrix.groupby(['date_block_num', 'shop_id', 'item_category_id'] ).agg({'item_cnt_month': ['mean']})
group.columns = ['date_shop_cat_avg_item_cnt']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num', 'shop_id', 'item_category_id'], how='left')
matrix['date_shop_cat_avg_item_cnt'] = matrix['date_shop_cat_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], 'date_shop_cat_avg_item_cnt')
matrix.drop(['date_shop_cat_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts<merge> | submission = pd.DataFrame({"ImageID": range(1,len(results)+1), "Label": results})
submission.to_csv('submission.csv', index=False ) | Digit Recognizer |
11,955,886 | ts = time.time()
group = matrix.groupby(['date_block_num', 'shop_id', 'type_code'] ).agg({'item_cnt_month': ['mean']})
group.columns = ['date_shop_type_avg_item_cnt']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num', 'shop_id', 'type_code'], how='left')
matrix['date_shop_type_avg_item_cnt'] = matrix['date_shop_type_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], 'date_shop_type_avg_item_cnt')
matrix.drop(['date_shop_type_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts<merge> | data_dir='/kaggle/input/digit-recognizer/' | Digit Recognizer |
11,955,886 | ts = time.time()
group = matrix.groupby(['date_block_num', 'shop_id', 'subtype_code'] ).agg({'item_cnt_month': ['mean']})
group.columns = ['date_shop_subtype_avg_item_cnt']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num', 'shop_id', 'subtype_code'], how='left')
matrix['date_shop_subtype_avg_item_cnt'] = matrix['date_shop_subtype_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], 'date_shop_subtype_avg_item_cnt')
matrix.drop(['date_shop_subtype_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts<merge> | train=pd.read_csv(data_dir+'train.csv')
test=pd.read_csv(data_dir+'test.csv' ) | Digit Recognizer |
11,955,886 | ts = time.time()
group = matrix.groupby(['date_block_num', 'city_code'] ).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_city_avg_item_cnt' ]
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num', 'city_code'], how='left')
matrix['date_city_avg_item_cnt'] = matrix['date_city_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], 'date_city_avg_item_cnt')
matrix.drop(['date_city_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts<merge> | train['label'].value_counts() | Digit Recognizer |
11,955,886 | ts = time.time()
group = matrix.groupby(['date_block_num', 'item_id', 'city_code'] ).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_item_city_avg_item_cnt' ]
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num', 'item_id', 'city_code'], how='left')
matrix['date_item_city_avg_item_cnt'] = matrix['date_item_city_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], 'date_item_city_avg_item_cnt')
matrix.drop(['date_item_city_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts<merge> | train.isnull().sum() | Digit Recognizer |
11,955,886 | ts = time.time()
group = matrix.groupby(['date_block_num', 'type_code'] ).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_type_avg_item_cnt' ]
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num', 'type_code'], how='left')
matrix['date_type_avg_item_cnt'] = matrix['date_type_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], 'date_type_avg_item_cnt')
matrix.drop(['date_type_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts<merge> | train.isnull().sum() | Digit Recognizer |
11,955,886 | ts = time.time()
group = matrix.groupby(['date_block_num', 'subtype_code'] ).agg({'item_cnt_month': ['mean']})
group.columns = [ 'date_subtype_avg_item_cnt' ]
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num', 'subtype_code'], how='left')
matrix['date_subtype_avg_item_cnt'] = matrix['date_subtype_avg_item_cnt'].astype(np.float16)
matrix = lag_feature(matrix, [1], 'date_subtype_avg_item_cnt')
matrix.drop(['date_subtype_avg_item_cnt'], axis=1, inplace=True)
time.time() - ts<data_type_conversions> | features=[i for i in train.columns]
k=0
for feature in features:
if train[feature].isnull().sum() ==0:
k=k+1
else:
print('{0} has {1} null values'.format(feature,train[feature].isnull().sum()))
if(k==train.shape[1]):
print("no nan's in the train dataset, so proceed")
| Digit Recognizer |
11,955,886 | ts = time.time()
group = train.groupby(['item_id'] ).agg({'item_price': ['mean']})
group.columns = ['item_avg_item_price']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['item_id'], how='left')
matrix['item_avg_item_price'] = matrix['item_avg_item_price'].astype(np.float16)
group = train.groupby(['date_block_num','item_id'] ).agg({'item_price': ['mean']})
group.columns = ['date_item_avg_item_price']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num','item_id'], how='left')
matrix['date_item_avg_item_price'] = matrix['date_item_avg_item_price'].astype(np.float16)
lags = [1,2,3,4,5,6]
matrix = lag_feature(matrix, lags, 'date_item_avg_item_price')
for i in lags:
matrix['delta_price_lag_'+str(i)] = \
(matrix['date_item_avg_item_price_lag_'+str(i)] - matrix['item_avg_item_price'])/ matrix['item_avg_item_price']
def select_trend(row):
for i in lags:
if row['delta_price_lag_'+str(i)]:
return row['delta_price_lag_'+str(i)]
return 0
matrix['delta_price_lag'] = matrix.apply(select_trend, axis=1)
matrix['delta_price_lag'] = matrix['delta_price_lag'].astype(np.float16)
matrix['delta_price_lag'].fillna(0, inplace=True)
fetures_to_drop = ['item_avg_item_price', 'date_item_avg_item_price']
for i in lags:
fetures_to_drop += ['date_item_avg_item_price_lag_'+str(i)]
fetures_to_drop += ['delta_price_lag_'+str(i)]
matrix.drop(fetures_to_drop, axis=1, inplace=True)
time.time() - ts<merge> | features=[i for i in test.columns]
k=0
for feature in features:
if test[feature].isnull().sum() ==0:
k=k+1
else:
print('{0} has {1} null values'.format(feature,test[feature].isnull().sum()))
if(k==test.shape[1]):
print("no nan's in the test dataset, so proceed" ) | Digit Recognizer |
11,955,886 | ts = time.time()
group = train.groupby(['date_block_num','shop_id'] ).agg({'revenue': ['sum']})
group.columns = ['date_shop_revenue']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['date_block_num','shop_id'], how='left')
matrix['date_shop_revenue'] = matrix['date_shop_revenue'].astype(np.float32)
group = group.groupby(['shop_id'] ).agg({'date_shop_revenue': ['mean']})
group.columns = ['shop_avg_revenue']
group.reset_index(inplace=True)
matrix = pd.merge(matrix, group, on=['shop_id'], how='left')
matrix['shop_avg_revenue'] = matrix['shop_avg_revenue'].astype(np.float32)
matrix['delta_revenue'] =(matrix['date_shop_revenue'] - matrix['shop_avg_revenue'])/ matrix['shop_avg_revenue']
matrix['delta_revenue'] = matrix['delta_revenue'].astype(np.float16)
matrix = lag_feature(matrix, [1], 'delta_revenue')
matrix.drop(['date_shop_revenue','shop_avg_revenue','delta_revenue'], axis=1, inplace=True)
time.time() - ts<feature_engineering> | train_y=train['label']
train_x=train.drop('label',axis=1 ) | Digit Recognizer |
11,955,886 | matrix['month'] = matrix['date_block_num'] % 12<categorify> | def image_printer(i,train_x):
idx=i
grid_data=train_x.iloc[idx].to_numpy().reshape(28,28 ).astype('uint8')
plt.imshow(grid_data)
| Digit Recognizer |
11,955,886 | days = pd.Series([31,28,31,30,31,30,31,31,30,31,30,31])
matrix['days'] = matrix['month'].map(days ).astype(np.int8 )<data_type_conversions> | label.value_counts() | Digit Recognizer |
11,955,886 | ts = time.time()
cache = {}
matrix['item_shop_last_sale'] = -1
matrix['item_shop_last_sale'] = matrix['item_shop_last_sale'].astype(np.int8)
for idx, row in matrix.iterrows() :
key = str(row.item_id)+' '+str(row.shop_id)
if key not in cache:
if row.item_cnt_month!=0:
cache[key] = row.date_block_num
else:
last_date_block_num = cache[key]
matrix.at[idx, 'item_shop_last_sale'] = row.date_block_num - last_date_block_num
cache[key] = row.date_block_num
time.time() - ts<data_type_conversions> | std_data=StandardScaler().fit_transform(data)
print(np.mean(std_data))
print(np.std(std_data)) | Digit Recognizer |
11,955,886 | ts = time.time()
cache = {}
matrix['item_last_sale'] = -1
matrix['item_last_sale'] = matrix['item_last_sale'].astype(np.int8)
for idx, row in matrix.iterrows() :
key = row.item_id
if key not in cache:
if row.item_cnt_month!=0:
cache[key] = row.date_block_num
else:
last_date_block_num = cache[key]
if row.date_block_num>last_date_block_num:
matrix.at[idx, 'item_last_sale'] = row.date_block_num - last_date_block_num
cache[key] = row.date_block_num
time.time() - ts<feature_engineering> | temp_data=std_data
covar_matrix=np.matmul(temp_data.T,temp_data)
print(f'shape of covar matrix is {covar_matrix.shape}')
print(f'shape of my data is {temp_data.shape}' ) | Digit Recognizer |
11,955,886 | ts = time.time()
matrix['item_shop_first_sale'] = matrix['date_block_num'] - matrix.groupby(['item_id','shop_id'])['date_block_num'].transform('min')
matrix['item_first_sale'] = matrix['date_block_num'] - matrix.groupby('item_id')['date_block_num'].transform('min')
time.time() - ts<filter> | eigh_values,eigh_vectors=eigh(covar_matrix,eigvals=(782,783))
print(f'shape of eigen vectors {eigh_vectors.shape}' ) | Digit Recognizer |
11,955,886 | ts = time.time()
matrix = matrix[matrix.date_block_num > 11]
time.time() - ts<correct_missing_values> | pca_points=np.matmul(temp_data,eigh_vectors)
print(f'shape of my pca points is {pca_points.shape}' ) | Digit Recognizer |
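As a hedged cross-check of the eigendecomposition route above, the same 2-D projection can be obtained from sklearn on the standardized data (components may come back in the opposite order and with flipped signs, since eigh sorts eigenvalues ascending and either method may negate an eigenvector):
from sklearn import decomposition
pca2 = decomposition.PCA(n_components=2)
pca_points_sklearn = pca2.fit_transform(std_data)   # shape (n_samples, 2), compare to pca_points up to order/sign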
11,955,886 | ts = time.time()
def fill_na(df):
for col in df.columns:
if('_lag_' in col)&(df[col].isnull().any()):
if('item_cnt' in col):
df[col].fillna(0, inplace=True)
return df
matrix = fill_na(matrix)
time.time() - ts<load_pretrained> | pca_data=np.vstack(( pca_points.T,label)).T
print(f'shape of my pca data is {pca_data.shape}')
pca_dataframe=pd.DataFrame(data=pca_data,columns=('1st Principal comp','2nd Principal comp','label'))
print(pca_dataframe.head(10)) | Digit Recognizer |
11,955,886 | matrix.to_pickle('data.pkl')
del matrix
del cache
del group
del items
del shops
del cats
del train
gc.collect() ;<load_pretrained> | pca=decomposition.PCA() | Digit Recognizer |
11,955,886 | data = pd.read_pickle('data.pkl' )<drop_column> | from sklearn.manifold import TSNE | Digit Recognizer |
11,955,886 | data = data[[
'date_block_num',
'shop_id',
'item_id',
'item_cnt_month',
'city_code',
'item_category_id',
'type_code',
'subtype_code',
'item_cnt_month_lag_1',
'item_cnt_month_lag_2',
'item_cnt_month_lag_3',
'item_cnt_month_lag_6',
'item_cnt_month_lag_12',
'date_avg_item_cnt_lag_1',
'date_item_avg_item_cnt_lag_1',
'date_item_avg_item_cnt_lag_2',
'date_item_avg_item_cnt_lag_3',
'date_item_avg_item_cnt_lag_6',
'date_item_avg_item_cnt_lag_12',
'date_shop_avg_item_cnt_lag_1',
'date_shop_avg_item_cnt_lag_2',
'date_shop_avg_item_cnt_lag_3',
'date_shop_avg_item_cnt_lag_6',
'date_shop_avg_item_cnt_lag_12',
'date_cat_avg_item_cnt_lag_1',
'date_shop_cat_avg_item_cnt_lag_1',
'date_city_avg_item_cnt_lag_1',
'date_item_city_avg_item_cnt_lag_1',
'delta_price_lag',
'month',
'days',
'item_shop_last_sale',
'item_last_sale',
'item_shop_first_sale',
'item_first_sale',
]]<prepare_x_and_y> | temp_data1=std_data
pca.n_components=450
pca_data=pca.fit_transform(temp_data1)
pca_data_new=np.vstack(( pca_data.T,label)).T
new_data=pca_data | Digit Recognizer |
11,955,886 | X_train = data[data.date_block_num < 33].drop(['item_cnt_month'], axis=1)
Y_train = data[data.date_block_num < 33]['item_cnt_month']
X_valid = data[data.date_block_num == 33].drop(['item_cnt_month'], axis=1)
Y_valid = data[data.date_block_num == 33]['item_cnt_month']
X_test = data[data.date_block_num == 34].drop(['item_cnt_month'], axis=1 )<set_options> | from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import cross_val_score,cross_val_predict
from sklearn.metrics import accuracy_score,confusion_matrix,classification_report
from scipy.stats import uniform,truncnorm,randint | Digit Recognizer |
11,955,886 | del data
gc.collect() ;<train_model> | clf=LogisticRegression(multi_class='multinomial',n_jobs=-1)
cv_scores=cross_val_score(clf,new_data,label,cv=10)
print(f'we can expect accuracy between {cv_scores.min() } and {cv_scores.max() } with avg accuracy of {cv_scores.mean() }' ) | Digit Recognizer |
11,955,886 | ts = time.time()
model = XGBRegressor(
max_depth=8,
n_estimators=1000,
min_child_weight=300,
colsample_bytree=0.8,
subsample=0.8,
eta=0.3,
seed=42)
model.fit(
X_train,
Y_train,
eval_metric="rmse",
eval_set=[(X_train, Y_train),(X_valid, Y_valid)],
verbose=True,
early_stopping_rounds = 10)
time.time() - ts<save_to_csv> | class_pred=cross_val_predict(clf,new_data,label,cv=10)
accLR=accuracy_score(label,class_pred)
print(f'accuracy of our model is {accLR}')
print(f' confusion matrix is ')
print(confusion_matrix(label,class_pred))
print(f'classification report is ')
print(classification_report(label,class_pred)) | Digit Recognizer |
11,955,886 | Y_pred = model.predict(X_valid ).clip(0, 20)
Y_test = model.predict(X_test ).clip(0, 20)
submission = pd.DataFrame({
"ID": test.index,
"item_cnt_month": Y_test
})
submission.to_csv('xgb_submission.csv', index=False)
pickle.dump(Y_pred, open('xgb_train.pickle', 'wb'))
pickle.dump(Y_test, open('xgb_test.pickle', 'wb'))<define_variables> | for kernel in('poly','rbf'):
clf=svm.SVC(kernel=kernel)
cv_scores=cross_val_score(clf,new_data,label,n_jobs=-1,cv=10)
print(f'accuracy of {kernel} kernel varies between {cv_scores.min() } and {cv_scores.max() } with mean {cv_scores.mean() }')
| Digit Recognizer |
11,955,886 |
<set_options> | for C in(1,10,100):
clf=svm.SVC(kernel='rbf',C=C)
cv_scores=cross_val_score(clf,new_data,label,n_jobs=-1,cv=10)
print(f'accuracy of rbf kernel with C={C} varies between {cv_scores.min() } and {cv_scores.max() } with mean {cv_scores.mean() }')
| Digit Recognizer |
11,955,886 | !pip install pyunpack
!pip install patool
os.environ["CUDA_VISIBLE_DEVICES"]='0,1'
os.system('apt-get install p7zip')
tf.random.set_seed(9)
<import_modules> | for gamma in(0.001,0.01):
clf=svm.SVC(kernel='rbf',C=10,gamma=gamma)
cv_scores=cross_val_score(clf,new_data,label,n_jobs=-1,cv=10)
print(f'accuracy of rbf kernel with C=10 and gamma={gamma} varies between {cv_scores.min() } and {cv_scores.max() } with mean {cv_scores.mean() }')
| Digit Recognizer |
11,955,886 | tf.__version__<set_options> | clf=svm.SVC(kernel='rbf',C=10,gamma=0.001)
cv_scores=cross_val_score(clf,new_data,label,n_jobs=-1,cv=10)
print(f'accuracy of rbf kernel with C=10 and gamma= 0.001 varies between {cv_scores.min() } and {cv_scores.max() } with mean {cv_scores.mean() }' ) | Digit Recognizer |
11,955,886 | device_lib.list_local_devices()<define_variables> | class_pred=cross_val_predict(clf,new_data,label,cv=10)
accSVM=accuracy_score(label,class_pred)
print(f'accuracy of our model is {accSVM}')
print(f' confusion matrix is ')
print(confusion_matrix(label,class_pred))
print(f'classification report is ')
print(classification_report(label,class_pred)) | Digit Recognizer |
11,955,886 | root_path = "/kaggle"<load_pretrained> | clf=KNeighborsClassifier(n_neighbors=5,n_jobs=-1)
cv_score=cross_val_score(clf,new_data,label,cv=10)
print(f' we can expect accuracy bwetween {cv_score.min() } and {cv_score.max() } with a mean of {cv_score.mean() }')
| Digit Recognizer |
11,955,886 | if not os.path.exists(root_path + '/working/train/'):
os.makedirs(root_path + '/working/train/')
Archive(root_path + '/input/tensorflow-speech-recognition-challenge/train.7z' ).extractall(root_path + '/working')
train_path = root_path + '/working/train/audio/'<load_pretrained> | class_pred=cross_val_predict(clf,new_data,label,cv=10)
accKNN=accuracy_score(label,class_pred)
print(f'accuracy of our model is {accKNN}')
print(f' confusion matrix is ')
print(confusion_matrix(label,class_pred))
print(f'classification report is ')
print(classification_report(label,class_pred)) | Digit Recognizer |
11,955,886 | train_audio_sample = os.path.join(train_path, "yes/0a7c2a8d_nohash_0.wav")
x,sr = librosa.load(train_audio_sample, sr = 16000)
ipd.Audio(x, rate=sr )<normalization> | clf=DecisionTreeClassifier(max_depth=best_n,random_state=0)
cv_score=cross_val_score(clf,new_data,label,cv=10,scoring='accuracy',n_jobs=-1)
print(f' with {best_n} as depth of trees we can expect accuracy bwetween {cv_score.min() } and {cv_score.max() } with a mean of {cv_score.mean() }' ) | Digit Recognizer |
11,955,886 | mfccs = librosa.feature.mfcc(x, sr=sr, n_mfcc=40)
scaler = StandardScaler()
ee= scaler.fit_transform(mfccs.T)
plt.figure(figsize=(14, 5))
librosa.display.specshow(ee.T )<categorify> | clf=DecisionTreeClassifier(max_depth=best_n,random_state=0)
clf.fit(new_data,label ) | Digit Recognizer |
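A brief hedged note on the MFCC cell above: librosa.feature.mfcc returns an array of shape (n_mfcc, n_frames), so the transpose feeds StandardScaler one frame per row. Roughly, for a 1-second clip at sr=16000 with the default hop length of 512:
# mfccs.shape -> (40, ~32)   40 coefficients x ~32 frames
# ee.shape    -> (~32, 40)   one row per frame, each coefficient standardized across frames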
11,955,886 | def pad_audio(samples, L):
if len(samples)>= L:
return samples
else:
return np.pad(samples, pad_width=(L - len(samples), 0), mode='constant', constant_values=(0, 0))
def chop_audio(samples, L=16000):
while True:
beg = np.random.randint(0, len(samples)- L)
yield samples[beg: beg + L]
def choose_background_generator(sound, backgrounds, max_alpha = 0.7):
if backgrounds is None:
return sound
my_gen = backgrounds[np.random.randint(len(backgrounds)) ]
background = next(my_gen)* np.random.uniform(0, max_alpha)
augmented_data = sound + background
augmented_data = augmented_data.astype(type(sound[0]))
return augmented_data
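# random_shift: shift the waveform by a random number of samples (up to shift_max seconds) and zero-fill the vacated part.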
def random_shift(sound, shift_max = 0.2, sampling_rate = 16000):
shift = np.random.randint(sampling_rate * shift_max)
out = np.roll(sound, shift)
if shift > 0:
out[:shift] = 0
else:
out[shift:] = 0
return out
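# random_change_pitch: shift the pitch by a random 1-3 semitone factor with librosa.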
def random_change_pitch(x, sr=16000):
pitch_factor = np.random.randint(1, 4)
out = librosa.effects.pitch_shift(x, sr, pitch_factor)
return out
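# random_speed_up: time-stretch the clip by a random factor and zero-pad it back to the original length (at the start or the end).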
def random_speed_up(x):
where = ["start", "end"][np.random.randint(0, 1)]
speed_factor = np.random.uniform(0, 0.5)
up = librosa.effects.time_stretch(x, 1 + speed_factor)
up_len = up.shape[0]
if where == "end":
up = np.concatenate(( up, np.zeros(( x.shape[0] - up_len,))))
else:
up = np.concatenate(( np.zeros(( x.shape[0] - up_len,)) , up))
return up
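# get_image_list: collect per-class wav file lists and numeric labels from the training folders, skipping _background_noise_.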
def get_image_list(train_audio_path):
classes = os.listdir(train_audio_path)
classes = [thisclass for thisclass in classes if thisclass != '_background_noise_']
index = [i for i,j in enumerate(classes)]
outlist = []
labels = []
for thisindex,thisclass in zip(index, classes):
filelist = [f for f in os.listdir(os.path.join(train_audio_path, thisclass)) if f.endswith('.wav')]
filelist = [os.path.join(train_audio_path, thisclass, x)for x in filelist]
outlist.append(filelist)
labels.append(np.full(len(filelist), fill_value= thisindex))
return outlist,labels,dict(zip(classes,index))
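# split_train_test_stratified_shuffle: per-class random split into shuffled train and validation sets (train_size fraction of each class).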
def split_train_test_stratified_shuffle(images_list, labels, train_size = 0.9):
classes_size = [len(x)for x in images_list]
classes_vector = [np.arange(x)for x in classes_size]
total = np.sum(classes_size)
total_train = [int(train_size * total * x)for x in classes_size / total]
train_index = [np.random.choice(x,y,replace=False)for x,y in zip(classes_size,total_train)]
validation_index = [np.setdiff1d(i,j)for i,j in zip(classes_vector,train_index)]
train_set = [np.array(x)[idx] for x,idx in zip(images_list,train_index)]
validation_set = [np.array(x)[idx] for x,idx in zip(images_list,validation_index)]
train_labels = [np.array(x)[idx] for x,idx in zip(labels,train_index)]
validation_labels = [np.array(x)[idx] for x,idx in zip(labels,validation_index)]
train_set = np.array([element for array in train_set for element in array])
validation_set = np.array([element for array in validation_set for element in array])
train_labels = np.array([element for array in train_labels for element in array])
validation_labels = np.array([element for array in validation_labels for element in array])
train_shuffle = np.random.permutation(len(train_set))
validation_shuffle = np.random.permutation(len(validation_set))
train_set = train_set[train_shuffle]
validation_set = validation_set[validation_shuffle]
train_labels = train_labels[train_shuffle]
validation_labels = validation_labels[validation_shuffle]
return train_set,train_labels,validation_set,validation_labels
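# preprocess_data: load a wav, pad it to one second, apply each augmentation with probability 1 - threshold, and return standardized MFCCs shaped (time, n_mfcc, 1).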
def preprocess_data(file, background_generator, target_sr = 16000, n_mfcc = 40, threshold = 0.7):
x,sr = librosa.load(file, sr = target_sr)
x = pad_audio(x, sr)
if np.random.uniform(0, 1)> threshold:
x = choose_background_generator(x, background_generator)
if np.random.uniform(0, 1)> threshold:
x = random_shift(x)
if np.random.uniform(0, 1)> threshold:
x = random_change_pitch(x)
if np.random.uniform(0, 1)> threshold:
x = random_speed_up(x)
mfccs = librosa.feature.mfcc(x, sr=sr, n_mfcc=n_mfcc)
mfccs = np.moveaxis(mfccs, 1, 0)
scaler = StandardScaler()
mfccs_scaled = scaler.fit_transform(mfccs)
return mfccs_scaled.reshape(mfccs_scaled.shape[0], mfccs_scaled.shape[1], 1)
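# data_generator: Keras Sequence that loads and augments one batch of wav files at a time.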
class data_generator(Sequence):
def __init__(self, x_set, y_set, batch_size, background_generator):
self.x, self.y = x_set, y_set
self.batch_size = batch_size
self.background_generator = background_generator
def __len__(self):
return math.ceil(len(self.x)/ self.batch_size)
def __getitem__(self, idx):
idx_from = idx * self.batch_size
idx_to =(idx + 1)* self.batch_size
batch_x = self.x[idx_from:idx_to]
batch_y = self.y[idx_from:idx_to]
x = [preprocess_data(elem, self.background_generator)for elem in batch_x]
y = batch_y
return np.array(x), np.array(y)
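# build_model: four Conv2D/MaxPooling blocks followed by dropout and a dense softmax head over n_classes.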
def build_model(n_classes, input_shape):
model_input = keras.Input(shape=input_shape)
img_1 = layers.Convolution2D(filters = 32, kernel_size =(3,3), padding = "same", activation=activations.relu )(model_input)
img_1 = layers.MaxPooling2D(pool_size=(2, 2))(img_1)
img_1 = layers.Convolution2D(filters = 64, kernel_size =(3,3), padding = "same", activation=activations.relu )(img_1)
img_1 = layers.MaxPooling2D(pool_size=(2, 2))(img_1)
img_1 = layers.Convolution2D(filters = 128, kernel_size =(3,3), padding = "same", activation=activations.relu )(img_1)
img_1 = layers.MaxPooling2D(pool_size=(2, 2))(img_1)
img_1 = layers.Convolution2D(filters = 256, kernel_size =(3,3), padding = "same", activation=activations.relu )(img_1)
img_1 = layers.MaxPooling2D(pool_size=(2, 2))(img_1)
img_1 = layers.Dropout(rate=0.25 )(img_1)
img_1 = layers.Flatten()(img_1)
img_1 = layers.Dense(128, activation=activations.relu )(img_1)
img_1 = layers.Dropout(rate=0.5 )(img_1)
model_output = layers.Dense(n_classes, activation=activations.softmax )(img_1)
model = keras.Model(model_input, model_output)
return model
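# multiclass_roc: binarize the labels, plot a per-class ROC curve on the global axis c_ax, and return the macro-averaged AUC.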
def multiclass_roc(y_test, y_pred, average="macro"):
lb = LabelBinarizer()
lb.fit(y_test)
y_test = lb.transform(y_test)
y_pred = lb.transform(y_pred)
all_labels = np.unique(y_test)
for(idx, c_label)in enumerate(all_labels):
fpr, tpr, thresholds = roc_curve(y_test[:,idx].astype(int), y_pred[:,idx])
c_ax.plot(fpr, tpr, label = '%s(AUC:%0.2f)' %(c_label, auc(fpr, tpr)))
c_ax.plot(fpr, fpr, 'b-', label = 'Random Guessing')
return roc_auc_score(y_test, y_pred, average=average )<load_pretrained> | cn=['0','1','2','3','4','5','6','7','8','9']
dot_data = tree.export_graphviz(clf, out_file=None,
class_names=cn,
filled=True)
graph = graphviz.Source(dot_data,format="png")
graph
| Digit Recognizer |
11,955,886 | wavfiles = glob.glob(os.path.join(train_path, "_background_noise_/*wav"))
wavfiles = [librosa.load(elem, sr = 16000)[0] for elem in wavfiles]
background_generator = [chop_audio(x)for x in wavfiles]<split> | class_pred=cross_val_predict(clf,new_data,label,cv=10)
accDT=accuracy_score(label,class_pred)
print(f'accuracy of our model is {accDT}')
print(f' confusion matrix is ')
print(confusion_matrix(label,class_pred))
print(f'classification report is ')
print(classification_report(label,class_pred)) | Digit Recognizer |
11,955,886 | images_list,labels,classes_map = get_image_list(train_path)
train_set,train_labels,validation_set,validation_labels = split_train_test_stratified_shuffle(images_list,labels)
train_datagen = data_generator(train_set, train_labels, 40, background_generator)
validation_datagen = data_generator(validation_set, validation_labels,40, None )<normalization> | clf=RandomForestClassifier(n_estimators=600,max_depth=best_n,random_state=0,n_jobs=-1)
cv_score=cross_val_score(clf,new_data,label,cv=10,scoring='accuracy',n_jobs=-1)
print(f'with {best_n} as depth of trees and {600} as number of trees we can expect accuracy between {cv_score.min() } and {cv_score.max() } with a mean of {cv_score.mean() }' ) | Digit Recognizer |
11,955,886 | start_random = random_shift(x)
ipd.Audio(start_random , rate=sr )<normalization> | class_pred=cross_val_predict(clf,new_data,label,cv=10)
accRF=accuracy_score(label,class_pred)
print(f'accuracy of our model is {accRF}')
print(f' confusion matrix is ')
print(confusion_matrix(label,class_pred))
print(f'classification report is ')
print(classification_report(label,class_pred)) | Digit Recognizer |
11,955,886 | higher_speed = random_speed_up(x)
ipd.Audio(higher_speed , rate=sr )<normalization> | import tensorflow as tf
import keras
from keras import backend as k
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation, BatchNormalization
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import plot_model
from keras.callbacks import EarlyStopping | Digit Recognizer |
11,955,886 | pitch_changed = random_change_pitch(x)
ipd.Audio(pitch_changed, rate=sr )<define_variables> | test_x=test | Digit Recognizer |
11,955,886 | inv_map = {v: k for k, v in classes_map.items() }
any_present=[i in validation_set for i in train_set]
np.any(any_present )<count_unique_values> | img_cols=28
img_rows=28 | Digit Recognizer |
11,955,886 | test1 = np.random.randint(10, 100, 10)
validation_set[test1],[inv_map[int(i)] for i in validation_labels[test1]]<merge> | if k.image_data_format=='channels_first':
train_x = train_x.values.reshape(train_x.shape[0], 1,img_cols,img_rows)
test = test.values.reshape(test.shape[0], 1,img_cols,img_rows)
train_x=train_x/255.0
test=test/255.0
input_shape =(1,img_cols,img_rows)
else:
train_x=train_x.values.reshape(train_x.shape[0],img_cols,img_rows,1)
test=test.values.reshape(test.shape[0],img_cols,img_rows,1)
train_x=train_x/255.0
test=test/255.0
input_shape =(img_cols,img_rows,1 ) | Digit Recognizer |
11,955,886 | out2.join(out, lsuffix='_left', rsuffix='_right')[:5]<compute_test_metric> | earlystopping = EarlyStopping(monitor ="val_accuracy",
mode = 'auto', patience = 10,
restore_best_weights = True)
modelacc = []
nfilters = [32, 64, 128,256]
conv_layers = [1, 2, 3,4]
dense_layers = [0, 1, 2,3]
dp=0.5
for filters in nfilters:
for conv_layer in conv_layers:
for dense_layer in dense_layers:
cnnsays = 'No of filters/feature maps: {} conv_layers: {} dense_layers: {} dropout: {}'.format(filters, conv_layer, dense_layer, dp)
print(cnnsays)
model = Sequential()
model.add(Conv2D(filters,(3, 3), input_shape = input_shape))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(dp))
for i in range(conv_layer-1):
model.add(Conv2D(filters,(3, 3),padding="same"))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(dp))
model.add(Flatten())
for i in range(dense_layer):
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(dp))
model.add(Dense(10, activation='softmax'))
model.compile(optimizer = 'adam',
loss = 'sparse_categorical_crossentropy',
metrics = ['accuracy'])
EPOCHS = 30
history = model.fit(train_x, train_y, epochs=EPOCHS, batch_size=32, validation_split=0.2, callbacks=[earlystopping])
modelacc.append([round(100*max(history.history['val_accuracy']), 2), cnnsays])
| Digit Recognizer |