kernel_id: int64, 24.2k to 23.3M
prompt: string, lengths 8 to 1.85M
completion: string, lengths 1 to 182k
comp_name: string, lengths 5 to 57
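Read as a column summary: each row of the dump pairs a kernel_id (int64), two string fields named prompt and completion that hold notebook code, and a comp_name giving the competition title. A minimal sketch for loading and inspecting such a dump with pandas follows; the file name kernel_cells.parquet is an assumption made here for illustration, not something stated in the source.

import pandas as pd

# Load the dump; "kernel_cells.parquet" is a hypothetical export name.
df = pd.read_parquet("kernel_cells.parquet")

# Sanity-check the schema against the column summary above.
print(df.dtypes)  # expect: kernel_id int64, prompt/completion/comp_name object
print(df["prompt"].str.len().describe())      # string-length spread of the prompts
print(df["comp_name"].value_counts().head())  # rows per competition

# Peek at one prompt/completion pair for the Digit Recognizer rows.
row = df[df["comp_name"] == "Digit Recognizer"].iloc[0]
print(row["prompt"][:200])
print(row["completion"][:200])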
2,617,477
classes = np.unique(train_df["target"]) class_weights = sklearn.utils.class_weight.compute_class_weight( "balanced", classes=classes, y=train_df["target"] ) class_weights = {clazz : weight for clazz, weight in zip(classes, class_weights)}<count_duplicates>
iters = 100 batch_size = 1024
Digit Recognizer
2,617,477
train_df.drop_duplicates(subset="text", inplace=True, keep=False) print("train rows:", len(train_df.index)) print("test rows:", len(test_df.index))<categorify>
lr_decay = ReduceLROnPlateau(monitor="val_acc", factor=0.5, patience=3, verbose=1, min_lr=1e-5 )
Digit Recognizer
2,617,477
class TweetPreProcessor: def __init__(self): self.text_processor = TextPreProcessor( normalize=[ "url", "email", "phone", "user", "time", "date", ], annotate={"repeated", "elongated"}, segmenter="twitter", spell_correction=True, corrector="twitter", unpack_hashtags=False, unpack_contractions=False, spell_correct_elong=True, fix_bad_unicode=True, tokenizer=Tokenizer(lowercase=True ).tokenize, dicts=[emoticons, slangdict], ) def preprocess_tweet(self, tweet): return " ".join(self.text_processor.pre_process_doc(tweet)) def __call__(self, tweet): return self.text_processor.pre_process_doc(tweet) tweet_preprocessor = TweetPreProcessor()<categorify>
early_stopping = EarlyStopping(monitor="val_acc", patience=7, verbose=1 )
Digit Recognizer
2,617,477
for tweet in train_df[100:120]["text"]: print("original: ", tweet) print("processed: ", tweet_preprocessor.preprocess_tweet(tweet)) print("" )<categorify>
print("Training model...") fit_params = { "batch_size": batch_size, "epochs": iters, "verbose": 1, "callbacks": [lr_decay, early_stopping], "validation_data":(x_dev, y_dev) } history = model.fit(x_train, y_train, **fit_params) print("Done!" )
Digit Recognizer
2,617,477
train_df["text"] = train_df["text"].apply(tweet_preprocessor.preprocess_tweet) test_df["text"] = test_df["text"].apply(tweet_preprocessor.preprocess_tweet )<feature_engineering>
loss, acc = model.evaluate(x_dev, y_dev) print("Validation loss: {:.4f}".format(loss)) print("Validation accuracy: {:.4f}".format(acc))
Digit Recognizer
2,617,477
<split><EOS>
y_pred = model.predict(x_test, batch_size=batch_size) y_pred = np.argmax(y_pred, axis=1 ).reshape(( -1, 1)) idx = np.reshape(np.arange(1, len(y_pred)+ 1),(len(y_pred), -1)) y_pred = np.hstack(( idx, y_pred)) y_pred = pd.DataFrame(y_pred, columns=['ImageId', 'Label']) y_pred.to_csv('y_pred.csv', index=False )
Digit Recognizer
7,033,760
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<categorify>
%matplotlib inline %config InlineBackend.figure_format = 'retina' print(os.listdir("/kaggle/input/digit-recognizer")) N_FOLDS = 5 BATCH_SIZE = 256
Digit Recognizer
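Aside on the metric named in the row above: categorization accuracy is plain classification accuracy, the fraction of predicted labels that exactly match the ground truth. A minimal, self-contained sketch follows; the label arrays are made up for illustration and are not part of the dump.

import numpy as np
from sklearn.metrics import accuracy_score

# Made-up label arrays standing in for true and predicted digit classes.
y_true = np.array([3, 1, 4, 1, 5, 9, 2, 6])
y_pred = np.array([3, 1, 4, 0, 5, 9, 2, 6])

# Categorization accuracy: share of positions where the labels match exactly.
print(accuracy_score(y_true, y_pred))   # 0.875
print((y_true == y_pred).mean())        # same value, computed by hand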
7,033,760
def tokenize_encode(tweets, max_length=None): return pretrained_bert_tokenizer( tweets, add_special_tokens=True, truncation=True, padding="max_length", max_length=max_length, return_tensors="tf", ) max_length_tweet = 72 max_length_keyword = 8 train_tweets_encoded = tokenize_encode(x_train["text"].to_list() , max_length_tweet) validation_tweets_encoded = tokenize_encode(x_val["text"].to_list() , max_length_tweet) train_keywords_encoded = tokenize_encode(x_train["keyword"].to_list() , max_length_keyword) validation_keywords_encoded = tokenize_encode(x_val["keyword"].to_list() , max_length_keyword) train_inputs_encoded = dict(train_tweets_encoded) train_inputs_encoded["keywords"] = train_keywords_encoded["input_ids"] validation_inputs_encoded = dict(validation_tweets_encoded) validation_inputs_encoded["keywords"] = validation_keywords_encoded["input_ids"] <create_dataframe>
PATH = '/kaggle/input/digit-recognizer/'
Digit Recognizer
7,033,760
train_dataset = tf.data.Dataset.from_tensor_slices( (dict(train_tweets_encoded), y_train) ) val_dataset = tf.data.Dataset.from_tensor_slices( (dict(validation_tweets_encoded), y_val) ) train_multi_input_dataset = tf.data.Dataset.from_tensor_slices( (train_inputs_encoded, y_train) ) val_multi_input_dataset = tf.data.Dataset.from_tensor_slices( (validation_inputs_encoded, y_val) ) <feature_engineering>
train_on_gpu = torch.cuda.is_available() if not train_on_gpu: print('Training on CPU...') else: print('Training on GPU...' )
Digit Recognizer
7,033,760
tfidf_vectorizer = sklearn.feature_extraction.text.TfidfVectorizer( tokenizer=tweet_preprocessor, min_df=1, ngram_range=(1, 1), norm="l2" ) train_vectors = tfidf_vectorizer.fit_transform(raw_documents=x_train["text"] ).toarray() validation_vectors = tfidf_vectorizer.transform(x_val["text"] ).toarray()<train_model>
class DatasetMNIST(torch.utils.data.Dataset): def __init__(self, data, augmentations=None): self.data = data self.augmentations = augmentations def __len__(self): return len(self.data) def __getitem__(self, index): item = self.data.iloc[index] image = item[1:].values.astype(np.uint8 ).reshape(( 28, 28, 1)) label = item[0] if self.augmentations is not None: augmented = self.augmentations(image=image) return augmented['image'], label else: return image, label
Digit Recognizer
7,033,760
logisticRegressionClf = LogisticRegression(n_jobs=-1, C=2.78) logisticRegressionClf.fit(train_vectors, y_train) def print_metrics_sk(clf, x_train, y_train, x_val, y_val): print(f"Train Accuracy: {clf.score(x_train, y_train):.2%}") print(f"Validation Accuracy: {clf.score(x_val, y_val):.2%}") print("") print(f"f1 score: {sklearn.metrics.f1_score(y_val, clf.predict(x_val)) :.2%}") print_metrics_sk(logisticRegressionClf, train_vectors, y_train, validation_vectors, y_val )<find_best_model_class>
dataset = pd.read_csv(f'{PATH}train.csv') dataset.head(1 )
Digit Recognizer
7,033,760
feature_extractor = get_pretrained_bert_model() model_outputs = feature_extractor.predict( train_dataset.batch(32) ) train_sentence_vectors = model_outputs.last_hidden_state[:, 0, :] train_word_vectors = model_outputs.last_hidden_state[:, 1:, :] model_outputs = feature_extractor.predict( val_dataset.batch(32) ) validation_sentence_vectors = model_outputs.last_hidden_state[:, 0, :] validation_word_vectors = model_outputs.last_hidden_state[:, 1:, :]<train_model>
def custom_folds(dataset,n_folds=N_FOLDS): train_valid_id = [] start = 0 size = len(dataset) split = size // n_folds valid_size = split for i in range(n_folds): train_data = dataset.drop(dataset.index[start:split] ).index.values valid_data = dataset.loc[start:split-1].index.values train_valid_id.append(( train_data,valid_data)) start += valid_size split += valid_size return train_valid_id
Digit Recognizer
7,033,760
logisticRegressionClf = LogisticRegression(n_jobs=-1, class_weight=class_weights) logisticRegressionClf.fit(train_sentence_vectors, y_train) print_metrics_sk( logisticRegressionClf, train_sentence_vectors, y_train, validation_sentence_vectors, y_val, )<train_on_grid>
train_valid = custom_folds(dataset=dataset )
Digit Recognizer
7,033,760
def create_gru_model() -> keras.Model: model = keras.Sequential() model.add(keras.layers.InputLayer(input_shape=train_word_vectors.shape[1:])) model.add(GRU(32, return_sequences=True)) model.add(GlobalMaxPooling1D()) model.add(Dense(1, activation="sigmoid")) model.compile( optimizer=keras.optimizers.Adam() , loss="binary_crossentropy", metrics=keras.metrics.BinaryAccuracy(name="accuracy"), ) return model model = create_gru_model() history = model.fit( train_word_vectors, y_train, validation_data=(validation_word_vectors, y_val), class_weight=class_weights, epochs=20, verbose=0, callbacks=[ EarlyStopping( monitor="val_accuracy", min_delta=0.001, patience=5, restore_best_weights=True, ) ], ) print_metrics(model, train_word_vectors, y_train, validation_word_vectors, y_val )<choose_model_class>
transform_train = A.Compose([ A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=10), A.Normalize(mean=(0.485,), std=(0.229,)) , ToTensor() , ]) transform_valid = A.Compose([ A.Normalize(mean=(0.485,), std=(0.229,)) , ToTensor() , ] )
Digit Recognizer
7,033,760
def create_multi_input_model() -> keras.Model: keyword_ids = keras.Input(( 8,), name="keywords") keyword_features = Embedding(input_dim=feature_extractor.config.vocab_size, output_dim=16, input_length=8, mask_zero=True )(keyword_ids) keyword_features = Flatten()(keyword_features) keyword_features = Dense(1 )(keyword_features) tweet_classification_vectors = keras.Input(( train_sentence_vectors.shape[1],), name="tweets") tweet_features = Dense(1, activation='relu' )(tweet_classification_vectors) combined_features = concatenate([keyword_features, tweet_features]) combined_prediction = Dense(1, activation="sigmoid" )(combined_features) model = keras.Model(inputs = [keyword_ids, tweet_classification_vectors], outputs=combined_prediction) model.compile( optimizer=keras.optimizers.Adam() , loss="binary_crossentropy", metrics=keras.metrics.BinaryAccuracy(name="accuracy"), ) return model model = create_multi_input_model() train_inputs = {"keywords" : train_keywords_encoded["input_ids"], "tweets" : train_sentence_vectors} validation_inputs = {"keywords" : validation_keywords_encoded["input_ids"], "tweets" : validation_sentence_vectors} history = model.fit( train_inputs, y_train, validation_data=(validation_inputs, y_val), class_weight=class_weights, epochs=20, verbose=0, callbacks=[ EarlyStopping( monitor="val_accuracy", min_delta=0.001, patience=5, restore_best_weights=True, ) ], ) print_metrics(model, train_inputs, y_train, validation_inputs, y_val )<choose_model_class>
train_data = DatasetMNIST(dataset, augmentations=transform_train) valid_data = DatasetMNIST(dataset, augmentations=transform_valid) train_valid_loaders = [] for i in train_valid: train_idx, valid_idx = i train_sampler = SubsetRandomSampler(train_idx) valid_sampler = SubsetRandomSampler(valid_idx) train_loader = torch.utils.data.DataLoader(train_data, batch_size=BATCH_SIZE, sampler=train_sampler) valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=BATCH_SIZE, sampler=valid_sampler) train_valid_loaders.append(( train_loader,valid_loader))
Digit Recognizer
7,033,760
def create_multi_input_rnn_model() -> keras.Model: keyword_ids = keras.Input(( 8,), name="keywords") keyword_features = Embedding(input_dim=feature_extractor.config.vocab_size, output_dim=16, input_length=8, mask_zero=True )(keyword_ids) keyword_features = Flatten()(keyword_features) keyword_features = Dense(1 )(keyword_features) tweet_token_embeddings = Input(train_word_vectors.shape[1:], name="tweets") tweet_features = GRU(32, return_sequences=True )(tweet_token_embeddings) tweet_features = GlobalMaxPooling1D()(tweet_features) tweet_features = Dense(1, activation='relu' )(tweet_features) combined_features = concatenate([keyword_features, tweet_features]) combined_prediction = Dense(1, activation="sigmoid" )(combined_features) model = keras.Model(inputs = [keyword_ids, tweet_token_embeddings], outputs=combined_prediction) model.compile( optimizer=keras.optimizers.Adam() , loss="binary_crossentropy", metrics=keras.metrics.BinaryAccuracy(name="accuracy"), ) return model model = create_multi_input_rnn_model() train_inputs = {"keywords" : train_keywords_encoded["input_ids"], "tweets" : train_word_vectors} validation_inputs = {"keywords" : validation_keywords_encoded["input_ids"], "tweets" : validation_word_vectors} history = model.fit( train_inputs, y_train, validation_data=(validation_inputs, y_val), class_weight=class_weights, epochs=20, verbose=0, callbacks=[ EarlyStopping( monitor="val_accuracy", min_delta=0.001, patience=5, restore_best_weights=True, ) ], ) print_metrics(model, train_inputs, y_train, validation_inputs, y_val )<choose_model_class>
class Net(nn.Module): def __init__(self): super().__init__() self.conv1 = nn.Conv2d(1, 32, kernel_size=3) self.bn1 = nn.BatchNorm2d(32) self.conv2 = nn.Conv2d(32, 32, kernel_size=3) self.bn2 = nn.BatchNorm2d(32) self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2, padding=2) self.bn3 = nn.BatchNorm2d(32) self.conv4 = nn.Conv2d(32, 64, kernel_size=3) self.bn4 = nn.BatchNorm2d(64) self.conv5 = nn.Conv2d(64, 64, kernel_size=3) self.bn5 = nn.BatchNorm2d(64) self.conv6 = nn.Conv2d(64, 64, kernel_size=5, stride=2, padding=2) self.bn6 = nn.BatchNorm2d(64) self.conv7 = nn.Conv2d(64, 128, kernel_size=4) self.bn7 = nn.BatchNorm2d(128) self.lin1 = nn.Linear(128,10) def forward(self, xb): x = xb.view(-1, 1, 28, 28) x = self.bn1(F.relu(self.conv1(x))) x = self.bn2(F.relu(self.conv2(x))) x = self.bn3(F.relu(self.conv3(x))) x = F.dropout2d(x, 0.25) x = self.bn4(F.relu(self.conv4(x))) x = self.bn5(F.relu(self.conv5(x))) x = self.bn6(F.relu(self.conv6(x))) x = F.dropout2d(x, 0.25) x = self.bn7(F.relu(self.conv7(x))) x = torch.flatten(x, start_dim=1) x = F.dropout2d(x, 0.25) x = self.lin1(x) x = F.softmax(x, dim=1) return x model = Net() print(model) if train_on_gpu: model.cuda()
Digit Recognizer
7,033,760
def create_candidate_model_with_fx(hp: kerastuner.HyperParameters)-> keras.Model: keyword_ids = keras.Input(( 8,), name="keywords") keyword_features = Embedding(input_dim=feature_extractor.config.vocab_size, output_dim=16, input_length=8, mask_zero=True )(keyword_ids) keyword_features = Flatten()(keyword_features) keyword_features = Dense(hp.Choice("keyword_units", values=[1, 8, 16, 32], default=1))(keyword_features) tweet_token_embeddings = Input(train_word_vectors.shape[1:], name="tweets") tweet_features = GRU(hp.Choice("GRU_units", values=[8, 16, 32, 64, 128], default=32), return_sequences=True )(tweet_token_embeddings) tweet_features = Dropout(hp.Float("GRU_dropout", min_value=0.0, max_value=0.5, step=0.1))(tweet_features) tweet_features = GlobalMaxPooling1D()(tweet_features) for i in range(hp.Int("num_layers", min_value=0, max_value=3, step=1)) : tweet_features = Dense(hp.Choice("layer_" + str(i)+ "_units", values=[2, 8, 16, 32, 64, 128, 256]), activation="relu" )(tweet_features) tweet_features = Dropout(hp.Float("layer_" + str(i)+ "_dropout", min_value=0.0, max_value=0.5, step=0.1))(tweet_features) combined_features = concatenate([keyword_features, tweet_features]) combined_prediction = Dense(1, activation="sigmoid" )(combined_features) model = keras.Model(inputs = [keyword_ids, tweet_token_embeddings], outputs=combined_prediction) model.compile( optimizer=keras.optimizers.Adam() , loss="binary_crossentropy", metrics=keras.metrics.BinaryAccuracy(name="accuracy"), ) return model train_inputs = {"keywords" : train_keywords_encoded["input_ids"], "tweets" : train_word_vectors} validation_inputs = {"keywords" : validation_keywords_encoded["input_ids"], "tweets" : validation_word_vectors} <define_variables>
class DatasetSubmissionMNIST(torch.utils.data.Dataset): def __init__(self, file_path, augmentations=None): self.data = pd.read_csv(file_path) self.augmentations = augmentations def __len__(self): return len(self.data) def __getitem__(self, index): image = self.data.iloc[index].values.astype(np.uint8 ).reshape(( 28, 28, 1)) if self.augmentations is not None: augmented = self.augmentations(image=image) return augmented['image'] return image
Digit Recognizer
7,033,760
MAX_EPOCHS = 10 FACTOR = 3 ITERATIONS = 3 print(f"Number of models in each bracket: {math.ceil(1 + math.log(MAX_EPOCHS, FACTOR)) }") print(f"Number of epochs over all trials: {round(ITERATIONS *(MAX_EPOCHS *(math.log(MAX_EPOCHS, FACTOR)** 2)))}" )<train_on_grid>
transform_test = A.Compose([ A.Normalize(mean=(0.485,), std=(0.229,)) , ToTensor() , ]) submissionset = DatasetSubmissionMNIST(f'{PATH}test.csv', augmentations=transform_test) submissionloader = torch.utils.data.DataLoader(submissionset, batch_size=BATCH_SIZE, shuffle=False )
Digit Recognizer
7,033,760
tuner = kerastuner.Hyperband( create_candidate_model_with_fx, max_epochs=MAX_EPOCHS, hyperband_iterations=ITERATIONS, factor=FACTOR, objective="val_accuracy", directory="hyperparam-search", project_name="architecture-hyperband", ) tuner.search( train_inputs, y_train, validation_data=(validation_inputs, y_val), class_weight=class_weights, epochs=10, verbose=1, callbacks=[ EarlyStopping( monitor="val_accuracy", min_delta=0.001, patience=3, restore_best_weights=True, ) ], ) <train_model>
def every_predict(model,submissionloader=submissionloader): all_batchs = [] with torch.no_grad() : model.eval() for images in submissionloader: if train_on_gpu: images = images.cuda() ps = model(images) all_batchs.append(ps.to('cpu' ).detach().numpy()) return all_batchs
Digit Recognizer
7,033,760
best_model = tuner.get_best_models() [0] print("") best_arch_hp = tuner.get_best_hyperparameters() [0] pprint.pprint(best_arch_hp.values, indent=4) print("") print_metrics(best_model, train_inputs, y_train, validation_inputs, y_val )<choose_model_class>
five_predict = [] all_train_losses, all_valid_losses = [], [] FOLD = 1 for i in train_valid_loaders: model = Net() if train_on_gpu: model.cuda() train_loader, valid_loader = i LEARNING_RATE = 0.01 criterion = nn.CrossEntropyLoss() optimizer = optim.Adam(model.parameters() ,lr=LEARNING_RATE) epochs = 120 valid_loss_min = np.Inf train_losses, valid_losses = [], [] history_accuracy = [] scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,factor=0.7,patience=2) model.train() for e in range(1, epochs+1): running_loss = 0 for images, labels in train_loader: if train_on_gpu: images, labels = images.cuda() , labels.cuda() optimizer.zero_grad() ps = model(images) loss = criterion(ps, labels) loss.backward() optimizer.step() running_loss += loss.item() else: valid_loss = 0 accuracy = 0 with torch.no_grad() : model.eval() for images, labels in valid_loader: if train_on_gpu: images, labels = images.cuda() , labels.cuda() ps = model(images) _, top_class = ps.topk(1, dim=1) equals = top_class == labels.view(*top_class.shape) valid_loss += criterion(ps, labels) accuracy += torch.mean(equals.type(torch.FloatTensor)) model.train() train_losses.append(running_loss/len(train_loader)) valid_losses.append(valid_loss/len(valid_loader)) history_accuracy.append(accuracy/len(valid_loader)) network_learned = valid_loss < valid_loss_min if e == 1 or e % 5 == 0 or network_learned: print(f"Epoch: {e}/{epochs}.. ", f"Training Loss: {running_loss/len(train_loader):.4f}.. ", f"Validation Loss: {valid_loss/len(valid_loader):.4f}.. ", f"Valid Accuracy: {accuracy/len(valid_loader):.4f}") if network_learned: valid_loss_min = valid_loss torch.save(model.state_dict() , f'best_model_fold{FOLD}.pt') print('Detected network improvement, saving current model') scheduler.step(running_loss) all_train_losses.append(train_losses) all_valid_losses.append(valid_losses) model.load_state_dict(torch.load(f'best_model_fold{FOLD}.pt')) model.eval() five_predict.append(every_predict(model)) model.train() FOLD +=1
Digit Recognizer
7,033,760
<choose_model_class><EOS>
flat_list = [] for sublist in five_predict: for item in sublist: for i in item: flat_list.append(i) final = [] for i in range(0,28000): numbers = [i+a*28000 for a in range(N_FOLDS)] final.append(sum(flat_list[C] for C in numbers)) subm = np.argmax(( final),axis=1) sample_subm = pd.read_csv(f'{PATH}sample_submission.csv') sample_subm['Label'] = subm sample_subm.to_csv('submission.csv',index=False )
Digit Recognizer
3,821,285
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<choose_model_class>
WORKERS = 2 CHANNEL = 3 warnings.filterwarnings("ignore") SIZE = 128 NUM_CLASSES = 10 %config InlineBackend.figure_format = 'retina' %matplotlib inline
Digit Recognizer
3,821,285
def create_model_candidate() -> keras.Model: pretrained_bert_model = get_pretrained_bert_model() keyword_ids = keras.Input(( 8,), name="keywords") keyword_features = Embedding(input_dim=pretrained_bert_model.config.vocab_size, output_dim=16, input_length=8, mask_zero=True )(keyword_ids) keyword_features = Flatten()(keyword_features) keyword_features = Dense(best_arch_hp.get("keyword_units"))(keyword_features) input_ids = Input(shape=(max_length_tweet,), dtype="int32", name="input_ids") attention_mask = Input(shape=(max_length_tweet,), dtype="int32", name="attention_mask") bert_outputs = pretrained_bert_model(input_ids, attention_mask) bert_token_embeddings = bert_outputs.last_hidden_state[:, 1:, :] tweet_features = GRU(best_arch_hp.get("GRU_units"), return_sequences=True )(bert_token_embeddings) tweet_features = Dropout(best_arch_hp.get("GRU_dropout"))(tweet_features) tweet_features = GlobalMaxPooling1D()(tweet_features) for i in range(best_arch_hp.get("num_layers")) : tweet_features = Dense(best_arch_hp.get("layer_" + str(i)+ "_units"), activation="relu" )(tweet_features) tweet_features = Dropout(best_arch_hp.get("layer_" + str(i)+ "_dropout"))(tweet_features) combined_features = concatenate([keyword_features, tweet_features]) combined_prediction = Dense(1, activation="sigmoid" )(combined_features) model = keras.Model(inputs = [keyword_ids, input_ids, attention_mask], outputs=combined_prediction) model.compile( optimizer=keras.optimizers.Adam(learning_rate=5e-5), loss="binary_crossentropy", metrics=keras.metrics.BinaryAccuracy(name="accuracy"), ) return model <train_model>
train = pd.read_csv('../input/digit-recognizer/train.csv') test = pd.read_csv('../input/digit-recognizer/test.csv')
Digit Recognizer
3,821,285
model = create_model_candidate() history = model.fit( train_multi_input_dataset.batch(32), validation_data=val_multi_input_dataset.batch(32), epochs=6, class_weight=class_weights, callbacks=[ keras.callbacks.EarlyStopping( monitor="val_accuracy", restore_best_weights=True ) ], ) best_epoch = len(history.history["val_accuracy"])- 1 print_metrics( model, train_inputs_encoded, y_train, validation_inputs_encoded, y_val )<categorify>
x = x / 255.0 test = test / 255.0
Digit Recognizer
3,821,285
test_tweets_encoded = tokenize_encode(test_df["text"].to_list() , max_length_tweet) test_inputs_encoded = dict(test_tweets_encoded) test_dataset = tf.data.Dataset.from_tensor_slices(test_inputs_encoded) test_keywords_encoded = tokenize_encode(test_df["keyword"].to_list() , max_length_keyword) test_inputs_encoded["keywords"] = test_keywords_encoded["input_ids"] test_multi_input_dataset = tf.data.Dataset.from_tensor_slices(test_inputs_encoded )<train_model>
y = to_categorical(y, num_classes = 10 )
Digit Recognizer
3,821,285
full_train_dataset = train_multi_input_dataset.concatenate(val_multi_input_dataset) model = create_model_candidate() model.fit( full_train_dataset.batch(32), epochs=best_epoch, class_weight=class_weights, )<save_to_csv>
x_train, x_valid, y_train, y_valid = train_test_split(x, y, test_size = 0.1, random_state=2, stratify = y, shuffle = True )
Digit Recognizer
3,821,285
preds = np.squeeze(model.predict(test_multi_input_dataset.batch(32))) preds =(preds >= 0.5 ).astype(int) pd.DataFrame({"id": test_df.id, "target": preds} ).to_csv("submission.csv", index=False )<import_modules>
BatchNormalization, Input, Conv2D, GlobalAveragePooling2D)
Digit Recognizer
3,821,285
import numpy as np import pandas as pd import os <import_modules>
model = Sequential() model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu', input_shape =(28,28,1))) model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation = "relu")) model.add(Dropout(0.5)) model.add(Dense(10, activation = "softmax"))
Digit Recognizer
3,821,285
import re import seaborn as sns import matplotlib.pyplot as plt from collections import defaultdict, Counter from sklearn.feature_extraction.text import CountVectorizer import nltk from nltk.corpus import stopwords from wordcloud import WordCloud from nltk.tokenize import word_tokenize<set_options>
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0 )
Digit Recognizer
3,821,285
nltk.download('stopwords', quiet=True) stopwords = stopwords.words('english') sns.set(style="white", font_scale=1.2) plt.rcParams["figure.figsize"] = [10,8] pd.set_option("display.max_columns", None) pd.set_option("display.max_rows", None)<load_from_csv>
EarlyStopping, ReduceLROnPlateau) epochs = 80; batch_size = 1024 checkpoint = ModelCheckpoint('../working/Resnet50-visible.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='min', save_weights_only=True) reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=4, verbose=1, mode='min', epsilon=0.0001) early = EarlyStopping(monitor="val_loss", mode="min", patience=9) callbacks_list = [checkpoint, reduceLROnPlat, early]
Digit Recognizer
3,821,285
train = pd.read_csv(".. /input/nlp-getting-started/train.csv") test = pd.read_csv(".. /input/nlp-getting-started/test.csv" )<feature_engineering>
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"] )
Digit Recognizer
3,821,285
null_counts = pd.DataFrame({"Num_Null": train.isnull().sum() }) null_counts["Pct_Null"] = null_counts["Num_Null"] / train.count() * 100 null_counts<count_values>
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(x_train )
Digit Recognizer
3,821,285
len(train["keyword"].value_counts() )<count_values>
batch_size = 1024 epochs = 80 history = model.fit_generator(datagen.flow(x_train,y_train, batch_size=batch_size), epochs = epochs, validation_data =(x_valid,y_valid), verbose = 1, steps_per_epoch=x_train.shape[0] // batch_size , callbacks=callbacks_list )
Digit Recognizer
3,821,285
disaster_keywords = train.loc[train["target"] == 1]["keyword"].value_counts() nondisaster_keywords = train.loc[train["target"] == 0]["keyword"].value_counts() <feature_engineering>
model.load_weights('../working/Resnet50-visible.h5') results = model.predict(test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label")
Digit Recognizer
3,821,285
<sort_values><EOS>
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("submission.csv",index=False )
Digit Recognizer
7,405,218
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<count_values>
import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from keras.utils import to_categorical from keras.preprocessing.image import ImageDataGenerator
Digit Recognizer
7,405,218
len(train["location"].value_counts() )<remove_duplicates>
np.random.seed(1) X_raw = pd.read_csv("../input/digit-recognizer/train.csv") X_test_raw = pd.read_csv("../input/digit-recognizer/test.csv") y = X_raw["label"] X = X_raw.drop(labels = ["label"],axis = 1) X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.25, random_state=0) WIDTH=28 HEIGHT=28 NUM_CLASSES=10 X_train = X_train.values y_train = y_train.values X_valid = X_valid.values y_valid = y_valid.values y_train_oh = to_categorical(y_train, num_classes = NUM_CLASSES) y_valid_oh = to_categorical(y_valid, num_classes = NUM_CLASSES) X_test = X_test_raw.values
Digit Recognizer
7,405,218
def create_corpus(target): corpus = [] for w in train.loc[train["target"] == target]["text"].str.split() : for i in w: corpus.append(i) return corpus def create_corpus_dict(target): corpus = create_corpus(target) stop_dict = defaultdict(int) for word in corpus: if word in stopwords: stop_dict[word] += 1 return sorted(stop_dict.items() , key=lambda x:x[1], reverse=True )<count_duplicates>
def scaleData(X): n_max = X_train.max() X = X/n_max return X def reshape_channel(X): return np.expand_dims(X.reshape(-1,HEIGHT,WIDTH),-1) def preprocessData(X): X = scaleData(X) X = reshape_channel(X) return X
Digit Recognizer
7,405,218
corpus_disaster, corpus_non_disaster = create_corpus(1), create_corpus(0) counter_disaster, counter_non_disaster = Counter(corpus_disaster), Counter(corpus_non_disaster) x_disaster, y_disaster, x_non_disaster, y_non_disaster = [], [], [], [] counter = 0 for word, count in counter_disaster.most_common() [0:100]: if(word not in stopwords and counter < 15): counter += 1 x_disaster.append(word) y_disaster.append(count) counter = 0 for word, count in counter_non_disaster.most_common() [0:100]: if(word not in stopwords and counter < 15): counter += 1 x_non_disaster.append(word) y_non_disaster.append(count) fig, ax = plt.subplots(1,2, figsize=(20,8)) sns.barplot(x=y_disaster, y=x_disaster, orient='h', palette="Reds_d", ax=ax[0]) sns.barplot(x=y_non_disaster, y=x_non_disaster, orient='h', palette="Blues_d", ax=ax[1]) ax[0].set_title("Top 15 Non-Stopwords - Disaster Tweets") ax[0].set_xlabel("Word Frequency") ax[1].set_title("Top 15 Non-Stopwords - Non-Disaster Tweets") ax[1].set_xlabel("Word Frequency") plt.tight_layout() plt.show()<feature_engineering>
optimizer = Adam(learning_rate=0.0001, beta_1=0.9, beta_2=0.9999, amsgrad=True) model = Sequential() model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'same',activation ='relu',use_bias=True,input_shape =(HEIGHT,WIDTH,1))) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'same',activation ='relu',use_bias=True)) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'same',activation ='relu',use_bias=True)) model.add(MaxPool2D(pool_size=(3,3), strides=(3,3))) model.add(Dropout(0.2)) model.add(Conv2D(filters = 64, kernel_size =(5,5),padding = 'same',activation ='relu',use_bias=True)) model.add(Conv2D(filters = 64, kernel_size =(5,5),padding = 'same',activation ='relu',use_bias=True)) model.add(Conv2D(filters = 64, kernel_size =(5,5),padding = 'same',activation ='relu',use_bias=True)) model.add(MaxPool2D(pool_size=(5,5), strides=(5,5))) model.add(Flatten()) model.add(Dropout(0.2)) model.add(Dense(64, activation = "relu")) model.add(Dense(64, activation = "relu")) model.add(Dense(NUM_CLASSES, activation = "softmax")) model.compile(optimizer ='adam', loss = "categorical_crossentropy", metrics=["accuracy"]) model.summary()
Digit Recognizer
7,405,218
def bigrams(target): corpus = train[train["target"] == target]["text"] count_vec = CountVectorizer(ngram_range=(2, 2)).fit(corpus) bag_of_words = count_vec.transform(corpus) sum_words = bag_of_words.sum(axis=0) words_freq = [(word, sum_words[0, idx])for word, idx in count_vec.vocabulary_.items() ] words_freq =sorted(words_freq, key = lambda x: x[1], reverse=True) return words_freq<feature_engineering>
datagen = ImageDataGenerator( rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, ) it_train = datagen.flow(preprocessData(X_train), y_train_oh) it_valid = datagen.flow(preprocessData(X_valid), y_valid_oh )
Digit Recognizer
7,405,218
def remove_pattern(input_txt, pattern): r = re.findall(pattern, input_txt) for i in r: input_txt = re.sub(i, '', input_txt) return input_txt<feature_engineering>
hist = model.fit_generator(it_train,validation_data=it_valid,callbacks=[lrate],epochs=n_epoch)
Digit Recognizer
7,405,218
<feature_engineering><EOS>
y_pred = model.predict(preprocessData(X_test)) y_pred = np.argmax(y_pred,axis = 1) showImg(X_test,y_pred,4,4) submission = pd.DataFrame({'ImageId':range(1,28001),'Label':y_pred}) submission.to_csv('submission.csv',index=False )
Digit Recognizer
7,753,580
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<feature_engineering>
import numpy as np import pandas as pd from keras.utils.np_utils import to_categorical from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D from keras.optimizers import RMSprop from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import ReduceLROnPlateau from sklearn.metrics import confusion_matrix,accuracy_score from sklearn.model_selection import train_test_split import matplotlib import matplotlib.pyplot as plt
Digit Recognizer
7,753,580
train['tweet'] = train['tweet'].apply(lambda x: ' '.join([w for w in x.split() if len(w)>3])) test['tweet'] = test['tweet'].apply(lambda x: ' '.join([w for w in x.split() if len(w)>3])) train.head() <data_type_conversions>
def print_metrics(y_train,y_pred): conf_mx = confusion_matrix(y_train,y_pred) print(conf_mx) print("------------------------------------------") print(" Accuracy : ", accuracy_score(y_train,y_pred)) print("------------------------------------------") def shift_image(X, dx, dy,length=28): X=X.reshape(length,length) X = np.roll(X, dy, axis=0) X = np.roll(X, dx, axis=1) return X.reshape([-1]) def print_image(flat_image,length=28): plt.imshow(flat_image.reshape(length, length), cmap = matplotlib.cm.binary,interpolation="nearest") plt.axis("off") plt.show()
Digit Recognizer
7,753,580
train['tweet'] = train['tweet'].str.lower() test['tweet'] = test['tweet'].str.lower()<string_transform>
train = pd.read_csv(".. /input/digit-recognizer/train.csv") test = pd.read_csv(".. /input/digit-recognizer/test.csv" ).values y = train["label"].values X = train.drop(labels = ["label"],axis = 1 ).values print("Value Counts :") print(train["label"].value_counts()) del train X = X / 255.0 test = test / 255.0 print("dim(X)= ",X.shape) print("dim(y)= ",y.shape) print("dim(test)= ",test.shape )
Digit Recognizer
7,753,580
set(stopwords.words('english')) stops = set(stopwords.words('english'))<feature_engineering>
DATA_AUGMENTED_WITH_SHIFT = False
Digit Recognizer
7,753,580
train['tokenized_sents'] = train.apply(lambda row: nltk.word_tokenize(row['tweet']), axis=1) test['tokenized_sents'] = test.apply(lambda row: nltk.word_tokenize(row['tweet']), axis=1) <drop_column>
if DATA_AUGMENTED_WITH_SHIFT: X_augmented = [image for image in X] y_augmented = [label for label in y] for dx, dy in(( 1,1),(-1,-1),(-1,1),(1,-1)) : for image, label in zip(X, y): X_augmented.append(shift_image(image, dx, dy)) y_augmented.append(label) X_augmented = np.array(X_augmented) y_augmented = np.array(y_augmented) print(" X_augmented Dimension : ",X_augmented.shape) shuffle_idx = np.random.permutation(len(X_augmented)) X_augmented = X_augmented[shuffle_idx] y_augmented = y_augmented[shuffle_idx] X_train = X_augmented.reshape(-1,28,28,1) test = test.reshape(-1,28,28,1) Y_train = to_categorical(y_augmented, num_classes = 10) else: X_train = X.reshape(-1,28,28,1) test = test.reshape(-1,28,28,1) Y_train = to_categorical(y, num_classes = 10) print("dim(X_train)= ",X_train.shape) print("dim(Y_train)= ",Y_train.shape) print("dim(test)= ",test.shape )
Digit Recognizer
7,753,580
def remove_stops(row): my_list = row['tokenized_sents'] meaningful_words = [w for w in my_list if not w in stops] return(meaningful_words )<drop_column>
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.1, random_state=42) datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train )
Digit Recognizer
7,753,580
train['clean_tweet'] = train.apply(remove_stops, axis=1) test['clean_tweet'] = test.apply(remove_stops, axis=1) train.drop(["tweet","tokenized_sents"], axis = 1, inplace = True) test.drop(["tweet","tokenized_sents"], axis = 1, inplace = True) <string_transform>
model = Sequential() model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu', input_shape =(28,28,1))) model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation = "relu")) model.add(Dropout(0.5)) model.add(Dense(10, activation = "softmax"))
Digit Recognizer
7,753,580
def rejoin_words(row): my_list = row['clean_tweet'] joined_words =(" ".join(my_list)) return joined_words train['clean_tweet'] = train.apply(rejoin_words, axis=1) test['clean_tweet'] = test.apply(rejoin_words, axis=1) train.head()<import_modules>
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0) model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"]) learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy', patience=3, verbose=1, factor=0.5, min_lr=0.00001)
Digit Recognizer
7,753,580
import gc import time import math import random import warnings<set_options>
epochs = 30 batch_size = 71 model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size), epochs = epochs, validation_data =(X_val,Y_val), verbose = 1, steps_per_epoch=X_train.shape[0] // batch_size , callbacks=[learning_rate_reduction])
Digit Recognizer
7,753,580
warnings.filterwarnings("ignore" )<import_modules>
Y_pred = model.predict(X_val) Y_pred_classes = np.argmax(Y_pred,axis = 1) Y_true = np.argmax(Y_val,axis = 1) print_metrics(Y_true, Y_pred_classes )
Digit Recognizer
7,753,580
import string import folium from colorama import Fore, Back, Style, init <import_modules>
results = model.predict(test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label") submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("submission.csv",index=False )
Digit Recognizer
4,449,954
import scipy as sp import networkx as nx from pandas import Timestamp from PIL import Image from IPython.display import SVG from keras.utils import model_to_dot import requests from IPython.display import HTML<set_options>
train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv')
Digit Recognizer
4,449,954
tqdm.pandas()<import_modules>
train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv')
Digit Recognizer
4,449,954
import plotly.express as px import plotly.graph_objects as go import plotly.figure_factory as ff from plotly.subplots import make_subplots import transformers import tensorflow as tf<import_modules>
Y_train = train['label'] X_train = train.drop(labels=['label'],axis=1) fig, ax = plt.subplots(figsize=(16,8)) sns.countplot(Y_train,ax=ax )
Digit Recognizer
4,449,954
from tensorflow.keras.callbacks import Callback from sklearn.metrics import accuracy_score, roc_auc_score from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, CSVLogger <import_modules>
Y_train.value_counts()
Digit Recognizer
4,449,954
from tensorflow.keras.models import Model from kaggle_datasets import KaggleDatasets from tensorflow.keras.optimizers import Adam from tokenizers import BertWordPieceTokenizer from tensorflow.keras.layers import Dense, Input, Dropout, Embedding from tensorflow.keras.layers import LSTM, GRU, Conv1D, SpatialDropout1D <import_modules>
X_train = X_train / 255. test = test / 255. X_train = X_train.values.reshape(-1,28,28,1) test = test.values.reshape(-1,28,28,1 )
Digit Recognizer
4,449,954
from tensorflow.keras import layers from tensorflow.keras import optimizers from tensorflow.keras import activations from tensorflow.keras import constraints from tensorflow.keras import initializers from tensorflow.keras import regularizers import tensorflow.keras.backend as K from tensorflow.keras.layers import * from tensorflow.keras.optimizers import * from tensorflow.keras.activations import * from tensorflow.keras.constraints import * from tensorflow.keras.initializers import * from tensorflow.keras.regularizers import * <import_modules>
Y_train = to_categorical(Y_train,num_classes = 10 )
Digit Recognizer
4,449,954
from sklearn import metrics from sklearn.utils import shuffle from gensim.models import Word2Vec from sklearn.cluster import KMeans from sklearn.decomposition import PCA from sklearn.feature_extraction.text import TfidfVectorizer,CountVectorizer,HashingVectorizer from sklearn.model_selection import train_test_split from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import MultinomialNB from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import MultinomialNB from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.ensemble import RandomForestClassifier from xgboost import XGBClassifier from sklearn.tree import DecisionTreeClassifier <import_modules>
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1 )
Digit Recognizer
4,449,954
from nltk.stem.wordnet import WordNetLemmatizer from nltk.tokenize import word_tokenize from nltk.tokenize import TweetTokenizer import nltk from textblob import TextBlob from nltk.corpus import wordnet from nltk.corpus import stopwords from nltk import WordNetLemmatizer from nltk.stem import WordNetLemmatizer,PorterStemmer from wordcloud import WordCloud, STOPWORDS from nltk.sentiment.vader import SentimentIntensityAnalyzer from nltk.tokenize import sent_tokenize, word_tokenize <choose_model_class>
model = Sequential() model.add(Conv2D(filters = 128, kernel_size =(5,5),padding = 'Same', activation ='relu', input_shape =(28,28,1))) model.add(Conv2D(filters = 64, kernel_size =(5,5),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(Conv2D(filters = 32, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation = "relu")) model.add(Dropout(0.5)) model.add(Dense(10, activation = "softmax")) model.compile(optimizer=RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0),loss = "categorical_crossentropy", metrics=["accuracy"]) model.summary()
Digit Recognizer
4,449,954
stopword=set(STOPWORDS) lem = WordNetLemmatizer() tokenizer=TweetTokenizer() np.random.seed(0) random_state = 42<install_modules>
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train )
Digit Recognizer
4,449,954
!pip install GPUtil <import_modules>
callbacks_list = [ ModelCheckpoint(filepath='./my_model.h5',monitor='val_loss'), ReduceLROnPlateau(monitor='val_acc', patience=5, verbose=2, factor=0.5, min_lr=0.00001), TensorBoard("logs")] epochs = 20 batch_size =64 history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size), epochs = epochs, validation_data =(X_val,Y_val), steps_per_epoch=X_train.shape[0] // batch_size , callbacks=callbacks_list )
Digit Recognizer
4,449,954
from torch import nn from transformers import AdamW, BertConfig, BertModel, BertTokenizer from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset, random_split from transformers import get_linear_schedule_with_warmup from sklearn.metrics import f1_score, accuracy_score<set_options>
%load_ext tensorboard.notebook %tensorboard --logdir logs
Digit Recognizer
4,449,954
def free_gpu_cache() : print("Initial GPU Usage") gpu_usage() torch.cuda.empty_cache() cuda.select_device(0) cuda.close() cuda.select_device(0) for obj in gc.get_objects() : if torch.is_tensor(obj): del obj gc.collect() print("GPU Usage after emptying the cache") gpu_usage()<import_modules>
results = model.predict(test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label" )
Digit Recognizer
4,449,954
<load_from_csv><EOS>
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("predict.csv",index=False )
Digit Recognizer
7,407,207
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<set_options>
tf.__version__
Digit Recognizer
7,407,207
if torch.cuda.is_available() : device = torch.device("cuda") else: device = torch.device("cpu") device<count_duplicates>
train = pd.read_csv(r'/kaggle/input/digit-recognizer/train.csv') test = pd.read_csv(r'/kaggle/input/digit-recognizer/test.csv') train.shape, test.shape
Digit Recognizer
7,407,207
dupli_sum = train.duplicated().sum() if(dupli_sum>0): print(dupli_sum, " duplicates found removing...") train = train.loc[False==train.duplicated() , :] else: print("no duplicates found") train<prepare_x_and_y>
X_train = x_train = train.drop(['label'],1) Y_train = train['label'] x_test = test
Digit Recognizer
7,407,207
X_train = train["text"].values y_train = train["target"].values<load_pretrained>
X_train = X_train.astype('float32') x_test = x_test.astype('float32') X_train = X_train/255 x_test = x_test/255
Digit Recognizer
7,407,207
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True) lens = [] for text in X_train: encoded_dict = tokenizer.encode_plus(text, add_special_tokens=True, return_tensors='pt') lens.append(encoded_dict['input_ids'].size() [1] )<categorify>
Y_train= tf.keras.utils.to_categorical(Y_train, 10) Y_train.shape
Digit Recognizer
7,407,207
sequence_length = 58 X_train_tokens = [] for text in X_train: encoded_dict = tokenizer.encode_plus(text, add_special_tokens=True, max_length=sequence_length, padding="max_length", return_tensors='pt', truncation=True) X_train_tokens.append(encoded_dict['input_ids'] )<concatenate>
x_train, val_x, y_train, val_y = train_test_split(X_train, Y_train, test_size=0.20 )
Digit Recognizer
7,407,207
X_train_tokens = torch.cat(X_train_tokens, dim=0) y_train = torch.tensor(y_train )<train_model>
es = EarlyStopping(monitor='loss', patience=12) filepath="/kaggle/working/bestmodel.h5" md = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min' )
Digit Recognizer
7,407,207
print('Original: ', X_train[5]) print('Tokenization: ', X_train_tokens[5] )<split>
datagen = ImageDataGenerator(zoom_range = 0.1, height_shift_range = 0.1, width_shift_range = 0.1, rotation_range = 10 )
Digit Recognizer
7,407,207
batch_size = 32 dataset = TensorDataset(X_train_tokens, y_train.float()) train_size = int(0.80 * len(dataset)) val_size = len(dataset)- train_size train_set, val_set = random_split(dataset, [train_size, val_size]) train_dataloader = DataLoader(train_set, sampler=RandomSampler(train_set), batch_size=batch_size) validation_dataloader = DataLoader(val_set, sampler=RandomSampler(val_set), batch_size=batch_size )<categorify>
epochs = 30 num_classes = 10 batch_size = 30 input_shape =(28, 28, 1) adam = tf.keras.optimizers.Adam(learning_rate=0.0001, beta_1=0.9, beta_2=0.999, amsgrad=False )
Digit Recognizer
7,407,207
bert = BertModel.from_pretrained("bert-base-uncased") bert.to(device) for batch in train_dataloader: batch_features = batch[0].to(device) bert_output = bert(input_ids=batch_features) print("bert output: ", type(bert_output), len(bert_output)) print("first entry: ", type(bert_output[0]), bert_output[0].size()) print("second entry: ", type(bert_output[1]), bert_output[1].size()) break<choose_model_class>
model = Sequential() model.add(Conv2D(32,(3, 3), padding='same', input_shape=input_shape, activation= tf.nn.relu)) model.add(Conv2D(32,(3, 3), padding='same', activation= tf.nn.relu)) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(64,(3, 3), padding='same', activation= tf.nn.relu)) model.add(MaxPool2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(1024, activation=tf.nn.relu)) model.add(Dropout(0.25)) model.add(Dense(512, activation=tf.nn.relu)) model.add(Dropout(0.25)) model.add(Dense(256, activation=tf.nn.relu)) model.add(Dropout(0.5)) model.add(Dense(10, activation= tf.nn.softmax)) model.compile(optimizer= adam, loss= tf.keras.losses.categorical_crossentropy, metrics=["accuracy"]) model.summary()
Digit Recognizer
7,407,207
class BertClassifier(nn.Module): def __init__(self): super(BertClassifier, self ).__init__() self.bert = BertModel.from_pretrained('bert-base-uncased') self.linear = nn.Linear(768, 1) self.sigmoid = nn.Sigmoid() def forward(self, tokens): bert_output = self.bert(input_ids=tokens) linear_output = self.linear(bert_output[1]) proba = self.sigmoid(linear_output) return proba<compute_test_metric>
History = model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size), epochs = epochs, validation_data =(val_x, val_y), callbacks = [es,md], shuffle= True )
Digit Recognizer
7,407,207
def eval(y_batch, probas): preds_batch_np = np.round(probas.cpu().detach().numpy()) y_batch_np = y_batch.cpu().detach().numpy() acc = accuracy_score(y_true=y_batch_np, y_pred=preds_batch_np) f1 = f1_score(y_true=y_batch_np, y_pred=preds_batch_np, average='weighted') return acc, f1 <train_model>
model1 = load_model("/kaggle/working/bestmodel.h5" )
Digit Recognizer
7,407,207
def train(model, optimizer, scheduler, epochs, name): history = [] best_f1 = 0 model.train() for epoch in range(epochs): print("=== Epoch: ", epoch+1, " / ", epochs, " ===") acc_total = 0 f1_total = 0 for it, batch in enumerate(train_dataloader): x_batch, y_batch = [batch[0].to(device), batch[1].to(device)] probas = torch.flatten(model(tokens=x_batch)) acc_f1_batch = eval(y_batch, probas) acc_total, f1_total = acc_total + acc_f1_batch[0], f1_total + acc_f1_batch[1] model.zero_grad() loss_func = nn.BCELoss() batch_loss = loss_func(probas, y_batch) batch_loss.backward() optimizer.step() scheduler.step() acc_total = acc_total/len(train_dataloader) f1_total = f1_total/len(train_dataloader) print("accuracy: ", acc_total, " f1: ", f1_total) acc_val_total = 0 f1_val_total = 0 for batch in validation_dataloader: x_batch, y_batch = [batch[0].to(device), batch[1].to(device)] with torch.no_grad() : probas = torch.flatten(model(tokens=x_batch)) acc_f1_val_batch = eval(y_batch, probas) acc_val_total, f1_val_total = acc_val_total + acc_f1_val_batch[0], f1_val_total + acc_f1_val_batch[1] acc_val_total = acc_val_total/len(validation_dataloader) f1_val_total = f1_val_total/len(validation_dataloader) print("validation accuracy: ", acc_val_total, " validation f1: ", f1_val_total, " ") if(f1_val_total>best_f1): torch.save(model, name+".pt") best_f1 = f1_val_total history.append({"acc":acc_total, "f1":f1_total, "acc_val":acc_val_total, "f1_val":f1_val_total}) return [torch.load(name+".pt"), history]<normalization>
pred = model1.predict(x_test) pred_class = model1.predict_classes(x_test )
Digit Recognizer
7,407,207
<train_model><EOS>
submissions=pd.DataFrame({"ImageId": list(range(1,len(pred_class)+1)) , "Label": pred_class}) submissions.to_csv("submissions.csv", index=False, header=True) submissions
Digit Recognizer
967,865
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<create_dataframe>
%matplotlib inline np.random.seed(2) sns.set(style='white', context='notebook', palette='deep' )
Digit Recognizer
967,865
history_df = pd.DataFrame(history) history_df<load_from_csv>
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv" )
Digit Recognizer
967,865
X_test = pd.read_csv(".. /input/nlp-getting-started/test.csv")["text"] X_test_tokens = [] for text in X_test: encoded_dict = tokenizer.encode_plus(text, add_special_tokens=True, max_length=sequence_length, padding="max_length", return_tensors='pt', truncation=True) X_test_tokens.append(encoded_dict['input_ids']) X_test_tokens = torch.cat(X_test_tokens, dim=0) test_set = TensorDataset(X_test_tokens) test_dataloader = DataLoader(test_set, sampler=SequentialSampler(test_set), batch_size=batch_size )<load_from_csv>
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv" )
Digit Recognizer
967,865
X_test = pd.read_csv(".. /input/nlp-getting-started/test.csv")["text"] X_test_tokens = [] for text in X_test: encoded_dict = tokenizer.encode_plus(text, add_special_tokens=True, max_length=sequence_length, padding="max_length", return_tensors='pt', truncation=True) X_test_tokens.append(encoded_dict['input_ids']) X_test_tokens = torch.cat(X_test_tokens, dim=0) test_set = TensorDataset(X_test_tokens) test_dataloader = DataLoader(test_set, sampler=SequentialSampler(test_set), batch_size=batch_size )<categorify>
X_train = X_train / 255.0 test = test / 255.0
Digit Recognizer
967,865
all_preds = [] for batch in test_dataloader: x_batch = batch[0].to(device) with torch.no_grad() : probas = baseline_bert_clf(tokens=x_batch) preds = np.round(probas.cpu().detach().numpy() ).astype(int ).flatten() all_preds.extend(preds )<save_to_csv>
X_train = X_train.values.reshape(-1,28,28,1) test = test.values.reshape(-1,28,28,1 )
Digit Recognizer
967,865
challenge_pred = pd.concat([pd.read_csv("../input/nlp-getting-started/sample_submission.csv")["id"], pd.Series(all_preds)], axis=1) challenge_pred.columns = ['id', 'target'] challenge_pred.to_csv("submission.csv", index=False)<import_modules>
Y_train = to_categorical(Y_train, num_classes = 10 )
Digit Recognizer
967,865
import numpy as np import pandas as pd from fastai.text.all import * import re<load_from_csv>
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=2 )
Digit Recognizer
967,865
dir_path = "/kaggle/input/nlp-getting-started/" train_df = pd.read_csv(dir_path + "train.csv") test_df = pd.read_csv(dir_path + "test.csv" )<drop_column>
model = Sequential() model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', kernel_initializer='he_normal', activation ='relu', input_shape =(28,28,1))) model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', kernel_initializer='he_normal', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.2)) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', kernel_initializer='he_normal', activation ='relu')) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', kernel_initializer='he_normal', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(0.2)) model.add(Conv2D(128,(3, 3), activation='relu',padding='same',kernel_initializer='he_normal')) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(BatchNormalization()) model.add(Dropout(0.25)) model.add(Dense(10, activation = "softmax"))
Digit Recognizer
967,865
train_df = train_df.drop(columns=["id", "keyword", "location"] )<count_values>
optimizer = Adam(lr=0.003 )
Digit Recognizer
967,865
train_df["target"].value_counts()<feature_engineering>
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"] )
Digit Recognizer
967,865
def remove_URL(text): url = re.compile(r'https?://\S+|www\.\S+') return url.sub(r'',text) train_df["text"] = train_df["text"].apply(remove_URL) test_df["text"] = test_df["text"].apply(remove_URL )<feature_engineering>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.000001 )
Digit Recognizer
967,865
def remove_html(text): html=re.compile(r'<.*?>') return html.sub(r'',text) train_df["text"] = train_df["text"].apply(remove_html) test_df["text"] = test_df["text"].apply(remove_html )<drop_column>
epochs = 35 batch_size = 64
Digit Recognizer
967,865
def remove_emoji(text): emoji_pattern = re.compile("[" u"\U0001F600-\U0001F64F" u"\U0001F300-\U0001F5FF" u"\U0001F680-\U0001F6FF" u"\U0001F1E0-\U0001F1FF" u"\U00002702-\U000027B0" u"\U000024C2-\U0001F251" "]+", flags=re.UNICODE) return emoji_pattern.sub(r'', text) train_df["text"] = train_df["text"].apply(remove_emoji) test_df["text"] = test_df["text"].apply(remove_emoji )<string_transform>
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=15, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train )
Digit Recognizer
967,865
train_df["text"].apply(lambda x:len(x.split())).plot(kind="hist");<import_modules>
history = model.fit_generator(datagen.flow(X_train,Y_train, batch_size=batch_size), epochs = epochs, validation_data =(X_val,Y_val), verbose = 2, steps_per_epoch=X_train.shape[0] // batch_size , callbacks=[learning_rate_reduction] )
Digit Recognizer
967,865
from transformers import AutoTokenizer, AutoModelForSequenceClassification<load_pretrained>
results = model.predict(test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label" )
Digit Recognizer
967,865
tokenizer = AutoTokenizer.from_pretrained("roberta-large" )<string_transform>
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("cnn_mnist_datagen.csv",index=False )
Digit Recognizer
4,048,733
train_tensor = tokenizer(list(train_df["text"]), padding="max_length", truncation=True, max_length=30, return_tensors="pt")["input_ids"]<categorify>
Train = pd.read_csv("../input/train.csv") Test = pd.read_csv("../input/test.csv")
Digit Recognizer
4,048,733
class TweetDataset: def __init__(self, tensors, targ, ids): self.text = tensors[ids, :] self.targ = targ[ids].reset_index(drop=True) def __len__(self): return len(self.text) def __getitem__(self, idx): t = self.text[idx] y = self.targ[idx] return t, tensor(y )<split>
y_train = Train['label'] X_train = Train.drop(labels='label', axis=1) y_train.value_counts()
Digit Recognizer
4,048,733
train_ids, valid_ids = RandomSplitter()(train_df) target = train_df["target"] train_ds = TweetDataset(train_tensor, target, train_ids) valid_ds = TweetDataset(train_tensor, target, valid_ids) train_dl = DataLoader(train_ds, bs=64) valid_dl = DataLoader(valid_ds, bs=512) dls = DataLoaders(train_dl, valid_dl ).to("cuda" )<choose_model_class>
X_train = X_train/255.0 Test = Test/255.0
Digit Recognizer
4,048,733
bert = AutoModelForSequenceClassification.from_pretrained("roberta-large", num_labels=2 ).train().to("cuda") class BertClassifier(Module): def __init__(self, bert): self.bert = bert def forward(self, x): return self.bert(x ).logits model = BertClassifier(bert )<choose_model_class>
y_train = to_categorical(y_train, num_classes = 10 )
Digit Recognizer