kernel_id: int64, values 24.2k to 23.3M
prompt: string, lengths 8 to 1.85M
completion: string, lengths 1 to 182k
comp_name: string, lengths 5 to 57
8,872,016
def bert_encode(texts, bert_layer, max_len=128): vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy() do_lower_case = bert_layer.resolved_object.do_lower_case.numpy() tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case) all_tokens = [] all_masks = [] all_segments = [] for text in texts: text = tokenizer.tokenize(text) text = text[:max_len - 2] input_sequence = ["[CLS]"] + text + ["[SEP]"] pad_len = max_len - len(input_sequence) tokens = tokenizer.convert_tokens_to_ids(input_sequence) input_ids = tokens + [0] * pad_len all_tokens.append(input_ids) masks = [1] * len(input_sequence) + [0] * pad_len all_masks.append(masks) segments = [0] * max_len all_segments.append(segments) return np.array(all_tokens), np.array(all_masks), np.array(all_segments) def build_model(bert_layer, max_len=128, lr=1e-5): input_word_ids = tf.keras.layers.Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids") input_mask = tf.keras.layers.Input(shape=(max_len,), dtype=tf.int32, name="input_mask") segment_ids = tf.keras.layers.Input(shape=(max_len,), dtype=tf.int32, name="segment_ids") pooled_output, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids]) out = Dense(1, activation="sigmoid")(pooled_output) model = Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=out) adam = tf.keras.optimizers.Adam(lr) model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['accuracy']) return model def plot_curve(history): plt.plot(history.history['accuracy'], label='accuracy') plt.plot(history.history['val_accuracy'], label='val_accuracy') plt.xlabel('Epoch') plt.ylabel('Accuracy') plt.ylim([0.5, 1]) plt.legend() plt.show()<choose_model_class>
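A minimal usage sketch showing how these three helpers chain together (assumes the tensorflow_hub module and the bert tokenization import from the surrounding cells; the example texts are illustrative):

import numpy as np
import tensorflow_hub as hub

bert_layer = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1", trainable=True)
texts = ["Forest fire near La Ronge Sask. Canada", "I love fruits"]
ids, masks, segments = bert_encode(texts, bert_layer, max_len=128)  # token ids, attention masks, segment ids
model = build_model(bert_layer, max_len=128, lr=1e-5)
probs = model.predict([ids, masks, segments])  # sigmoid probabilities in [0, 1]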
datagen = ImageDataGenerator(rotation_range=10, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.1) datagen.fit(X_train)
Digit Recognizer
8,872,016
%%time module_url = "https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/1" bert_layer = hub.KerasLayer(module_url, trainable=True )<load_from_csv>
history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=100), steps_per_epoch=len(X_train)//100, epochs=20, validation_data=(X_test, Y_test), callbacks=[reduce_lr])
Digit Recognizer
8,872,016
train = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv") train_input = bert_encode(train.text.values, bert_layer, max_len=128) train_labels = np.array(train.target )<load_from_csv>
Digit Recognizer
8,872,016
test = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv") test_input = bert_encode(test.text.values, bert_layer, max_len=128) model.load_weights('model.h5') test_pred = model.predict(test_input )<save_to_csv>
score = model.evaluate(X_test, Y_test, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1] )
Digit Recognizer
8,872,016
submission = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv") submission['target'] = np.round(test_pred ).astype('int') submission.to_csv('submission.csv', index=False) submission.groupby('target' ).count()<load_from_csv>
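Note that np.round on sigmoid outputs is an implicit 0.5 threshold; an equivalent, more explicit form (a sketch, with threshold as a hypothetical name):

import numpy as np

threshold = 0.5
labels = (test_pred > threshold).astype(int).ravel()  # matches np.round for p in [0, 1], except exactly at 0.5 where np.round gives 0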
test_data = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' )
Digit Recognizer
8,872,016
for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename)) data = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv') data.sample(10 )<count_duplicates>
test_data = test_data.values test_data = test_data.reshape(28000, 28, 28,1) test_data = test_data.astype('float32') test_data /= 255 print("Test data matrix shape", test_data.shape )
Digit Recognizer
8,872,016
text = data.text duplicates = data[text.isin(text[text.duplicated() ])].sort_values(by='text') conflicting_check = pd.DataFrame(duplicates.groupby(['text'] ).target.mean()) conflicting_check.sample(10 )<filter>
y_pred = model.predict_classes(test_data, verbose=0) print(y_pred )
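predict_classes exists only on Sequential models and was removed in TensorFlow 2.6; on a newer TF the equivalent (a sketch) is:

import numpy as np

probs = model.predict(test_data, verbose=0)  # softmax probabilities, shape (n_samples, 10)
y_pred = np.argmax(probs, axis=1)            # same result as predict_classes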
Digit Recognizer
8,872,016
conflicting = conflicting_check.loc[(conflicting_check.target != 1)&(conflicting_check.target != 0)].index data = data.drop(data[text.isin(conflicting)].index) print('Conflicting samples count:', conflicting.shape[0] )<set_options>
i = 9713 predicted_value = np.argmax(model.predict(X_test[i].reshape(1,28, 28,1))) print('predicted value:',predicted_value) plt.imshow(X_test[i].reshape([28, 28]), cmap='Greys_r' )
Digit Recognizer
8,872,016
if torch.cuda.is_available(): device = torch.device("cuda") print('There are %d GPU(s) available.' % torch.cuda.device_count()) print('We will use the GPU:', torch.cuda.get_device_name(0)) else: print('No GPU available, using the CPU instead.') device = torch.device("cpu")<install_modules>
submissions=pd.DataFrame({"ImageId": list(range(1,len(y_pred)+1)) , "Label": y_pred}) submissions.to_csv("LeNet_CNN.csv", index=False )
Digit Recognizer
8,872,016
!pip install transformers<define_variables>
!pip install emnist
Digit Recognizer
8,872,016
sentences = data.text.values labels =data.target.values<load_pretrained>
import matplotlib.pyplot as plt,seaborn as sns,pandas as pd,numpy as np from keras.models import Sequential, load_model from keras.layers.core import Dense, Dropout, Activation from keras.layers import Conv2D, MaxPooling2D,MaxPool2D,Flatten,BatchNormalization from keras.utils import np_utils from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import ReduceLROnPlateau from emnist import extract_training_samples from emnist import extract_test_samples from keras.optimizers import Adam
Digit Recognizer
8,872,016
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True )<categorify>
x_train, y_train = extract_training_samples('digits') x_test, y_test = extract_test_samples('digits' )
Digit Recognizer
8,872,016
print(' Original: ', sentences[0]) print('Tokenized: ', tokenizer.tokenize(sentences[0])) print('Token IDs: ', tokenizer.convert_tokens_to_ids(tokenizer.tokenize(sentences[0])) )<define_variables>
in_train_data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') ex_y_train = in_train_data["label"] ex_x_train = in_train_data.drop(labels = ["label"],axis = 1 )
Digit Recognizer
8,872,016
max_len = 0 for sent in sentences: input_ids = tokenizer.encode(sent, add_special_tokens=True) max_len = max(max_len, len(input_ids)) print('Max tweet length: ', max_len )<categorify>
X_train = x_train.reshape(240000, 28, 28,1) X_test = x_test.reshape(40000, 28, 28,1) ex_x_train = ex_x_train.values.reshape(42000,28,28,1) X_train = np.vstack(( X_train, ex_x_train)) print(X_train.shape )
Digit Recognizer
8,872,016
input_ids = [] attention_masks = [] for sent in sentences: encoded_dict = tokenizer.encode_plus( sent, add_special_tokens = True, max_length = 64, pad_to_max_length = True, return_attention_mask = True, return_tensors = 'pt', ) input_ids.append(encoded_dict['input_ids']) attention_masks.append(encoded_dict['attention_mask']) input_ids = torch.cat(input_ids, dim=0) attention_masks = torch.cat(attention_masks, dim=0) labels = torch.tensor(labels) print('Original: ', sentences[0]) print('Token IDs:', input_ids[0] )<split>
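A quick sanity check of the encode_plus output is to decode a row back to text (tokenizer.decode is part of the same transformers API):

print(tokenizer.decode(input_ids[0].tolist()))           # [CLS] ... [SEP] followed by [PAD] up to max_length=64
print('non-pad tokens:', int(attention_masks[0].sum()))  # how many positions the mask keeps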
X_train = X_train.astype('float32') X_test = X_test.astype('float32' )
Digit Recognizer
8,872,016
SPLIT = 0.999 dataset = TensorDataset(input_ids, attention_masks, labels) train_size = int(SPLIT * len(dataset)) val_size = len(dataset)- train_size train_dataset, val_dataset = random_split(dataset, [train_size, val_size]) print('{:>5,} training samples'.format(train_size)) print('{:>5,} validation samples'.format(val_size))<load_pretrained>
X_train /= 255 X_test /= 255
Digit Recognizer
8,872,016
batch_size = 32 train_dataloader = DataLoader( train_dataset, sampler = RandomSampler(train_dataset), batch_size = batch_size ) validation_dataloader = DataLoader( val_dataset, sampler = SequentialSampler(val_dataset), batch_size = batch_size )<load_pretrained>
y_train = np.concatenate([y_train,ex_y_train.values]) print(y_train.shape )
Digit Recognizer
8,872,016
model = BertForSequenceClassification.from_pretrained( "bert-base-uncased", num_labels = 2, output_attentions = False, output_hidden_states = False, ) model.cuda()<choose_model_class>
n_classes = 10 print("Shape before one-hot encoding: ", y_train.shape) Y_train = np_utils.to_categorical(y_train, n_classes) Y_test = np_utils.to_categorical(y_test, n_classes) print("Shape after one-hot encoding: ", Y_train.shape )
Digit Recognizer
8,872,016
optimizer = AdamW(model.parameters() , lr = 2e-5, eps = 1e-8 )<init_hyperparams>
model = Sequential() model.add(Conv2D(filters=32, kernel_size=(5,5), padding='same', activation='relu', input_shape=(28, 28, 1))) model.add(MaxPool2D(pool_size = 2,strides=2)) model.add(Conv2D(filters=48, kernel_size=(5,5), padding='valid', activation='relu')) model.add(MaxPool2D(pool_size = 2,strides=2)) model.add(Flatten()) model.add(Dense(120, activation='relu')) model.add(Dense(84, activation='relu')) model.add(Dense(10, activation='softmax')) adam = Adam(lr=5e-4) model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=adam )
Digit Recognizer
8,872,016
epochs = 2 total_steps = len(train_dataloader)* epochs scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps = 0, num_training_steps = total_steps )<compute_test_metric>
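With num_warmup_steps=0, get_linear_schedule_with_warmup simply decays the learning rate linearly from its initial value to zero over total_steps; a minimal sketch of the multiplier it applies at each step:

def linear_schedule_factor(step, total_steps, warmup_steps=0):
    # Linear ramp from 0 to 1 during warmup, then linear decay to 0 at total_steps,
    # mirroring transformers.get_linear_schedule_with_warmup.
    if step < warmup_steps:
        return step / max(1, warmup_steps)
    return max(0.0, (total_steps - step) / max(1, total_steps - warmup_steps))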
reduce_lr = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.2, min_lr=1e-6 )
Digit Recognizer
8,872,016
def flat_accuracy(preds, labels): pred_flat = np.argmax(preds, axis=1 ).flatten() labels_flat = labels.flatten() return np.sum(pred_flat == labels_flat)/ len(labels_flat )<define_variables>
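A tiny worked example of flat_accuracy (the logits here are made up):

import numpy as np

logits = np.array([[2.0, -1.0], [0.3, 0.9], [1.5, 0.2]])  # 3 examples, 2 classes -> predictions [0, 1, 0]
labels = np.array([0, 1, 1])
print(flat_accuracy(logits, labels))  # 2 of 3 correct -> 0.666...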
datagen = ImageDataGenerator(rotation_range=10, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.1) datagen.fit(X_train)
Digit Recognizer
8,872,016
seed_val = 42 random.seed(seed_val) np.random.seed(seed_val) torch.manual_seed(seed_val) torch.cuda.manual_seed_all(seed_val) training_stats = [] total_t0 = time.time() for epoch_i in range(0, epochs): print("") print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs)) print('Training...') t0 = time.time() total_train_loss = 0 model.train() for step, batch in enumerate(train_dataloader): if step % 40 == 0 and not step == 0: elapsed = format_time(time.time() - t0) print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed)) b_input_ids = batch[0].to(device) b_input_mask = batch[1].to(device) b_labels = batch[2].to(device) model.zero_grad() outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels) loss = outputs[0] logits = outputs[1] total_train_loss += loss.item() loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) optimizer.step() scheduler.step() avg_train_loss = total_train_loss / len(train_dataloader) training_time = format_time(time.time() - t0) print("") print(" Average training loss: {0:.2f}".format(avg_train_loss)) print(" Training epoch took: {:}".format(training_time)) print("") print("Running Validation...") t0 = time.time() model.eval() total_eval_accuracy = 0 total_eval_loss = 0 nb_eval_steps = 0 for batch in validation_dataloader: b_input_ids = batch[0].to(device) b_input_mask = batch[1].to(device) b_labels = batch[2].to(device) with torch.no_grad(): output = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask, labels=b_labels) loss = output[0] logits = output[1] total_eval_loss += loss.item() logits = logits.detach().cpu().numpy() label_ids = b_labels.to('cpu').numpy() total_eval_accuracy += flat_accuracy(logits, label_ids) avg_val_accuracy = total_eval_accuracy / len(validation_dataloader) print(" Accuracy: {0:.2f}".format(avg_val_accuracy)) avg_val_loss = total_eval_loss / len(validation_dataloader) validation_time = format_time(time.time() - t0) print(" Validation Loss: {0:.2f}".format(avg_val_loss)) print(" Validation took: {:}".format(validation_time)) training_stats.append( { 'epoch': epoch_i + 1, 'Training Loss': avg_train_loss, 'Valid. Loss': avg_val_loss, 'Valid. Accur.': avg_val_accuracy, 'Training Time': training_time, 'Validation Time': validation_time } ) print("") print("Training complete!") print("Total training took {:} (h:mm:ss)".format(format_time(time.time() - total_t0)))<create_dataframe>
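format_time is called throughout this loop but never defined in these cells; a minimal implementation consistent with how it is used (elapsed seconds in, h:mm:ss string out) would be:

import datetime

def format_time(elapsed):
    # Round elapsed seconds and render as h:mm:ss, matching the prints above.
    return str(datetime.timedelta(seconds=int(round(elapsed))))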
history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=100), steps_per_epoch=len(X_train)//100, epochs=20, validation_data=(X_test, Y_test), callbacks=[reduce_lr])
Digit Recognizer
8,872,016
pd.set_option('precision', 2) df_stats = pd.DataFrame(data=training_stats) df_stats = df_stats.set_index('epoch') df_stats<load_from_csv>
Digit Recognizer
8,872,016
test_data = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv') print('Number of test sentences: {:,} '.format(test_data.shape[0])) sentences = test_data.text.values input_ids = [] attention_masks = [] for sent in sentences: encoded_dict = tokenizer.encode_plus( sent, add_special_tokens = True, max_length = 64, pad_to_max_length = True, return_attention_mask = True, return_tensors = 'pt', ) input_ids.append(encoded_dict['input_ids']) attention_masks.append(encoded_dict['attention_mask']) input_ids = torch.cat(input_ids, dim=0) attention_masks = torch.cat(attention_masks, dim=0) batch_size = 32 prediction_data = TensorDataset(input_ids, attention_masks,) prediction_sampler = SequentialSampler(prediction_data) prediction_dataloader = DataLoader(prediction_data, sampler=prediction_sampler, batch_size=batch_size )<predict_on_test>
score = model.evaluate(X_test, Y_test, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1] )
Digit Recognizer
8,872,016
print('Predicting labels for {:,} test sentences...'.format(len(input_ids))) model.eval() predictions = [] for batch in prediction_dataloader: batch = tuple(t.to(device) for t in batch) b_input_ids, b_input_mask = batch with torch.no_grad(): outputs = model(b_input_ids, token_type_ids=None, attention_mask=b_input_mask) logits = outputs[0] logits = logits.detach().cpu().numpy() predictions.append(logits) print(' DONE.')<define_variables>
test_data = pd.read_csv('/kaggle/input/digit-recognizer/test.csv' )
Digit Recognizer
8,872,016
flat_predictions = np.concatenate(predictions, axis=0) flat_predictions = np.argmax(flat_predictions, axis=1 ).flatten()<save_to_csv>
test_data = test_data.values test_data = test_data.reshape(28000, 28, 28,1) test_data = test_data.astype('float32') test_data /= 255 print("Test data matrix shape", test_data.shape )
Digit Recognizer
8,872,016
submission = pd.read_csv('/kaggle/input/nlp-getting-started/sample_submission.csv') submission.target = flat_predictions submission.to_csv('submission.csv', index=False )<set_options>
y_pred = model.predict_classes(test_data, verbose=0) print(y_pred )
Digit Recognizer
8,872,016
pd.set_option('display.max_rows', 500) pd.set_option('display.max_columns', 500) pd.set_option('display.width', 1000) warnings.filterwarnings("ignore") eng_stopwords = set(stopwords.words("english"))<load_from_csv>
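stopwords.words("english") assumes the NLTK corpus is already on disk; on a fresh environment it needs a one-time download first:

import nltk
nltk.download('stopwords')  # one-time; afterwards stopwords.words("english") resolves
from nltk.corpus import stopwords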
i = 9713 predicted_value = np.argmax(model.predict(X_test[i].reshape(1,28, 28,1))) print('predicted value:',predicted_value) plt.imshow(X_test[i].reshape([28, 28]), cmap='Greys_r' )
Digit Recognizer
8,872,016
train_df = pd.read_csv(".. /input/nlp-getting-started/train.csv") test_df = pd.read_csv(".. /input/nlp-getting-started/test.csv") submission = pd.read_csv(".. /input/nlp-getting-started/sample_submission.csv") print("Training Shape rows = {}, columns = {}".format(train_df.shape[0],train_df.shape[1])) print("Testing Shape rows = {}, columns = {}".format(test_df.shape[0],test_df.shape[1]))<count_missing_values>
submissions=pd.DataFrame({"ImageId": list(range(1,len(y_pred)+1)) , "Label": y_pred}) submissions.to_csv("LeNet_CNN.csv", index=False )
Digit Recognizer
7,764,469
train_df.isnull().sum()<count_missing_values>
random_seed = 2020 np.random.seed(random_seed)
Digit Recognizer
7,764,469
test_df.isnull().sum()<groupby>
train = pd.read_csv('../input/digit-recognizer/train.csv') test = pd.read_csv('../input/digit-recognizer/test.csv') Y = train['label'] X = train.drop(labels="label", axis=1) X = X.values.reshape(-1, 28, 28, 1) / 255 test = test.values.reshape(-1, 28, 28, 1) / 255 print(X.shape, test.shape)
Digit Recognizer
7,764,469
keyword_dist = train_df.groupby("keyword")['target'].value_counts().unstack(fill_value=0) keyword_dist = keyword_dist.add_prefix(keyword_dist.columns.name ).rename_axis(columns=None ).reset_index()<sort_values>
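What the groupby ... value_counts().unstack(fill_value=0) chain produces, shown on a toy frame (a sketch; the data is made up):

import pandas as pd

toy = pd.DataFrame({'keyword': ['fire', 'fire', 'flood'], 'target': [1, 1, 0]})
counts = toy.groupby('keyword')['target'].value_counts().unstack(fill_value=0)
# counts has one row per keyword and one column per class value:
#          0  1
# fire     0  2
# flood    1  0
print(counts.add_prefix(counts.columns.name).reset_index())  # columns become target0, target1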
learning_rate_reduction = ReduceLROnPlateau(monitor = 'val_acc', patience = 3, verbose = 1, factor = 0.5, min_lr = 0.0001) es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=15, restore_best_weights=True) def new_model(hidden=512, learning_rate=0.00128): INPUT = Input(( 28, 28, 1)) inputs = Conv2D(64,(5, 5), activation='relu', padding='same' )(INPUT) inputs = MaxPool2D(pool_size=(3,3), strides=(1,1))(inputs) inputs = BatchNormalization()(inputs) inputs = Activation('relu' )(inputs) inputs = Dropout(0.25 )(inputs) tower_1 = Conv2D(64,(1, 1), activation='relu', padding='same' )(inputs) tower_1 = Conv2D(128,(2, 2), activation='relu', padding='same' )(tower_1) tower_1 = Dropout(0.5 )(tower_1) tower_1 = Conv2D(256,(3, 3), activation='relu', padding='same' )(tower_1) tower_1 = MaxPool2D(pool_size=(3,3), strides=(2,2))(tower_1) tower_1 = BatchNormalization()(tower_1) tower_2 = Conv2D(64,(2, 2), activation='relu', padding='same' )(inputs) tower_2 = Conv2D(128,(3, 3), activation='relu', padding='same' )(tower_2) tower_2 = Dropout(0.5 )(tower_2) tower_2 = Conv2D(256,(5, 5), activation='relu', padding='same' )(tower_2) tower_2 = MaxPool2D(pool_size=(3,3), strides=(2,2))(tower_2) tower_2 = BatchNormalization()(tower_2) tower_3 = Conv2D(64,(1, 1), activation='relu', padding='same' )(inputs) tower_3 = Conv2D(128,(3, 3), activation='relu', padding='same' )(tower_3) tower_3 = Dropout(0.5 )(tower_3) tower_3 = Conv2D(256,(5, 5), activation='relu', padding='same' )(tower_3) tower_3 = MaxPool2D(pool_size=(3,3), strides=(2,2))(tower_3) tower_3 = BatchNormalization()(tower_3) x = Add()([tower_1, tower_2, tower_3]) x = Activation('relu' )(x) x = Conv2D(256,(5, 5), activation='relu', padding='same' )(x) x = MaxPool2D(pool_size=(5,5), strides=(4,4))(x) x = BatchNormalization()(x) x = Activation('relu' )(x) x = Flatten()(x) x = Dense(hidden, activation='relu' )(x) x = Dropout(0.5 )(x) x = Dense(hidden//4, activation='relu' )(x) x = Dropout(0.5 )(x) preds = Dense(10, activation='softmax', name='preds' )(x) model = Model(inputs=INPUT, outputs=preds) optimizer = Adam(lr=learning_rate) model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['acc']) return model model = new_model()
Digit Recognizer
7,764,469
keyword_dist.sort_values('target1',ascending = False ).head(10 )<sort_values>
datagen = ImageDataGenerator(featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range=0.1, shear_range=0.02, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False )
Digit Recognizer
7,764,469
keyword_dist.sort_values('target0',ascending = False ).head(10 )<feature_engineering>
epochs = 200 batch_size = 128 print("Learning Properties: Epoch:%i \t Batch Size:%i" %(epochs, batch_size)) predict_accumulator = np.zeros(model.predict(test ).shape) accumulated_history = [] for i in range(1, 6): X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.20, shuffle=True, random_state=random_seed*i) model = new_model(512, 0.01) datagen.fit(X_train) history = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size), epochs=epochs, validation_data=(X_val, Y_val), verbose=1, steps_per_epoch=X_train.shape[0]//batch_size, callbacks=[learning_rate_reduction, es], workers=4) loss, acc = model.evaluate(X, Y) if acc > 0.99: predict_accumulator += model.predict(test)*acc accumulated_history.append(history) print("Current Predictions on fold number %i" %i) print(*np.argmax(predict_accumulator, axis=1), sep='\t' )
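The fold loop above is an accuracy-weighted soft-voting ensemble: each fold that clears the 0.99 gate contributes its class probabilities scaled by its evaluate() accuracy. The combination rule in isolation (a sketch with made-up shapes):

import numpy as np

probs = np.random.rand(3, 5, 10)               # (n_folds, n_samples, n_classes), hypothetical
accs = np.array([0.992, 0.995, 0.991])         # per-fold evaluate() accuracy
weighted_sum = (probs * accs[:, None, None]).sum(axis=0)
final_labels = np.argmax(weighted_sum, axis=1) # same rule as np.argmax(predict_accumulator, axis=1)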
Digit Recognizer
7,764,469
train_df['word_count'] = train_df['text'].apply(lambda x: len(str(x).split())) test_df['word_count'] = test_df['text'].apply(lambda x: len(str(x).split())) train_df['unique_word_count'] = train_df['text'].apply(lambda x: len(set(str(x).split()))) test_df['unique_word_count'] = test_df['text'].apply(lambda x: len(set(str(x).split()))) train_df['count_letters'] = train_df['text'].apply(lambda x: len(str(x))) test_df['count_letters'] = test_df['text'].apply(lambda x: len(str(x))) train_df['count_punctuations'] = train_df['text'].apply(lambda x: len([c for c in str(x) if c in string.punctuation])) test_df['count_punctuations'] = test_df['text'].apply(lambda x: len([c for c in str(x) if c in string.punctuation])) train_df['stop_word_count'] = train_df['text'].apply(lambda x: len([w for w in str(x).lower().split() if w in eng_stopwords])) test_df['stop_word_count'] = test_df['text'].apply(lambda x: len([w for w in str(x).lower().split() if w in eng_stopwords])) train_df['hashtag_count'] = train_df['text'].apply(lambda x: len([c for c in str(x) if c == '#'])) test_df['hashtag_count'] = test_df['text'].apply(lambda x: len([c for c in str(x) if c == '#'])) train_df['mention_count'] = train_df['text'].apply(lambda x: len([c for c in str(x) if c == '@'])) test_df['mention_count'] = test_df['text'].apply(lambda x: len([c for c in str(x) if c == '@']))<categorify>
print("Completed Training.") results = np.argmax(predict_accumulator, axis=1) results = pd.Series(results, name="Label") print("Saving prediction to output...") submission = pd.concat([pd.Series(range(1, 1+test.shape[0]), name="ImageId"), results], axis=1) submission.to_csv('submission.csv', index=False )
Digit Recognizer
7,764,469
<categorify><EOS>
end_time = time.time() total_time = int(end_time - start_time) print("Total time spent: %i hours, %i minutes, %i seconds" % ((total_time//3600), (total_time%3600)//60, (total_time%60)))
Digit Recognizer
5,786,490
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<categorify>
for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
Digit Recognizer
5,786,490
def clean(tweet): tweet = re.sub(r"\x89Û_", "", tweet) tweet = re.sub(r"\x89ÛÒ", "", tweet) tweet = re.sub(r"\x89ÛÓ", "", tweet) tweet = re.sub(r"\x89ÛÏWhen", "When", tweet) tweet = re.sub(r"\x89ÛÏ", "", tweet) tweet = re.sub(r"China\x89Ûªs", "China's", tweet) tweet = re.sub(r"let\x89Ûªs", "let's", tweet) tweet = re.sub(r"\x89Û÷", "", tweet) tweet = re.sub(r"\x89Ûª", "", tweet) tweet = re.sub(r"\x89Û\x9d", "", tweet) tweet = re.sub(r"å_", "", tweet) tweet = re.sub(r"\x89Û¢", "", tweet) tweet = re.sub(r"\x89Û¢åÊ", "", tweet) tweet = re.sub(r"fromåÊwounds", "from wounds", tweet) tweet = re.sub(r"åÊ", "", tweet) tweet = re.sub(r"åÈ", "", tweet) tweet = re.sub(r"JapÌ_n", "Japan", tweet) tweet = re.sub(r"Ì©", "e", tweet) tweet = re.sub(r"å¨", "", tweet) tweet = re.sub(r"Surṳ", "Suruc", tweet) tweet = re.sub(r"åÇ", "", tweet) tweet = re.sub(r"å£3million", "3 million", tweet) tweet = re.sub(r"åÀ", "", tweet) tweet = re.sub(r"he's", "he is", tweet) tweet = re.sub(r"there's", "there is", tweet) tweet = re.sub(r"We're", "We are", tweet) tweet = re.sub(r"That's", "That is", tweet) tweet = re.sub(r"won't", "will not", tweet) tweet = re.sub(r"they're", "they are", tweet) tweet = re.sub(r"Can't", "Cannot", tweet) tweet = re.sub(r"wasn't", "was not", tweet) tweet = re.sub(r"don\x89Ûªt", "do not", tweet) tweet = re.sub(r"aren't", "are not", tweet) tweet = re.sub(r"isn't", "is not", tweet) tweet = re.sub(r"What's", "What is", tweet) tweet = re.sub(r"haven't", "have not", tweet) tweet = re.sub(r"hasn't", "has not", tweet) tweet = re.sub(r"There's", "There is", tweet) tweet = re.sub(r"He's", "He is", tweet) tweet = re.sub(r"It's", "It is", tweet) tweet = re.sub(r"You're", "You are", tweet) tweet = re.sub(r"I'M", "I am", tweet) tweet = re.sub(r"shouldn't", "should not", tweet) tweet = re.sub(r"wouldn't", "would not", tweet) tweet = re.sub(r"i'm", "I am", tweet) tweet = re.sub(r"I\x89Ûªm", "I am", tweet) tweet = re.sub(r"I'm", "I am", tweet) tweet = re.sub(r"Isn't", "is not", tweet) tweet = re.sub(r"Here's", "Here is", tweet) tweet = re.sub(r"you've", "you have", tweet) tweet = re.sub(r"you\x89Ûªve", "you have", tweet) tweet = re.sub(r"we're", "we are", tweet) tweet = re.sub(r"what's", "what is", tweet) tweet = re.sub(r"couldn't", "could not", tweet) tweet = re.sub(r"we've", "we have", tweet) tweet = re.sub(r"it\x89Ûªs", "it is", tweet) tweet = re.sub(r"doesn\x89Ûªt", "does not", tweet) tweet = re.sub(r"It\x89Ûªs", "It is", tweet) tweet = re.sub(r"Here\x89Ûªs", "Here is", tweet) tweet = re.sub(r"who's", "who is", tweet) tweet = re.sub(r"I\x89Ûªve", "I have", tweet) tweet = re.sub(r"y'all", "you all", tweet) tweet = re.sub(r"can\x89Ûªt", "cannot", tweet) tweet = re.sub(r"would've", "would have", tweet) tweet = re.sub(r"it'll", "it will", tweet) tweet = re.sub(r"we'll", "we will", tweet) tweet = re.sub(r"wouldn\x89Ûªt", "would not", tweet) tweet = re.sub(r"We've", "We have", tweet) tweet = re.sub(r"he'll", "he will", tweet) tweet = re.sub(r"Y'all", "You all", tweet) tweet = re.sub(r"Weren't", "Were not", tweet) tweet = re.sub(r"Didn't", "Did not", tweet) tweet = re.sub(r"they'll", "they will", tweet) tweet = re.sub(r"they'd", "they would", tweet) tweet = re.sub(r"DON'T", "DO NOT", tweet) tweet = re.sub(r"That\x89Ûªs", "That is", tweet) tweet = re.sub(r"they've", "they have", tweet) tweet = re.sub(r"i'd", "I would", tweet) tweet = re.sub(r"should've", "should have", tweet) tweet = re.sub(r"You\x89Ûªre", "You are", tweet) tweet = re.sub(r"where's", "where is", tweet) tweet = re.sub(r"Don\x89Ûªt", "Do not", tweet) 
tweet = re.sub(r"we'd", "we would", tweet) tweet = re.sub(r"i'll", "I will", tweet) tweet = re.sub(r"weren't", "were not", tweet) tweet = re.sub(r"They're", "They are", tweet) tweet = re.sub(r"Can\x89Ûªt", "Cannot", tweet) tweet = re.sub(r"you\x89Ûªll", "you will", tweet) tweet = re.sub(r"I\x89Ûªd", "I would", tweet) tweet = re.sub(r"let's", "let us", tweet) tweet = re.sub(r"it's", "it is", tweet) tweet = re.sub(r"can't", "cannot", tweet) tweet = re.sub(r"don't", "do not", tweet) tweet = re.sub(r"you're", "you are", tweet) tweet = re.sub(r"i've", "I have", tweet) tweet = re.sub(r"that's", "that is", tweet) tweet = re.sub(r"i'll", "I will", tweet) tweet = re.sub(r"doesn't", "does not", tweet) tweet = re.sub(r"i'd", "I would", tweet) tweet = re.sub(r"didn't", "did not", tweet) tweet = re.sub(r"ain't", "am not", tweet) tweet = re.sub(r"you'll", "you will", tweet) tweet = re.sub(r"I've", "I have", tweet) tweet = re.sub(r"Don't", "do not", tweet) tweet = re.sub(r"I'll", "I will", tweet) tweet = re.sub(r"I'd", "I would", tweet) tweet = re.sub(r"Let's", "Let us", tweet) tweet = re.sub(r"you'd", "You would", tweet) tweet = re.sub(r"It's", "It is", tweet) tweet = re.sub(r"Ain't", "am not", tweet) tweet = re.sub(r"Haven't", "Have not", tweet) tweet = re.sub(r"Could've", "Could have", tweet) tweet = re.sub(r"youve", "you have", tweet) tweet = re.sub(r"donå«t", "do not", tweet) tweet = re.sub(r"&gt;", ">", tweet) tweet = re.sub(r"&lt;", "<", tweet) tweet = re.sub(r"&amp;", "&", tweet) tweet = re.sub(r"w/e", "whatever", tweet) tweet = re.sub(r"w/", "with", tweet) tweet = re.sub(r"USAgov", "USA government", tweet) tweet = re.sub(r"recentlu", "recently", tweet) tweet = re.sub(r"Ph0tos", "Photos", tweet) tweet = re.sub(r"amirite", "am I right", tweet) tweet = re.sub(r"exp0sed", "exposed", tweet) tweet = re.sub(r"<3", "love", tweet) tweet = re.sub(r"amageddon", "armageddon", tweet) tweet = re.sub(r"Trfc", "Traffic", tweet) tweet = re.sub(r"8/5/2015", "2015-08-05", tweet) tweet = re.sub(r"WindStorm", "Wind Storm", tweet) tweet = re.sub(r"8/6/2015", "2015-08-06", tweet) tweet = re.sub(r"10:38PM", "10:38 PM", tweet) tweet = re.sub(r"10:30pm", "10:30 PM", tweet) tweet = re.sub(r"16yr", "16 year", tweet) tweet = re.sub(r"lmao", "laughing my ass off", tweet) tweet = re.sub(r"TRAUMATISED", "traumatized", tweet) tweet = re.sub(r"IranDeal", "Iran Deal", tweet) tweet = re.sub(r"ArianaGrande", "Ariana Grande", tweet) tweet = re.sub(r"camilacabello97", "camila cabello", tweet) tweet = re.sub(r"RondaRousey", "Ronda Rousey", tweet) tweet = re.sub(r"MTVHottest", "MTV Hottest", tweet) tweet = re.sub(r"TrapMusic", "Trap Music", tweet) tweet = re.sub(r"ProphetMuhammad", "Prophet Muhammad", tweet) tweet = re.sub(r"PantherAttack", "Panther Attack", tweet) tweet = re.sub(r"StrategicPatience", "Strategic Patience", tweet) tweet = re.sub(r"socialnews", "social news", tweet) tweet = re.sub(r"NASAHurricane", "NASA Hurricane", tweet) tweet = re.sub(r"onlinecommunities", "online communities", tweet) tweet = re.sub(r"humanconsumption", "human consumption", tweet) tweet = re.sub(r"Typhoon-Devastated", "Typhoon Devastated", tweet) tweet = re.sub(r"Meat-Loving", "Meat Loving", tweet) tweet = re.sub(r"facialabuse", "facial abuse", tweet) tweet = re.sub(r"LakeCounty", "Lake County", tweet) tweet = re.sub(r"BeingAuthor", "Being Author", tweet) tweet = re.sub(r"withheavenly", "with heavenly", tweet) tweet = re.sub(r"thankU", "thank you", tweet) tweet = re.sub(r"iTunesMusic", "iTunes Music", tweet) tweet = re.sub(r"OffensiveContent", 
"Offensive Content", tweet) tweet = re.sub(r"WorstSummerJob", "Worst Summer Job", tweet) tweet = re.sub(r"HarryBeCareful", "Harry Be Careful", tweet) tweet = re.sub(r"NASASolarSystem", "NASA Solar System", tweet) tweet = re.sub(r"animalrescue", "animal rescue", tweet) tweet = re.sub(r"KurtSchlichter", "Kurt Schlichter", tweet) tweet = re.sub(r"aRmageddon", "armageddon", tweet) tweet = re.sub(r"Throwingknifes", "Throwing knives", tweet) tweet = re.sub(r"GodsLove", "God's Love", tweet) tweet = re.sub(r"bookboost", "book boost", tweet) tweet = re.sub(r"ibooklove", "I book love", tweet) tweet = re.sub(r"NestleIndia", "Nestle India", tweet) tweet = re.sub(r"realDonaldTrump", "Donald Trump", tweet) tweet = re.sub(r"DavidVonderhaar", "David Vonderhaar", tweet) tweet = re.sub(r"CecilTheLion", "Cecil The Lion", tweet) tweet = re.sub(r"weathernetwork", "weather network", tweet) tweet = re.sub(r"withBioterrorism&use", "with Bioterrorism & use", tweet) tweet = re.sub(r"Hostage&2", "Hostage & 2", tweet) tweet = re.sub(r"GOPDebate", "GOP Debate", tweet) tweet = re.sub(r"RickPerry", "Rick Perry", tweet) tweet = re.sub(r"frontpage", "front page", tweet) tweet = re.sub(r"NewsInTweets", "News In Tweets", tweet) tweet = re.sub(r"ViralSpell", "Viral Spell", tweet) tweet = re.sub(r"til_now", "until now", tweet) tweet = re.sub(r"volcanoinRussia", "volcano in Russia", tweet) tweet = re.sub(r"ZippedNews", "Zipped News", tweet) tweet = re.sub(r"MicheleBachman", "Michele Bachman", tweet) tweet = re.sub(r"53inch", "53 inch", tweet) tweet = re.sub(r"KerrickTrial", "Kerrick Trial", tweet) tweet = re.sub(r"abstorm", "Alberta Storm", tweet) tweet = re.sub(r"Beyhive", "Beyonce hive", tweet) tweet = re.sub(r"IDFire", "Idaho Fire", tweet) tweet = re.sub(r"DETECTADO", "Detected", tweet) tweet = re.sub(r"RockyFire", "Rocky Fire", tweet) tweet = re.sub(r"Listen/Buy", "Listen / Buy", tweet) tweet = re.sub(r"NickCannon", "Nick Cannon", tweet) tweet = re.sub(r"FaroeIslands", "Faroe Islands", tweet) tweet = re.sub(r"yycstorm", "Calgary Storm", tweet) tweet = re.sub(r"IDPs:", "Internally Displaced People :", tweet) tweet = re.sub(r"ArtistsUnited", "Artists United", tweet) tweet = re.sub(r"ClaytonBryant", "Clayton Bryant", tweet) tweet = re.sub(r"jimmyfallon", "jimmy fallon", tweet) tweet = re.sub(r"justinbieber", "justin bieber", tweet) tweet = re.sub(r"UTC2015", "UTC 2015", tweet) tweet = re.sub(r"Time2015", "Time 2015", tweet) tweet = re.sub(r"djicemoon", "dj icemoon", tweet) tweet = re.sub(r"LivingSafely", "Living Safely", tweet) tweet = re.sub(r"FIFA16", "Fifa 2016", tweet) tweet = re.sub(r"thisiswhywecanthavenicethings", "this is why we cannot have nice things", tweet) tweet = re.sub(r"bbcnews", "bbc news", tweet) tweet = re.sub(r"UndergroundRailraod", "Underground Railraod", tweet) tweet = re.sub(r"c4news", "c4 news", tweet) tweet = re.sub(r"OBLITERATION", "obliteration", tweet) tweet = re.sub(r"MUDSLIDE", "mudslide", tweet) tweet = re.sub(r"NoSurrender", "No Surrender", tweet) tweet = re.sub(r"NotExplained", "Not Explained", tweet) tweet = re.sub(r"greatbritishbakeoff", "great british bake off", tweet) tweet = re.sub(r"LondonFire", "London Fire", tweet) tweet = re.sub(r"KOTAWeather", "KOTA Weather", tweet) tweet = re.sub(r"LuchaUnderground", "Lucha Underground", tweet) tweet = re.sub(r"KOIN6News", "KOIN 6 News", tweet) tweet = re.sub(r"LiveOnK2", "Live On K2", tweet) tweet = re.sub(r"9NewsGoldCoast", "9 News Gold Coast", tweet) tweet = re.sub(r"nikeplus", "nike plus", tweet) tweet = re.sub(r"david_cameron", "David Cameron", 
tweet) tweet = re.sub(r"peterjukes", "Peter Jukes", tweet) tweet = re.sub(r"JamesMelville", "James Melville", tweet) tweet = re.sub(r"megynkelly", "Megyn Kelly", tweet) tweet = re.sub(r"cnewslive", "C News Live", tweet) tweet = re.sub(r"JamaicaObserver", "Jamaica Observer", tweet) tweet = re.sub(r"TweetLikeItsSeptember11th2001", "Tweet like it is september 11th 2001", tweet) tweet = re.sub(r"cbplawyers", "cbp lawyers", tweet) tweet = re.sub(r"fewmoretweets", "few more tweets", tweet) tweet = re.sub(r"BlackLivesMatter", "Black Lives Matter", tweet) tweet = re.sub(r"cjoyner", "Chris Joyner", tweet) tweet = re.sub(r"ENGvAUS", "England vs Australia", tweet) tweet = re.sub(r"ScottWalker", "Scott Walker", tweet) tweet = re.sub(r"MikeParrActor", "Michael Parr", tweet) tweet = re.sub(r"4PlayThursdays", "Foreplay Thursdays", tweet) tweet = re.sub(r"TGF2015", "Tontitown Grape Festival", tweet) tweet = re.sub(r"realmandyrain", "Mandy Rain", tweet) tweet = re.sub(r"GraysonDolan", "Grayson Dolan", tweet) tweet = re.sub(r"ApolloBrown", "Apollo Brown", tweet) tweet = re.sub(r"saddlebrooke", "Saddlebrooke", tweet) tweet = re.sub(r"TontitownGrape", "Tontitown Grape", tweet) tweet = re.sub(r"AbbsWinston", "Abbs Winston", tweet) tweet = re.sub(r"ShaunKing", "Shaun King", tweet) tweet = re.sub(r"MeekMill", "Meek Mill", tweet) tweet = re.sub(r"TornadoGiveaway", "Tornado Giveaway", tweet) tweet = re.sub(r"GRupdates", "GR updates", tweet) tweet = re.sub(r"SouthDowns", "South Downs", tweet) tweet = re.sub(r"braininjury", "brain injury", tweet) tweet = re.sub(r"auspol", "Australian politics", tweet) tweet = re.sub(r"PlannedParenthood", "Planned Parenthood", tweet) tweet = re.sub(r"calgaryweather", "Calgary Weather", tweet) tweet = re.sub(r"weallheartonedirection", "we all heart one direction", tweet) tweet = re.sub(r"edsheeran", "Ed Sheeran", tweet) tweet = re.sub(r"TrueHeroes", "True Heroes", tweet) tweet = re.sub(r"S3XLEAK", "sex leak", tweet) tweet = re.sub(r"ComplexMag", "Complex Magazine", tweet) tweet = re.sub(r"TheAdvocateMag", "The Advocate Magazine", tweet) tweet = re.sub(r"CityofCalgary", "City of Calgary", tweet) tweet = re.sub(r"EbolaOutbreak", "Ebola Outbreak", tweet) tweet = re.sub(r"SummerFate", "Summer Fate", tweet) tweet = re.sub(r"RAmag", "Royal Academy Magazine", tweet) tweet = re.sub(r"offers2go", "offers to go", tweet) tweet = re.sub(r"foodscare", "food scare", tweet) tweet = re.sub(r"MNPDNashville", "Metropolitan Nashville Police Department", tweet) tweet = re.sub(r"TfLBusAlerts", "TfL Bus Alerts", tweet) tweet = re.sub(r"GamerGate", "Gamer Gate", tweet) tweet = re.sub(r"IHHen", "Humanitarian Relief", tweet) tweet = re.sub(r"spinningbot", "spinning bot", tweet) tweet = re.sub(r"ModiMinistry", "Modi Ministry", tweet) tweet = re.sub(r"TAXIWAYS", "taxi ways", tweet) tweet = re.sub(r"Calum5SOS", "Calum Hood", tweet) tweet = re.sub(r"po_st", "po.st", tweet) tweet = re.sub(r"scoopit", "scoop.it", tweet) tweet = re.sub(r"UltimaLucha", "Ultima Lucha", tweet) tweet = re.sub(r"JonathanFerrell", "Jonathan Ferrell", tweet) tweet = re.sub(r"aria_ahrary", "Aria Ahrary", tweet) tweet = re.sub(r"rapidcity", "Rapid City", tweet) tweet = re.sub(r"OutBid", "outbid", tweet) tweet = re.sub(r"lavenderpoetrycafe", "lavender poetry cafe", tweet) tweet = re.sub(r"EudryLantiqua", "Eudry Lantiqua", tweet) tweet = re.sub(r"15PM", "15 PM", tweet) tweet = re.sub(r"OriginalFunko", "Funko", tweet) tweet = re.sub(r"rightwaystan", "Richard Tan", tweet) tweet = re.sub(r"CindyNoonan", "Cindy Noonan", tweet) tweet = 
re.sub(r"RT_America", "RT America", tweet) tweet = re.sub(r"narendramodi", "Narendra Modi", tweet) tweet = re.sub(r"BakeOffFriends", "Bake Off Friends", tweet) tweet = re.sub(r"TeamHendrick", "Hendrick Motorsports", tweet) tweet = re.sub(r"alexbelloli", "Alex Belloli", tweet) tweet = re.sub(r"itsjustinstuart", "Justin Stuart", tweet) tweet = re.sub(r"gunsense", "gun sense", tweet) tweet = re.sub(r"DebateQuestionsWeWantToHear", "debate questions we want to hear", tweet) tweet = re.sub(r"RoyalCarribean", "Royal Carribean", tweet) tweet = re.sub(r"samanthaturne19", "Samantha Turner", tweet) tweet = re.sub(r"JonVoyage", "Jon Stewart", tweet) tweet = re.sub(r"renew911health", "renew 911 health", tweet) tweet = re.sub(r"SuryaRay", "Surya Ray", tweet) tweet = re.sub(r"pattonoswalt", "Patton Oswalt", tweet) tweet = re.sub(r"minhazmerchant", "Minhaz Merchant", tweet) tweet = re.sub(r"TLVFaces", "Israel Diaspora Coalition", tweet) tweet = re.sub(r"pmarca", "Marc Andreessen", tweet) tweet = re.sub(r"pdx911", "Portland Police", tweet) tweet = re.sub(r"jamaicaplain", "Jamaica Plain", tweet) tweet = re.sub(r"Japton", "Arkansas", tweet) tweet = re.sub(r"RouteComplex", "Route Complex", tweet) tweet = re.sub(r"INSubcontinent", "Indian Subcontinent", tweet) tweet = re.sub(r"NJTurnpike", "New Jersey Turnpike", tweet) tweet = re.sub(r"Politifiact", "PolitiFact", tweet) tweet = re.sub(r"Hiroshima70", "Hiroshima", tweet) tweet = re.sub(r"GMMBC", "Greater Mt Moriah Baptist Church", tweet) tweet = re.sub(r"versethe", "verse the", tweet) tweet = re.sub(r"TubeStrike", "Tube Strike", tweet) tweet = re.sub(r"MissionHills", "Mission Hills", tweet) tweet = re.sub(r"ProtectDenaliWolves", "Protect Denali Wolves", tweet) tweet = re.sub(r"NANKANA", "Nankana", tweet) tweet = re.sub(r"SAHIB", "Sahib", tweet) tweet = re.sub(r"PAKPATTAN", "Pakpattan", tweet) tweet = re.sub(r"Newz_Sacramento", "News Sacramento", tweet) tweet = re.sub(r"gofundme", "go fund me", tweet) tweet = re.sub(r"pmharper", "Stephen Harper", tweet) tweet = re.sub(r"IvanBerroa", "Ivan Berroa", tweet) tweet = re.sub(r"LosDelSonido", "Los Del Sonido", tweet) tweet = re.sub(r"bancodeseries", "banco de series", tweet) tweet = re.sub(r"timkaine", "Tim Kaine", tweet) tweet = re.sub(r"IdentityTheft", "Identity Theft", tweet) tweet = re.sub(r"AllLivesMatter", "All Lives Matter", tweet) tweet = re.sub(r"mishacollins", "Misha Collins", tweet) tweet = re.sub(r"BillNeelyNBC", "Bill Neely", tweet) tweet = re.sub(r"BeClearOnCancer", "be clear on cancer", tweet) tweet = re.sub(r"Kowing", "Knowing", tweet) tweet = re.sub(r"ScreamQueens", "Scream Queens", tweet) tweet = re.sub(r"AskCharley", "Ask Charley", tweet) tweet = re.sub(r"BlizzHeroes", "Heroes of the Storm", tweet) tweet = re.sub(r"BradleyBrad47", "Bradley Brad", tweet) tweet = re.sub(r"HannaPH", "Typhoon Hanna", tweet) tweet = re.sub(r"meinlcymbals", "MEINL Cymbals", tweet) tweet = re.sub(r"Ptbo", "Peterborough", tweet) tweet = re.sub(r"cnnbrk", "CNN Breaking News", tweet) tweet = re.sub(r"IndianNews", "Indian News", tweet) tweet = re.sub(r"savebees", "save bees", tweet) tweet = re.sub(r"GreenHarvard", "Green Harvard", tweet) tweet = re.sub(r"StandwithPP", "Stand with planned parenthood", tweet) tweet = re.sub(r"hermancranston", "Herman Cranston", tweet) tweet = re.sub(r"WMUR9", "WMUR-TV", tweet) tweet = re.sub(r"RockBottomRadFM", "Rock Bottom Radio", tweet) tweet = re.sub(r"ameenshaikh3", "Ameen Shaikh", tweet) tweet = re.sub(r"ProSyn", "Project Syndicate", tweet) tweet = re.sub(r"Daesh", "ISIS", tweet) tweet = 
re.sub(r"s2g", "swear to god", tweet) tweet = re.sub(r"listenlive", "listen live", tweet) tweet = re.sub(r"CDCgov", "Centers for Disease Control and Prevention", tweet) tweet = re.sub(r"FoxNew", "Fox News", tweet) tweet = re.sub(r"CBSBigBrother", "Big Brother", tweet) tweet = re.sub(r"JulieDiCaro", "Julie DiCaro", tweet) tweet = re.sub(r"theadvocatemag", "The Advocate Magazine", tweet) tweet = re.sub(r"RohnertParkDPS", "Rohnert Park Police Department", tweet) tweet = re.sub(r"THISIZBWRIGHT", "Bonnie Wright", tweet) tweet = re.sub(r"Popularmmos", "Popular MMOs", tweet) tweet = re.sub(r"WildHorses", "Wild Horses", tweet) tweet = re.sub(r"FantasticFour", "Fantastic Four", tweet) tweet = re.sub(r"HORNDALE", "Horndale", tweet) tweet = re.sub(r"PINER", "Piner", tweet) tweet = re.sub(r"BathAndNorthEastSomerset", "Bath and North East Somerset", tweet) tweet = re.sub(r"thatswhatfriendsarefor", "that is what friends are for", tweet) tweet = re.sub(r"residualincome", "residual income", tweet) tweet = re.sub(r"YahooNewsDigest", "Yahoo News Digest", tweet) tweet = re.sub(r"MalaysiaAirlines", "Malaysia Airlines", tweet) tweet = re.sub(r"AmazonDeals", "Amazon Deals", tweet) tweet = re.sub(r"MissCharleyWebb", "Charley Webb", tweet) tweet = re.sub(r"shoalstraffic", "shoals traffic", tweet) tweet = re.sub(r"GeorgeFoster72", "George Foster", tweet) tweet = re.sub(r"pop2015", "pop 2015", tweet) tweet = re.sub(r"_PokemonCards_", "Pokemon Cards", tweet) tweet = re.sub(r"DianneG", "Dianne Gallagher", tweet) tweet = re.sub(r"KashmirConflict", "Kashmir Conflict", tweet) tweet = re.sub(r"BritishBakeOff", "British Bake Off", tweet) tweet = re.sub(r"FreeKashmir", "Free Kashmir", tweet) tweet = re.sub(r"mattmosley", "Matt Mosley", tweet) tweet = re.sub(r"BishopFred", "Bishop Fred", tweet) tweet = re.sub(r"EndConflict", "End Conflict", tweet) tweet = re.sub(r"EndOccupation", "End Occupation", tweet) tweet = re.sub(r"UNHEALED", "unhealed", tweet) tweet = re.sub(r"CharlesDagnall", "Charles Dagnall", tweet) tweet = re.sub(r"Latestnews", "Latest news", tweet) tweet = re.sub(r"KindleCountdown", "Kindle Countdown", tweet) tweet = re.sub(r"NoMoreHandouts", "No More Handouts", tweet) tweet = re.sub(r"datingtips", "dating tips", tweet) tweet = re.sub(r"charlesadler", "Charles Adler", tweet) tweet = re.sub(r"twia", "Texas Windstorm Insurance Association", tweet) tweet = re.sub(r"txlege", "Texas Legislature", tweet) tweet = re.sub(r"WindstormInsurer", "Windstorm Insurer", tweet) tweet = re.sub(r"Newss", "News", tweet) tweet = re.sub(r"hempoil", "hemp oil", tweet) tweet = re.sub(r"CommoditiesAre", "Commodities are", tweet) tweet = re.sub(r"tubestrike", "tube strike", tweet) tweet = re.sub(r"JoeNBC", "Joe Scarborough", tweet) tweet = re.sub(r"LiteraryCakes", "Literary Cakes", tweet) tweet = re.sub(r"TI5", "The International 5", tweet) tweet = re.sub(r"thehill", "the hill", tweet) tweet = re.sub(r"3others", "3 others", tweet) tweet = re.sub(r"stighefootball", "Sam Tighe", tweet) tweet = re.sub(r"whatstheimportantvideo", "what is the important video", tweet) tweet = re.sub(r"ClaudioMeloni", "Claudio Meloni", tweet) tweet = re.sub(r"DukeSkywalker", "Duke Skywalker", tweet) tweet = re.sub(r"carsonmwr", "Fort Carson", tweet) tweet = re.sub(r"offdishduty", "off dish duty", tweet) tweet = re.sub(r"andword", "and word", tweet) tweet = re.sub(r"rhodeisland", "Rhode Island", tweet) tweet = re.sub(r"easternoregon", "Eastern Oregon", tweet) tweet = re.sub(r"WAwildfire", "Washington Wildfire", tweet) tweet = re.sub(r"fingerrockfire", "Finger 
Rock Fire", tweet) tweet = re.sub(r"57am", "57 am", tweet) tweet = re.sub(r"fingerrockfire", "Finger Rock Fire", tweet) tweet = re.sub(r"JacobHoggard", "Jacob Hoggard", tweet) tweet = re.sub(r"newnewnew", "new new new", tweet) tweet = re.sub(r"under50", "under 50", tweet) tweet = re.sub(r"getitbeforeitsgone", "get it before it is gone", tweet) tweet = re.sub(r"freshoutofthebox", "fresh out of the box", tweet) tweet = re.sub(r"amwriting", "am writing", tweet) tweet = re.sub(r"Bokoharm", "Boko Haram", tweet) tweet = re.sub(r"Nowlike", "Now like", tweet) tweet = re.sub(r"seasonfrom", "season from", tweet) tweet = re.sub(r"epicente", "epicenter", tweet) tweet = re.sub(r"epicenterr", "epicenter", tweet) tweet = re.sub(r"sicklife", "sick life", tweet) tweet = re.sub(r"yycweather", "Calgary Weather", tweet) tweet = re.sub(r"calgarysun", "Calgary Sun", tweet) tweet = re.sub(r"approachng", "approaching", tweet) tweet = re.sub(r"evng", "evening", tweet) tweet = re.sub(r"Sumthng", "something", tweet) tweet = re.sub(r"EllenPompeo", "Ellen Pompeo", tweet) tweet = re.sub(r"shondarhimes", "Shonda Rhimes", tweet) tweet = re.sub(r"ABCNetwork", "ABC Network", tweet) tweet = re.sub(r"SushmaSwaraj", "Sushma Swaraj", tweet) tweet = re.sub(r"pray4japan", "Pray for Japan", tweet) tweet = re.sub(r"hope4japan", "Hope for Japan", tweet) tweet = re.sub(r"Illusionimagess", "Illusion images", tweet) tweet = re.sub(r"SummerUnderTheStars", "Summer Under The Stars", tweet) tweet = re.sub(r"ShallWeDance", "Shall We Dance", tweet) tweet = re.sub(r"TCMParty", "TCM Party", tweet) tweet = re.sub(r"marijuananews", "marijuana news", tweet) tweet = re.sub(r"onbeingwithKristaTippett", "on being with Krista Tippett", tweet) tweet = re.sub(r"Beingtweets", "Being tweets", tweet) tweet = re.sub(r"newauthors", "new authors", tweet) tweet = re.sub(r"remedyyyy", "remedy", tweet) tweet = re.sub(r"44PM", "44 PM", tweet) tweet = re.sub(r"HeadlinesApp", "Headlines App", tweet) tweet = re.sub(r"40PM", "40 PM", tweet) tweet = re.sub(r"myswc", "Severe Weather Center", tweet) tweet = re.sub(r"ithats", "that is", tweet) tweet = re.sub(r"icouldsitinthismomentforever", "I could sit in this moment forever", tweet) tweet = re.sub(r"FatLoss", "Fat Loss", tweet) tweet = re.sub(r"02PM", "02 PM", tweet) tweet = re.sub(r"MetroFmTalk", "Metro Fm Talk", tweet) tweet = re.sub(r"Bstrd", "bastard", tweet) tweet = re.sub(r"bldy", "bloody", tweet) tweet = re.sub(r"MetrofmTalk", "Metro Fm Talk", tweet) tweet = re.sub(r"terrorismturn", "terrorism turn", tweet) tweet = re.sub(r"BBCNewsAsia", "BBC News Asia", tweet) tweet = re.sub(r"BehindTheScenes", "Behind The Scenes", tweet) tweet = re.sub(r"GeorgeTakei", "George Takei", tweet) tweet = re.sub(r"WomensWeeklyMag", "Womens Weekly Magazine", tweet) tweet = re.sub(r"SurvivorsGuidetoEarth", "Survivors Guide to Earth", tweet) tweet = re.sub(r"incubusband", "incubus band", tweet) tweet = re.sub(r"Babypicturethis", "Baby picture this", tweet) tweet = re.sub(r"BombEffects", "Bomb Effects", tweet) tweet = re.sub(r"win10", "Windows 10", tweet) tweet = re.sub(r"idkidk", "I do not know I do not know", tweet) tweet = re.sub(r"TheWalkingDead", "The Walking Dead", tweet) tweet = re.sub(r"amyschumer", "Amy Schumer", tweet) tweet = re.sub(r"crewlist", "crew list", tweet) tweet = re.sub(r"Erdogans", "Erdogan", tweet) tweet = re.sub(r"BBCLive", "BBC Live", tweet) tweet = re.sub(r"TonyAbbottMHR", "Tony Abbott", tweet) tweet = re.sub(r"paulmyerscough", "Paul Myerscough", tweet) tweet = re.sub(r"georgegallagher", "George Gallagher", 
tweet) tweet = re.sub(r"JimmieJohnson", "Jimmie Johnson", tweet) tweet = re.sub(r"pctool", "pc tool", tweet) tweet = re.sub(r"DoingHashtagsRight", "Doing Hashtags Right", tweet) tweet = re.sub(r"ThrowbackThursday", "Throwback Thursday", tweet) tweet = re.sub(r"SnowBackSunday", "Snowback Sunday", tweet) tweet = re.sub(r"LakeEffect", "Lake Effect", tweet) tweet = re.sub(r"RTphotographyUK", "Richard Thomas Photography UK", tweet) tweet = re.sub(r"BigBang_CBS", "Big Bang CBS", tweet) tweet = re.sub(r"writerslife", "writers life", tweet) tweet = re.sub(r"NaturalBirth", "Natural Birth", tweet) tweet = re.sub(r"UnusualWords", "Unusual Words", tweet) tweet = re.sub(r"wizkhalifa", "Wiz Khalifa", tweet) tweet = re.sub(r"acreativedc", "a creative DC", tweet) tweet = re.sub(r"vscodc", "vsco DC", tweet) tweet = re.sub(r"VSCOcam", "vsco camera", tweet) tweet = re.sub(r"TheBEACHDC", "The beach DC", tweet) tweet = re.sub(r"buildingmuseum", "building museum", tweet) tweet = re.sub(r"WorldOil", "World Oil", tweet) tweet = re.sub(r"redwedding", "red wedding", tweet) tweet = re.sub(r"AmazingRaceCanada", "Amazing Race Canada", tweet) tweet = re.sub(r"WakeUpAmerica", "Wake Up America", tweet) tweet = re.sub(r"\\Allahuakbar\", "Allahu Akbar", tweet) tweet = re.sub(r"bleased", "blessed", tweet) tweet = re.sub(r"nigeriantribune", "Nigerian Tribune", tweet) tweet = re.sub(r"HIDEO_KOJIMA_EN", "Hideo Kojima", tweet) tweet = re.sub(r"FusionFestival", "Fusion Festival", tweet) tweet = re.sub(r"50Mixed", "50 Mixed", tweet) tweet = re.sub(r"NoAgenda", "No Agenda", tweet) tweet = re.sub(r"WhiteGenocide", "White Genocide", tweet) tweet = re.sub(r"dirtylying", "dirty lying", tweet) tweet = re.sub(r"SyrianRefugees", "Syrian Refugees", tweet) tweet = re.sub(r"changetheworld", "change the world", tweet) tweet = re.sub(r"Ebolacase", "Ebola case", tweet) tweet = re.sub(r"mcgtech", "mcg technologies", tweet) tweet = re.sub(r"withweapons", "with weapons", tweet) tweet = re.sub(r"advancedwarfare", "advanced warfare", tweet) tweet = re.sub(r"letsFootball", "let us Football", tweet) tweet = re.sub(r"LateNiteMix", "late night mix", tweet) tweet = re.sub(r"PhilCollinsFeed", "Phil Collins", tweet) tweet = re.sub(r"RudyHavenstein", "Rudy Havenstein", tweet) tweet = re.sub(r"22PM", "22 PM", tweet) tweet = re.sub(r"54am", "54 AM", tweet) tweet = re.sub(r"38am", "38 AM", tweet) tweet = re.sub(r"OldFolkExplainStuff", "Old Folk Explain Stuff", tweet) tweet = re.sub(r"BlacklivesMatter", "Black Lives Matter", tweet) tweet = re.sub(r"InsaneLimits", "Insane Limits", tweet) tweet = re.sub(r"youcantsitwithus", "you cannot sit with us", tweet) tweet = re.sub(r"2k15", "2015", tweet) tweet = re.sub(r"TheIran", "Iran", tweet) tweet = re.sub(r"JimmyFallon", "Jimmy Fallon", tweet) tweet = re.sub(r"AlbertBrooks", "Albert Brooks", tweet) tweet = re.sub(r"defense_news", "defense news", tweet) tweet = re.sub(r"nuclearrcSA", "Nuclear Risk Control Self Assessment", tweet) tweet = re.sub(r"Auspol", "Australia Politics", tweet) tweet = re.sub(r"NuclearPower", "Nuclear Power", tweet) tweet = re.sub(r"WhiteTerrorism", "White Terrorism", tweet) tweet = re.sub(r"truthfrequencyradio", "Truth Frequency Radio", tweet) tweet = re.sub(r"ErasureIsNotEquality", "Erasure is not equality", tweet) tweet = re.sub(r"ProBonoNews", "Pro Bono News", tweet) tweet = re.sub(r"JakartaPost", "Jakarta Post", tweet) tweet = re.sub(r"toopainful", "too painful", tweet) tweet = re.sub(r"melindahaunton", "Melinda Haunton", tweet) tweet = re.sub(r"NoNukes", "No Nukes", tweet) tweet = 
re.sub(r"curryspcworld", "Currys PC World", tweet) tweet = re.sub(r"ineedcake", "I need cake", tweet) tweet = re.sub(r"blackforestgateau", "black forest gateau", tweet) tweet = re.sub(r"BBCOne", "BBC One", tweet) tweet = re.sub(r"AlexxPage", "Alex Page", tweet) tweet = re.sub(r"jonathanserrie", "Jonathan Serrie", tweet) tweet = re.sub(r"SocialJerkBlog", "Social Jerk Blog", tweet) tweet = re.sub(r"ChelseaVPeretti", "Chelsea Peretti", tweet) tweet = re.sub(r"irongiant", "iron giant", tweet) tweet = re.sub(r"RonFunches", "Ron Funches", tweet) tweet = re.sub(r"TimCook", "Tim Cook", tweet) tweet = re.sub(r"sebastianstanisaliveandwell", "Sebastian Stan is alive and well", tweet) tweet = re.sub(r"Madsummer", "Mad summer", tweet) tweet = re.sub(r"NowYouKnow", "Now you know", tweet) tweet = re.sub(r"concertphotography", "concert photography", tweet) tweet = re.sub(r"TomLandry", "Tom Landry", tweet) tweet = re.sub(r"showgirldayoff", "show girl day off", tweet) tweet = re.sub(r"Yougslavia", "Yugoslavia", tweet) tweet = re.sub(r"QuantumDataInformatics", "Quantum Data Informatics", tweet) tweet = re.sub(r"FromTheDesk", "From The Desk", tweet) tweet = re.sub(r"TheaterTrial", "Theater Trial", tweet) tweet = re.sub(r"CatoInstitute", "Cato Institute", tweet) tweet = re.sub(r"EmekaGift", "Emeka Gift", tweet) tweet = re.sub(r"LetsBe_Rational", "Let us be rational", tweet) tweet = re.sub(r"Cynicalreality", "Cynical reality", tweet) tweet = re.sub(r"FredOlsenCruise", "Fred Olsen Cruise", tweet) tweet = re.sub(r"NotSorry", "not sorry", tweet) tweet = re.sub(r"UseYourWords", "use your words", tweet) tweet = re.sub(r"WordoftheDay", "word of the day", tweet) tweet = re.sub(r"Dictionarycom", "Dictionary.com", tweet) tweet = re.sub(r"TheBrooklynLife", "The Brooklyn Life", tweet) tweet = re.sub(r"jokethey", "joke they", tweet) tweet = re.sub(r"nflweek1picks", "NFL week 1 picks", tweet) tweet = re.sub(r"uiseful", "useful", tweet) tweet = re.sub(r"JusticeDotOrg", "The American Association for Justice", tweet) tweet = re.sub(r"autoaccidents", "auto accidents", tweet) tweet = re.sub(r"SteveGursten", "Steve Gursten", tweet) tweet = re.sub(r"MichiganAutoLaw", "Michigan Auto Law", tweet) tweet = re.sub(r"birdgang", "bird gang", tweet) tweet = re.sub(r"nflnetwork", "NFL Network", tweet) tweet = re.sub(r"NYDNSports", "NY Daily News Sports", tweet) tweet = re.sub(r"RVacchianoNYDN", "Ralph Vacchiano NY Daily News", tweet) tweet = re.sub(r"EdmontonEsks", "Edmonton Eskimos", tweet) tweet = re.sub(r"david_brelsford", "David Brelsford", tweet) tweet = re.sub(r"TOI_India", "The Times of India", tweet) tweet = re.sub(r"hegot", "he got", tweet) tweet = re.sub(r"SkinsOn9", "Skins on 9", tweet) tweet = re.sub(r"sothathappened", "so that happened", tweet) tweet = re.sub(r"LCOutOfDoors", "LC Out Of Doors", tweet) tweet = re.sub(r"NationFirst", "Nation First", tweet) tweet = re.sub(r"IndiaToday", "India Today", tweet) tweet = re.sub(r"HLPS", "helps", tweet) tweet = re.sub(r"HOSTAGESTHROSW", "hostages throw", tweet) tweet = re.sub(r"SNCTIONS", "sanctions", tweet) tweet = re.sub(r"BidTime", "Bid Time", tweet) tweet = re.sub(r"crunchysensible", "crunchy sensible", tweet) tweet = re.sub(r"RandomActsOfRomance", "Random acts of romance", tweet) tweet = re.sub(r"MomentsAtHill", "Moments at hill", tweet) tweet = re.sub(r"eatshit", "eat shit", tweet) tweet = re.sub(r"liveleakfun", "live leak fun", tweet) tweet = re.sub(r"SahelNews", "Sahel News", tweet) tweet = re.sub(r"abc7newsbayarea", "ABC 7 News Bay Area", tweet) tweet = 
re.sub(r"facilitiesmanagement", "facilities management", tweet) tweet = re.sub(r"facilitydude", "facility dude", tweet) tweet = re.sub(r"CampLogistics", "Camp logistics", tweet) tweet = re.sub(r"alaskapublic", "Alaska public", tweet) tweet = re.sub(r"MarketResearch", "Market Research", tweet) tweet = re.sub(r"AccuracyEsports", "Accuracy Esports", tweet) tweet = re.sub(r"TheBodyShopAust", "The Body Shop Australia", tweet) tweet = re.sub(r"yychail", "Calgary hail", tweet) tweet = re.sub(r"yyctraffic", "Calgary traffic", tweet) tweet = re.sub(r"eliotschool", "eliot school", tweet) tweet = re.sub(r"TheBrokenCity", "The Broken City", tweet) tweet = re.sub(r"OldsFireDept", "Olds Fire Department", tweet) tweet = re.sub(r"RiverComplex", "River Complex", tweet) tweet = re.sub(r"fieldworksmells", "field work smells", tweet) tweet = re.sub(r"IranElection", "Iran Election", tweet) tweet = re.sub(r"glowng", "glowing", tweet) tweet = re.sub(r"kindlng", "kindling", tweet) tweet = re.sub(r"riggd", "rigged", tweet) tweet = re.sub(r"slownewsday", "slow news day", tweet) tweet = re.sub(r"MyanmarFlood", "Myanmar Flood", tweet) tweet = re.sub(r"abc7chicago", "ABC 7 Chicago", tweet) tweet = re.sub(r"copolitics", "Colorado Politics", tweet) tweet = re.sub(r"AdilGhumro", "Adil Ghumro", tweet) tweet = re.sub(r"netbots", "net bots", tweet) tweet = re.sub(r"byebyeroad", "bye bye road", tweet) tweet = re.sub(r"massiveflooding", "massive flooding", tweet) tweet = re.sub(r"EndofUS", "End of United States", tweet) tweet = re.sub(r"35PM", "35 PM", tweet) tweet = re.sub(r"greektheatrela", "Greek Theatre Los Angeles", tweet) tweet = re.sub(r"76mins", "76 minutes", tweet) tweet = re.sub(r"publicsafetyfirst", "public safety first", tweet) tweet = re.sub(r"livesmatter", "lives matter", tweet) tweet = re.sub(r"myhometown", "my hometown", tweet) tweet = re.sub(r"tankerfire", "tanker fire", tweet) tweet = re.sub(r"MEMORIALDAY", "memorial day", tweet) tweet = re.sub(r"MEMORIAL_DAY", "memorial day", tweet) tweet = re.sub(r"instaxbooty", "instagram booty", tweet) tweet = re.sub(r"Jerusalem_Post", "Jerusalem Post", tweet) tweet = re.sub(r"WayneRooney_INA", "Wayne Rooney", tweet) tweet = re.sub(r"VirtualReality", "Virtual Reality", tweet) tweet = re.sub(r"OculusRift", "Oculus Rift", tweet) tweet = re.sub(r"OwenJones84", "Owen Jones", tweet) tweet = re.sub(r"jeremycorbyn", "Jeremy Corbyn", tweet) tweet = re.sub(r"paulrogers002", "Paul Rogers", tweet) tweet = re.sub(r"mortalkombatx", "Mortal Kombat X", tweet) tweet = re.sub(r"mortalkombat", "Mortal Kombat", tweet) tweet = re.sub(r"FilipeCoelho92", "Filipe Coelho", tweet) tweet = re.sub(r"OnlyQuakeNews", "Only Quake News", tweet) tweet = re.sub(r"kostumes", "costumes", tweet) tweet = re.sub(r"YEEESSSS", "yes", tweet) tweet = re.sub(r"ToshikazuKatayama", "Toshikazu Katayama", tweet) tweet = re.sub(r"IntlDevelopment", "Intl Development", tweet) tweet = re.sub(r"ExtremeWeather", "Extreme Weather", tweet) tweet = re.sub(r"WereNotGruberVoters", "We are not gruber voters", tweet) tweet = re.sub(r"NewsThousands", "News Thousands", tweet) tweet = re.sub(r"EdmundAdamus", "Edmund Adamus", tweet) tweet = re.sub(r"EyewitnessWV", "Eye witness WV", tweet) tweet = re.sub(r"PhiladelphiaMuseu", "Philadelphia Museum", tweet) tweet = re.sub(r"DublinComicCon", "Dublin Comic Con", tweet) tweet = re.sub(r"NicholasBrendon", "Nicholas Brendon", tweet) tweet = re.sub(r"Alltheway80s", "All the way 80s", tweet) tweet = re.sub(r"FromTheField", "From the field", tweet) tweet = re.sub(r"NorthIowa", "North Iowa", 
tweet) tweet = re.sub(r"WillowFire", "Willow Fire", tweet) tweet = re.sub(r"MadRiverComplex", "Mad River Complex", tweet) tweet = re.sub(r"feelingmanly", "feeling manly", tweet) tweet = re.sub(r"stillnotoverit", "still not over it", tweet) tweet = re.sub(r"FortitudeValley", "Fortitude Valley", tweet) tweet = re.sub(r"CoastpowerlineTramTr", "Coast powerline", tweet) tweet = re.sub(r"ServicesGold", "Services Gold", tweet) tweet = re.sub(r"NewsbrokenEmergency", "News broken emergency", tweet) tweet = re.sub(r"Evaucation", "evacuation", tweet) tweet = re.sub(r"leaveevacuateexitbe", "leave evacuate exit be", tweet) tweet = re.sub(r"P_EOPLE", "PEOPLE", tweet) tweet = re.sub(r"Tubestrike", "tube strike", tweet) tweet = re.sub(r"CLASS_SICK", "CLASS SICK", tweet) tweet = re.sub(r"localplumber", "local plumber", tweet) tweet = re.sub(r"awesomejobsiri", "awesome job siri", tweet) tweet = re.sub(r"PayForItHow", "Pay for it how", tweet) tweet = re.sub(r"ThisIsAfrica", "This is Africa", tweet) tweet = re.sub(r"crimeairnetwork", "crime air network", tweet) tweet = re.sub(r"KimAcheson", "Kim Acheson", tweet) tweet = re.sub(r"cityofcalgary", "City of Calgary", tweet) tweet = re.sub(r"prosyndicate", "pro syndicate", tweet) tweet = re.sub(r"660NEWS", "660 NEWS", tweet) tweet = re.sub(r"BusInsMagazine", "Business Insurance Magazine", tweet) tweet = re.sub(r"wfocus", "focus", tweet) tweet = re.sub(r"ShastaDam", "Shasta Dam", tweet) tweet = re.sub(r"go2MarkFranco", "Mark Franco", tweet) tweet = re.sub(r"StephGHinojosa", "Steph Hinojosa", tweet) tweet = re.sub(r"Nashgrier", "Nash Grier", tweet) tweet = re.sub(r"NashNewVideo", "Nash new video", tweet) tweet = re.sub(r"IWouldntGetElectedBecause", "I would not get elected because", tweet) tweet = re.sub(r"SHGames", "Sledgehammer Games", tweet) tweet = re.sub(r"bedhair", "bed hair", tweet) tweet = re.sub(r"JoelHeyman", "Joel Heyman", tweet) tweet = re.sub(r"viaYouTube", "via YouTube", tweet) tweet = re.sub(r"https?:\/\/t.co\/[A-Za-z0-9]+", "", tweet) punctuations = '@ for p in punctuations: tweet = tweet.replace(p, f' {p} ') tweet = tweet.replace('...', '...') if '...' not in tweet: tweet = tweet.replace('.. ', '...') tweet = re.sub(r"MH370", "Malaysia Airlines Flight 370", tweet) tweet = re.sub(r"m̼sica", "music", tweet) tweet = re.sub(r"okwx", "Oklahoma City Weather", tweet) tweet = re.sub(r"arwx", "Arkansas Weather", tweet) tweet = re.sub(r"gawx", "Georgia Weather", tweet) tweet = re.sub(r"scwx", "South Carolina Weather", tweet) tweet = re.sub(r"cawx", "California Weather", tweet) tweet = re.sub(r"tnwx", "Tennessee Weather", tweet) tweet = re.sub(r"azwx", "Arizona Weather", tweet) tweet = re.sub(r"alwx", "Alabama Weather", tweet) tweet = re.sub(r"wordpressdotcom", "wordpress", tweet) tweet = re.sub(r"usNWSgov", "United States National Weather Service", tweet) tweet = re.sub(r"Suruc", "Sanliurfa", tweet) tweet = re.sub(r"Bestnaijamade", "bestnaijamade", tweet) tweet = re.sub(r"SOUDELOR", "Soudelor", tweet) tweet = re.sub(u"\U0001F600-\U0001F64F","", tweet) tweet = re.sub(u"\U0001F300-\U0001F5FF","", tweet) tweet = re.sub(u"\U0001F680-\U0001F6FF","", tweet) tweet = re.sub(u"\U0001F1E0-\U0001F1FF","", tweet) tweet = re.sub(u"\U00002702-\U000027B0","", tweet) tweet = re.sub(u"\U000024C2-\U0001F251","", tweet) return tweet train_df['text_cleaned'] = train_df['text'].apply(lambda s : clean(s)) test_df['text_cleaned'] = test_df['text'].apply(lambda s : clean(s))<categorify>
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
Digit Recognizer
5,786,490
def encode(texts, tokenizer, max_len=512): all_tokens = [] all_masks = [] all_segments = [] for text in texts: text = tokenizer.tokenize(text) text = text[:max_len-2] input_sequence = ["[CLS]"] + text + ["[SEP]"] pad_len = max_len - len(input_sequence) tokens = tokenizer.convert_tokens_to_ids(input_sequence) tokens += [0] * pad_len pad_masks = [1] * len(input_sequence)+ [0] * pad_len segment_ids = [0] * max_len all_tokens.append(tokens) all_masks.append(pad_masks) all_segments.append(segment_ids) return np.array(all_tokens), np.array(all_masks), np.array(all_segments )<choose_model_class>
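A minimal smoke test for encode(), a sketch only, assuming the FullTokenizer that this kernel builds from the hub layer's vocab file a few cells below; all three returned arrays should share the shape (n_texts, max_len).
import numpy as np
# `tokenizer` is assumed to be the bert.tokenization.FullTokenizer built later in this kernel.
sample_texts = ["Forest fire near La Ronge Sask. Canada", "I love fruits"]
ids, masks, segs = encode(sample_texts, tokenizer, max_len=160)
assert ids.shape == masks.shape == segs.shape == (len(sample_texts), 160)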
X=train.iloc[:,1:].values Y=train.iloc[:,0].values
Digit Recognizer
5,786,490
def build_model(bert_layer, max_len=512): input_word_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids") input_mask = Input(shape=(max_len,), dtype=tf.int32, name="input_mask") segment_ids = Input(shape=(max_len,), dtype=tf.int32, name="segment_ids") _, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids]) clf_output = sequence_output[:, 0, :] out = Dense(1, activation='sigmoid' )(clf_output) model = Model(inputs=[input_word_ids, input_mask, segment_ids], outputs=out) model.compile(Adam(lr=1e-5), loss='binary_crossentropy', metrics=['accuracy']) return model<choose_model_class>
X = X.reshape(X.shape[0], 28, 28,1) print(X.shape) Y = keras.utils.to_categorical(Y, 10) print(Y.shape )
Digit Recognizer
5,786,490
%%time bert_layer = hub.KerasLayer('https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1', trainable=True )<feature_engineering>
X_train, X_valid, Y_train, Y_valid = train_test_split(X, Y, test_size = 0.15, random_state=42 )
Digit Recognizer
5,786,490
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy() do_lower_case = bert_layer.resolved_object.do_lower_case.numpy() tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case )<categorify>
train_datagen = ImageDataGenerator(rescale = 1./255., rotation_range = 10, width_shift_range = 0.15, height_shift_range = 0.15, shear_range = 0.1, zoom_range = 0.2, horizontal_flip = False )
Digit Recognizer
5,786,490
train_input = encode(train_df.text_cleaned.values, tokenizer, max_len=160) test_input = encode(test_df.text_cleaned.values, tokenizer, max_len=160) train_labels = train_df.target.values<train_model>
valid_datagen = ImageDataGenerator(rescale=1./255 )
Digit Recognizer
5,786,490
model = build_model(bert_layer, max_len=160) checkpoint = ModelCheckpoint('model.h5', monitor='val_loss', save_best_only=True) train_history = model.fit( train_input, train_labels, validation_split=0.2, epochs=3, callbacks=[checkpoint], batch_size=32 )<predict_on_test>
model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(64,(3,3), padding='same', input_shape=(28, 28, 1)) , tf.keras.layers.LeakyReLU(alpha=0.1), tf.keras.layers.Conv2D(64,(3,3), padding='same'), tf.keras.layers.LeakyReLU(alpha=0.1), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Dropout(0.2), tf.keras.layers.Conv2D(64,(3,3), padding='same'), tf.keras.layers.LeakyReLU(alpha=0.1), tf.keras.layers.Conv2D(128,(3,3), padding='same'), tf.keras.layers.LeakyReLU(alpha=0.1), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Dropout(0.2), tf.keras.layers.Conv2D(128,(3,3), padding='same'), tf.keras.layers.LeakyReLU(alpha=0.1), tf.keras.layers.Conv2D(256,(3,3), padding='same'), tf.keras.layers.LeakyReLU(alpha=0.1), tf.keras.layers.MaxPooling2D(2,2), tf.keras.layers.Dropout(0.2), tf.keras.layers.Flatten() , tf.keras.layers.Dense(256), tf.keras.layers.LeakyReLU(alpha=0.1), tf.keras.layers.BatchNormalization() , tf.keras.layers.Dense(10, activation='softmax') ] )
Digit Recognizer
5,786,490
model.load_weights('model.h5') test_pred_BERT = model.predict(test_input) test_pred_BERT_int = test_pred_BERT.round().astype('int' )<save_to_csv>
initial_learningrate=1e-3 batch_size = 128 epochs = 40 input_shape =(28, 28, 1 )
Digit Recognizer
5,786,490
submission['target'] = test_pred_BERT_int submission.to_csv("submission_BERT.csv", index=False, header=True )<import_modules>
def lr_decay(epoch): return initial_learningrate * 0.9 ** epoch
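As a sanity check on the schedule above (pure arithmetic, not part of training), each epoch multiplies the rate by 0.9:
# Tabulate the exponential decay defined by lr_decay().
for epoch in [0, 1, 10, 39]:
    print(epoch, lr_decay(epoch))
# 0 -> 0.001, 1 -> 0.0009, 10 -> ~3.49e-4, 39 -> ~1.64e-5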
Digit Recognizer
5,786,490
import pandas as pd from tqdm import tqdm<load_from_csv>
model.compile(loss="categorical_crossentropy", optimizer=RMSprop(lr=initial_learningrate), metrics=['accuracy'] )
Digit Recognizer
5,786,490
train = pd.read_csv('../input/ames-housing-dataset/AmesHousing.csv') train.drop(['PID'], axis=1, inplace=True) origin = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv') train.columns = origin.columns test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv') submission = pd.read_csv('../input/house-prices-advanced-regression-techniques/sample_submission.csv') print('Train:{} Test:{}'.format(train.shape,test.shape))<drop_column>
history = model.fit_generator( train_datagen.flow(X_train,Y_train, batch_size=batch_size), steps_per_epoch=100, epochs=epochs, callbacks=[LearningRateScheduler(lr_decay) ], validation_data=valid_datagen.flow(X_valid,Y_valid), validation_steps=50, verbose=2 )
Digit Recognizer
5,786,490
missing = test.isnull().sum() missing = missing[missing>0] train.drop(missing.index, axis=1, inplace=True) train.drop(['Electrical'], axis=1, inplace=True) test.dropna(axis=1, inplace=True) test.drop(['Electrical'], axis=1, inplace=True )<feature_engineering>
predictions = model.predict_classes(test.values.reshape(-1, 28, 28, 1) / 255.)
Digit Recognizer
5,786,490
l_test = tqdm(range(0, len(test)) , desc='Matching') for i in l_test: for j in range(0, len(train)) : for k in range(1, len(test.columns)) : if test.iloc[i,k] == train.iloc[j,k]: continue else: break else: submission.iloc[i, 1] = train.iloc[j, -1] break l_test.close()<save_to_csv>
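The triple loop above is O(rows_test x rows_train x columns). A hypothetical vectorized equivalent, not the author's code, merges on the shared feature columns instead; it assumes those columns identify each train row uniquely:
# Hypothetical vectorized rewrite of the matching loop above.
key_cols = [c for c in test.columns if c != 'Id']
matched = test.merge(train[key_cols + ['SalePrice']], on=key_cols, how='left')
mask = matched['SalePrice'].notna()
submission.loc[mask, 'SalePrice'] = matched.loc[mask, 'SalePrice'].values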
final=pd.DataFrame({"ImageId": list(range(1,len(predictions)+1)) , "Label": predictions} )
Digit Recognizer
5,786,490
<import_modules><EOS>
final.to_csv("cnn_submission.csv",index=False)
Digit Recognizer
2,539,513
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<load_from_csv>
%matplotlib inline
Digit Recognizer
2,539,513
def load_data(): data_dir = Path("../input/house-prices-advanced-regression-techniques/") df_train = pd.read_csv(data_dir / "train.csv", index_col="Id") df_test = pd.read_csv(data_dir / "test.csv", index_col="Id") df = pd.concat([df_train, df_test]) df = clean(df) df = encode(df) df = impute_plus(df) df_train = df.loc[df_train.index, :] df_test = df.loc[df_test.index, :] return df_train, df_test<load_from_csv>
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv" )
Digit Recognizer
2,539,513
data_dir = Path(".. /input/house-prices-advanced-regression-techniques/") df = pd.read_csv(data_dir / "train.csv", index_col="Id") df.Exterior2nd.unique()<feature_engineering>
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv" )
Digit Recognizer
2,539,513
def clean(df): df['Exterior2nd'] = df['Exterior2nd'].replace({'Brk Cmn': 'BrkComm'}) df['GarageYrBlt'] = df['GarageYrBlt'].where(df.GarageYrBlt <= 2010, df.YearBuilt) df.rename(columns={ '1stFlrSF': 'FirstFlrSF', '2ndFlrSF': 'SecondFlrSF', '3SsnPorch': 'Threeseasonporch' }, inplace=True) return df<define_variables>
X_train = train.drop(labels = ["label"],axis = 1) Y_train = train["label"] len(Y_train )
Digit Recognizer
2,539,513
features_nom = ["MSSubClass", "MSZoning", "Street", "Alley", "LandContour", "LotConfig", "Neighborhood", "Condition1", "Condition2", "BldgType", "HouseStyle", "RoofStyle", "RoofMatl", "Exterior1st", "Exterior2nd", "MasVnrType", "Foundation", "Heating", "CentralAir", "GarageType", "MiscFeature", "SaleType", "SaleCondition"] five_levels = ["Po", "Fa", "TA", "Gd", "Ex"] ten_levels = list(range(10)) ordered_levels = { "OverallQual": ten_levels, "OverallCond": ten_levels, "ExterQual": five_levels, "ExterCond": five_levels, "BsmtQual": five_levels, "BsmtCond": five_levels, "HeatingQC": five_levels, "KitchenQual": five_levels, "FireplaceQu": five_levels, "GarageQual": five_levels, "GarageCond": five_levels, "PoolQC": five_levels, "LotShape": ["Reg", "IR1", "IR2", "IR3"], "LandSlope": ["Sev", "Mod", "Gtl"], "BsmtExposure": ["No", "Mn", "Av", "Gd"], "BsmtFinType1": ["Unf", "LwQ", "Rec", "BLQ", "ALQ", "GLQ"], "BsmtFinType2": ["Unf", "LwQ", "Rec", "BLQ", "ALQ", "GLQ"], "Functional": ["Sal", "Sev", "Maj1", "Maj2", "Mod", "Min2", "Min1", "Typ"], "GarageFinish": ["Unf", "RFn", "Fin"], "PavedDrive": ["N", "P", "Y"], "Utilities": ["NoSeWa", "NoSewr", "AllPub"], "CentralAir": ["N", "Y"], "Electrical": ["Mix", "FuseP", "FuseF", "FuseA", "SBrkr"], "Fence": ["MnWw", "GdWo", "MnPrv", "GdPrv"], } ordered_levels = {key: ["None"] + value for key, value in ordered_levels.items() } def encode(df): for name in features_nom: df[name] = df[name].astype("category") if "None" not in df[name].cat.categories: df[name].cat.add_categories("None", inplace=True) for name, levels in ordered_levels.items() : df[name] = df[name].astype(CategoricalDtype(levels, ordered=True)) return df<data_type_conversions>
X_train = X_train / 255.0 test = test / 255.0
Digit Recognizer
2,539,513
def impute_plus(df): cols_with_missing = [col for col in df.columns if col != 'SalePrice' and df[col].isnull().any() ] for col in cols_with_missing: df[col + '_was_missing'] = df[col].isnull() df[col + '_was_missing'] =(df[col + '_was_missing'])* 1 for name in df.select_dtypes("number"): df[name] = df[name].fillna(0) for name in df.select_dtypes("category"): df[name] = df[name].fillna("None") return df<split>
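A small illustration of impute_plus() on an invented single-column frame: the numeric NaN becomes 0 and a 0/1 indicator records where it was missing.
import numpy as np, pandas as pd
# Toy input; values are made up for the example.
demo = pd.DataFrame({"LotFrontage": [65.0, np.nan, 80.0]})
print(impute_plus(demo))
#    LotFrontage  LotFrontage_was_missing
# 0         65.0                        0
# 1          0.0                        1
# 2         80.0                        0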
img_width = 28 img_height = 28 n_channels = 1 X_train = X_train.values.reshape(-1,img_height,img_width,n_channels) test = test.values.reshape(-1,img_height,img_width,n_channels )
Digit Recognizer
2,539,513
df_train, df_test = load_data()<init_hyperparams>
Y_train = to_categorical(Y_train, num_classes = 10 )
Digit Recognizer
2,539,513
xgb_params = dict( max_depth=3, learning_rate=0.1, n_estimators=100, min_child_weight=1, colsample_bytree=1, subsample=1, reg_alpha=0, reg_lambda=1, num_parallel_tree=1, )<compute_train_metric>
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=2 )
Digit Recognizer
2,539,513
def score_dataset(X, y, model=XGBRegressor(**xgb_params)) : for colname in X.select_dtypes(["category"]): X[colname] = X[colname].cat.codes log_y = np.log(y) score = cross_val_score( model, X, log_y, cv=5, scoring='neg_mean_squared_error' ) score = -1 * score.mean() score = np.sqrt(score) return score<compute_test_metric>
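Because score_dataset() cross-validates against log(y) with squared error, the value it reports is RMSE in log space, the RMSLE-style metric this competition uses. Purely as arithmetic, with invented numbers:
# RMSE in log space for two hypothetical predictions.
import numpy as np
y_true = np.array([200000., 150000.])
y_pred = np.array([210000., 140000.])
print(np.sqrt(np.mean((np.log(y_true) - np.log(y_pred)) ** 2)))  # ~0.060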
print("Total Images:",len(Y_train)+len(Y_val)) print("Training Images:",len(Y_train)) print("Validation Images:",len(Y_val))
Digit Recognizer
2,539,513
X = df_train.copy() y = X.pop("SalePrice") baseline_score = score_dataset(X, y) print(f"Baseline score: {baseline_score:.5f} RMSE" )<normalization>
model = Sequential() model.add(Convolution2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu', input_shape = input_shape)) model.add(Convolution2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Convolution2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(Convolution2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation = "relu")) model.add(Dropout(0.5)) model.add(Dense(10, activation = "softmax"))
Digit Recognizer
2,539,513
mi_scores = make_mi_scores(X, y) <drop_column>
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0) model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"] )
Digit Recognizer
2,539,513
def drop_uninformative(df, mi_scores, threshold=0.0): return df.loc[:, mi_scores > threshold]<drop_column>
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train )
Digit Recognizer
2,539,513
drop_uninformative(X, mi_scores )<prepare_x_and_y>
Model = model.fit_generator(datagen.flow(X_train, Y_train,batch_size=200),epochs=30,verbose=1,validation_data=(X_val, Y_val))
Digit Recognizer
2,539,513
X = df_train.copy() y = X.pop("SalePrice") mi_scores = make_mi_scores(X, y) X["AllPub"] = X["Utilities"] == "AllPub" mi_scores = make_mi_scores(X, y) X = drop_uninformative(X, mi_scores) X.head() score_dataset(X, y )<categorify>
model.save("cnn_digit_recognizer.h5" )
Digit Recognizer
2,539,513
def label_encode(df): X = df.copy() for colname in X.select_dtypes(['category']): X[colname] = X[colname].cat.codes return X<feature_engineering>
score = model.evaluate(X_train, Y_train, verbose=1) print('Train Loss:', score[0]) print('Train Accuracy:', score[1] )
Digit Recognizer
2,539,513
def mathematical_transforms(df): X = pd.DataFrame() X['LivLotRatio'] = df.GrLivArea / df.LotArea X['Spaciousness'] =(df.FirstFlrSF + df.SecondFlrSF)/ df.TotRmsAbvGrd X['AgeAtTOS'] = df.YrSold - df.YearBuilt return X<categorify>
score = model.evaluate(X_val, Y_val, verbose=1) print('Validation Loss:', score[0]) print('Validation Accuracy:', score[1])
Digit Recognizer
2,539,513
def interactions(df): X_inter_1 = pd.get_dummies(df.BldgType, prefix='Bldg') X_inter_1 = X_inter_1.mul(df.GrLivArea, axis=0) X_inter_2 = pd.get_dummies(df.BsmtCond, prefix='BsmtCond') X_inter_2 = X_inter_2.mul(df.TotalBsmtSF, axis=0) X_inter_3 = pd.get_dummies(df.GarageQual, prefix='GarageQual') X_inter_3 = X_inter_3.mul(df.GarageArea, axis=0) X = X_inter_1.join(X_inter_2).join(X_inter_3) return X<prepare_x_and_y>
Y_pred = model.predict(X_val) Y_pred_classes = np.argmax(Y_pred,axis = 1) Y_true = np.argmax(Y_val,axis = 1) confusion_Matrix = confusion_matrix(Y_true, Y_pred_classes) print(confusion_Matrix )
Digit Recognizer
2,539,513
def counts(df): X = pd.DataFrame() X['PorchTypes'] = df[['WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch', 'Threeseasonporch', 'ScreenPorch' ]].gt(0.0 ).sum(axis=1) X['TotalHalfBath'] = df.BsmtFullBath + df.BsmtHalfBath X['TotalRoom'] = df.TotRmsAbvGrd + df.FullBath + df.HalfBath return X<create_dataframe>
results = model.predict(test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label" )
Digit Recognizer
2,539,513
def group_transforms(df): X = pd.DataFrame() X['MedNhbdArea'] = df.groupby('Neighborhood')['GrLivArea'].transform('median') X['MeanAgeAtTOS'] = df.groupby('Neighborhood')['AgeAtTOS'].transform('mean') return X<define_variables>
final_Result = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) final_Result.to_csv("cnn_mnist_datagen.csv",index=False )
Digit Recognizer
7,324,632
cluster_features = [ "LotArea", "TotalBsmtSF", "FirstFlrSF", "SecondFlrSF", "GrLivArea", ]<find_best_model_class>
train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv'); test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv');
Digit Recognizer
7,324,632
def cluster_labels(df, features, n_clusters=20): X = df.copy() X_scaled = X.loc[:, features] X_scaled =(X_scaled - X_scaled.mean(axis=0)) / X_scaled.std(axis=0) kmeans = KMeans(n_clusters=n_clusters, n_init=50, random_state=0) X_new = pd.DataFrame() X_new["Cluster"] = kmeans.fit_predict(X_scaled) return X_new<normalization>
rows = 28 cols = 28 tot_rows = train.shape[0] X_train = train.values[:,1:] y_train = keras.utils.to_categorical(train.label, 10) X_train = X_train.reshape(tot_rows, rows, cols, 1)/255.0 X_test = test.values[:] test_num_img = test.shape[0] X_test = X_test.reshape(test_num_img, rows, cols, 1)/255.0
Digit Recognizer
7,324,632
def cluster_distance(df, features, n_clusters=20): X = df.copy() X_scaled = X.loc[:, features] X_scaled =(X_scaled - X_scaled.mean(axis=0)) / X_scaled.std(axis=0) kmeans = KMeans(n_clusters=n_clusters, n_init=50, random_state=0) X_cd = kmeans.fit_transform(X_scaled) X_cd = pd.DataFrame( X_cd, columns=[f"Centroid_{i}" for i in range(X_cd.shape[1])] ) return X_cd<create_dataframe>
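A hedged usage sketch for the two clustering helpers, assuming df_train as returned by load_data() and the cluster_features list defined above:
# Hypothetical call pattern; both helpers standardize the features internally.
X_clusters = cluster_labels(df_train, cluster_features, n_clusters=20)
X_centroids = cluster_distance(df_train, cluster_features, n_clusters=20)
print(X_clusters["Cluster"].nunique(), X_centroids.shape)  # up to 20 labels, one distance column per centroid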
classifier = Sequential() classifier.add(Conv2D(32,(5,5),input_shape=(28,28,1),activation = 'relu',padding='same')) classifier.add(BatchNormalization()) classifier.add(Conv2D(32,(3,3),activation = 'relu',padding='same')) classifier.add(BatchNormalization()) classifier.add(MaxPooling2D(pool_size=(2,2), strides=None)) classifier.add(BatchNormalization()) classifier.add(Dropout(0.25)) classifier.add(Conv2D(64,(5,5),activation = 'relu',padding='same')) classifier.add(BatchNormalization()) classifier.add(Conv2D(64,(3,3),activation = 'relu',padding='same')) classifier.add(BatchNormalization()) classifier.add(Conv2D(64,(3,3),strides=(2,2),activation = 'relu',padding='same')) classifier.add(BatchNormalization()) classifier.add(Dropout(0.25)) classifier.add(Flatten()) classifier.add(Dense(units=128,activation='relu')) classifier.add(Dropout(0.4)) classifier.add(Dense(units=10,activation='softmax'))
Digit Recognizer
7,324,632
def apply_pca(X, standardize=True): if standardize: X =(X - X.mean(axis=0)) / X.std(axis=0) pca = PCA() X_pca = pca.fit_transform(X) component_names = [f"PC{i+1}" for i in range(X_pca.shape[1])] X_pca = pd.DataFrame(X_pca, columns=component_names) loadings = pd.DataFrame( pca.components_.T, columns=component_names, index=X.columns, ) return pca, X_pca, loadings def plot_variance(pca, width=8, dpi=100): fig, axs = plt.subplots(1, 2) n = pca.n_components_ grid = np.arange(1, n + 1) evr = pca.explained_variance_ratio_ axs[0].bar(grid, evr) axs[0].set( xlabel="Component", title="% Explained Variance", ylim=(0.0, 1.0) ) cv = np.cumsum(evr) axs[1].plot(np.r_[0, grid], np.r_[0, cv], "o-") axs[1].set( xlabel="Component", title="% Cumulative Variance", ylim=(0.0, 1.0) ) fig.set(figwidth=width, dpi=dpi) return axs<define_variables>
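A toy run of apply_pca() on an invented frame (it assumes the kernel's sklearn PCA import is in scope), showing the three return values and that the loadings index follows the input columns:
import numpy as np, pandas as pd
toy = pd.DataFrame(np.random.rand(50, 3), columns=["a", "b", "c"])
pca, toy_pca, loadings = apply_pca(toy)
print(toy_pca.columns.tolist())  # ['PC1', 'PC2', 'PC3']
print(loadings.index.tolist())   # ['a', 'b', 'c']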
classifier.compile(optimizer='adam',loss = 'categorical_crossentropy',metrics=['accuracy']) classifier.fit(X_train,y_train,epochs=100,batch_size=64,validation_split=0.1,shuffle=True )
Digit Recognizer
7,324,632
pca_features = [ "GarageArea", "YearRemodAdd", "TotalBsmtSF", "GrLivArea", ]<load_pretrained>
result = classifier.predict_classes(X_test )
Digit Recognizer
7,324,632
<feature_engineering><EOS>
out = pd.DataFrame({"ImageId": i+1 , "Label": result[i]} for i in range(0, test_num_img)) out.to_csv('submission.csv', index=False )
Digit Recognizer
3,811,526
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<sort_values>
import PIL import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import keras from matplotlib import pyplot from sklearn import preprocessing
Digit Recognizer
3,811,526
component = "PC1" idx = X_pca[component].sort_values(ascending=False ).index df_train[["SalePrice", "Neighborhood", "SaleCondition"] + pca_features].iloc[idx]<create_dataframe>
run_model1 = False run_model2 = False run_model3 = False run_model_adv = True
Digit Recognizer
3,811,526
def indicate_outliers(df): X_new = pd.DataFrame() X_new["Outlier"] =(df.Neighborhood == "Edwards")&(df.SaleCondition == "Partial") return X_new<categorify>
train = pd.read_csv('../input/train.csv', delimiter=',') test = pd.read_csv('../input/test.csv', delimiter=',')
Digit Recognizer
3,811,526
class CrossFoldEncoder: def __init__(self, encoder, **kwargs): self.encoder_ = encoder self.kwargs_ = kwargs self.cv_ = KFold(n_splits=5) def fit_transform(self, X, y, cols): self.fitted_encoders_ = [] self.cols_ = cols X_encoded = [] for idx_encode, idx_train in self.cv_.split(X): fitted_encoder = self.encoder_(cols=cols, **self.kwargs_) fitted_encoder.fit( X.iloc[idx_encode, :], y.iloc[idx_encode], ) X_encoded.append(fitted_encoder.transform(X.iloc[idx_train, :])[cols]) self.fitted_encoders_.append(fitted_encoder) X_encoded = pd.concat(X_encoded) X_encoded.columns = [name + "_encoded" for name in X_encoded.columns] return X_encoded def transform(self, X): X_encoded_list = [] for fitted_encoder in self.fitted_encoders_: X_encoded = fitted_encoder.transform(X) X_encoded_list.append(X_encoded[self.cols_]) X_encoded = reduce( lambda x, y: x.add(y, fill_value=0), X_encoded_list )/ len(X_encoded_list) X_encoded.columns = [name + "_encoded" for name in X_encoded.columns] return X_encoded<drop_column>
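A sketch of how the encoder class above is driven; it mirrors the calls made inside create_features() below and assumes X, y and X_test as built there, with category_encoders installed:
from category_encoders import MEstimateEncoder

# Out-of-fold target encoding on the training frame; test gets the
# encodings averaged over the five per-fold encoders.
encoder = CrossFoldEncoder(MEstimateEncoder, m=1)
X_encoded = encoder.fit_transform(X, y, cols=["MSSubClass"])  # adds MSSubClass_encoded
X_test_encoded = encoder.transform(X_test)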
train_size = train.shape[0] test_size = test.shape[0] X_train = train.iloc[:, 1:].values.astype('uint8') Y_train = train.iloc[:, 0] X_test = test.iloc[:, :].values.astype('uint8') img_dimension = np.int32(np.sqrt(X_train.shape[1])) img_rows, img_cols = img_dimension, img_dimension nb_of_color_channels = 1 if(keras.backend.image_dim_ordering() =="th"): X_train = X_train.reshape(train.shape[0], nb_of_color_channels, img_rows, img_cols) X_test = X_test.reshape(test.shape[0], nb_of_color_channels, img_rows, img_cols) in_shape =(nb_of_color_channels, img_rows, img_cols) else: X_train = X_train.reshape(train.shape[0], img_rows, img_cols, nb_of_color_channels) X_test = X_test.reshape(test.shape[0], img_rows, img_cols, nb_of_color_channels) in_shape =(img_rows, img_cols, nb_of_color_channels) print('Data Information ') print('Training set size: {} Testing set size: {}'.format(train_size, test_size)) print('Image dimension: {0}*{0}'.format(img_dimension))
Digit Recognizer
3,811,526
def create_features(df, df_test=None): X = df.copy() y = X.pop('SalePrice') mi_scores = make_mi_scores(X, y) if df_test is not None: X_test = df_test.copy() y_test = X_test.pop("SalePrice") X = pd.concat([X, X_test]) X = X.join(mathematical_transforms(X)) X = X.join(counts(X)) X = X.join(group_transforms(X)) X = X.join(pca_inspired(X)) X = label_encode(X) if df_test is not None: mi_scores = make_mi_scores(X, pd.concat([y, y_test])) else: mi_scores = make_mi_scores(X, y) X = drop_uninformative(X, mi_scores, 0.02) if df_test is not None: X_test = X.loc[df_test.index, :] X.drop(df_test.index, inplace=True) encoder = CrossFoldEncoder(MEstimateEncoder, m=1) X = X.join(encoder.fit_transform(X, y, cols=["MSSubClass"])) if df_test is not None: X_test = X_test.join(encoder.transform(X_test)) if df_test is not None: return X, X_test else: return X<prepare_x_and_y>
X_train = X_train.astype('float32') X_test = X_test.astype('float32') X_train_nor = X_train / 255 X_test_nor= X_test / 255
Digit Recognizer
3,811,526
df_train, df_test = load_data() X_train = create_features(df_train) y_train = df_train.loc[:, 'SalePrice'] score_dataset(X_train, y_train )<prepare_x_and_y>
oh_encoder = preprocessing.OneHotEncoder(categories='auto') oh_encoder.fit(Y_train.values.reshape(-1,1)) Y_train_oh = oh_encoder.transform(Y_train.values.reshape(-1,1)).toarray()
Digit Recognizer
3,811,526
X_train = create_features(df_train) y_train = df_train.loc[:, "SalePrice"] xgb_params = dict( max_depth=4, learning_rate=0.0058603076512435655, n_estimators=5045, min_child_weight=2, colsample_bytree=0.22556099175248345, subsample=0.5632348136091383, reg_alpha=0.09888625622197889, reg_lambda=0.00890758697724437, num_parallel_tree=1, ) xgb = XGBRegressor(**xgb_params) score_dataset(X_train, y_train, xgb )<init_hyperparams>
print('One-hot:') print(Y_train_oh[:5]) print(' Label:') print(Y_train[:5] )
Digit Recognizer
3,811,526
<predict_on_test>
to_categorical(Y_train, Y_train.unique().shape[0])[:5]
Digit Recognizer
3,811,526
X_train, X_test = create_features(df_train, df_test) y_train = df_train.loc[:, "SalePrice"] xgb = XGBRegressor(**xgb_params) xgb.fit(X_train, np.log(y_train)) predictions = np.exp(xgb.predict(X_test)) output = pd.DataFrame({'Id': X_test.index, 'SalePrice': predictions})<save_to_csv>
from keras.layers import Activation,Dropout,Dense,Conv2D,AveragePooling2D,Flatten,ZeroPadding2D,MaxPooling2D from keras.models import Sequential from keras import optimizers from keras.callbacks import ReduceLROnPlateau
Digit Recognizer
3,811,526
output.to_csv('submission.csv', index=False) print("Your predictions are successfully saved!" )<save_to_csv>
def build_lenet5(model, input_shape=X_train.shape[1:], dropout=0): S = [1,2,1,2,1] N_input = [28,28,14,10,5] P = [2,0,0,0,0] N = [28,14,10,5,1] F = [i[0] + 2*i[1] - i[3]*(i[2] - 1)for i in zip(N_input, P, N, S)] model.add(Conv2D(filters=6, kernel_size=(F[0],F[0]), padding='same', strides=S[0], activation='relu', input_shape=input_shape)) model.add(MaxPooling2D(pool_size=F[1], strides=S[1])) model.add(Conv2D(filters=16, kernel_size=(F[2],F[2]), padding='valid', strides=S[2], activation='relu')) model.add(MaxPooling2D(pool_size=F[3], strides=S[3])) model.add(Conv2D(filters=120, kernel_size=(F[4],F[4]), padding='valid', strides=S[4], activation='relu')) model.add(Dropout(dropout)) model.add(Flatten()) model.add(Dense(84, activation='relu')) model.add(Dense(10, activation='softmax')) if __name__ == '__main__' and run_model1: model = Sequential() build_lenet5(model, input_shape=X_train.shape[1:], dropout=0) model.summary()
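The F list above inverts the convolution output-size formula N = (N_in + 2P - F) / S + 1 to recover the filter and pool sizes; the arithmetic can be checked without Keras:
# Re-derive the kernel sizes used by build_lenet5.
S = [1, 2, 1, 2, 1]; N_input = [28, 28, 14, 10, 5]; P = [2, 0, 0, 0, 0]; N = [28, 14, 10, 5, 1]
F = [n_in + 2 * p - s * (n - 1) for n_in, p, n, s in zip(N_input, P, N, S)]
print(F)  # [5, 2, 5, 2, 5] -> the classic LeNet-5 5x5 convolutions and 2x2 pools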
Digit Recognizer
3,811,526
filename = 'ames_house_xgb_model.pkl' pickle.dump(xgb, open(filename, 'wb')) X_test.to_csv('df_test_processed.csv', index=False )<predict_on_test>
hist_dict = {} if __name__ == '__main__' and run_model1: adam = optimizers.Adam() model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=adam) hist_dict['run_model1'] = model.fit(X_train, Y_train_oh, batch_size=64, epochs=20, shuffle=True, validation_split=0.2, verbose=2)
Digit Recognizer
3,811,526
row_to_show = 42 data_for_prediction = X_test.iloc[[row_to_show]] y_sample = np.exp(xgb.predict(data_for_prediction)) explainer = shap.TreeExplainer(xgb) shap_values = explainer.shap_values(data_for_prediction )<predict_on_test>
def model_predict(model): print("Generating test predictions...") predictions = model.predict_classes(X_test, verbose=1) print("OK.") return predictions def model_predict_val(model, set_check): print("Generating set predictions...") predictions = model.predict_classes(set_check, verbose=1) print("OK.") return predictions def write_preds(preds, filename): pd.DataFrame({"ImageId": list(range(1,len(preds)+1)) , "Label": preds} ).to_csv(filename, index=False, header=True) if __name__ == '__main__' and run_model1: predictions = model_predict(model) print(predictions[:5]) write_preds(predictions, "keras-lenet5-basic.csv")
Digit Recognizer
3,811,526
data_for_prediction = X_test y_sample = np.exp(xgb.predict(data_for_prediction)) explainer = shap.TreeExplainer(xgb) shap_values = explainer.shap_values(data_for_prediction )<define_variables>
if __name__ == '__main__' and run_model2: model = Sequential() build_lenet5(model, input_shape=X_train.shape[1:], dropout=0.3) model.summary() adam = optimizers.Adam() model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=adam) hist_dict['run_model2'] = model.fit(X_train, Y_train_oh, batch_size=64, epochs=20, shuffle=True, validation_split=0.2, verbose=2 )
Digit Recognizer
3,811,526
BATCH_SIZE = 128 EPOCHS = 15<load_from_csv>
if __name__ == '__main__' and run_model2: predictions = model_predict(model) print(predictions[:5]) write_preds(predictions, "keras-lenet5-basic-dropout.csv")
Digit Recognizer
3,811,526
train = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/train.csv") test = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/test.csv" )<set_options>
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=15, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False) datagen.fit(X_train )
Digit Recognizer
3,811,526
sns.set_theme(rc = {'grid.linewidth': 0.5, 'axes.linewidth': 0.75})<prepare_x_and_y>
for x_batch, y_batch in datagen.flow(X_train, Y_train_oh, batch_size=9, shuffle = False): print(x_batch.shape) print(y_batch.shape) break
Digit Recognizer
3,811,526
ntrain = train.shape[0] ntest = test.shape[0] y_train = train.SalePrice.values all_data = pd.concat(( train, test)).reset_index(drop=True) all_data.drop(['SalePrice', 'GarageArea', 'TotRmsAbvGrd'], axis=1, inplace=True) print("all_data size is : {}".format(all_data.shape))<create_dataframe>
if __name__ == '__main__' and run_model3: X_train_s, X_val, Y_train_s, Y_val = train_test_split(X_train, Y_train_oh, test_size=0.13, random_state=42) model = Sequential() build_lenet5(model, input_shape=X_train_s.shape[1:], dropout=0.15) model.summary() adam = optimizers.Adam() model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=adam) epochs = 45 batch_size = 72 Train_gen_batch = datagen.flow(X_train_s, Y_train_s, batch_size=batch_size) datagen_no_aug = ImageDataGenerator() Val_gen_batch = datagen_no_aug.flow(X_val, Y_val, batch_size=batch_size) hist_dict['run_model3'] = model.fit_generator(Train_gen_batch, epochs = epochs, verbose = 2, steps_per_epoch = X_train.shape[0] // batch_size, validation_data = Val_gen_batch, validation_steps = X_val.shape[0] // batch_size, callbacks=[learning_rate_reduction])
Digit Recognizer
3,811,526
all_data_na =(all_data.isnull().sum() / len(all_data)) * 100 all_data_na = all_data_na.drop(all_data_na[all_data_na == 0].index ).sort_values(ascending=False)[:30] missing_data = pd.DataFrame({'Missing Ratio' :all_data_na}) missing_data.head(20 )<data_type_conversions>
if __name__ == '__main__' and run_model3: predictions = model_predict(model) print(predictions[:5]) write_preds(predictions, "keras-lenet5-aug.csv")
Digit Recognizer
3,811,526
all_data["PoolQC"] = all_data["PoolQC"].fillna("None" )<data_type_conversions>
Digit Recognizer
3,811,526
all_data["MiscFeature"] = all_data["MiscFeature"].fillna("None" )<data_type_conversions>
def build_net_advanced(model, input_shape=X_train.shape[1:], dropout=0.25): model.add(Conv2D(filters=32, kernel_size=(5,5), padding='same', strides=1, activation='relu', input_shape=input_shape)) model.add(Conv2D(filters=32, kernel_size=(5,5), padding='valid', strides=2, activation='relu')) model.add(MaxPooling2D(pool_size=(3,3), strides=1)) model.add(Dropout(dropout)) model.add(Conv2D(filters=64, kernel_size=(3,3), padding='same', strides=1, activation='relu')) model.add(Conv2D(filters=64, kernel_size=(3,3), padding='valid', strides=2, activation='relu')) model.add(MaxPooling2D(pool_size=(2,2), strides=1)) model.add(Dropout(dropout)) model.add(Flatten()) model.add(Dense(256, activation='relu')) model.add(Dropout(dropout)) model.add(Dense(128, activation='relu')) model.add(Dropout(dropout)) model.add(Dense(10, activation='softmax'))
Digit Recognizer
3,811,526
all_data["Alley"] = all_data["Alley"].fillna("None" )<data_type_conversions>
if __name__ == '__main__' and run_model_adv: X_train_s, X_val, Y_train_s, Y_val = train_test_split(X_train, Y_train_oh, test_size=0.15, random_state=42) model = Sequential() build_net_advanced(model, input_shape=X_train_s.shape[1:], dropout=0.3) model.summary() adam = optimizers.Adam() model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=adam) epochs = 35 batch_size = 84 Train_gen_batch = datagen.flow(X_train_s, Y_train_s, batch_size=batch_size) datagen_no_aug = ImageDataGenerator() Val_gen_batch = datagen_no_aug.flow(X_val, Y_val, batch_size=batch_size) hist_dict['run_model_adv'] = model.fit_generator(Train_gen_batch, epochs = epochs, verbose = 2, steps_per_epoch = X_train.shape[0] // batch_size, validation_data = Val_gen_batch, validation_steps = X_val.shape[0] // batch_size, callbacks=[learning_rate_reduction] )
Digit Recognizer
3,811,526
all_data["Fence"] = all_data["Fence"].fillna("None" )<data_type_conversions>
if __name__ == '__main__' and run_model_adv: predictions = model_predict(model) print(predictions[:5]) write_preds(predictions, "keras-adv-net.csv")
Digit Recognizer
3,811,526
all_data["FireplaceQu"] = all_data["FireplaceQu"].fillna("None" )<categorify>
_, X_val_check, _, Y_val_check = train_test_split(X_train, Y_train, test_size=0.1, random_state=1) Ypred_val_check = model_predict_val(model, set_check=X_val_check)
Digit Recognizer
3,811,526
all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform( lambda x: x.fillna(x.median()))<data_type_conversions>
cm = confusion_matrix(Y_val_check.values, Ypred_val_check) cm
Digit Recognizer