kernel_id      int64           24.2k – 23.3M
prompt         stringlengths   8 – 1.85M
completetion   stringlengths   1 – 182k
comp_name      stringlengths   5 – 57
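Read as a table, the header above describes one notebook-cell pair per row. A minimal sketch of loading and iterating such a dump with pandas, assuming the rows were exported to a file named kernels.parquet (the file name and location are hypothetical, not stated anywhere in the dump):

import pandas as pd

# Hypothetical export path; the dump does not say where the rows live.
df = pd.read_parquet("kernels.parquet")

# Each row pairs a context cell with its target cell for one Kaggle kernel.
for _, row in df.head(3).iterrows():
    print(row["kernel_id"], "|", row["comp_name"])
    print(row["prompt"][:80])        # preceding code cell
    print(row["completetion"][:80])  # cell to predict (column name spelled as in the dump)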
4,048,733
learn = Learner(dls, model, metrics=[accuracy, F1Score() ] ).to_fp16() learn.lr_find()<train_model>
X_train,X_val,Y_train,Y_val =train_test_split(X_train, y_train, test_size=0.1, random_state=0 )
Digit Recognizer
4,048,733
learn.fit_one_cycle(3, lr_max=1e-5 )<find_best_params>
datagen = ImageDataGenerator(rotation_range = 15, width_shift_range = 0.1, height_shift_range = 0.1) datagen.fit(X_train) random.seed(12345) for X_batch, Y_batch in datagen.flow(np.concatenate((X_train, X_train), axis=0), np.concatenate((Y_train, Y_train), axis=0), batch_size=35700): break X_train_aug = X_batch Y_train_aug = Y_batch for i in range(0, 9): pyplot.subplot(330 + 1 + i) pyplot.imshow(X_train_aug[i].reshape(28, 28), cmap=pyplot.get_cmap('gray')) pyplot.show()
Digit Recognizer
4,048,733
preds, targs = learn.get_preds() min_threshold = None max_f1 = -float("inf") thresholds = np.linspace(0.3, 0.7, 50) for threshold in thresholds: f1 = f1_score(targs, F.softmax(preds, dim=1)[:, 1] > threshold) if f1 > max_f1: min_threshold = threshold max_f1 = f1 print(f"threshold:{threshold:.4f} - f1:{f1:.4f}")<string_transform>
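The same sweep can be done without a Python loop; a sketch using sklearn's precision_recall_curve on the positive-class probabilities, assuming preds and targs from the cell above:

import numpy as np
import torch.nn.functional as F
from sklearn.metrics import precision_recall_curve

# Positive-class probabilities, as in the loop above.
probs = F.softmax(preds, dim=1)[:, 1].numpy()
precision, recall, thresh = precision_recall_curve(targs.numpy(), probs)
# F1 for every candidate threshold at once; clip to avoid 0/0.
f1 = 2 * precision * recall / np.clip(precision + recall, 1e-12, None)
best = np.argmax(f1[:-1])  # the last precision/recall pair has no threshold
print(f"threshold:{thresh[best]:.4f} - f1:{f1[best]:.4f}")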
X_train_aug = np.concatenate(( X_train,X_train_aug),axis=0) Y_train_aug = np.concatenate(( Y_train,Y_train_aug),axis=0) print(X_train_aug.shape,Y_train_aug.shape )
Digit Recognizer
4,048,733
test_tensor = tokenizer(list(test_df["text"]), padding="max_length", truncation=True, max_length=30, return_tensors="pt")["input_ids"]<load_pretrained>
model = Sequential() model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu', input_shape =(28,28,1))) model.add(Conv2D(filters = 32, kernel_size =(5,5),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) model.add(MaxPool2D(pool_size=(2,2), strides=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(256, activation = "relu")) model.add(Dropout(0.5)) model.add(Dense(10, activation = "softmax"))
Digit Recognizer
4,048,733
class TestDS: def __init__(self, tensors): self.tensors = tensors def __len__(self): return len(self.tensors) def __getitem__(self, idx): t = self.tensors[idx] return t, tensor(0) test_dl = DataLoader(TestDS(test_tensor), bs=128 )<predict_on_test>
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0 )
Digit Recognizer
4,048,733
test_preds = learn.get_preds(dl=test_dl )<save_to_csv>
model.compile(optimizer = optimizer , loss = "categorical_crossentropy", metrics=["accuracy"])
Digit Recognizer
4,048,733
sub = pd.read_csv(dir_path + "sample_submission.csv") prediction = (F.softmax(test_preds[0], dim=1)[:, 1] > min_threshold).int() sub["target"] = prediction sub.to_csv("submission.csv", index=False)<import_modules>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001 )
Digit Recognizer
4,048,733
import numpy as np import pandas as pd<load_from_csv>
epochs = 10 batch_size = 86
Digit Recognizer
4,048,733
train = pd.read_csv('../input/nlp-getting-started/train.csv') test = pd.read_csv('../input/nlp-getting-started/test.csv')<load_pretrained>
history = model.fit(X_train_aug, Y_train_aug, batch_size = batch_size, epochs = epochs, validation_data =(X_val, Y_val), verbose = 2, callbacks=[learning_rate_reduction] )
Digit Recognizer
4,048,733
nltk.download('punkt') nltk.download('stopwords') !pip install contractions nltk.download('wordnet') !pip install pyspellchecker <string_transform>
result = model.predict(Test) result = np.argmax(result,axis = 1) result.shape
Digit Recognizer
4,048,733
stop_words = nltk.corpus.stopwords.words('english') wnl = WordNetLemmatizer() stemmer = SnowballStemmer('english') i = 0 for doc in train.text: doc = re.sub(r'https?://\S+|www\.\S+', '', doc) doc = re.sub(r'<.*?>', '', doc) doc = re.sub(r'[^a-zA-Z\s]', '', doc, flags=re.I | re.A) doc = ' '.join([wnl.lemmatize(w) for w in doc.lower().split()]) doc = contractions.fix(doc) tokens = nltk.word_tokenize(doc) filtered = [token for token in tokens if token not in stop_words] train.loc[i, 'text'] = ' '.join(filtered) i += 1 i = 0 for doc in test.text: doc = re.sub(r'https?://\S+|www\.\S+', '', doc) doc = re.sub(r'<.*?>', '', doc) doc = re.sub(r'[^a-zA-Z\s]', '', doc, flags=re.I | re.A) doc = ' '.join([wnl.lemmatize(w) for w in doc.lower().split()]) doc = contractions.fix(doc) tokens = nltk.word_tokenize(doc) filtered = [token for token in tokens if token not in stop_words] test.loc[i, 'text'] = ' '.join(filtered) i += 1<install_modules>
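The two near-identical loops above also mutate rows one by one with chained indexing; a sketch of the same cleaning written once and applied to both frames, assuming the imports used by the original cell:

import re
import nltk
import contractions
from nltk.stem import WordNetLemmatizer

wnl = WordNetLemmatizer()
stop_words = set(nltk.corpus.stopwords.words('english'))

def clean(doc):
    # Same steps as the loops above: strip URLs, tags and non-letters,
    # lemmatize, expand contractions, then drop stopwords.
    doc = re.sub(r'https?://\S+|www\.\S+', '', doc)
    doc = re.sub(r'<.*?>', '', doc)
    doc = re.sub(r'[^a-zA-Z\s]', '', doc, flags=re.I | re.A)
    doc = ' '.join(wnl.lemmatize(w) for w in doc.lower().split())
    doc = contractions.fix(doc)
    tokens = nltk.word_tokenize(doc)
    return ' '.join(t for t in tokens if t not in stop_words)

train['text'] = train['text'].apply(clean)
test['text'] = test['text'].apply(clean)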
results = pd.Series(result, name="Label") submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1) submission.to_csv("cnn_mnist_datagen.csv", index=False)
Digit Recognizer
3,287,070
!pip install tensorflow_text <define_variables>
%matplotlib inline
Digit Recognizer
3,287,070
bert_model_name = 'bert_en_uncased_L-12_H-768_A-12' map_name_to_handle = { 'bert_en_uncased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3', 'bert_en_cased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/3', 'bert_multi_cased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_multi_cased_L-12_H-768_A-12/3', 'small_bert/bert_en_uncased_L-2_H-128_A-2': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/1', 'small_bert/bert_en_uncased_L-2_H-256_A-4': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-256_A-4/1', 'small_bert/bert_en_uncased_L-2_H-512_A-8': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-512_A-8/1', 'small_bert/bert_en_uncased_L-2_H-768_A-12': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-768_A-12/1', 'small_bert/bert_en_uncased_L-4_H-128_A-2': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-128_A-2/1', 'small_bert/bert_en_uncased_L-4_H-256_A-4': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-256_A-4/1', 'small_bert/bert_en_uncased_L-4_H-512_A-8': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-512_A-8/1', 'small_bert/bert_en_uncased_L-4_H-768_A-12': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-4_H-768_A-12/1', 'small_bert/bert_en_uncased_L-6_H-128_A-2': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-128_A-2/1', 'small_bert/bert_en_uncased_L-6_H-256_A-4': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-256_A-4/1', 'small_bert/bert_en_uncased_L-6_H-512_A-8': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-512_A-8/1', 'small_bert/bert_en_uncased_L-6_H-768_A-12': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-6_H-768_A-12/1', 'small_bert/bert_en_uncased_L-8_H-128_A-2': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-128_A-2/1', 'small_bert/bert_en_uncased_L-8_H-256_A-4': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-256_A-4/1', 'small_bert/bert_en_uncased_L-8_H-512_A-8': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-512_A-8/1', 'small_bert/bert_en_uncased_L-8_H-768_A-12': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-8_H-768_A-12/1', 'small_bert/bert_en_uncased_L-10_H-128_A-2': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-128_A-2/1', 'small_bert/bert_en_uncased_L-10_H-256_A-4': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-256_A-4/1', 'small_bert/bert_en_uncased_L-10_H-512_A-8': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-512_A-8/1', 'small_bert/bert_en_uncased_L-10_H-768_A-12': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-10_H-768_A-12/1', 'small_bert/bert_en_uncased_L-12_H-128_A-2': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-128_A-2/1', 'small_bert/bert_en_uncased_L-12_H-256_A-4': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-256_A-4/1', 'small_bert/bert_en_uncased_L-12_H-512_A-8': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-512_A-8/1', 'small_bert/bert_en_uncased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-12_H-768_A-12/1', 'albert_en_base': 'https://tfhub.dev/tensorflow/albert_en_base/2', 'electra_small': 'https://tfhub.dev/google/electra_small/2', 'electra_base': 'https://tfhub.dev/google/electra_base/2', 'experts_pubmed': 'https://tfhub.dev/google/experts/bert/pubmed/2', 'experts_wiki_books': 
'https://tfhub.dev/google/experts/bert/wiki_books/2', 'talking-heads_base': 'https://tfhub.dev/tensorflow/talkheads_ggelu_bert_en_base/1', } map_model_to_preprocess = { 'bert_en_uncased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'bert_en_cased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_cased_preprocess/3', 'small_bert/bert_en_uncased_L-2_H-128_A-2': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-2_H-256_A-4': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-2_H-512_A-8': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-2_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-4_H-128_A-2': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-4_H-256_A-4': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-4_H-512_A-8': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-4_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-6_H-128_A-2': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-6_H-256_A-4': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-6_H-512_A-8': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-6_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-8_H-128_A-2': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-8_H-256_A-4': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-8_H-512_A-8': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-8_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-10_H-128_A-2': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-10_H-256_A-4': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-10_H-512_A-8': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-10_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-12_H-128_A-2': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-12_H-256_A-4': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-12_H-512_A-8': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'small_bert/bert_en_uncased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'bert_multi_cased_L-12_H-768_A-12': 'https://tfhub.dev/tensorflow/bert_multi_cased_preprocess/3', 'albert_en_base': 'https://tfhub.dev/tensorflow/albert_en_preprocess/3', 'electra_small': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'electra_base': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'experts_pubmed': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'experts_wiki_books': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', 'talking-heads_base': 'https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', } tfhub_handle_encoder = map_name_to_handle[bert_model_name] tfhub_handle_preprocess = 
map_model_to_preprocess[bert_model_name] print(f'BERT model selected : {tfhub_handle_encoder}') print(f'Preprocess model auto-selected: {tfhub_handle_preprocess}' )<categorify>
import numpy as np import pandas as pd import matplotlib.pyplot as plt import matplotlib.cm as cm import seaborn as sns
Digit Recognizer
3,287,070
bert_preprocess_model = hub.KerasLayer(tfhub_handle_preprocess) text_test = ['this is such an amazing movie!'] text_preprocessed = bert_preprocess_model(text_test) print(f'Keys : {list(text_preprocessed.keys())}') print(f'Shape : {text_preprocessed["input_word_ids"].shape}') print(f'Word Ids : {text_preprocessed["input_word_ids"][0, :12]}') print(f'Input Mask : {text_preprocessed["input_mask"][0, :12]}') print(f'Type Ids : {text_preprocessed["input_type_ids"][0, :12]}' )<train_model>
from keras.utils import np_utils from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau from keras.optimizers import RMSprop, Adam from keras.preprocessing.image import ImageDataGenerator from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix import itertools
Digit Recognizer
3,287,070
bert_model = hub.KerasLayer(tfhub_handle_encoder) bert_results = bert_model(text_preprocessed) print(f'Loaded BERT: {tfhub_handle_encoder}') print(f'Pooled Outputs Shape:{bert_results["pooled_output"].shape}') print(f'Pooled Outputs Values:{bert_results["pooled_output"][0, :12]}') print(f'Sequence Outputs Shape:{bert_results["sequence_output"].shape}') print(f'Sequence Outputs Values:{bert_results["sequence_output"][0, :12]}' )<categorify>
PATH = '../input/'
Digit Recognizer
3,287,070
def build_classifier_model(): text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text') preprocessing_layer = hub.KerasLayer(tfhub_handle_preprocess, name='preprocessing') encoder_inputs = preprocessing_layer(text_input) encoder = hub.KerasLayer(tfhub_handle_encoder, trainable=True, name='BERT_encoder') outputs = encoder(encoder_inputs) net = outputs['pooled_output'] net = tf.keras.layers.Dropout(0.1)(net) net = tf.keras.layers.Dense(1, activation='sigmoid', name='classifier')(net) return tf.keras.Model(text_input, net) classifier_model = build_classifier_model() classifier_model.compile(optimizer=tf.keras.optimizers.Adam(3e-5), loss=tf.keras.losses.BinaryCrossentropy(from_logits=False), metrics=[tf.metrics.BinaryAccuracy()]) checkpoint = tf.keras.callbacks.ModelCheckpoint('model.h5', monitor='val_loss', save_best_only=True) es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True) history = classifier_model.fit(train.text, train.target, validation_split=0.2, epochs=10, callbacks=[checkpoint, es], batch_size=8)<load_pretrained>
df_train = pd.read_csv(f'{PATH}train.csv') df_test = pd.read_csv(f'{PATH}test.csv' )
Digit Recognizer
3,287,070
classifier_model.load_weights('./model.h5') pred=classifier_model.predict(test.text )<count_values>
X_train = df_train.drop('label', axis=1).values X_test = df_test.values Y_train = df_train.label print(f'Number of training examples: {X_train.shape[0]}') print(f'Number of test examples: {X_test.shape[0]}') print(f'Classes: {np.unique(Y_train)}')
Digit Recognizer
3,287,070
pd.DataFrame(np.where(pred>0.5,1,0)).value_counts()<save_to_csv>
X_train = X_train.reshape(-1, 28, 28, 1) X_test = X_test.reshape(-1, 28, 28, 1 )
Digit Recognizer
3,287,070
pd.DataFrame({ 'id':test.id, 'target':np.where(pred>0.5,1,0)[:,0] } ).to_csv('submission.csv',index=False )<load_from_csv>
print('Integer Valued Labels') print(Y_train[:10]) Y_train = np_utils.to_categorical(Y_train, 10) print('One Hot Labels') print(Y_train[:10] )
Digit Recognizer
3,287,070
train_data = pd.read_csv(".. /input/nlp-getting-started/train.csv") train_data.head(5 )<load_from_csv>
X_train = X_train.astype('float32')/ 255 X_test = X_test.astype('float32')/ 255
Digit Recognizer
3,287,070
test_data = pd.read_csv(".. /input/nlp-getting-started/test.csv") test_data.head(5 )<install_modules>
X_train, X_valid = X_train[7000:], X_train[:7000] Y_train, Y_valid = Y_train[7000:], Y_train[:7000]
Digit Recognizer
3,287,070
!pip install BeautifulSoup4<string_transform>
model = Sequential() model.add(Flatten(input_shape=X_train.shape[1:])) model.add(Dense(512, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(512, activation='relu')) model.add(Dropout(0.2)) model.add(Dense(10, activation='softmax')) model.summary()
Digit Recognizer
3,287,070
stop = set(stopwords.words('english')) stop.update(list(string.punctuation)) def clean_tweets(text): re1 = re.compile(r'  +') x1 = text.lower().replace('nbsp;', ' ').replace('quot;', "'").replace('<br />', " ").replace('\\"', '"').replace('<unk>', 'u_n').replace(' @.@ ', '.').replace(' @-@ ', '-').replace('\\', ' \\ ') text = re1.sub(' ', html.unescape(x1)) text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf-8', 'ignore') soup = BeautifulSoup(text, 'html.parser') text = soup.get_text() text = re.sub(r'\[[^]]*\]', '', text) text = re.sub(r'http\S+', '', text) text = text.replace("@", "") text = text.replace("#", "") text = re.sub(r'[^a-zA-Z ]', '', text) final_text = [] for word in text.split(): if word.strip().lower() not in stop: final_text.append(word.strip().lower()) text = " ".join(final_text) lemmatizer = WordNetLemmatizer() text = " ".join([lemmatizer.lemmatize(word) for word in text.split()]) text = " ".join([lemmatizer.lemmatize(word, pos='v') for word in text.split()]) text = re.sub(r"\d", "num", text) return text.lower() train_data['prep_text'] = train_data['text'].apply(clean_tweets) train_data['prep_text'].head(5)<feature_engineering>
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'] )
Digit Recognizer
3,287,070
test_data['text'] = test_data['text'].apply(clean_tweets) test_data['text'].head(5 )<train_model>
checkpointer = ModelCheckpoint(filepath='mnist.model.best.hdf5', verbose=1, save_best_only=True) hist = model.fit(X_train, Y_train, batch_size=128, epochs=10, validation_split=0.2, callbacks=[checkpointer], verbose=1, shuffle=True )
Digit Recognizer
3,287,070
vocab_size = 1000 tokenizer = Tokenizer(num_words = vocab_size, oov_token = 'UNK') tokenizer.fit_on_texts(list(train_data['prep_text'])+ list(test_data['text']))<data_type_conversions>
model.load_weights('mnist.model.best.hdf5' )
Digit Recognizer
3,287,070
X_train_ohe = tokenizer.texts_to_matrix(train_data['prep_text'], mode = 'binary') X_test_ohe = tokenizer.texts_to_matrix(test_data['text'], mode = 'binary') y_train = np.array(train_data['target'] ).astype(int) print(f"X_train shape: {X_train_ohe.shape}") print(f"X_test shape: {X_test_ohe.shape}") print(f"y_train shape: {y_train.shape}" )<split>
score = model.evaluate(X_valid, Y_valid, verbose=0) accuracy = 100 * score[1] print(f'Test Accuracy {accuracy:.4f}' )
Digit Recognizer
3,287,070
X_train_ohe, X_val_ohe, y_train, y_val = train_test_split(X_train_ohe, y_train, random_state = 42, test_size = 0.2) print(f"X_train shape: {X_train_ohe.shape}") print(f"X_val shape: {X_val_ohe.shape}") print(f"y_train shape: {y_train.shape}") print(f"y_val shape: {y_val.shape}" )<choose_model_class>
def submit_result(model, X_test): results = model.predict(X_test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label") submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("mnist_datagen.csv",index=False)
Digit Recognizer
3,287,070
def setup_model() : model = Sequential() model.add(layers.Dense(1, activation='sigmoid', input_shape=(vocab_size,))) model.compile(optimizer=optimizers.RMSprop(lr=0.001), loss=losses.binary_crossentropy, metrics=[metrics.binary_accuracy]) return model model = setup_model() model.summary()<train_model>
cnn_model = Sequential() cnn_model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='Same', activation='relu', input_shape=X_train.shape[1:])) cnn_model.add(Conv2D(filters=32, kernel_size=(5, 5), padding='Same', activation='relu')) cnn_model.add(MaxPooling2D(pool_size=(2, 2))) cnn_model.add(Dropout(0.2)) cnn_model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='Same', activation='relu')) cnn_model.add(Conv2D(filters=64, kernel_size=(3, 3), padding='Same', activation='relu')) cnn_model.add(MaxPooling2D(pool_size=(2, 2))) cnn_model.add(Dropout(0.2)) cnn_model.add(Flatten()) cnn_model.add(Dense(512, activation='relu')) cnn_model.add(Dropout(0.2)) cnn_model.add(Dense(512, activation='relu')) cnn_model.add(Dropout(0.2)) cnn_model.add(Dense(10, activation='softmax')) cnn_model.summary()
Digit Recognizer
3,287,070
history = model.fit(X_train_ohe, y_train, epochs = 20, batch_size = 512, validation_data =(X_val_ohe, y_val))<compute_test_metric>
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0) cnn_model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=['accuracy'] )
Digit Recognizer
3,287,070
_, accuracy = model.evaluate(X_val_ohe, y_val )<data_type_conversions>
checkpointer = ModelCheckpoint(filepath='mnist.model.best.cnn.hdf5', verbose=1, save_best_only=True) hist = cnn_model.fit(X_train, Y_train, batch_size=128, epochs=10, validation_split=0.2, callbacks=[checkpointer], verbose=1, shuffle=True )
Digit Recognizer
3,287,070
X_train_wc = tokenizer.texts_to_matrix(train_data['prep_text'], mode = 'count') X_test_wc = tokenizer.texts_to_matrix(test_data['text'], mode = 'count') y_train = np.array(train_data['target'] ).astype(int) print(f"X_train shape: {X_train_wc.shape}") print(f"X_test shape: {X_test_wc.shape}") print(f"y_train shape: {y_train.shape}") <split>
cnn_model.load_weights('mnist.model.best.cnn.hdf5' )
Digit Recognizer
3,287,070
X_train_wc, X_val_wc, y_train, y_val = train_test_split(X_train_wc, y_train, random_state = 42, test_size = 0.2) print(f"X_train shape: {X_train_wc.shape}") print(f"X_val shape: {X_val_wc.shape}") print(f"y_train shape: {y_train.shape}") print(f"y_val shape: {y_val.shape}" )<train_model>
score = cnn_model.evaluate(X_valid, Y_valid, verbose=0) accuracy = 100 * score[1] print(f'Test Accuracy {accuracy:.4f}' )
Digit Recognizer
3,287,070
history = model.fit(X_train_wc, y_train, epochs = 20, batch_size = 512, validation_data =(X_val_wc, y_val))<compute_test_metric>
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.2) print(f'Shape of the training data {X_train.shape}') print(f'Shape of the validation dat {X_val.shape}' )
Digit Recognizer
3,287,070
_, accuracy = model.evaluate(X_val_wc, y_val )<data_type_conversions>
datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False ) datagen.fit(X_train )
Digit Recognizer
3,287,070
X_train_freq = tokenizer.texts_to_matrix(train_data['prep_text'], mode = 'freq') X_test_freq = tokenizer.texts_to_matrix(test_data['text'], mode = 'freq') y_train = np.array(train_data['target'] ).astype(int) print(f"X_train shape: {X_train_freq.shape}") print(f"X_test shape: {X_test_freq.shape}") print(f"y_train shape: {y_train.shape}" )<split>
batch_size=128 checkpointer = ModelCheckpoint(filepath='mnist.model.best.cnn.aug.hdf5', verbose=1, save_best_only=True) hist = cnn_model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size), epochs=10, validation_data=(X_val, Y_val), callbacks=[checkpointer], verbose=2, shuffle=True, steps_per_epoch=X_train.shape[0] // batch_size )
Digit Recognizer
3,287,070
X_train_freq, X_val_freq, y_train, y_val = train_test_split(X_train_freq, y_train, test_size = 0.2, random_state = 42) print(f"X_train shape: {X_train_freq.shape}") print(f"X_val shape: {X_val_freq.shape}") print(f"y_train shape: {y_train.shape}") print(f"y_val shape: {y_val.shape}" )<train_model>
cnn_model.load_weights('mnist.model.best.cnn.aug.hdf5' )
Digit Recognizer
3,287,070
history = model.fit(X_train_freq, y_train, epochs = 20, batch_size = 512, validation_data =(X_val_freq, y_val))<train_model>
score = cnn_model.evaluate(X_valid, Y_valid, verbose=0) accuracy = 100 * score[1] print(f'Test Accuracy {accuracy:.4f}' )
Digit Recognizer
3,287,070
vectorizer = TfidfVectorizer(max_features = vocab_size) vectorizer.fit(list(train_data['prep_text'])+ list(test_data['text'])) X_train_tfidf = vectorizer.transform(list(train_data['prep_text'])).toarray() X_test_tfidf = vectorizer.transform(list(test_data['text'])).toarray() y_train = np.array(train_data['target'] ).astype(int) print(f"X_train shape {X_train_tfidf.shape}") print(f"X_test shape {X_test_tfidf.shape}") print(f"y_train shape {y_train.shape}" )<split>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001 )
Digit Recognizer
3,287,070
X_train_tfidf, X_val_tfidf, y_train, y_val = train_test_split(X_train_tfidf, y_train, test_size = 0.2, random_state = 42) print(f"X_train shape: {X_train_tfidf.shape}") print(f"X_val shape: {X_val_tfidf.shape}") print(f"y_train shape: {y_train.shape}") print(f"y_val shape: {y_val.shape}" )<train_model>
checkpointer = ModelCheckpoint(filepath='mnist.model.best.cnn.aug.ann.hdf5', verbose=1, save_best_only=True) hist = cnn_model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size), epochs=20, validation_data=(X_val, Y_val), callbacks=[checkpointer, learning_rate_reduction], verbose=2, shuffle=True, steps_per_epoch=X_train.shape[0] // batch_size)
Digit Recognizer
3,287,070
history = model.fit(X_train_tfidf, y_train, epochs = 20, batch_size = 512, validation_data =(X_val_tfidf, y_val))<define_variables>
cnn_model.load_weights('mnist.model.best.cnn.aug.ann.hdf5' )
Digit Recognizer
3,287,070
embedding_dict = {} with open('../input/glovetwitter27b100dtxt/glove.twitter.27B.100d.txt', 'r') as f: for line in f: values = line.split() word = values[0] vectors = np.asarray(values[1:], 'float32') embedding_dict[word] = vectors<categorify>
score = cnn_model.evaluate(X_valid, Y_valid, verbose=0) accuracy = 100 * score[1] print(f'Test Accuracy {accuracy:.4f}' )
Digit Recognizer
3,287,070
vocab_size = 10000 tokenizer = Tokenizer(num_words = vocab_size, oov_token = 'UNK') tokenizer.fit_on_texts(list(train_data['prep_text'])+ list(test_data['text'])) max_len = 15 X_train_seq = tokenizer.texts_to_sequences(train_data['prep_text']) X_test_seq = tokenizer.texts_to_sequences(test_data['text']) X_train_seq = pad_sequences(X_train_seq, maxlen = max_len, truncating = 'post', padding = 'post') X_test_seq = pad_sequences(X_test_seq, maxlen = max_len, truncating = 'post', padding = 'post') y_train = np.array(train_data['target'] ).astype(int) print(f"X_train shape: {X_train_seq.shape}") print(f"X_test shape: {X_test_seq.shape}") print(f"y_train shape: {y_train.shape}" )<split>
submit_result(cnn_model, X_test )
Digit Recognizer
3,287,070
X_train_seq, X_val_seq, y_train, y_val = train_test_split(X_train_seq, y_train, test_size = 0.2, random_state = 42) print(f"X_train shape: {X_train_seq.shape}") print(f"X_val shape: {X_val_seq.shape}") print(f"y_train shape: {y_train.shape}") print(f"y_val shape: {y_val.shape}" )<count_unique_values>
cnn_bn_model = Sequential() cnn_bn_model.add(Conv2D(filters = 64, kernel_size =(5,5),padding = 'Same', activation ='relu', input_shape =(28,28,1))) cnn_bn_model.add(BatchNormalization()) cnn_bn_model.add(Conv2D(filters = 64, kernel_size =(5,5),padding = 'Same', activation ='relu')) cnn_bn_model.add(BatchNormalization()) cnn_bn_model.add(MaxPooling2D(pool_size=(2,2))) cnn_bn_model.add(Dropout(0.25)) cnn_bn_model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) cnn_bn_model.add(BatchNormalization()) cnn_bn_model.add(Conv2D(filters = 64, kernel_size =(3,3),padding = 'Same', activation ='relu')) cnn_bn_model.add(BatchNormalization()) cnn_bn_model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2))) cnn_bn_model.add(Dropout(0.25)) cnn_bn_model.add(Conv2D(filters = 64, kernel_size =(3,3), padding = 'Same', activation ='relu')) cnn_bn_model.add(BatchNormalization()) cnn_bn_model.add(Dropout(0.25)) cnn_bn_model.add(Flatten()) cnn_bn_model.add(Dense(256, activation = "relu")) cnn_bn_model.add(BatchNormalization()) cnn_bn_model.add(Dropout(0.25)) cnn_bn_model.add(Dense(10, activation = "softmax")) cnn_bn_model.summary()
Digit Recognizer
3,287,070
num_words = len(tokenizer.word_index) print(f"Number of unique words: {num_words}" )<categorify>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.5, min_lr=0.00001 )
Digit Recognizer
3,287,070
embedding_matrix=np.zeros(( num_words,100)) for word,i in tokenizer.word_index.items() : if i < num_words: emb_vec = embedding_dict.get(word) if emb_vec is not None: embedding_matrix[i] = emb_vec<choose_model_class>
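Before training on top of these vectors it is worth knowing how much of the retained vocabulary GloVe actually covers; a small check, reusing tokenizer, num_words and embedding_dict from the cells above:

hits = sum(1 for word, i in tokenizer.word_index.items()
           if i < num_words and word in embedding_dict)
print(f"GloVe covers {hits}/{num_words - 1} retained words "
      f"({hits / (num_words - 1):.1%})")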
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0) cnn_bn_model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=['accuracy'] )
Digit Recognizer
3,287,070
n_latent_factors = 100 model_glove = Sequential() model_glove.add(layers.Embedding(num_words, n_latent_factors, weights = [embedding_matrix], input_length = max_len, trainable=True)) model_glove.add(layers.Flatten()) model_glove.add(layers.Dropout(0.5)) model_glove.add(layers.Dense(1, activation='sigmoid')) model_glove.summary()<train_model>
checkpointer = ModelCheckpoint(filepath='final.model.hdf5', verbose=1, save_best_only=True) hist = cnn_bn_model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size), epochs=20, validation_data=(X_val, Y_val), callbacks=[checkpointer, learning_rate_reduction], verbose=2, shuffle=True, steps_per_epoch=X_train.shape[0] // batch_size)
Digit Recognizer
3,287,070
model_glove.compile(optimizer = optimizers.RMSprop(lr=0.001), loss = losses.binary_crossentropy, metrics = [metrics.binary_accuracy]) history = model_glove.fit(X_train_seq, y_train, epochs=20, batch_size=512, validation_data=(X_val_seq, y_val))<categorify>
cnn_bn_model.load_weights('final.model.hdf5' )
Digit Recognizer
3,287,070
max_len = 15 X_train_seq = tokenizer.texts_to_sequences(train_data['prep_text']) X_test_seq = tokenizer.texts_to_sequences(test_data['text']) X_train_seq = pad_sequences(X_train_seq, maxlen = max_len, truncating = 'post', padding = 'post') X_test_seq = pad_sequences(X_test_seq, maxlen = max_len, truncating = 'post', padding = 'post') y_train = np.array(train_data['target'] ).astype(int) print(f"X_train shape: {X_train_seq.shape}") print(f"X_test shape: {X_test_seq.shape}") print(f"y_train shape: {y_train.shape} ") n_latent_factors = 100 model_glove = Sequential() model_glove.add(layers.Embedding(num_words, n_latent_factors, weights = [embedding_matrix], input_length = max_len, trainable=True)) model_glove.add(layers.Flatten()) model_glove.add(layers.Dropout(0.5)) model_glove.add(layers.Dense(1, activation='sigmoid')) print(f"{model_glove.summary() } ") model_glove.compile(optimizer = optimizers.RMSprop(lr=0.001), loss = losses.binary_crossentropy, metrics = [metrics.binary_accuracy]) history = model_glove.fit(X_train_seq, y_train, epochs=20, batch_size=512 )<split>
score = cnn_bn_model.evaluate(X_valid, Y_valid, verbose=0) accuracy = 100 * score[1] print(f'Test Accuracy {accuracy:.4f}' )
Digit Recognizer
3,287,070
<save_to_csv><EOS>
submit_result(cnn_bn_model, X_test )
Digit Recognizer
6,293,082
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<import_modules>
import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib.image as mpimg
Digit Recognizer
6,293,082
import pandas as pd import numpy as np from sklearn.metrics import f1_score<load_from_csv>
train = pd.read_csv(".. /input/digit-recognizer/train.csv") test = pd.read_csv(".. /input/digit-recognizer/test.csv") train.shape
Digit Recognizer
6,293,082
train = pd.read_csv('../input/nlp-getting-started/train.csv') test = pd.read_csv('../input/nlp-getting-started/test.csv')<count_values>
Y_train = train["label"] X_train = train.drop(labels = ["label"],axis = 1 )
Digit Recognizer
6,293,082
train.target.value_counts()<load_pretrained>
X_train = X_train / 255.0 test = test / 255.0
Digit Recognizer
6,293,082
nltk.download('punkt') nltk.download('stopwords') !pip install contractions nltk.download('wordnet') !pip install pyspellchecker <string_transform>
X_train = X_train.values.reshape(-1,28,28,1) test = test.values.reshape(-1,28,28,1 )
Digit Recognizer
6,293,082
stop_words = nltk.corpus.stopwords.words('english') wnl = WordNetLemmatizer() stemmer = SnowballStemmer('english') i = 0 for doc in train.text: doc = re.sub(r'https?://\S+|www\.\S+', '', doc) doc = re.sub(r'<.*?>', '', doc) doc = re.sub(r'[^a-zA-Z\s]', '', doc, flags=re.I | re.A) doc = ' '.join([wnl.lemmatize(w) for w in doc.lower().split()]) doc = contractions.fix(doc) tokens = nltk.word_tokenize(doc) filtered = [token for token in tokens if token not in stop_words] train.loc[i, 'text'] = ' '.join(filtered) i += 1 i = 0 for doc in test.text: doc = re.sub(r'https?://\S+|www\.\S+', '', doc) doc = re.sub(r'<.*?>', '', doc) doc = re.sub(r'[^a-zA-Z\s]', '', doc, flags=re.I | re.A) doc = ' '.join([wnl.lemmatize(w) for w in doc.lower().split()]) doc = contractions.fix(doc) tokens = nltk.word_tokenize(doc) filtered = [token for token in tokens if token not in stop_words] test.loc[i, 'text'] = ' '.join(filtered) i += 1<categorify>
from keras.utils.np_utils import to_categorical
Digit Recognizer
6,293,082
tfidf=TfidfVectorizer(ngram_range=(1,1),use_idf=True) mat=tfidf.fit_transform(train.text ).toarray() train_df=pd.DataFrame(mat,columns=tfidf.get_feature_names()) test_df=pd.DataFrame(tfidf.transform(test.text ).toarray() ,columns=tfidf.get_feature_names()) train_df.head()<predict_on_test>
Y_train = to_categorical(Y_train, num_classes = 10 )
Digit Recognizer
6,293,082
model=LogisticRegression() model.fit(train_df,train.target) print(f1_score(model.predict(train_df),train.target)) pred=model.predict(test_df )<save_to_csv>
from sklearn.model_selection import train_test_split
Digit Recognizer
6,293,082
pd.DataFrame({ 'id':test.id, 'target':pred } ).to_csv('submission.csv',index=False )<import_modules>
from sklearn.model_selection import train_test_split
Digit Recognizer
6,293,082
import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from nltk.corpus import stopwords from nltk.util import ngrams from nltk.stem import WordNetLemmatizer import re from textblob import TextBlob from sklearn.model_selection import train_test_split from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix from sklearn.tree import DecisionTreeClassifier from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV import tensorflow as tf from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.utils import to_categorical from gensim.models import Word2Vec from gensim.models.keyedvectors import KeyedVectors import time from keras.layers import Dense, Input, Flatten, Dropout from keras.layers import Conv1D, MaxPooling1D, Embedding from keras.models import Sequential from keras import losses from tensorflow.keras.optimizers import Adam from tensorflow.keras.models import Model<load_from_csv>
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=10 )
Digit Recognizer
6,293,082
train_data = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv') submit_data = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv' )<count_missing_values>
from tensorflow import keras from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D from keras.optimizers import RMSprop import tensorflow as tf
Digit Recognizer
6,293,082
train_data[train_data['text'].isna() ]<groupby>
model = tf.keras.models.Sequential([ tf.keras.layers.Conv2D(64,(3, 3), activation='relu', input_shape=(28, 28, 1),padding='same'), tf.keras.layers.Conv2D(64,(3, 3), activation='relu', input_shape=(28, 28, 1),padding='same'), tf.keras.layers.Conv2D(128,(3, 3), activation='relu', input_shape=(28, 28, 1),padding='same'), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(128,(3, 3), activation='relu',padding='same'), tf.keras.layers.Conv2D(192,(3, 3), activation='relu',padding='same'), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(192,(3, 3), activation='relu',padding='same'), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Dropout (.5), tf.keras.layers.Flatten() , tf.keras.layers.Dense(512, activation='relu'), tf.keras.layers.Dense(10, activation='softmax') ] )
Digit Recognizer
6,293,082
train_data.groupby('target' ).count()<set_options>
model.compile(optimizer = keras.optimizers.RMSprop(learning_rate=0.001, rho=0.9), loss = "categorical_crossentropy", metrics=["accuracy"] )
Digit Recognizer
6,293,082
%matplotlib inline<concatenate>
model.fit(X_train, Y_train, batch_size = 86, epochs =50, validation_data =(X_val, Y_val), verbose = 1 )
Digit Recognizer
6,293,082
data = pd.concat([train_data, submit_data]) data.shape<feature_engineering>
results = model.predict(test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label") submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("cnn_fourth.csv",index=False )
Digit Recognizer
3,821,292
data['text'] = data['text'].apply(lambda x: re.sub(re.compile(r'https?\S+'), '', x)) data['text'] = data['text'].apply(lambda x: re.sub(re.compile(r"[\/:,.!?@&\-'\`\"_]"), '', x)) data['text'] = data['text'].apply(lambda x: re.sub(re.compile(r'<.*?>'), '', x)) data['text'] = data['text'].apply(lambda x: re.sub(re.compile("[" u"\U0001F600-\U0001F64F" u"\U0001F300-\U0001F5FF" u"\U0001F680-\U0001F6FF" u"\U0001F1E0-\U0001F1FF" u"\U00002702-\U000027B0" u"\U000024C2-\U0001F251" "]+", flags=re.UNICODE), '', x)) data['text'] = data['text'].apply(lambda x: re.sub(re.compile(r'\d'), '', x)) data['text'] = data['text'].apply(lambda x: re.sub(re.compile(r'[^\w]'), ' ', x)) data['text'] = data['text'].str.lower()<split>
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv" )
Digit Recognizer
3,821,292
clean_train = data[0:train_data.shape[0]] clean_submit = data[train_data.shape[0]:-1] X_train, X_test, y_train, y_test = train_test_split(clean_train['text'], clean_train['target'], test_size = 0.2, random_state = 4 )<feature_engineering>
train = pd.read_csv(".. /input/train.csv") test = pd.read_csv(".. /input/test.csv" )
Digit Recognizer
3,821,292
def tfidf(words): tfidf_vectorizer = TfidfVectorizer() data_feature = tfidf_vectorizer.fit_transform(words) return data_feature, tfidf_vectorizer X_train_tfidf, tfidf_vectorizer = tfidf(X_train.tolist()) X_test_tfidf = tfidf_vectorizer.transform(X_test.tolist() )<train_model>
X_train = train.iloc[:, 1:].values.reshape(-1,28,28,1) Y_train = train.iloc[:, 0] x_test = test.values.reshape(-1,28,28,1 )
Digit Recognizer
3,821,292
lr_tfidf = LogisticRegression(class_weight = 'balanced', solver = 'lbfgs', n_jobs = -1) lr_tfidf.fit(X_train_tfidf, y_train) y_predicted_lr = lr_tfidf.predict(X_test_tfidf )<compute_test_metric>
seed_value= 1998 os.environ['PYTHONHASHSEED']=str(seed_value) random.seed(seed_value) np.random.seed(seed_value) tf.set_random_seed(seed_value) config = tf.ConfigProto( device_count = {'GPU': 1, 'CPU': 100}, ) set_session(tf.Session(config=config)) print(tensorflow_backend._get_available_gpus() )
Digit Recognizer
3,821,292
def score_metrics(y_test, y_predicted): accuracy = accuracy_score(y_test, y_predicted) precision = precision_score(y_test, y_predicted) recall = recall_score(y_test, y_predicted) print("accuracy = %0.3f, precision = %0.3f, recall = %0.3f" %(accuracy, precision, recall))<compute_test_metric>
X_train = X_train/255 x_test = x_test/255
Digit Recognizer
3,821,292
score_metrics(y_test, y_predicted_lr )<train_on_grid>
KFold = 5 Folds = StratifiedKFold(n_splits=KFold, shuffle=True ).split(X_train, Y_train )
Digit Recognizer
3,821,292
pipeline = Pipeline([ ('clf', DecisionTreeClassifier(splitter='random', class_weight='balanced')) ]) parameters = { 'clf__max_depth':(150,160,165), 'clf__min_samples_split':(18,20,23), 'clf__min_samples_leaf':(5,6,7) } df_tfidf = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=-1, scoring='f1') df_tfidf.fit(X_train_tfidf, y_train) print(df_tfidf.best_estimator_.get_params() )<predict_on_test>
classifiers = [] for train_idx, valid_idx in Folds: FOLD = len(classifiers)+1 print("STARTED TRAINING ON FOLD-%d"%(FOLD)) x_train, x_valid = X_train[train_idx], X_train[valid_idx] y_train, y_valid = to_categorical(Y_train[train_idx], 10), to_categorical(Y_train[valid_idx], 10) datagen = ImageDataGenerator( featurewise_center=False, samplewise_center=False, featurewise_std_normalization=False, samplewise_std_normalization=False, zca_whitening=False, rotation_range=10, zoom_range = 0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=False, vertical_flip=False ) datagen.fit(x_train) callbacks = [ EarlyStopping(monitor='val_loss', patience=15, verbose=1, mode='auto', restore_best_weights=True), ModelCheckpoint(filepath='MNIST_DigitRecognizer_CNN_Fold%d.hdf5'%(FOLD), monitor='val_loss', save_best_only=True, verbose=1), ReduceLROnPlateau(monitor='val_loss', patience=3, verbose=1, factor=0.5, min_lr=0.000001) ] clf = Sequential([ Conv2D(32, kernel_size=(3,3), input_shape=(28,28,1), activation='relu', padding='same', kernel_initializer='glorot_uniform'), Conv2D(32, kernel_size=(3,3), activation='relu', padding='same', kernel_initializer='glorot_uniform'), MaxPooling2D(), Conv2D(64, kernel_size=(3,3), activation='relu', padding='same', kernel_initializer='glorot_uniform'), Conv2D(64, kernel_size=(3,3), activation='relu', padding='same', kernel_initializer='glorot_uniform'), MaxPooling2D(), Dropout(rate=0.15), Flatten(), Dense(512, activation='relu', kernel_initializer='glorot_uniform'), Dropout(rate=0.5), Dense(10, activation='softmax', kernel_initializer='glorot_uniform') ]) clf.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.001)) train_history = clf.fit_generator( datagen.flow(x_train, y_train, batch_size=32), validation_data=(x_valid, y_valid), steps_per_epoch=len(x_train)/32, epochs=50, workers=6, use_multiprocessing=True, callbacks = callbacks, verbose=0 ) plot_eval_result(train_hist = train_history) classifiers.append(clf)
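The loop above calls plot_eval_result, which never appears in this dump; a minimal stand-in that plots the Keras History object (the function name and keyword are taken from the call site, the body is an assumption):

import matplotlib.pyplot as plt

def plot_eval_result(train_hist):
    # Assumed helper: draw train/validation loss curves from a Keras History.
    # Only loss is available here, since clf.compile sets no extra metrics.
    plt.plot(train_hist.history['loss'], label='train loss')
    plt.plot(train_hist.history['val_loss'], label='val loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend()
    plt.show()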
Digit Recognizer
3,821,292
y_predicted_dt = df_tfidf.predict(X_test_tfidf )<compute_test_metric>
final_result = sum(clf.predict(x_test)for clf in classifiers)/KFold final_result = np.argmax(final_result, axis=1 )
Digit Recognizer
3,821,292
score_metrics(y_test, y_predicted_dt )<install_modules>
y_pred = pd.DataFrame(data = final_result, index=list(range(1, test.shape[0]+1)) , columns = ['Label']) y_pred.to_csv("output.csv", index=True, index_label='ImageId' )
Digit Recognizer
3,070,649
!pip install gensim -i http://pypi.douban.com/simple --trusted-host pypi.douban.com<string_transform>
%reload_ext autoreload %autoreload 2 %matplotlib inline
Digit Recognizer
3,070,649
stop_words = stopwords.words('english') for word in ['us','no','yet']: stop_words.append(word) data_list = [] text_series = data['text'] for i in range(len(text_series)) : content = text_series.iloc[i] cutwords = [word for word in content.split(' ')if word not in stop_words if len(word)!= 0] data_list.append(cutwords )<define_variables>
from fastai import * from fastai.vision import * import numpy as np import pandas as pd import re
Digit Recognizer
3,070,649
for i in range(len(data_list)) : content = data_list[i] if len(content)<1: print(i )<load_pretrained>
df_train = pd.read_csv('../input/train.csv') df_train['fn'] = df_train.index df_train.head()
Digit Recognizer
3,070,649
word2vec_path='./GoogleNews-vectors-negative300.bin.gz' word2vec_model = gensim.models.KeyedVectors.load_word2vec_format(word2vec_path, binary=True )<feature_engineering>
data =(src.transform(tfms=(rand_pad(padding=5,size=28,mode='zeros'),[])) .databunch(num_workers=2,bs=128) .normalize(imagenet_stats))
Digit Recognizer
3,070,649
def get_textVector(data_list, word2vec, textsVectors_list): for i in range(len(data_list)): words_perText = data_list[i] if len(words_perText) < 1: words_vector = [np.zeros(300)] else: words_vector = [word2vec[k] if k in word2vec else np.zeros(300) for k in words_perText] text_vector = np.array(words_vector).mean(axis=0) textsVectors_list.append(text_vector) return textsVectors_list<prepare_x_and_y>
data.show_batch(rows=3,figsize=(10,7))
Digit Recognizer
3,070,649
textsVectors_list = [] get_textVector(data_list, word2vec_model, textsVectors_list) X = np.array(textsVectors_list )<count_missing_values>
learn = cnn_learner(data,models.resnet50,metrics=accuracy,model_dir='/kaggle/model' )
Digit Recognizer
3,070,649
pd.isnull(X ).any()<split>
learn.lr_find(end_lr=100 )
Digit Recognizer
3,070,649
word2vec_X = X[0:train_data.shape[0]] y = data['target'][0:train_data.shape[0]] word2vec_submit = X[train_data.shape[0]:-1] X_train_word2vec, X_test_word2vec, y_train_word2vec, y_test_word2vec = train_test_split(word2vec_X, y, test_size = 0.2, random_state = 4 )<train_model>
lr = 1e-2
Digit Recognizer
3,070,649
word2vec_lr = LogisticRegression(class_weight = 'balanced', solver = 'lbfgs', n_jobs = -1) word2vec_lr.fit(X_train_word2vec, y_train_word2vec) y_predicted_word2vec_lr = word2vec_lr.predict(X_test_word2vec )<compute_test_metric>
learn.fit_one_cycle(5,slice(lr))
Digit Recognizer
3,070,649
score_metrics(y_test_word2vec, y_predicted_word2vec_lr )<define_variables>
learn.unfreeze() learn.lr_find() learn.recorder.plot()
Digit Recognizer
3,070,649
compare_list = [] for(i,j)in zip(y_test_word2vec, y_predicted_word2vec_lr): k = i - j compare_list.append(k) wrong_num = [i for i,j in enumerate(compare_list)if j != 0] text_series[0:train_data.shape[0]][wrong_num]<define_variables>
learn.fit_one_cycle(8,slice(2e-5,lr/5))
Digit Recognizer
3,070,649
lenlen = [] for i in range(len(data_list)) : content = data_list[i] perlen = len(content) lenlen.append(perlen) print(max(lenlen))<define_variables>
df_test = pd.read_csv('../input/test.csv') df_test['label'] = 0 df_test['fn'] = df_test.index df_test.head()
Digit Recognizer
3,070,649
max_sequence_length = 26 embedding_dim = 300<train_model>
learn.data.add_test(PixelImageItemList.from_df(df_test,path='.',cols='fn'))
Digit Recognizer
3,070,649
tokenizer = Tokenizer() tokenizer.fit_on_texts(data_list) sequences = tokenizer.texts_to_sequences(data_list) word_index = tokenizer.word_index cnn_data = pad_sequences(sequences, maxlen = max_sequence_length) cnn_label = to_categorical(np.asarray(train_data['target'])) print('len of word_index:', len(word_index)) print('shape of data tensor:', cnn_data.shape) print('shape of label tensor:', cnn_label.shape)<split>
pred_test = learn.get_preds(ds_type=DatasetType.Test )
Digit Recognizer
3,070,649
trainCNN_data = cnn_data[0:train_data.shape[0]] X_train_cnn, X_test_cnn, y_train_cnn, y_test_cnn = train_test_split(trainCNN_data, cnn_label, test_size = 0.2, random_state = 4) X_cnn, X_val_cnn, y_cnn, y_val_cnn = train_test_split(X_train_cnn, y_train_cnn, test_size = 0.2, random_state = 4 )<choose_model_class>
test_result = torch.argmax(pred_test[0],dim=1) result = test_result.numpy()
Digit Recognizer
3,070,649
<train_model><EOS>
final = pd.Series(result,name='Label') submission = pd.concat([pd.Series(range(1,28001),name='ImageId'),final],axis=1) submission.to_csv('fastai-res34-0.992.csv',index=False )
Digit Recognizer
8,872,016
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<compute_test_metric>
!pip install emnist
Digit Recognizer
8,872,016
test_loss, test_acc = CNNmodel.evaluate(X_test_cnn, y_test_cnn, verbose=2) print('test loss:',test_loss) print('test acc:',test_acc )<prepare_x_and_y>
import matplotlib.pyplot as plt,seaborn as sns,pandas as pd,numpy as np from keras.models import Sequential, load_model from keras.layers.core import Dense, Dropout, Activation from keras.layers import Conv2D, MaxPooling2D,MaxPool2D,Flatten,BatchNormalization from keras.utils import np_utils from keras.preprocessing.image import ImageDataGenerator from keras.callbacks import ReduceLROnPlateau from emnist import extract_training_samples from emnist import extract_test_samples from keras.optimizers import Adam
Digit Recognizer
8,872,016
embedding_matrix = np.zeros((len(word_index) + 1, embedding_dim)) for word, i in word_index.items(): if word in word2vec_model: embedding_matrix[i] = np.asarray(word2vec_model[word])<choose_model_class>
x_train, y_train = extract_training_samples('digits') x_test, y_test = extract_test_samples('digits' )
Digit Recognizer
8,872,016
embedding_layer = Embedding(len(word_index)+1, embedding_dim, weights = [embedding_matrix], input_length = max_sequence_length, trainable = False )<choose_model_class>
in_train_data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') ex_y_train = in_train_data["label"] ex_x_train = in_train_data.drop(labels = ["label"],axis = 1 )
Digit Recognizer
8,872,016
model = Sequential() model.add(embedding_layer) model.add(Conv1D(filters=150, kernel_size=3, strides=1, padding='valid', activation = 'relu')) model.add(MaxPooling1D(pool_size=3)) model.add(Flatten()) model.add(Dense(embedding_dim, activation='relu')) model.add(Dropout(0.8)) model.add(Dense(cnn_label.shape[1], activation='sigmoid')) model.summary()<train_model>
X_train = x_train.reshape(240000, 28, 28,1) X_test = x_test.reshape(40000, 28, 28,1) ex_x_train = ex_x_train.values.reshape(42000,28,28,1) X_train = np.vstack(( X_train, ex_x_train)) print(X_train.shape )
Digit Recognizer
8,872,016
model.compile(optimizer='adam', loss=losses.binary_crossentropy, metrics=['accuracy']) history = model.fit(X_cnn, y_cnn, epochs=10, validation_data=(X_val_cnn, y_val_cnn))<compute_test_metric>
X_train = X_train.astype('float32') X_test = X_test.astype('float32' )
Digit Recognizer
8,872,016
test_loss, test_acc = model.evaluate(X_test_cnn, y_test_cnn, verbose=2) print('test loss:',test_loss) print('test acc:',test_acc )<import_modules>
X_train /= 255 X_test /= 255
Digit Recognizer
8,872,016
tf.__version__<import_modules>
y_train = np.concatenate([y_train,ex_y_train.values]) print(y_train.shape )
Digit Recognizer
8,872,016
hub.__version__<load_from_url>
n_classes = 10 print("Shape before one-hot encoding: ", y_train.shape) Y_train = np_utils.to_categorical(y_train, n_classes) Y_test = np_utils.to_categorical(y_test, n_classes) print("Shape after one-hot encoding: ", Y_train.shape )
Digit Recognizer
8,872,016
!wget --quiet https://raw.githubusercontent.com/tensorflow/models/master/official/nlp/bert/tokenization.py<import_modules>
model = Sequential() model.add(Conv2D(filters=32, kernel_size=(5,5), padding='same', activation='relu', input_shape=(28, 28, 1))) model.add(MaxPool2D(pool_size = 2,strides=2)) model.add(Conv2D(filters=48, kernel_size=(5,5), padding='valid', activation='relu')) model.add(MaxPool2D(pool_size = 2,strides=2)) model.add(Flatten()) model.add(Dense(120, activation='relu')) model.add(Dense(84, activation='relu')) model.add(Dense(10, activation='softmax')) adam = Adam(lr=5e-4) model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=adam )
Digit Recognizer
8,872,016
import tensorflow as tf from tensorflow.keras.callbacks import ModelCheckpoint import tensorflow_hub as hub import tokenization<categorify>
reduce_lr = ReduceLROnPlateau(monitor='val_acc', patience=3, verbose=1, factor=0.2, min_lr=1e-6 )
Digit Recognizer