kernel_id
int64
24.2k
23.3M
prompt
stringlengths
8
1.85M
completetion
stringlengths
1
182k
comp_name
stringlengths
5
57
14,568,394
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<correct_missing_values>
test_df = pd.read_csv('.. /input/digit-recognizer/test.csv') train_df = pd.read_csv('.. /input/digit-recognizer/train.csv') print('Shape of the testing dataset:', test_df.shape) print('Shape of the training dataset:', train_df.shape )
Digit Recognizer
14,568,394
fill_missing_values(train_data) fill_missing_values(validation_data )<load_from_csv>
train_labels = train_df['label'] train_images = train_df.drop('label', axis=1) train_images = train_images.astype('float32')/255 test_images = test_df.astype('float32')/255 train_images = np.array(train_images ).reshape(42000,28,28,1) test_images = np.array(test_images ).reshape(28000,28,28,1) train_images, validation_images, train_labels, validation_labels = train_test_split(train_images, train_labels, test_size=0.1, random_state=0) train_labels = to_categorical(train_labels) validation_labels = to_categorical(validation_labels )
Digit Recognizer
14,568,394
test_data = pd.read_csv('test_stg2.tsv',sep='\t') test = test_data.copy()<count_missing_values>
datagen = ImageDataGenerator(rotation_range=10, width_shift_range=0.05, height_shift_range=0.05, shear_range=0.1, zoom_range=0.05, horizontal_flip=False, fill_mode='nearest') datagen.fit(train_images )
Digit Recognizer
14,568,394
test_data.isnull().sum()<string_transform>
model = models.Sequential() model.add(layers.Conv2D(32,(3, 3), activation='relu', padding='same', input_shape=(28, 28, 1))) model.add(layers.BatchNormalization()) model.add(layers.MaxPooling2D(( 2, 2))) model.add(layers.Dropout(0.2)) model.add(layers.Conv2D(64,(3, 3), activation='relu', padding='same')) model.add(layers.BatchNormalization()) model.add(layers.MaxPooling2D(( 2, 2))) model.add(layers.Dropout(0.2)) model.add(layers.Conv2D(128,(3, 3), activation='relu', padding='same')) model.add(layers.BatchNormalization()) model.add(layers.MaxPooling2D(( 2, 2))) model.add(layers.Dropout(0.2)) model.add(layers.Conv2D(256,(3, 3), activation='relu', padding='same')) model.add(layers.BatchNormalization()) model.add(layers.MaxPooling2D(( 2, 2))) model.add(layers.Dropout(0.2)) model.add(layers.Flatten()) model.add(layers.Dense(512, activation='relu')) model.add(layers.Dense(256, activation='relu')) model.add(layers.Dense(10, activation='softmax')) model.compile(optimizer=optimizers.Adam(lr=1e-3), loss='categorical_crossentropy', metrics=['acc'] )
Digit Recognizer
14,568,394
create_split_categories(test_data) fill_missing_values(test_data )<feature_engineering>
lr_reduction = ReduceLROnPlateau(monitor = 'val_acc', factor = 0.5, min_lr = 1e-6) checkpoint = ModelCheckpoint('./trainedModel.hdf5',monitor = 'val_acc', mode = "max", save_best_model = True )
Digit Recognizer
14,568,394
train_data['log_prices']= np.log(train_data['price']+1 )<feature_engineering>
history = model.fit(datagen.flow(train_images, train_labels, batch_size=100), epochs=50, validation_data=(validation_images, validation_labels), callbacks = [lr_reduction, checkpoint] )
Digit Recognizer
14,568,394
validation_data['log_prices']= np.log(validation_data['price']+1 )<load_pretrained>
pred = model.predict(test_images )
Digit Recognizer
14,568,394
nltk.download('stopwords' )<string_transform>
pred = np.argmax(pred, axis=-1) sub = pd.read_csv('.. /input/digit-recognizer/sample_submission.csv') sub['Label'] = pred sub.head()
Digit Recognizer
14,568,394
stop = stopwords.words('english') def remove_stop_words(x): x = ' '.join([i for i in x.lower().split(' ')if i not in stop]) return x<feature_engineering>
sub.to_csv('submission.csv', index=False )
Digit Recognizer
14,568,394
<categorify><EOS>
sub.to_csv('submission.csv', index=False )
Digit Recognizer
14,595,493
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<feature_engineering>
import numpy as np import matplotlib.pyplot as plt from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.utils import np_utils from keras.datasets import mnist import pandas as pd
Digit Recognizer
14,595,493
train_data['item_description']=text_preprocessing(train_data['item_description']) validation_data['item_description']=text_preprocessing(validation_data['item_description']) test_data['item_description']=text_preprocessing(test_data['item_description']) train_data['name']=text_preprocessing(train_data['name']) validation_data['name']=text_preprocessing(validation_data['name']) test_data['name']=text_preprocessing(test_data['name'] )<string_transform>
train = pd.read_csv(".. /input/digit-recognizer/train.csv") test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
Digit Recognizer
14,595,493
print(train_data['item_description'].iloc[33],len(train_data['item_description'].iloc[33].split(' '))) print(train['item_description'].iloc[33],len(train['item_description'].iloc[33].split(' ')) )<drop_column>
X_train, X_test, y_train, y_test = train_test_split(train.iloc[:, 1:].values, train.iloc[:, 0].values, test_size=0.1 )
Digit Recognizer
14,595,493
def clean_cat(cat_col): cat_list = [] for i in tqdm(cat_col.values): i = re.sub('[^A-Za-z0-9]+', ' ', i) i = i.replace(' ','') i = i.replace('&','_') cat_list.append(i.strip()) return cat_list<concatenate>
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1) X_test = X_test.reshape(X_test.shape[0], 28, 28, 1 )
Digit Recognizer
14,595,493
train_data['sub_category1'] = clean_cat(train_data['sub_category1']) validation_data['sub_category1'] = clean_cat(validation_data['sub_category1']) test_data['sub_category1'] = clean_cat(test_data['sub_category1']) train_data['sub_category2'] = clean_cat(train_data['sub_category2']) validation_data['sub_category2'] = clean_cat(validation_data['sub_category2']) test_data['sub_category2'] = clean_cat(test_data['sub_category2']) train_data['sub_category3'] = clean_cat(train_data['sub_category3']) validation_data['sub_category3'] = clean_cat(validation_data['sub_category3']) test_data['sub_category3'] = clean_cat(test_data['sub_category3'] )<feature_engineering>
y_train = np_utils.to_categorical(y_train, 10) y_test = np_utils.to_categorical(y_test, 10) print('Data y origin ', y_train[0]) print('Data y one-hot encoding ',y_train[0] )
Digit Recognizer
14,595,493
train_data['brand_name'] = clean_cat(train_data['brand_name']) validation_data['brand_name'] = clean_cat(validation_data['brand_name']) test_data['brand_name'] = clean_cat(test_data['brand_name'] )<categorify>
model = Sequential() model.add(Conv2D(32,(3, 3), activation='sigmoid', input_shape=(28,28,1))) model.add(Conv2D(32,(3, 3), activation='sigmoid')) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Flatten()) model.add(Dense(128, activation='sigmoid')) model.add(Dense(10, activation='softmax'))
Digit Recognizer
14,595,493
countvectorizer=CountVectorizer().fit(train_data['sub_category1']) bow_cat1_train=countvectorizer.transform(train_data['sub_category1']) bow_cat1_val=countvectorizer.transform(validation_data['sub_category1']) bow_cat1_test=countvectorizer.transform(test_data['sub_category1']) print("Shape of sub_category1 features:") print(bow_cat1_train.shape) print(bow_cat1_val.shape) print(bow_cat1_test.shape) print("Some Features are:") print(countvectorizer.get_feature_names() )<categorify>
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'] )
Digit Recognizer
14,595,493
countvectorizer=CountVectorizer().fit(train_data['sub_category2']) bow_cat2_train=countvectorizer.transform(train_data['sub_category2']) bow_cat2_val=countvectorizer.transform(validation_data['sub_category2']) bow_cat2_test=countvectorizer.transform(test_data['sub_category2']) print("Shape of sub_category2 features:") print(bow_cat2_train.shape) print(bow_cat2_val.shape) print(bow_cat2_test.shape) print("Some Features are: ") print(countvectorizer.get_feature_names() [50:60] )<categorify>
datagen = ImageDataGenerator( rotation_range= 10, zoom_range = 0.1, width_shift_range = 0.1, height_shift_range = 0.1 ) datagen.fit(X_train )
Digit Recognizer
14,595,493
countvectorizer=CountVectorizer().fit(train_data['sub_category3']) bow_cat3_train=countvectorizer.transform(train_data['sub_category3']) bow_cat3_val=countvectorizer.transform(validation_data['sub_category3']) bow_cat3_test=countvectorizer.transform(test_data['sub_category3']) ("Shape of sub_category3 features:") print(bow_cat3_train.shape) print(bow_cat3_val.shape) print(bow_cat3_test.shape) print("Some Features are: ") print(countvectorizer.get_feature_names() [200:210] )<categorify>
train_generator = datagen.flow(X_train, y_train, batch_size = 32) validation_generator = datagen.flow(X_test, y_test, batch_size = 32 )
Digit Recognizer
14,595,493
countvectorizer=CountVectorizer().fit(train_data['brand_name']) bow_brand_train=countvectorizer.transform(train_data['brand_name']) bow_brand_val=countvectorizer.transform(validation_data['brand_name']) bow_brand_test=countvectorizer.transform(test_data['brand_name']) ("Shape of brand_name features:") print(bow_brand_train.shape) print(bow_brand_val.shape) print(bow_brand_test.shape) print("Some Features are: ") print(countvectorizer.get_feature_names() [35:45] )<categorify>
%%time H = model.fit_generator(train_generator, steps_per_epoch = X_train.shape[0] // 32, epochs=50, validation_data = validation_generator, validation_steps = X_test.shape[0] // 32, verbose=1 )
Digit Recognizer
14,595,493
countvectorizer=CountVectorizer(min_df=10 ).fit(train_data['name']) bow_name_train=countvectorizer.transform(train_data['name']) bow_name_val=countvectorizer.transform(validation_data['name']) bow_name_test=countvectorizer.transform(test_data['name']) print("After Vectorization of name feature: ") print(bow_name_train.shape) print(bow_name_val.shape) print(bow_name_test.shape) print("Some Features are: ") print(countvectorizer.get_feature_names() [210:220] )<categorify>
score = model.evaluate(X_test, y_test, verbose=0) print(score )
Digit Recognizer
14,595,493
tfidfvectorizer=TfidfVectorizer(ngram_range=(1,2),min_df=10,max_features=5000 ).fit(train_data['item_description']) tfidf_description_train=tfidfvectorizer.transform(train_data['item_description']) tfidf_description_val=tfidfvectorizer.transform(validation_data['item_description']) tfidf_description_test=tfidfvectorizer.transform(test_data['item_description']) print("After Vectorization of item description feature: ") print(tfidf_description_train.shape) print(tfidf_description_val.shape) print(tfidf_description_test.shape) print("Some Features are: ") print(tfidfvectorizer.get_feature_names() [222:234] )<categorify>
plt.imshow(X_test[0].reshape(28,28), cmap='gray') y_predict = model.predict(X_test[0].reshape(1,28,28,1)) print('Value: ', np.argmax(y_predict))
Digit Recognizer
14,595,493
features_train = csr_matrix(pd.get_dummies(train_data[['item_condition_id', 'shipping']],sparse=True ).values) features_val = csr_matrix(pd.get_dummies(validation_data[['item_condition_id', 'shipping']],sparse=True ).values) features_test = csr_matrix(pd.get_dummies(test_data[['item_condition_id', 'shipping']],sparse=True ).values) print(features_train.shape) print(features_val.shape) print(features_test.shape )<compute_train_metric>
%%time y_pred = model.predict(sub )
Digit Recognizer
14,595,493
linearregression=LinearRegression(normalize=True) linearregression.fit(X_train,train_data['log_prices']) ytrain_predict=linearregression.predict(X_train) yval_predict=linearregression.predict(X_val) train_error=np.sqrt(mean_squared_log_error(train_data['log_prices'],ytrain_predict)) val_error=np.sqrt(mean_squared_log_error(validation_data['log_prices'],yval_predict)) print(" Linear Regression RMSLE on train is {} RMSLE on cv is {}".format(train_error,val_error)) <predict_on_test>
y_sub = np.argmax(y_pred, 1 )
Digit Recognizer
14,595,493
yval_linear=linearregression.predict(X_val) ytest_linear=linearregression.predict(X_test )<load_from_csv>
submission = pd.DataFrame({'ImageId': np.arange(1, 28001), 'Label': y_sub}) submission.to_csv("submission.csv", index = False) print("Your submission was successfully saved!" )
Digit Recognizer
14,595,493
submission_data = pd.read_csv('sample_submission_stg2.csv') submission_data.head(5 )<feature_engineering>
model.save("my_model" )
Digit Recognizer
14,335,264
submission_data.loc[:, 'price'] = np.expm1(ytest_linear )<save_to_csv>
transform = transforms.Normalize(0, 255, inplace=False) train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') submit_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv') features_train = transform(torch.Tensor(np.reshape(train_df.iloc[:,1:].to_numpy() ,(-1,1,28,28)))) labels_train = torch.LongTensor(train_df['label'].to_numpy()) features_submit = transform(torch.Tensor(np.reshape(submit_df.to_numpy() ,(-1,28,28)))) train_len = int(features_train.shape[0]*0.6) valid_len = int(features_train.shape[0]*0.2) train_set = torch.utils.data.TensorDataset(features_train[:train_len],labels_train[:train_len]) valid_set = torch.utils.data.TensorDataset(features_train[train_len:(train_len+valid_len)],labels_train[train_len:(train_len+valid_len)]) test_set = torch.utils.data.TensorDataset(features_train[(train_len+valid_len):],labels_train[(train_len+valid_len):]) train_loader = DataLoader(train_set, batch_size = 50) valid_loader = DataLoader(valid_set, batch_size = 50) test_loader = DataLoader(test_set, batch_size = 50 )
Digit Recognizer
14,335,264
submission_data.to_csv('submission.csv', index=False )<load_pretrained>
class CNN_classifier(nn.Module): def __init__(self): super(CNN_classifier, self ).__init__() self.conv1 = nn.Conv2d(1, 32, 5, padding=2) self.conv2 = nn.Conv2d(32, 32, 5, padding=2) self.conv3 = nn.Conv2d(32, 64, 3, padding=1) self.pool = nn.MaxPool2d(2, 2) self.fc1 = nn.Linear(64*3*3, 256) self.fc2 = nn.Linear(256, 10) self.dropout = nn.Dropout(0.25) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.dropout(x) x = self.pool(F.relu(self.conv2(x))) x = self.dropout(x) x = self.pool(F.relu(self.conv3(x))) x = self.dropout(x) x = x.view(-1, 64*3*3) x = F.relu(self.fc1(x)) x = self.dropout(x) x = self.fc2(x) return x model = CNN_classifier() print(model )
Digit Recognizer
14,335,264
!apt-get install p7zip !p7zip -d -f -k /kaggle/input/mercari-price-suggestion-challenge/train.tsv.7z !p7zip -d -f -k /kaggle/input/mercari-price-suggestion-challenge/test.tsv.7z !p7zip -d -f -k /kaggle/input/mercari-price-suggestion-challenge/sample_submission.csv.7z<load_from_csv>
criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(model.parameters() , lr=0.01) if torch.cuda.is_available() : model = model.cuda() criterion = criterion.cuda()
Digit Recognizer
14,335,264
!unzip /kaggle/input/mercari-price-suggestion-challenge/sample_submission_stg2.csv.zip !unzip /kaggle/input/mercari-price-suggestion-challenge/test_stg2.tsv.zip<import_modules>
n_epochs = 10 valid_plot = [] train_plot = [] valid_loss_min = np.Inf for epoch in range(1, n_epochs+1): train_loss = 0.0 valid_loss = 0.0 model.train() for data, target in train_loader: if torch.cuda.is_available() : data = data.cuda() target = target.cuda() optimizer.zero_grad() output = model(data) loss = criterion(output, target) loss.backward() optimizer.step() train_loss += loss.item() *data.size(0) model.eval() for data, target in valid_loader: if torch.cuda.is_available() : data = data.cuda() target = target.cuda() output = model(data) loss = criterion(output, target) valid_loss += loss.item() *data.size(0) train_loss = train_loss/len(train_loader.sampler) valid_loss = valid_loss/len(valid_loader.sampler) train_plot.append(train_loss) valid_plot.append(valid_loss) print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format( epoch, train_loss, valid_loss)) if valid_loss <= valid_loss_min: print('Validation loss decreased({:.6f} --> {:.6f} ).Saving model...'.format( valid_loss_min, valid_loss)) torch.save(model.state_dict() , 'model_mnist_cnn.pt') valid_loss_min = valid_loss
Digit Recognizer
14,335,264
import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from sklearn.linear_model import Ridge, LogisticRegression from sklearn.model_selection import train_test_split, cross_val_score from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer from sklearn.preprocessing import LabelBinarizer<load_from_csv>
test_loss = 0.0 correct_pred = 0 false_images = torch.empty(( 1, 28, 28)) false_preds = [] false_targets = [] model.eval() for data, target in test_loader: if torch.cuda.is_available() : data = data.cuda() target = target.cuda() output = model(data) loss = criterion(output, target) test_loss += loss.item() *data.size(0) _, pred = torch.max(output, 1) correct_pred += torch.sum(pred == target ).item() true_pred = pred==target for i, value in enumerate(true_pred): if value.item() == False: false_images = torch.cat(( false_images, data[i]), dim=0) false_preds.append(pred[i]) false_targets.append(target[i]) false_images = false_images[1:] test_loss = test_loss/len(test_loader.dataset) accuracy = correct_pred /len(test_loader.dataset) print('Test Loss: {:.6f} '.format(test_loss)) print('Accuracy: {:.2f}%'.format(accuracy*100))
Digit Recognizer
14,335,264
train = pd.read_table('train.tsv') test = pd.read_table('test_stg2.tsv' )<count_missing_values>
data = features_submit.view(-1,1,28,28) if torch.cuda.is_available() : data = data.cuda() target = target.cuda() output = model(data) score, predictions = torch.topk(output, 1 )
Digit Recognizer
14,335,264
<set_options><EOS>
ImageId = np.arange(1,predictions.size() [0]+1) Label = predictions.view(predictions.size() [0] ).numpy() new_submission = pd.DataFrame({'ImageId': ImageId, 'Label': Label}) new_submission.to_csv('my_submission.csv', index=False) print("Your submission was successfully saved!" )
Digit Recognizer
14,468,122
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<data_type_conversions>
import numpy as np import pandas as pd
Digit Recognizer
14,468,122
train['brand_name'] = train['brand_name'].fillna('Nobrand') test['brand_name'] = test['brand_name'].fillna('Nobrand' )<data_type_conversions>
training_data = pd.read_csv(".. /input/digit-recognizer/train.csv") test_images = pd.read_csv(".. /input/digit-recognizer/test.csv") train_images = training_data.drop("label", axis=1) train_labels = training_data["label"]
Digit Recognizer
14,468,122
train['category_name'] = train['category_name'].fillna('No/No/No') test['category_name'] =test['category_name'].fillna('No/No/No' )<data_type_conversions>
train_images.to_numpy() train_labels.to_numpy() test_images.to_numpy()
Digit Recognizer
14,468,122
train['item_description'] = train['item_description'].fillna('No Description') test['item_description'] = test['item_description'].fillna('No Description' )<string_transform>
train_images = train_images / 255.0 test_images = test_images / 255.0
Digit Recognizer
14,468,122
def split_cat(category_name): try: return category_name.split('/') return("done") except: return ['Null', 'Null', 'Null']<feature_engineering>
train_labels = to_categorical(train_labels )
Digit Recognizer
14,468,122
train['cat_1'], train['cat_2'], train['cat_3'] = zip(*train['category_name'].apply(lambda x:split_cat(x))) test['cat_1'], test['cat_2'], test['cat_3'] = zip(*test['category_name'].apply(lambda x: split_cat(x))) <count_values>
network = models.Sequential() network.add(layers.Dense(512, activation='relu', input_shape=(28 * 28,))) network.add(layers.Dense(10, activation='softmax')) network.summary()
Digit Recognizer
14,468,122
( train['shipping'].value_counts())*100/train.shape[0]<count_values>
network.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'] )
Digit Recognizer
14,468,122
brands =train['brand_name'].value_counts() print(brands[:10] )<count_unique_values>
network.fit(train_images, train_labels, epochs = 5, batch_size=128 )
Digit Recognizer
14,468,122
<count_unique_values><EOS>
submit = pd.DataFrame(np.argmax(network.predict(test_images), axis=1), columns=['Label'], index=pd.read_csv('.. /input/digit-recognizer/sample_submission.csv')['ImageId']) submit.index.name = 'ImageId' submit.to_csv('submittion.csv' )
Digit Recognizer
14,355,205
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<count_unique_values>
import numpy as np from sklearn.metrics import confusion_matrix from sklearn.model_selection import train_test_split import itertools import tensorflow as tf from tensorflow import keras from keras.preprocessing.image import ImageDataGenerator import matplotlib.pyplot as plt import pandas as pd import seaborn as sns
Digit Recognizer
14,355,205
print("Có %d nhãn ở cột cat_3." % train['cat_3'].nunique() )<count_values>
mnist_train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") mnist_test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
Digit Recognizer
14,355,205
train['name'].value_counts() [:10]<categorify>
mnist_train
Digit Recognizer
14,355,205
lb_item_condition_id = LabelBinarizer(sparse_output=True) train_condition = lb_item_condition_id.fit_transform(train['item_condition_id']) test_condition = lb_item_condition_id.transform(test['item_condition_id'] )<categorify>
x_train = mnist_train.drop(labels = "label", axis = 1) x_train
Digit Recognizer
14,355,205
lb_shipping = LabelBinarizer(sparse_output=True) train_shipping = lb_shipping.fit_transform(train['shipping']) test_shipping = lb_shipping.transform(test['shipping'] )<categorify>
y_train = mnist_train["label"] y_train
Digit Recognizer
14,355,205
lb_brand_name = LabelBinarizer(sparse_output=True) train_brand_name= lb_brand_name.fit_transform(train['brand_name']) test_brand_name = lb_brand_name.transform(test['brand_name']) <feature_engineering>
x_train = x_train/255.0 mnist_test = mnist_test/255.0
Digit Recognizer
14,355,205
count_vec = CountVectorizer() train_name = count_vec.fit_transform(train['name']) test_name = count_vec.transform(test['name'] )<categorify>
x_train = x_train.values.reshape(-1,28,28,1) mnist_test = mnist_test.values.reshape(-1,28,28,1) x_train.shape
Digit Recognizer
14,355,205
tfidf_des = TfidfVectorizer(max_features=50000, ngram_range=(1, 3), stop_words='english') train_des = tfidf_des.fit_transform(train['item_description']) test_des = tfidf_des.transform(test['item_description'] )<categorify>
y_train = tf.keras.utils.to_categorical(y_train, num_classes=10 )
Digit Recognizer
14,355,205
lb_cat_1 = LabelBinarizer(sparse_output=True) train_cat_1 = lb_cat_1.fit_transform(train['cat_1']) test_cat_1 = lb_cat_1.transform(test['cat_1']) lb_cat_2 = LabelBinarizer(sparse_output=True) train_cat_2 = lb_cat_2.fit_transform(train['cat_2']) test_cat_2 = lb_cat_2.transform(test['cat_2']) lb_cat_3 = LabelBinarizer(sparse_output=True) train_cat_3 = lb_cat_3.fit_transform(train['cat_3']) test_cat_3 = lb_cat_3.transform(test['cat_3'] )<import_modules>
datagen = ImageDataGenerator(rotation_range=12, zoom_range = 0.15, width_shift_range=0.15, height_shift_range=0.13) datagen.fit(x_train) datagen.fit(x_test )
Digit Recognizer
14,355,205
from scipy.sparse import hstack import gc<define_variables>
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size = 0.15) print(x_train.shape) print(x_val.shape )
Digit Recognizer
14,355,205
sparse_matrix_list =(train_name, train_des, train_brand_name, train_condition, train_shipping, train_cat_1, train_cat_2, train_cat_3 )<compute_test_metric>
input_layer = tf.keras.layers.Input(shape =(28, 28, 1)) hidden_layer_1 = tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation=tf.keras.activations.relu )(input_layer) norm_1 = tf.keras.layers.BatchNormalization()(hidden_layer_1) max_1 = tf.keras.layers.MaxPooling2D(pool_size=(3,3), strides=(2,2))(norm_1) dropout_layer_1 = tf.keras.layers.Dropout(0.25 )(max_1) flatten_layer = tf.keras.layers.Flatten()(dropout_layer_1) hidden_layer_3 = tf.keras.layers.Dense(256, activation=tf.keras.activations.relu )(flatten_layer) hidden_layer_4 = tf.keras.layers.Dense(128, activation=tf.keras.activations.relu )(hidden_layer_3) dropout_layer_3 = tf.keras.layers.Dropout(0.5 )(hidden_layer_4) output_layer = tf.keras.layers.Dense(10, activation=tf.keras.activations.sigmoid )(dropout_layer_3 )
Digit Recognizer
14,355,205
def rmsle(y, y_preds): assert len(y)== len(y_preds) return np.sqrt(np.mean(np.power(np.log1p(y)-np.log1p(y_preds), 2)) )<split>
model = tf.keras.Model(inputs = input_layer, outputs = output_layer) model.summary()
Digit Recognizer
14,355,205
def run_model(model, matrix_list): X = hstack(matrix_list ).tocsr() X_train, x_test, Y_train, y_test = train_test_split(X, np.log1p(train['price']), test_size=0.2) model.fit(X_train, Y_train) preds = model.predict(x_test) del X, X_train, x_test, Y_train gc.collect() return preds, y_test<choose_model_class>
model.compile(optimizer = tf.keras.optimizers.Adam() , loss = tf.keras.losses.CategoricalCrossentropy() , metrics = ['accuracy'] )
Digit Recognizer
14,355,205
model = ridge = Ridge() Ridge_preds, y_test = run_model(model, matrix_list=sparse_matrix_list) <compute_test_metric>
his = model.fit(x_train, y_train, batch_size = 1000, epochs = 40, validation_data=(x_val, y_val))
Digit Recognizer
14,355,205
print("rmsle: "+str(rmsle(np.expm1(y_test), np.expm1(Ridge_preds))))<prepare_x_and_y>
results = model.predict(mnist_test) results = np.argmax(results,axis = 1) results = pd.Series(results,name="Label" )
Digit Recognizer
14,355,205
<compute_test_metric><EOS>
submission = pd.concat([pd.Series(range(1,28001),name = "ImageId"),results],axis = 1) submission.to_csv("mnist_result.csv",index=False) submission
Digit Recognizer
11,516,406
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<prepare_x_and_y>
import numpy as np import pandas as pd import tensorflow as tf from tensorflow import keras from keras import Sequential from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D, Dropout from keras.preprocessing.image import ImageDataGenerator from sklearn.model_selection import train_test_split
Digit Recognizer
11,516,406
sparse_matrix_list =(train_name, train_des, train_brand_name, train_condition, train_shipping, train_cat_1, train_cat_2, train_cat_3) X_train = hstack(sparse_matrix_list ).tocsr() X_train<prepare_x_and_y>
path = '/kaggle/input/digit-recognizer/' data = pd.read_csv(path +'train.csv') print(data.shape )
Digit Recognizer
11,516,406
y_train = np.log1p(train['price']) y_train<train_model>
label = tf.keras.utils.to_categorical(data['label'].values) X = data.drop('label',axis=1 ).values num_images = X.shape[0] m = X.shape[1] max_pixel = 255 img_dim = np.sqrt(m ).astype(int) out_dim = label.shape[1] print('Number of different labels/output dimension ' + str(out_dim)) X = X.reshape(( num_images,img_dim,img_dim,1)) / max_pixel print('Shape of input data X ' + str(X.shape))
Digit Recognizer
11,516,406
Ridge_model = Ridge() Ridge_model.fit(X_train, y_train )<concatenate>
tpu = tf.distribute.cluster_resolver.TPUClusterResolver() tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu )
Digit Recognizer
11,516,406
sparse_matrix_list =(test_name, test_des, test_brand_name, test_condition, test_shipping, test_cat_1, test_cat_2, test_cat_3) X_test = hstack(sparse_matrix_list ).tocsr()<predict_on_test>
def create_model() : my_model = Sequential() my_model.add(Conv2D(32, kernel_size =(3,3), activation = 'relu', input_shape =(img_dim,img_dim,1))) my_model.add(Dropout(0.5)) my_model.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding="valid")) my_model.add(Conv2D(32, kernel_size =(3,3), activation = 'relu')) my_model.add(Dropout(0.5)) my_model.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding="valid")) my_model.add(Flatten()) my_model.add(Dense(units = 500)) my_model.add(Dense(out_dim, activation = 'softmax')) return my_model
Digit Recognizer
11,516,406
preds = Ridge_model.predict(X_test) preds<prepare_output>
with tpu_strategy.scope() : my_model = create_model() my_model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'] )
Digit Recognizer
11,516,406
preds = np.expm1(preds) preds<load_from_csv>
X_train, X_val, y_train, y_val = train_test_split(X, label, test_size = 0.1 )
Digit Recognizer
11,516,406
submission = pd.read_csv('sample_submission_stg2.csv') submission<prepare_output>
BATCH_SIZE = 128 * tpu_strategy.num_replicas_in_sync EPOCHS = 100 STEPS_PER_EPOCH = num_images // BATCH_SIZE
Digit Recognizer
11,516,406
submission.loc[:, 'price'] = preds submission<save_to_csv>
history = my_model.fit(X_train, y_train , epochs = EPOCHS, steps_per_epoch = STEPS_PER_EPOCH, validation_data =(X_val, y_val))
Digit Recognizer
11,516,406
submission.to_csv('submission.csv', index=False )<save_to_csv>
data_test = pd.read_csv(path+'test.csv' ).values num_test = data_test.shape[0] data_test = data_test.reshape(( num_test,img_dim,img_dim,1)) / max_pixel
Digit Recognizer
11,516,406
submission.to_csv('submission.csv', index=False )<import_modules>
datagen = ImageDataGenerator(rotation_range=20, zoom_range = 0.20, width_shift_range=0.15, height_shift_range=0.15 )
Digit Recognizer
11,516,406
import time from scipy.sparse import csr_matrix, hstack from sklearn.preprocessing import LabelEncoder, Normalizer from keras.callbacks import ModelCheckpoint from sklearn.model_selection import train_test_split import sys import os import random import numpy as np from keras import backend as K from nltk.corpus import stopwords import re from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.layers import Input, Dropout, Dense, concatenate, GRU, Embedding, Flatten, Activation from keras.optimizers import Adam from keras.models import Model from keras import backend as K<load_pretrained>
Train_x, Train_y = None, None batch = 0 for x_batch, y_batch in datagen.flow(X_train, y_train, batch_size=BATCH_SIZE, shuffle=False): if batch == 0: Train_x, Train_y = x_batch, y_batch elif batch >= len(datagen.flow(X_train, y_train, batch_size=BATCH_SIZE)) : break else: Train_x = np.concatenate(( Train_x, x_batch)) Train_y = np.concatenate(( Train_y, y_batch)) batch += 1
Digit Recognizer
11,516,406
!apt-get install p7zip !p7zip -d -f -k /kaggle/input/mercari-price-suggestion-challenge/train.tsv.7z <define_variables>
Digit Recognizer
11,516,406
NUM_BRANDS = 4500 NUM_CATEGORIES = 1250<compute_test_metric>
with tpu_strategy.scope() : my_model_aug = create_model() my_model_aug.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'] )
Digit Recognizer
11,516,406
def rmsle(y, y0): assert len(y)== len(y0) return np.sqrt(np.mean(np.power(np.log1p(y)- np.log1p(y0), 2)) )<load_from_csv>
history_aug = my_model_aug.fit(Train_x, Train_y, epochs = EPOCHS, steps_per_epoch = STEPS_PER_EPOCH, validation_data =(X_val, y_val))
Digit Recognizer
11,516,406
train_df = pd.read_table("train.tsv") test_df = pd.read_csv(".. /input/mercari-price-suggestion-challenge/test_stg2.tsv.zip" , sep='\t') <string_transform>
my_predictions = my_model.predict(data_test) imageId = np.arange(len(my_predictions)) results = my_predictions.argmax(axis=1 )
Digit Recognizer
11,516,406
def split_cat(text): try: return text.split("/") except: return("missing", "missing", "missing" )<categorify>
submission = pd.DataFrame(np.array([imageId + 1,results] ).transpose() , columns = ['ImageId','Label']) submission.to_csv('submission.csv', index = False )
Digit Recognizer
11,516,406
def handle_missing_inplace(dataset):
    """Fill the NaN text columns of `dataset` in place with sentinel values.

    Category/brand columns get 'missing'; item_description gets the
    dataset's literal placeholder 'No description yet'.

    A single frame-level fillna is used because the column-wise
    `dataset[col].fillna(..., inplace=True)` pattern mutates an
    intermediate object — it is deprecated and becomes a silent no-op
    under pandas copy-on-write.
    """
    dataset.fillna(
        {
            'general_cat': 'missing',
            'subcat_1': 'missing',
            'subcat_2': 'missing',
            'brand_name': 'missing',
            'item_description': 'No description yet',
        },
        inplace=True,
    )
my_predictions_aug = my_model_aug.predict(data_test) imageId = np.arange(len(my_predictions_aug)) results_aug = my_predictions_aug.argmax(axis=1 )
Digit Recognizer
11,516,406
<data_type_conversions><EOS>
submission_aug = pd.DataFrame(np.array([imageId + 1,results_aug] ).transpose() , columns = ['ImageId','Label']) submission_aug.to_csv('submission_aug.csv', index = False )
Digit Recognizer
14,259,513
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<define_variables>
import numpy as np import pandas as pd import os import torch import torch.nn as nn import torchvision import torchvision.transforms as transforms from torch.utils.data import Dataset, DataLoader from sklearn.model_selection import train_test_split from torch.optim import lr_scheduler import time import random import copy import matplotlib.pyplot as plt from typing import Union, List, Dict, Any, cast from PIL import Image
Digit Recognizer
14,259,513
stopwords = {x: 1 for x in stopwords.words('english')} non_alphanums = re.compile(u'[^A-Za-z0-9]+' )<string_transform>
def seed_torch(seed=1029):
    """Seed every RNG in play (hash, python, numpy, torch CPU and GPU).

    Also pins cuDNN to its deterministic, non-benchmarking mode so that
    repeated runs select identical convolution algorithms.
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Both CUDA calls are safe no-ops on CPU-only machines.
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.enabled = True


seed_torch()
Digit Recognizer
14,259,513
def wordCount(text):
    """Number of space-separated tokens in `text`.

    Returns 0 both for the dataset's placeholder description
    'No description yet' and for non-string values (NaN/None).
    Note: as in the original pipeline, an empty string counts as one
    token because ''.split(" ") yields [''].
    """
    try:
        if text == 'No description yet':
            return 0
        # len() of the split directly; the original built a redundant
        # intermediate list copy.  lower() is kept for fidelity although
        # it cannot change the token count.
        return len(text.lower().split(" "))
    except AttributeError:
        # Non-string input (e.g. NaN) has no .lower()/.split().
        return 0
BATCH = 32 EPOCHS = 100 LR = 0.001 DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") device = DEVICE
Digit Recognizer
14,259,513
def normalize_text(text):
    """Normalize free text for tokenization.

    Replaces non-alphanumeric runs with spaces, lower-cases, then drops
    stopwords and single-character tokens; returns the surviving tokens
    joined by single spaces.  Relies on the module-level `non_alphanums`
    regex and `stopwords` lookup table.
    """
    cleaned = non_alphanums.sub(' ', text).lower().strip()
    kept = [token for token in cleaned.split(" ")
            if len(token) > 1 and token not in stopwords]
    return u" ".join(kept)
all_df = pd.read_csv("/kaggle/input/digit-recognizer/train.csv", dtype=np.float32) test_df = pd.read_csv("/kaggle/input/digit-recognizer/test.csv", dtype=np.float32) sample_sub = pd.read_csv("/kaggle/input/digit-recognizer/sample_submission.csv")
Digit Recognizer
14,259,513
def normalize_dataset_text(dataset):
    """Run normalize_text over the free-text columns of `dataset`, in place."""
    for column in ('item_description', 'brand_name'):
        dataset[column] = dataset[column].apply(normalize_text)
train_df, validate_df = train_test_split(all_df, train_size=0.8, test_size=0.2 )
Digit Recognizer
14,259,513
def delete_unseen(dataset):
    """Replace, in place, brand/category values never seen during training.

    Any value outside the module-level training vocabularies
    (all_brand, all_general_cat, all_subcat_1, all_subcat_2) is mapped
    to the sentinel 'missing' so the label encoders cannot fail later.
    """
    vocabularies = {
        'brand_name': all_brand,
        'general_cat': all_general_cat,
        'subcat_1': all_subcat_1,
        'subcat_2': all_subcat_2,
    }
    for column, known in vocabularies.items():
        dataset.loc[~dataset[column].isin(known), column] = 'missing'
original_transforms = transforms.Compose([ transforms.ToPILImage() , transforms.ToTensor() ]) sub = [original_transforms] train_transforms = transforms.Compose([ transforms.ToPILImage() , transforms.RandomAffine(degrees=25, translate=(0.1, 0.1), scale=(0.8, 1.2)) , transforms.ToTensor() ] )
Digit Recognizer
14,259,513
def text_length_feature(dataset, train=True):
    """Add scaled word-count features 'desc_len' and 'name_len' in place.

    When `train` is True the shared module-level `desc_normalizer` is
    fitted on this data; otherwise the previously fitted scaling is
    reused so train and evaluation features live on the same scale.
    """
    dataset['desc_len'] = dataset['item_description'].apply(wordCount)
    dataset['name_len'] = dataset['name'].apply(wordCount)
    length_cols = ['desc_len', 'name_len']
    if train:
        dataset[length_cols] = desc_normalizer.fit_transform(dataset[length_cols])
    else:
        dataset[length_cols] = desc_normalizer.transform(dataset[length_cols])
class GetData(Dataset):
    """Minimal map-style dataset pairing image tensors with labels.

    Each item is the per-sample transform applied to the image (made
    contiguous, since torchvision transforms can return non-contiguous
    views) together with its label.
    """

    def __init__(self, X_Train, Y_Train, Transform):
        self.X = X_Train
        self.Y = Y_Train
        self.transform = Transform

    def __len__(self):
        return len(self.X)

    def __getitem__(self, index):
        image = self.transform(self.X[index])
        return image.contiguous(), self.Y[index]
Digit Recognizer
14,259,513
start_time = time.time() print(strftime("%Y-%m-%d %H:%M:%S", gmtime()))<split>
def getSet(data_df, Transform):
    """Build a GetData dataset from a raw MNIST-style dataframe.

    The pixel columns (everything except 'label') are scaled to [0, 1]
    and reshaped to (N, 1, 28, 28); the 'label' column becomes a
    LongTensor of class targets.

    The original body called `data_df.head()` and discarded the result —
    a dead no-op that has been removed.
    """
    Y = torch.from_numpy(data_df.label.values).type(torch.LongTensor)
    pixels = data_df.loc[:, data_df.columns != 'label'].values / 255
    X = torch.from_numpy(pixels.reshape((-1, 1, 28, 28)))
    return GetData(X, Y, Transform)
Digit Recognizer
14,259,513
train_df, dev_df = train_test_split(train_df, random_state=200, train_size=0.70 )<prepare_x_and_y>
trainset = getSet(train_df, train_transforms) validateset = getSet(validate_df, original_transforms) dataloaders = { 'train': DataLoader(trainset, batch_size=BATCH, shuffle=True, num_workers=4), 'val': DataLoader(validateset, batch_size=BATCH, shuffle=True, num_workers=4)}
Digit Recognizer
14,259,513
train_df = train_df.drop(train_df[(train_df.price < 1.0)].index) train_y = np.log1p(train_df["price"]) dev_y = np.log1p(dev_df["price"] )<feature_engineering>
dataset_sizes = { 'train': len(trainset), 'val': len(validateset)} dataset_sizes
Digit Recognizer
14,259,513
train_df['general_cat'], train_df['subcat_1'], train_df['subcat_2'] = \ zip(*train_df['category_name'].apply(lambda x: split_cat(x))) train_df.drop('category_name', axis=1, inplace=True) print('[{}] Split categories completed.'.format(time.time() - start_time)) handle_missing_inplace(train_df) print('[{}] Handle missing completed.'.format(time.time() - start_time)) cutting(train_df) print('[{}] Cut completed.'.format(time.time() - start_time)) to_categorical(train_df) print('[{}] Convert categorical completed'.format(time.time() - start_time)) <categorify>
class VGG(nn.Module):
    """VGG-style classifier: a conv `features` backbone, global average
    pooling to 1x1, and a single-Linear classifier head.

    NOTE(review): the head is Linear(256, num_classes), so `features`
    must end with 256 channels (true for cfg 'A' below) — confirm if
    other cfgs are ever used.
    """

    def __init__(
        self,
        features: nn.Module,
        num_classes: int = 1000,
        init_weights: bool = True
    ) -> None:
        super(VGG, self).__init__()
        self.features = features
        # Pool any spatial size down to 1x1 so the head sees a fixed width.
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.classifier = nn.Sequential(
            nn.Linear(256, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.features(x)
        x = self.avgpool(x)
        # Flatten all dims after batch: (N, 256, 1, 1) -> (N, 256).
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self) -> None:
        # Standard torchvision-style init: He for convs, ones/zeros for
        # batch norm, small normal for linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)


# Layer configurations: ints are conv output channels, 'M' is a 2x2 max-pool.
# NOTE(review): the 'firstPadding' entry is not read by any code visible
# here (and its int value does not match the declared List type).
cfgs: Dict[str, List[Union[str, int]]] = {
    'A': [64, 'M', 128, 128, 'M', 256, 256, 256, 'M'],
    'B': [16, 'M', 256, 'M'],
    'firstPadding': 2
}


def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential:
    """Translate a cfg list into a Sequential of conv/BN/ReLU/pool layers.

    Input is single-channel (MNIST).  Every conv uses 3x3 kernels with
    padding 1, except the conv at index i == 5 which gets padding 2.
    """
    layers: List[nn.Module] = []
    in_channels = 1
    in_padding = 5  # initial value is overwritten before first use
    i = 0
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            v = cast(int, v)
            in_padding = 1
            if i == 5:
                in_padding = 2
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=in_padding)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
            # NOTE(review): reconstructed indentation places the counter
            # increment inside the conv branch, i.e. `i` counts conv
            # layers only (for cfg 'A', i == 5 is the last conv) —
            # original whitespace was lost, confirm against the source
            # notebook.
            i = i + 1
    return nn.Sequential(*layers)


def _vgg(arch: str, cfg: str, batch_norm: bool, num_classes: int, **kwargs: Any) -> VGG:
    # `arch` is accepted for torchvision-API symmetry but unused here.
    model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), num_classes, **kwargs)
    return model


def selfDefineVgg(num_classes: int, **kwargs: Any) -> VGG:
    """Build the cfg-'A' VGG variant with batch norm enabled."""
    return _vgg('vgg', 'A', True, num_classes, **kwargs)


model = selfDefineVgg(10)
model = model.to(DEVICE)
Digit Recognizer
14,259,513
desc_normalizer = Normalizer() name_normalizer = Normalizer() text_length_feature(train_df) print('[{}] Calculate length features'.format(time.time() - start_time)) normalize_dataset_text(train_df) print('[{}] Normalization text'.format(time.time() - start_time))<categorify>
criterion = nn.CrossEntropyLoss()
Digit Recognizer
14,259,513
all_brand = set(train_df["brand_name"].values) all_general_cat = set(train_df["general_cat"].values) all_subcat_1 = set(train_df["subcat_1"].values) all_subcat_2 = set(train_df["subcat_2"].values) le_brand = LabelEncoder() le_general_cat = LabelEncoder() le_subcat_1 = LabelEncoder() le_subcat_2 = LabelEncoder() le_brand.fit(train_df['brand_name']) train_df['encoded_brand_name'] = le_brand.transform(train_df['brand_name']) le_general_cat.fit(train_df['general_cat']) train_df['encoded_general_cat'] = le_general_cat.transform(train_df['general_cat']) le_subcat_1.fit(train_df['subcat_1']) train_df['encoded_subcat_1'] = le_subcat_1.transform(train_df['subcat_1']) le_subcat_2.fit(train_df['subcat_2']) train_df['encoded_subcat_2'] = le_subcat_2.transform(train_df['subcat_2'] )<feature_engineering>
optimizer = torch.optim.Adam(model.parameters() , lr=LR, betas=(0.9, 0.999), eps=1e-9) scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.7, patience=3, verbose=True )
Digit Recognizer
14,259,513
print("Tokenizing item description") tok_desc = Tokenizer() tok_desc.fit_on_texts(train_df["item_description"].values) print("Tokenizing name") tok_name = Tokenizer() tok_name.fit_on_texts(train_df["name"].values) print("Transforming text to sequences...") train_df['seq_item_description'] = tok_desc.texts_to_sequences(train_df["item_description"].values) train_df['seq_name'] = tok_name.texts_to_sequences(train_df["name"].values) <define_variables>
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    """Standard two-phase (train/val) training loop.

    Iterates the module-level `dataloaders` on the module-level `device`,
    tracks per-phase loss and accuracy against `dataset_sizes`, keeps a
    deep copy of the weights with the best validation accuracy, and
    restores them before returning the model.

    NOTE(review): `scheduler` is stepped with the training accuracy
    (scaled by 100), i.e. a ReduceLROnPlateau-style scheduler monitoring
    'max' — confirm this matches the scheduler constructed at call site.
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        for phase in ['train', 'val']:
            # Toggle dropout/batch-norm behavior per phase.
            if phase == 'train':
                model.train()
            else:
                model.eval()
            running_loss = 0.0
            running_corrects = 0
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)
                optimizer.zero_grad()
                # Gradients only tracked during the training phase.
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # Weight batch loss by batch size for a correct epoch mean.
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                acc = 100. * running_corrects.double() / dataset_sizes[phase]
                scheduler.step(acc)
            epoch_loss = running_loss / dataset_sizes[phase]
            epoch_acc = running_corrects.double() / dataset_sizes[phase]
            if phase == 'train':
                print('Epoch {}/{}'.format(epoch, num_epochs - 1))
                print('-' * 10)
            # NOTE(review): original indentation was lost; this print is
            # reconstructed as running for both phases (its format string
            # interpolates `phase`) — confirm against the source notebook.
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # Restore the best-validation snapshot before handing the model back.
    model.load_state_dict(best_model_wts)
    return model
Digit Recognizer
14,259,513
MAX_NAME_SEQ = 15 MAX_ITEM_DESC_SEQ = 50 MAX_DESC_TEXT = len(tok_desc.word_index)+ 1 MAX_NAME_TEXT = len(tok_name.word_index)+ 1 MAX_BRAND = len(le_brand.classes_) MAX_GENCAT = len(le_general_cat.classes_) MAX_SUBCAT_1 = len(le_subcat_1.classes_) MAX_SUBCAT_2 = len(le_subcat_2.classes_) MAX_CONDITION = max(train_df.item_condition_id)+ 1<categorify>
model = train_model(model, criterion, optimizer, scheduler, num_epochs=EPOCHS )
Digit Recognizer
14,259,513
def get_rnn_data(dataset):
    """Assemble the dict of padded/encoded inputs the Keras RNN expects.

    Text columns are padded to the module-level MAX_* lengths; the
    remaining features are passed through as plain numpy arrays.
    """
    return {
        'name': pad_sequences(dataset.seq_name, maxlen=MAX_NAME_SEQ),
        'item_desc': pad_sequences(dataset.seq_item_description,
                                   maxlen=MAX_ITEM_DESC_SEQ),
        'brand_name': np.array(dataset.encoded_brand_name),
        'item_condition': np.array(dataset.item_condition_id),
        'num_vars': np.array(dataset[["shipping"]]),
        'desc_len': np.array(dataset[["desc_len"]]),
        'name_len': np.array(dataset[["name_len"]]),
        'general_cat': np.array(dataset.encoded_general_cat),
        'subcat_1': np.array(dataset.encoded_subcat_1),
        'subcat_2': np.array(dataset.encoded_subcat_2),
    }
y_val = validate_df.label.values x_val = validate_df.loc[:, validate_df.columns != 'label'].values/255 x_val = x_val.reshape(( -1, 1, 28, 28)) print(x_val[0].shape) with torch.no_grad() : model.eval() preds = model(torch.from_numpy(x_val ).to(DEVICE)).cpu().argmax(dim=1)
Digit Recognizer
14,259,513
train_X = get_rnn_data(train_df )<define_variables>
with torch.no_grad() : model.eval() sample_sub['Label'] = model(torch.from_numpy(Xt ).to(DEVICE)).cpu().argmax(dim=1 )
Digit Recognizer
14,259,513
<init_hyperparams><EOS>
sample_sub.to_csv("submission.csv", index=False )
Digit Recognizer
14,596,146
<SOS> metric: categorizationaccuracy Kaggle data source: digit-recognizer<train_model>
test_df = pd.read_csv('.. /input/digit-recognizer/test.csv') train_df = pd.read_csv('.. /input/digit-recognizer/train.csv') print('Shape of the testing dataset:', test_df.shape) print('Shape of the training dataset:', train_df.shape )
Digit Recognizer
14,596,146
model = rnn_model(lr=lr_init, decay=lr_decay) model.fit(train_X, train_y, epochs=epochs, batch_size=BATCH_SIZE, verbose=2 )<feature_engineering>
train_labels = train_df['label'] train_images = train_df.drop('label', axis=1) train_images = train_images.astype('float32')/255 test_images = test_df.astype('float32')/255 train_images = np.array(train_images ).reshape(42000,28,28,1) test_images = np.array(test_images ).reshape(28000,28,28,1) train_images, validation_images, train_labels, validation_labels = train_test_split(train_images, train_labels, test_size=0.1, random_state=0) train_labels = to_categorical(train_labels) validation_labels = to_categorical(validation_labels )
Digit Recognizer
14,596,146
dev_df['general_cat'], dev_df['subcat_1'], dev_df['subcat_2'] = \ zip(*dev_df['category_name'].apply(lambda x: split_cat(x))) handle_missing_inplace(dev_df) cutting(dev_df) text_length_feature(dev_df) normalize_dataset_text(dev_df) delete_unseen(dev_df) to_categorical(dev_df )<categorify>
datagen = ImageDataGenerator(rotation_range=10, width_shift_range=0.05, height_shift_range=0.05, shear_range=0.1, zoom_range=0.05, horizontal_flip=False, fill_mode='nearest') datagen.fit(train_images )
Digit Recognizer
14,596,146
dev_df['encoded_brand_name'] = le_brand.transform(dev_df['brand_name']) dev_df['encoded_general_cat'] = le_general_cat.transform(dev_df['general_cat']) dev_df['encoded_subcat_1'] = le_subcat_1.transform(dev_df['subcat_1']) dev_df['encoded_subcat_2'] = le_subcat_2.transform(dev_df['subcat_2']) dev_df['seq_item_description'] = tok_desc.texts_to_sequences(dev_df["item_description"].values) dev_df['seq_name'] = tok_name.texts_to_sequences(dev_df["name"].values) dev_X = get_rnn_data(dev_df) preds_rnn = model.predict(dev_X) print("RNN dev RMSLE:", rmsle(np.expm1(dev_y), np.expm1(preds_rnn.flatten())) )<feature_engineering>
def createModel():
    """CNN for 28x28x1 digit images.

    Four conv stages (32 -> 64 -> 128 -> 256 filters), each followed by
    batch norm, 2x2 max-pooling and 20% dropout, then a 256-unit dense
    head and a 10-way softmax.  Compiled with Adam (1e-3) and
    categorical crossentropy.
    """
    model = models.Sequential()
    model.add(layers.Conv2D(32, (3, 3), activation='relu',
                            padding='same', input_shape=(28, 28, 1)))
    # Each iteration closes the previous stage (BN / pool / dropout)
    # and opens the next conv stage.
    for filters in (64, 128, 256):
        model.add(layers.BatchNormalization())
        model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Dropout(0.2))
        model.add(layers.Conv2D(filters, (3, 3), activation='relu',
                                padding='same'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Dropout(0.2))
    model.add(layers.Flatten())
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.Dropout(0.2))
    model.add(layers.Dense(10, activation='softmax'))
    model.compile(optimizer=optimizers.Adam(lr=1e-3),
                  loss='categorical_crossentropy',
                  metrics=['acc'])
    return model
Digit Recognizer
14,596,146
test_df['general_cat'], test_df['subcat_1'], test_df['subcat_2'] = \ zip(*test_df['category_name'].apply(lambda x: split_cat(x))) test_df.drop('category_name', axis=1, inplace=True) handle_missing_inplace(test_df) cutting(test_df) text_length_feature(test_df) normalize_dataset_text(test_df) delete_unseen(test_df) to_categorical(test_df )<predict_on_test>
lr_reduction = ReduceLROnPlateau(monitor = 'val_acc', factor = 0.5, min_lr = 1e-6) checkpoint = ModelCheckpoint('./trainedModel.hdf5',monitor = 'val_acc', mode = "max", save_best_model = True )
Digit Recognizer