kernel_id: int64 (range 24.2k-23.3M)
prompt: string (lengths 8-1.85M)
completion: string (lengths 1-182k)
comp_name: string (lengths 5-57)
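Each record below is one (kernel_id, prompt, completion, comp_name) row; the code is notebook-cell text, and the `<...>` tags appear to label the operation of the cell that follows them. As a minimal sketch of inspecting rows in this shape (the kernels.parquet file name is an assumption, not something the header specifies):

import pandas as pd

# Hypothetical export of the rows shown below.
df = pd.read_parquet("kernels.parquet")
# Columns per the schema above: kernel_id (int64), prompt, completion, comp_name (strings).
print(df.dtypes)
print(df["comp_name"].value_counts().head())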
12,607,235
n_folds = 5
seed = 42
ScaleData = False
if ScaleData:
    scaler = MinMaxScaler(feature_range=(0, 1)).fit(train[features])
    train.loc[:, features] = scaler.transform(train[features])
X = train[features]
y = (train['resp'].values > 0).astype(int)
X_train = train[train['date'] < 400][features]
X_test = train[train['date'] >= 400][features]
train['action'] = ((train['weight'].values * train['resp'].values) > 0).astype('int')
y_train = train[train['date'] < 400]['action']
y_test = train[train['date'] >= 400]['action']
# Note: the date-based split above is immediately overwritten here, as in the original cell.
X_train = train[features]
y_train = train['action']<count_unique_values>
model_pred = np.zeros((X_test_resized.shape[0], label_count))
for fold_i in range(NFOLD):
    best_model_file_path = make_best_model_file_path(fold_i)
    model.load_weights(best_model_file_path)
    train_ds, val_ds = get_fold(fold_i)
    val_pred = model.predict(val_ds)
    val_pred_file_name = make_val_pred_file_path(fold_i)
    np.save(val_pred_file_name, val_pred)
    test_ds = make_test_ds(X_test_resized)
    print("Fold {0}".format(fold_i), end='', flush=True)
    for i in range(TTA_COUNT):
        print('.', end='', flush=True)
        model_pred += model.predict(test_ds)
    print()
y_pred = np.argmax(model_pred, axis=1)
print(y_pred.shape)
Digit Recognizer
12,607,235
unique, counts = np.unique(y_test, return_counts=True)
dict(zip(unique, counts))<train_model>
submission_df = read_df('sample_submission.csv')
submission_df
Digit Recognizer
12,607,235
<normalization><EOS>
submission_df['Label'] = y_pred
submission_df.to_csv("submission.csv", index=False)
!head submission.csv
Digit Recognizer
13,231,007
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<load_from_csv>
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
from sklearn.model_selection import train_test_split
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.layers as layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow.keras.layers.experimental.preprocessing as preprocessing
Digit Recognizer
13,231,007
SEED = 1111
np.random.seed(SEED)

# Imports needed by this cell but missing from it in the original notebook.
import tensorflow_addons as tfa
import janestreet
from tqdm import tqdm

train = pd.read_csv('../input/jane-street-market-prediction/train.csv')
train = train.query('date > 85').reset_index(drop=True)
train = train[train['weight'] != 0]
train.fillna(train.mean(), inplace=True)
train['action'] = ((train['resp'].values) > 0).astype(int)
features = [c for c in train.columns if "feature" in c]
f_mean = np.mean(train[features[1:]].values, axis=0)
resp_cols = ['resp_1', 'resp_2', 'resp_3', 'resp', 'resp_4']
X_train = train.loc[:, train.columns.str.contains('feature')]
y_train = np.stack([(train[c] > 0).astype('int') for c in resp_cols]).T

def create_mlp(num_columns, num_labels, hidden_units, dropout_rates, label_smoothing, learning_rate):
    inp = tf.keras.layers.Input(shape=(num_columns,))
    x = tf.keras.layers.BatchNormalization()(inp)
    x = tf.keras.layers.Dropout(dropout_rates[0])(x)
    for i in range(len(hidden_units)):
        x = tf.keras.layers.Dense(hidden_units[i])(x)
        x = tf.keras.layers.BatchNormalization()(x)
        x = tf.keras.layers.Activation(tf.keras.activations.swish)(x)
        x = tf.keras.layers.Dropout(dropout_rates[i + 1])(x)
    x = tf.keras.layers.Dense(num_labels)(x)
    out = tf.keras.layers.Activation("sigmoid")(x)
    model = tf.keras.models.Model(inputs=inp, outputs=out)
    model.compile(
        optimizer=tfa.optimizers.RectifiedAdam(learning_rate=learning_rate),
        loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=label_smoothing),
        metrics=tf.keras.metrics.AUC(name="AUC"),
    )
    return model

epochs = 200
batch_size = 4096
hidden_units = [160, 160, 160]
dropout_rates = [0.2, 0.2, 0.2, 0.2]
label_smoothing = 1e-2
learning_rate = 1e-3

tf.keras.backend.clear_session()
tf.random.set_seed(SEED)
clf = create_mlp(len(features), 5, hidden_units, dropout_rates, label_smoothing, learning_rate)
clf.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, verbose=2)
clf.save('model.h5')

th = 0.502
models = [clf]
f = np.median
env = janestreet.make_env()
for (test_df, pred_df) in tqdm(env.iter_test()):
    if test_df['weight'].item() > 0:
        x_tt = test_df.loc[:, features].values
        if np.isnan(x_tt[:, 1:].sum()):
            # Fill missing features with the training means (see the sketch below).
            x_tt[:, 1:] = np.nan_to_num(x_tt[:, 1:]) + np.isnan(x_tt[:, 1:]) * f_mean
        pred = np.mean([model(x_tt, training=False).numpy() for model in models], axis=0)
        pred = f(pred)
        pred_df.action = np.where(pred >= th, 1, 0).astype(int)
    else:
        pred_df.action = 0
    env.predict(pred_df)<install_modules>
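The inference loop's np.nan_to_num(x) + np.isnan(x) * f_mean line fills missing features with the training means in one vectorized step. A minimal sketch with made-up values:

import numpy as np

x = np.array([[1.0, np.nan, 3.0]])   # one test row with a missing feature
f_mean = np.array([0.5, 2.0, 0.5])   # per-feature training means

# nan_to_num zeroes the NaN slots; isnan is True exactly at those slots,
# so isnan * f_mean adds the training mean only where the value was missing.
filled = np.nan_to_num(x) + np.isnan(x) * f_mean
print(filled)  # [[1. 2. 3.]]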
%matplotlib inline
plt.style.use('bmh')
np.random.seed(42)
Digit Recognizer
13,231,007
%%bash
python -m pip install py7zr
mkdir data<load_pretrained>
data = pd.read_csv("../input/digit-recognizer/train.csv")
holdout = pd.read_csv("../input/digit-recognizer/test.csv")
Digit Recognizer
13,231,007
import py7zr    # needed by this cell; missing from it in the original
import zipfile  # likewise

for f in ["train.tsv.7z", "test.tsv.7z"]:
    archive = py7zr.SevenZipFile("../input/mercari-price-suggestion-challenge/{}".format(f), mode='r')
    archive.extractall(path="./data")
    archive.close()
with zipfile.ZipFile("../input/mercari-price-suggestion-challenge/test_stg2.tsv.zip", 'r') as zip_ref:
    zip_ref.extractall("./data")<install_modules>
X = data
y = X.pop('label')
del data
Digit Recognizer
13,231,007
%%bash
git clone https://github.com/joren015/MercariProject.git
cp -r MercariProject/* ./
python -m pip install -r requirements.txt<set_options>
X = X / 255.0
holdout = holdout / 255.0
Digit Recognizer
13,231,007
%%bash
python main.py neural_network predict<set_options>
X = X.values.reshape(-1, 28, 28, 1)
holdout = holdout.values.reshape(-1, 28, 28, 1)
y = keras.utils.to_categorical(y, num_classes=10)
Digit Recognizer
13,231,007
import os  # needed for os.walk below; missing from the original cell

pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
pd.set_option('display.max_colwidth', None)
pd.set_option('display.float_format', lambda x: '%.5f' % x)
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))<load_pretrained>
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size =.1, stratify = y, random_state = 42 )
Digit Recognizer
13,231,007
!apt-get install p7zip
!p7zip -d -f -k /kaggle/input/mercari-price-suggestion-challenge/train.tsv.7z
!unzip -o /kaggle/input/mercari-price-suggestion-challenge/sample_submission_stg2.csv.zip
!unzip -o /kaggle/input/mercari-price-suggestion-challenge/test_stg2.tsv.zip<load_from_csv>
model = keras.Sequential([
    layers.InputLayer(input_shape=[28, 28, 1]),
    preprocessing.RandomRotation(factor=0.10),
    preprocessing.RandomTranslation(height_factor=0.1, width_factor=0.1),
    preprocessing.RandomZoom(height_factor=0.1, width_factor=0.1),
    layers.BatchNormalization(renorm=True),
    layers.Conv2D(filters=32, kernel_size=5, activation='relu', padding='same'),
    layers.Conv2D(filters=32, kernel_size=5, activation='relu', padding='same'),
    layers.Dropout(0.25),
    layers.Conv2D(filters=128, kernel_size=3, activation='relu', padding='same'),
    layers.MaxPool2D(),
    layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'),
    layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'),
    layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)),
    layers.Flatten(),
    layers.Dense(256, activation='relu'),
    layers.Dense(10, activation='softmax'),
])
Digit Recognizer
13,231,007
train = pd.read_csv("train.tsv", low_memory=False, sep='\t') test = pd.read_csv("test_stg2.tsv", low_memory=False, sep='\t') train.info(memory_usage="deep" )<count_unique_values>
optimizer = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
Digit Recognizer
13,231,007
df = pd.concat([train.drop(["train_id", "price"], axis=1).nunique(),
                test.drop(["test_id"], axis=1).nunique()], axis=1)
df.columns = ["Train", "Test"]
df.T<count_values>
epochs = 300
batch_size = 64
Digit Recognizer
13,231,007
train["name"].value_counts() [:5], test["name"].value_counts() [:5]<count_values>
early_stopping = keras.callbacks.EarlyStopping( min_delta=0.001, patience=10, restore_best_weights=True, )
Digit Recognizer
13,231,007
train["brand_name"].value_counts() [:5], test["brand_name"].value_counts() [:5]<count_values>
optimizer = tf.keras.optimizers.Adam(epsilon=0.01)
model.compile(
    optimizer=optimizer,
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)
history = model.fit(X_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    validation_data=(X_test, y_test),
                    callbacks=[early_stopping],
                    verbose=2)
Digit Recognizer
13,231,007
train["category_name"].value_counts() [:5], test["category_name"].value_counts() [:5]<count_values>
results = model.predict(holdout)
results = np.argmax(results, axis=1)
results = pd.Series(results, name="Label")
Digit Recognizer
13,231,007
train["item_description"].value_counts() [:5], test["item_description"].value_counts() [:5]<drop_column>
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1)
submission.to_csv("digits_submission.csv", index=False)
Digit Recognizer
5,674,715
train.drop(train[train["price"] <= 0].index, inplace=True)
train.reset_index(inplace=True, drop=True)<feature_engineering>
from keras.utils.np_utils import to_categorical
Digit Recognizer
5,674,715
import re                         # needed below; missing from the original cell
from nltk.corpus import stopwords  # likewise

def process_data(df):
    df[["First_category", "Second_category", "Third_category"]] = df["category_name"].str.split('/', 2, expand=True)
    df.drop("category_name", axis=1, inplace=True)
    df["item_description"] = df["item_description"].replace({"No description yet": np.nan})
    df["Category_was_missing"] = df["First_category"].isna()
    df["Brand_was_missing"] = df["brand_name"].isna()
    df["Description_was_missing"] = df["item_description"].isna()
    for i in ["brand_name", "First_category", "Second_category", "Third_category", "item_description"]:
        df[i] = df[i].fillna("missing")
    df["Price_was_in_description"] = df["item_description"].str.contains("\[rm\]")
    df["Price_was_in_name"] = df["name"].str.contains("\[rm\]")
    df["descr_len"] = df["item_description"].str.len()
    df["descr_len"] = pd.cut(df["descr_len"], np.arange(0, 1055, 5), right=False)
    df["descr_len"] = df["descr_len"].astype("string")
    stop_words = set(stopwords.words("english"))
    for column in ["item_description", "brand_name", "name"]:
        processed_column = []
        for text_row in df[column]:
            text_row = text_row.replace('[rm]', '')
            text_row = re.sub('[^A-Za-z0-9]+', ' ', text_row)
            if column != "brand_name":
                text_row = ' '.join(word for word in text_row.lower().split() if word not in stop_words)
            processed_column.append(text_row.strip())
        df[column] = processed_column
    for column in ["First_category", "Second_category", "Third_category"]:
        processed_column = []
        for text_row in df[column]:
            text_row = text_row.replace(' ', '')
            text_row = text_row.replace('&', '_')
            text_row = re.sub('[^A-Za-z0-9_]+', ' ', text_row)
            processed_column.append(text_row.lower().strip())
        df[column] = processed_column
    return df<create_dataframe>
training_dataset = pd.read_csv("/kaggle/input/train.csv")
testing_dataset = pd.read_csv("/kaggle/input/test.csv")
Digit Recognizer
5,674,715
train = process_data(train )<prepare_x_and_y>
Y_train_init = training_dataset['label']
X_train_init = training_dataset.drop(labels=['label'], axis=1)
Digit Recognizer
5,674,715
def make_tokens_count_plot(count_vercorizer, matrix, column_name, set_name="train"):
    if set_name == "train":
        color = "lightcoral"
    else:
        color = "steelblue"
    df = pd.DataFrame()
    df["tokens"] = count_vercorizer.get_feature_names()
    df["counts"] = np.asarray(matrix.sum(axis=0))[0]
    x = df.sort_values("counts", axis=0, ascending=False).head(50)["tokens"]
    y = df.sort_values("counts", axis=0, ascending=False).head(50)["counts"]
    fig, ax = plt.subplots(figsize=(15, 6))
    ax.bar(x, y, color=color, edgecolor="black")
    ax.set_title(f"Most popular words in the {column_name} column of the {set_name} dataset", fontsize=20, pad=15)
    ax.set_ylabel("Count", fontsize=14, labelpad=15)
    ax.set_xlabel("Word", fontsize=14, labelpad=10)
    ax.set_xticks(x)
    ax.set_xticklabels(x, rotation=60, ha="right", rotation_mode='anchor')
    ax.tick_params(axis="x", labelsize=14)
    ax.tick_params(axis="y", labelsize=14)
    ax.grid(axis="y")
    ax.margins(0.025, 0.05)
    plt.show()<split>
Digit Recognizer
5,674,715
def get_transformed_train_valid_data(df, y, cat_features):
    X_train, X_valid, y_train, y_valid = train_test_split(df, y, test_size=0.2, random_state=42)
    label_binarizers = []
    binarized_columns = []
    count_vercorizers = []
    vectorized_columns = []
    for column in cat_features:
        binarizer = LabelBinarizer(sparse_output=True)
        binarized_columns.append(binarizer.fit_transform(X_train[column]))
        label_binarizers.append(binarizer)
    vectorizer = CountVectorizer(min_df=7, max_features=20000)
    vectorized_column = vectorizer.fit_transform(X_train["name"])
    count_vercorizers.append(vectorizer)
    vectorized_columns.append(vectorized_column)
    make_tokens_count_plot(vectorizer, vectorized_column, "name", "train")
    vectorizer = CountVectorizer(min_df=15, ngram_range=(1, 2), max_features=60000)
    vectorized_column = vectorizer.fit_transform(X_train["item_description"])
    count_vercorizers.append(vectorizer)
    vectorized_columns.append(vectorized_column)
    make_tokens_count_plot(vectorizer, vectorized_column, "item_description", "train")
    vectorizer = CountVectorizer(min_df=30, ngram_range=(3, 3), max_features=10000)
    vectorized_column = vectorizer.fit_transform(X_train["item_description"])
    count_vercorizers.append(vectorizer)
    vectorized_columns.append(vectorized_column)
    make_tokens_count_plot(vectorizer, vectorized_column, "item_description", "train")
    print(f"Name columns vectorized shape is {vectorized_columns[0].shape}")
    print(f"Item_description columns vectorized shape is {vectorized_columns[1].shape}")
    print(f"Item_description columns vectorized (1,3) shape is {vectorized_columns[2].shape}")
    # Same 12 binarized + 3 vectorized blocks as the original, stacked via unpacking.
    X_train_stack = hstack((*binarized_columns, *vectorized_columns)).tocsr()
    X_valid_stack = get_transformed_test_data(X_valid, cat_features, label_binarizers, count_vercorizers)
    return X_train_stack, X_valid_stack, y_train, y_valid, label_binarizers, count_vercorizers

def get_transformed_test_data(df, cat_features, label_binarizers, count_vercorizers):
    binarized_columns = []
    vectorized_columns = []
    for num, column in enumerate(cat_features):
        binarizer = label_binarizers[num]
        binarized_columns.append(binarizer.transform(df[column]))
    vectorizer = count_vercorizers[0]
    vectorized_column = vectorizer.transform(df["name"])
    vectorized_columns.append(vectorized_column)
    make_tokens_count_plot(vectorizer, vectorized_column, "name", "valid/test")
    vectorizer = count_vercorizers[1]
    vectorized_column = vectorizer.transform(df["item_description"])
    vectorized_columns.append(vectorized_column)
    make_tokens_count_plot(vectorizer, vectorized_column, "item_description", "valid/test")
    vectorizer = count_vercorizers[2]
    vectorized_column = vectorizer.transform(df["item_description"])
    vectorized_columns.append(vectorized_column)
    make_tokens_count_plot(vectorizer, vectorized_column, "item_description", "valid/test")
    print(f"Name columns vectorized shape is {vectorized_columns[0].shape}")
    print(f"Item_description columns vectorized shape is {vectorized_columns[1].shape}")
    print(f"Item_description columns vectorized (1,3) shape is {vectorized_columns[2].shape}")
    X_test_stack = hstack((*binarized_columns, *vectorized_columns)).tocsr()
    return X_test_stack

def get_transformed_train_data(df, cat_features):
    label_binarizers = []
    binarized_columns = []
    count_vercorizers = []
    vectorized_columns = []
    for column in cat_features:
        binarizer = LabelBinarizer(sparse_output=True)
        binarized_columns.append(binarizer.fit_transform(df[column]))
        label_binarizers.append(binarizer)
    vectorizer = CountVectorizer(min_df=7, max_features=20000)
    vectorized_column = vectorizer.fit_transform(df["name"])
    count_vercorizers.append(vectorizer)
    vectorized_columns.append(vectorized_column)
    make_tokens_count_plot(vectorizer, vectorized_column, "name", "train")
    vectorizer = CountVectorizer(min_df=15, ngram_range=(1, 2), max_features=60000)
    vectorized_column = vectorizer.fit_transform(df["item_description"])
    count_vercorizers.append(vectorizer)
    vectorized_columns.append(vectorized_column)
    make_tokens_count_plot(vectorizer, vectorized_column, "item_description", "train")
    vectorizer = CountVectorizer(min_df=30, ngram_range=(3, 3), max_features=10000)
    vectorized_column = vectorizer.fit_transform(df["item_description"])
    count_vercorizers.append(vectorizer)
    vectorized_columns.append(vectorized_column)
    make_tokens_count_plot(vectorizer, vectorized_column, "item_description", "train")
    print(f"Name columns vectorized shape is {vectorized_columns[0].shape}")
    print(f"Item_description columns vectorized shape is {vectorized_columns[1].shape}")
    print(f"Item_description columns vectorized (1,3) shape is {vectorized_columns[2].shape}")
    X_train_stack = hstack((*binarized_columns, *vectorized_columns)).tocsr()
    return X_train_stack, label_binarizers, count_vercorizers<drop_column>
Y_train = training_dataset['label']
X_train = training_dataset.drop(labels=['label'], axis=1)
nums = 10
Digit Recognizer
5,674,715
cat_features = ["item_condition_id", "First_category", "Second_category", "Third_category", "shipping",
                "brand_name", "Description_was_missing", "Price_was_in_name", "Price_was_in_description",
                "Brand_was_missing", "Category_was_missing", "descr_len"]
X_train, label_binarizers, count_vercorizers = get_transformed_train_data(train.drop(["train_id", "price"], axis=1), cat_features)
y_train = np.log1p(train["price"])<drop_column>
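Note that y_train is the price in log space; whatever the model predicts has to be mapped back with np.expm1, which the submission cell below does. A two-line check that the pair really is an inverse:

import numpy as np

price = np.array([10.0, 100.0])
assert np.allclose(np.expm1(np.log1p(price)), price)  # log1p and expm1 round-trip exactly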
X_test = testing_dataset.values
Digit Recognizer
5,674,715
import gc  # needed below; missing from the original cell

# Iterate over a copy of the column index: dropping inplace while iterating
# train.columns directly would skip columns.
for column in list(train.columns):
    train.drop(column, axis=1, inplace=True)
del train
gc.collect()<compute_test_metric>
X_train = X_train/255.0
Digit Recognizer
5,674,715
def rmsle(y_true, y_preds):
    return np.sqrt(mean_squared_log_error(y_true, y_preds))

def get_scores(model, X_train, X_valid, y_train, y_valid):
    train_preds = model.predict(X_train)
    val_preds = model.predict(X_valid)
    scores = {"Training MAE": mean_absolute_error(y_train, train_preds),
              "Validation MAE": mean_absolute_error(y_valid, val_preds),
              "Training RMSLE": rmsle(y_train, train_preds),
              "Validation RMSLE": rmsle(y_valid, val_preds),
              "Training R^2": model.score(X_train, y_train),
              "Validation R^2": model.score(X_valid, y_valid)}
    return scores<train_model>
X_test = X_test/ 255.0
Digit Recognizer
5,674,715
%%time
from sklearn.linear_model import SGDRegressor  # needed here; missing from the original cell
model = SGDRegressor(alpha=0.000001, penalty="l2", random_state=42, loss="epsilon_insensitive", max_iter=50000)
model.fit(X_train, y_train)<drop_column>
Y_train = to_categorical(Y_train, num_classes=nums )
Digit Recognizer
5,674,715
del X_train, y_train
gc.collect()<create_dataframe>
random_seed = 2
Digit Recognizer
5,674,715
preds = pd.DataFrame(columns=["test_id", "price"])
preds["test_id"] = test["test_id"]<categorify>
from sklearn.model_selection import train_test_split
Digit Recognizer
5,674,715
test = process_data(test.drop("test_id", axis=1))
X_test = get_transformed_test_data(test, cat_features, label_binarizers, count_vercorizers)<save_to_csv>
from sklearn.model_selection import train_test_split
Digit Recognizer
5,674,715
preds["price"] = np.expm1(model.predict(X_test)) preds.to_csv('submission.csv', index=False) preds.head()<load_pretrained>
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, random_state=random_seed, test_size = 0.3 )
Digit Recognizer
5,674,715
!apt-get install p7zip
!p7zip -d -f -k /kaggle/input/mercari-price-suggestion-challenge/train.tsv.7z
!p7zip -d -f -k /kaggle/input/mercari-price-suggestion-challenge/test.tsv.7z
!p7zip -d -f -k /kaggle/input/mercari-price-suggestion-challenge/sample_submission.csv.7z<load_from_csv>
from keras import Sequential
from keras.layers import Dense, MaxPooling2D, Dropout, Flatten, Conv2D, BatchNormalization
from keras.callbacks import ReduceLROnPlateau, LearningRateScheduler
from keras.preprocessing.image import ImageDataGenerator
Digit Recognizer
5,674,715
!unzip /kaggle/input/mercari-price-suggestion-challenge/sample_submission_stg2.csv.zip
!unzip /kaggle/input/mercari-price-suggestion-challenge/test_stg2.tsv.zip<import_modules>
batch_size = 65
epochs = 23
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu', padding='same', input_shape=(28, 28, 1)))
model.add(BatchNormalization(scale=False, center=True))
model.add(MaxPooling2D(padding='same', pool_size=(2, 2)))
model.add(Conv2D(filters=32, kernel_size=(5, 5), activation='relu', padding='same'))
model.add(MaxPooling2D(padding='same', pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(5, 5), activation='relu', padding='same'))
model.add(MaxPooling2D(padding='same', pool_size=(2, 2)))
model.add(Conv2D(filters=64, kernel_size=(7, 7), activation='relu', padding='same'))
model.add(MaxPooling2D(padding='same', pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=128, kernel_size=(5, 5), activation='relu', padding='same'))
model.add(MaxPooling2D(padding='same', pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.25))
model.add(BatchNormalization(scale=False, center=True))
model.add(Dense(nums, activation='softmax'))
Digit Recognizer
5,674,715
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import math
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.preprocessing import LabelBinarizer<load_from_csv>
model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
Digit Recognizer
5,674,715
train = pd.read_table('train.tsv')
test = pd.read_table('test_stg2.tsv')
print(train.shape)
print(test.shape)<count_missing_values>
t = 1

def decide_f(t):
    # Called exactly once below, so factor stays fixed at 0.5; the local
    # t = t + 1 never affects anything outside this function.
    if t < 5:
        f = .5
    else:
        f = .9
    t = t + 1
    return f

reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=decide_f(t), verbose=1,
                              patience=2, min_lr=0.00000000001)
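Since decide_f(t) runs once, the factor never actually changes between epochs. If a factor that shifts after epoch 5 was the intent, a LearningRateScheduler is the usual fit (already imported in this notebook's import cell); thresholds below are taken from decide_f, the rest is an assumption about the intent:

from keras.callbacks import LearningRateScheduler

def schedule(epoch, lr):
    # Halve the learning rate over the first 5 epochs, then decay more gently.
    return lr * (0.5 if epoch < 5 else 0.9)

lr_schedule = LearningRateScheduler(schedule, verbose=1)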
Digit Recognizer
5,674,715
train.isnull().sum()<categorify>
datagen = ImageDataGenerator(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    rotation_range=15,
    zoom_range=0.1,
    width_shift_range=0.1,
    height_shift_range=0.1,
    horizontal_flip=False,
    vertical_flip=False)
Digit Recognizer
5,674,715
def handle_missing(dataset):
    dataset.category_name.fillna(value="missing", inplace=True)
    dataset.brand_name.fillna(value="missing", inplace=True)
    dataset.item_description.fillna(value="missing", inplace=True)
    return dataset<correct_missing_values>
datagen.fit(X_train)
h = model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size),
                        epochs=epochs,
                        validation_data=(X_val, Y_val),
                        verbose=1,
                        steps_per_epoch=X_train.shape[0] // batch_size,
                        callbacks=[reduce_lr])
final_loss, final_acc = model.evaluate(X_val, Y_val, verbose=0)
print("Final loss: {0:.6f}, final accuracy: {1:.6f}".format(final_loss, final_acc))
Digit Recognizer
5,674,715
train = handle_missing(train)
test = handle_missing(test)<categorify>
prediction = model.predict(X_test)
Digit Recognizer
5,674,715
def transform_category_name(category_name):
    try:
        sub1, sub2, sub3 = category_name.split('/')
        return sub1, sub2, sub3
    except:
        return "none", "none", "none"

train['subcat_1'], train['subcat_2'], train['subcat_3'] = zip(*train['category_name'].apply(transform_category_name))
test['subcat_1'], test['subcat_2'], test['subcat_3'] = zip(*test['category_name'].apply(transform_category_name))
train.head(10)<count_unique_values>
label = np.argmax(prediction, axis=1 )
Digit Recognizer
5,674,715
print("There are %d unique subcat_1." % train['subcat_1'].nunique()) print("There are %d unique subcat_2." % train['subcat_2'].nunique()) print("There are %d unique subcat_3." % train['subcat_3'].nunique() )<count_values>
test_id = np.reshape(range(1, len(prediction)+ 1), label.shape )
Digit Recognizer
5,674,715
<categorify><EOS>
my_submission = pd.DataFrame({'ImageId': test_id, 'Label': label})
my_submission.to_csv('submission.csv', index=False)
Digit Recognizer
13,071,393
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<categorify>
%matplotlib inline
Digit Recognizer
13,071,393
lb_shipping = LabelBinarizer(sparse_output=True)
train_shipping = lb_shipping.fit_transform(train['shipping'])
test_shipping = lb_shipping.transform(test['shipping'])
train_shipping.shape<categorify>
train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv") test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv") y_train = train["label"] x_train = train.drop(labels = ["label"], axis = 1) x_train = x_train.values.reshape(-1,28,28,1) test = test.values.reshape(-1,28,28,1) del train X_train /= 255 test /= 255
Digit Recognizer
13,071,393
lb_brand_name = LabelBinarizer(sparse_output=True)
train_brand_name = lb_brand_name.fit_transform(train['brand_name'])
test_brand_name = lb_brand_name.transform(test['brand_name'])
train_brand_name.shape<feature_engineering>
y_train = to_categorical(y_train, num_classes = 10 )
Digit Recognizer
13,071,393
count_vec = CountVectorizer()
train_name = count_vec.fit_transform(train['name'])
test_name = count_vec.transform(test['name'])
print(train_name.shape)<categorify>
random_seed = 2
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, random_state=random_seed)
Digit Recognizer
13,071,393
tfidf_des = TfidfVectorizer(max_features=50000, ngram_range=(1, 3), stop_words='english')
train_des = tfidf_des.fit_transform(train['item_description'])
test_des = tfidf_des.transform(test['item_description'])
train_des.shape<categorify>
import tensorflow as tf  # imports assumed by this cell; missing from it in the original
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, MaxPooling2D, Dropout, Flatten, Dense

def create_model():
    def add_conv_block(model, num_filters):
        model.add(Conv2D(num_filters, 3, activation='relu', padding='same'))
        model.add(BatchNormalization())
        model.add(Conv2D(num_filters, 3, activation='relu'))
        model.add(MaxPooling2D(pool_size=2))
        model.add(Dropout(0.5))
        return model

    model = tf.keras.models.Sequential()
    model.add(Input(shape=(28, 28, 1)))
    model = add_conv_block(model, 32)
    model = add_conv_block(model, 64)
    model = add_conv_block(model, 128)
    model.add(Flatten())
    model.add(Dense(128, activation="relu"))
    model.add(Dense(128, activation="relu"))
    model.add(Dense(10, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

model = create_model()
model.summary()
Digit Recognizer
13,071,393
lb_cat_1 = LabelBinarizer(sparse_output=True)
train_cat_1 = lb_cat_1.fit_transform(train['subcat_1'])
test_cat_1 = lb_cat_1.transform(test['subcat_1'])
lb_cat_2 = LabelBinarizer(sparse_output=True)
train_cat_2 = lb_cat_2.fit_transform(train['subcat_2'])
test_cat_2 = lb_cat_2.transform(test['subcat_2'])
lb_cat_3 = LabelBinarizer(sparse_output=True)
train_cat_3 = lb_cat_3.fit_transform(train['subcat_3'])
test_cat_3 = lb_cat_3.transform(test['subcat_3'])<set_options>
h = model.fit( x_train,y_train, validation_data =(x_val,y_val), epochs=20,batch_size=30 )
Digit Recognizer
13,071,393
sparse_matrix_list = (train_name, train_des, train_brand_name, train_condition,
                      train_shipping, train_cat_1, train_cat_2, train_cat_3)
X_train = hstack(sparse_matrix_list).tocsr()
print(X_train.shape)
del X_train
gc.collect()<compute_test_metric>
results = model.predict(test)
results = np.argmax(results, axis=1)
results = pd.Series(results, name="Label")
Digit Recognizer
13,071,393
<split><EOS>
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1)
submission.to_csv("cnn_mnist_datagen.csv", index=False)
Digit Recognizer
12,762,404
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<compute_test_metric>
%matplotlib inline
Digit Recognizer
12,762,404
model = Ridge()  # the original's "model = ridge = Ridge()" bound two names to one estimator
Ridge_preds, y_test = run_model(model, matrix_list=sparse_matrix_list)
print("rmsle: " + str(rmsle(np.expm1(y_test), np.expm1(Ridge_preds))))<load_from_csv>
test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv" )
Digit Recognizer
12,762,404
submission = pd.read_csv('sample_submission_stg2.csv')
submission
submission.loc[:, 'price'] = preds
submission<save_to_csv>
from keras.datasets import mnist  # needed here; missing from the original cell
(x_train, y_train), (x_test, y_test) = mnist.load_data()
Digit Recognizer
12,762,404
submission.to_csv('submission.csv', index=False )<save_to_csv>
from keras.utils import np_utils  # needed below; missing from the original cell

pred_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
pred_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
pred_train = pred_train.astype('float32')
pred_test = pred_test.astype('float32')
pred_train /= 255
pred_test /= 255
class_train = np_utils.to_categorical(y_train, 10)
class_test = np_utils.to_categorical(y_test, 10)
Digit Recognizer
12,762,404
submission.to_csv('submission.csv', index=False )<load_pretrained>
classifier = Sequential()
classifier.add(Conv2D(32, (3, 3), input_shape=(28, 28, 1), activation='relu'))
classifier.add(BatchNormalization())
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Conv2D(32, (3, 3), activation='relu'))
classifier.add(BatchNormalization())
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Flatten())
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dropout(0.2))
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dropout(0.2))
classifier.add(Dense(units=10, activation='softmax'))
classifier.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
Digit Recognizer
12,762,404
!apt-get install p7zip
!p7zip -d -f -k /kaggle/input/mercari-price-suggestion-challenge/train.tsv.7z<compute_test_metric>
classifier.fit(pred_train, class_train, batch_size = 128, epochs = 30, validation_data =(pred_test, class_test))
Digit Recognizer
12,762,404
def rmsle(y, y_pred):
    assert len(y) == len(y_pred)
    to_sum = [(math.log(y_pred[i] + 1) - math.log(y[i] + 1)) ** 2.0 for i, pred in enumerate(y_pred)]
    return (sum(to_sum) * (1.0 / len(y))) ** 0.5<load_from_csv>
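The per-element Python loop above is correct but slow on large arrays; an equivalent vectorized sketch:

import numpy as np

def rmsle_vec(y, y_pred):
    y, y_pred = np.asarray(y), np.asarray(y_pred)
    assert len(y) == len(y_pred)
    # log1p(x) == log(x + 1), applied elementwise instead of item by item.
    return np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y)) ** 2))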
X_test = test / 255
X_test = X_test.values
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32')
pred = classifier.predict(X_test)
pred.shape
Digit Recognizer
12,762,404
print("Loading data...") train = pd.read_table("train.tsv", sep="\t") test = pd.read_csv(".. /input/mercari-price-suggestion-challenge/test_stg2.tsv.zip" , sep='\t') print(train.shape) print(test.shape )<categorify>
sample_submit = pd.read_csv("/kaggle/input/digit-recognizer/sample_submission.csv")
sample_submit.head()
Digit Recognizer
12,762,404
<categorify><EOS>
y_pred = pred.argmax(axis=1)
ImageID = np.arange(len(y_pred)) + 1
submit = pd.DataFrame([ImageID, y_pred]).T
# The original called submit.rename(...) without assigning the result, a no-op;
# the header= argument below is what actually names the columns in the CSV.
submit.to_csv('submission.csv', header=['ImageId', 'Label'], index=None)
Digit Recognizer
14,318,308
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<feature_engineering>
from logging import DEBUG, Formatter, getLogger, StreamHandler
from matplotlib import pyplot
from numpy import arange, argmax, asarray
from pandas import DataFrame, read_csv
from tensorflow import random
from tensorflow.keras import initializers, layers, models, optimizers, preprocessing, utils
Digit Recognizer
14,318,308
print("Text to seq process...") raw_text = np.hstack([train.item_description.str.lower() , train.name.str.lower() ]) print(" Fitting tokenizer...") tok_raw = Tokenizer() tok_raw.fit_on_texts(raw_text) print(" Transforming text to seq...") train["seq_item_description"] = tok_raw.texts_to_sequences(train.item_description.str.lower()) test["seq_item_description"] = tok_raw.texts_to_sequences(test.item_description.str.lower()) train["seq_name"] = tok_raw.texts_to_sequences(train.name.str.lower()) test["seq_name"] = tok_raw.texts_to_sequences(test.name.str.lower()) train.head(3 )<define_variables>
FOLDER_PATH = '../input/digit-recognizer/'
FILE_NAME_TRAIN = 'train.csv'
FILE_NAME_TEST = 'test.csv'
FILE_NAME_SUBMISSION = 'submission.csv'
Digit Recognizer
14,318,308
MAX_NAME_SEQ = 10
MAX_ITEM_DESC_SEQ = 75
MAX_TEXT = np.max([np.max(train.seq_name.max()), np.max(test.seq_name.max()),
                   np.max(train.seq_item_description.max()), np.max(test.seq_item_description.max())]) + 2
MAX_CATEGORY = np.max([train.category_name.max(), test.category_name.max()]) + 1
MAX_BRAND = np.max([train.brand_name.max(), test.brand_name.max()]) + 1
MAX_CONDITION = np.max([train.item_condition_id.max(), test.item_condition_id.max()]) + 1<split>
def get_logger():
    fmt = '%(asctime)s'
    fmt += ' - %(levelname)s'
    fmt += ' - %(name)s'
    fmt += ' - %(lineno)d'
    fmt += ' - %(funcName)s'
    fmt += ' - %(message)s'
    handler = StreamHandler()
    handler.setLevel(DEBUG)
    handler.setFormatter(Formatter(fmt))
    logger = getLogger(__name__)
    logger.setLevel(DEBUG)
    logger.addHandler(handler)
    logger.propagate = False
    return logger

def kill_logger(logger):
    for h in logger.handlers:
        logger.removeHandler(h)
Digit Recognizer
14,318,308
dtrain, dvalid = train_test_split(train, random_state=123, train_size=0.99)
print(dtrain.shape)
print(dvalid.shape)<categorify>
def initialize() : random.set_seed(0)
Digit Recognizer
14,318,308
from keras.preprocessing.sequence import pad_sequences  # needed here; missing from the original cell

def get_keras_data(dataset):
    X = {
        'name': pad_sequences(dataset.seq_name, maxlen=MAX_NAME_SEQ),
        'item_desc': pad_sequences(dataset.seq_item_description, maxlen=MAX_ITEM_DESC_SEQ),
        'brand_name': np.array(dataset.brand_name),
        'category_name': np.array(dataset.category_name),
        'item_condition': np.array(dataset.item_condition_id),
        'num_vars': np.array(dataset[["shipping"]]),
    }
    return X

X_train = get_keras_data(dtrain)
X_valid = get_keras_data(dvalid)
X_test = get_keras_data(test)<compute_train_metric>
def get_model():
    layer_in, layer_out = get_layers()
    model = models.Model(name='Model', inputs=layer_in, outputs=layer_out)
    model.compile(
        loss='categorical_crossentropy',
        optimizer=optimizers.Nadam(learning_rate=0.01),
        metrics=['accuracy']
    )
    model.summary()
    print()
    return model

def get_layers():
    layer_in = get_input_layer('001', 28, 28, 1)
    layer_out = layer_in
    layer_out = get_conv_layer(layer_out, '002', 3, 3, 32)
    layer_out = get_conv_layer(layer_out, '003', 3, 3, 32)
    layer_out = get_pooling_layer(layer_out, '004', 2, 2)
    layer_out = get_dropout_layer(layer_out, '005', 0.2)
    layer_out = get_batchnormalization_layer(layer_out, '006')
    layer_out = get_conv_layer(layer_out, '007', 3, 3, 32)
    layer_out = get_conv_layer(layer_out, '008', 3, 3, 32)
    layer_out = get_pooling_layer(layer_out, '009', 2, 2)
    layer_out = get_dropout_layer(layer_out, '010', 0.2)
    layer_out = get_batchnormalization_layer(layer_out, '011')
    layer_out = get_flatten_layer(layer_out, '012')
    layer_out = get_full_connect_layer(layer_out, '013', 400)
    layer_out = get_dropout_layer(layer_out, '014', 0.5)
    layer_out = get_batchnormalization_layer(layer_out, '015')
    layer_out = get_output_layer(layer_out, '016', 10)
    return layer_in, layer_out
Digit Recognizer
14,318,308
# Imports assumed by this cell but missing from it in the original notebook.
from keras.models import Model
from keras.layers import Input, Embedding, GRU, Flatten, Dense, Dropout, concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras import backend as K

def get_callbacks(filepath, patience=2):
    es = EarlyStopping('val_loss', patience=patience, mode="min")
    msave = ModelCheckpoint(filepath, save_best_only=True)
    return [es, msave]

def rmsle_cust(y_true, y_pred):
    first_log = K.log(K.clip(y_pred, K.epsilon(), None) + 1.)
    second_log = K.log(K.clip(y_true, K.epsilon(), None) + 1.)
    return K.sqrt(K.mean(K.square(first_log - second_log), axis=-1))

def get_model():
    dr_r = 0.1
    name = Input(shape=[X_train["name"].shape[1]], name="name")
    item_desc = Input(shape=[X_train["item_desc"].shape[1]], name="item_desc")
    brand_name = Input(shape=[1], name="brand_name")
    category_name = Input(shape=[1], name="category_name")
    item_condition = Input(shape=[1], name="item_condition")
    num_vars = Input(shape=[X_train["num_vars"].shape[1]], name="num_vars")
    emb_name = Embedding(MAX_TEXT, 50)(name)
    emb_item_desc = Embedding(MAX_TEXT, 50)(item_desc)
    emb_brand_name = Embedding(MAX_BRAND, 10)(brand_name)
    emb_category_name = Embedding(MAX_CATEGORY, 10)(category_name)
    emb_item_condition = Embedding(MAX_CONDITION, 5)(item_condition)
    rnn_layer1 = GRU(16)(emb_item_desc)
    rnn_layer2 = GRU(8)(emb_name)
    main_l = concatenate([
        Flatten()(emb_brand_name),
        Flatten()(emb_category_name),
        Flatten()(emb_item_condition),
        rnn_layer1,
        rnn_layer2,
        num_vars,
    ])
    main_l = Dropout(dr_r)(Dense(128)(main_l))
    main_l = Dropout(dr_r)(Dense(64)(main_l))
    output = Dense(1, activation="linear")(main_l)
    model = Model([name, item_desc, brand_name, category_name, item_condition, num_vars], output)
    model.compile(loss="mse", optimizer="adam", metrics=["mae", rmsle_cust])
    return model

model = get_model()
model.summary()<train_model>
def get_input_layer(name, width, height, channel): return layers.Input( name=name, shape=(width, height, channel) )
Digit Recognizer
14,318,308
BATCH_SIZE = 20000
epochs = 5
model = get_model()
model.fit(X_train, dtrain.target, epochs=epochs, batch_size=BATCH_SIZE,
          validation_data=(X_valid, dvalid.target), verbose=1)<compute_test_metric>
def get_conv_layer(layer, name, width, height, filters):
    stddev = (1 / width / height / layer.shape[-1]) ** 0.5
    initializer = initializers.TruncatedNormal(stddev=stddev)
    return layers.Conv2D(
        name=name,
        filters=filters,
        kernel_size=(width, height),
        kernel_initializer=initializer,
        activation='relu'
    )(layer)
Digit Recognizer
14,318,308
val_preds = model.predict(X_valid)
# The target is in log(price + 1) space, as the submission cell's np.exp(preds) - 1
# implies, so the inverse is exp - 1; the original's np.exp(val_preds) + 1 flipped
# the sign of the offset.
val_preds = np.exp(val_preds) - 1
y_true = np.array(dvalid.price.values)
y_pred = val_preds[:, 0]
v_rmsle = rmsle(y_true, y_pred)
print(" RMSLE error on dev test: " + str(v_rmsle))<predict_on_test>
def get_pooling_layer(layer, name, width, height): return layers.MaxPooling2D( name=name, pool_size=(width, height) )(layer)
Digit Recognizer
14,318,308
preds = model.predict(X_test, batch_size=BATCH_SIZE)
preds = np.exp(preds) - 1
submission = test[["test_id"]]
submission["price"] = preds<save_to_csv>
def get_dropout_layer(layer, name, rate): return layers.Dropout( name=name, rate=rate )(layer)
Digit Recognizer
14,318,308
submission.to_csv("./submission.csv", index=False) submission.price.hist()<load_from_csv>
def get_batchnormalization_layer(layer, name): return layers.BatchNormalization( name=name )(layer)
Digit Recognizer
14,318,308
!apt-get install p7zip
!p7zip -d -f -k /kaggle/input/mercari-price-suggestion-challenge/train.tsv.7z
!p7zip -d -f -k /kaggle/input/mercari-price-suggestion-challenge/test.tsv.7z
!p7zip -d -f -k /kaggle/input/mercari-price-suggestion-challenge/sample_submission.csv.7z
!unzip /kaggle/input/mercari-price-suggestion-challenge/sample_submission_stg2.csv.zip
!unzip /kaggle/input/mercari-price-suggestion-challenge/test_stg2.tsv.zip<load_from_csv>
def get_flatten_layer(layer, name): return layers.Flatten( name=name )(layer)
Digit Recognizer
14,318,308
train_dataset = pd.read_table("train.tsv")
test_dataset = pd.read_table("test_stg2.tsv")<split>
def get_full_connect_layer(layer, name, units):
    stddev = (1 / layer.shape[-1]) ** 0.5
    initializer = initializers.TruncatedNormal(stddev=stddev)
    return layers.Dense(
        name=name,
        units=units,
        kernel_initializer=initializer,
        activation='relu'
    )(layer)
Digit Recognizer
14,318,308
train_dataset = train_dataset[train_dataset['price'] > 0].reset_index(drop=True)
train, validation = train_test_split(train_dataset, test_size=0.1, random_state=30)
test = test_dataset.copy()
print(train.shape)
print(validation.shape)
print(test.shape)<count_missing_values>
def get_output_layer(layer, name, units):
    stddev = (1 / layer.shape[-1]) ** 0.5
    initializer = initializers.TruncatedNormal(stddev=stddev)
    return layers.Dense(
        name=name,
        units=units,
        kernel_initializer=initializer,
        activation='softmax'
    )(layer)
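All of the layer builders above share one initialization rule: a truncated normal with stddev = sqrt(1 / fan_in), LeCun-style, where fan_in is width * height * input channels for conv layers and the input width for dense layers. A quick numeric check of the conv case:

# fan_in for a 3x3 conv over 32 input channels, matching get_conv_layer above.
width, height, channels = 3, 3, 32
stddev = (1 / width / height / channels) ** 0.5
print(round(stddev, 4))  # 0.0589 = sqrt(1 / 288)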
Digit Recognizer
14,318,308
train.isnull().sum()<count_missing_values>
def read_csv_x(file_path):
    return read_csv(file_path, usecols=lambda col: col != 'label', dtype=float)

def read_csv_y(file_path):
    return read_csv(file_path, usecols=['label'], dtype=int)
Digit Recognizer
14,318,308
train.isnull().sum()<count_missing_values>
def transform_x_train(x_read):
    x_base = x_read.values.reshape(-1, 28, 28, 1) / 255.0
    result = list(x_base.copy())
    for t in [10, -10]:
        result.extend([
            preprocessing.image.apply_affine_transform(x, channel_axis=2, theta=t, fill_mode='nearest')
            for x in x_base
        ])
    return asarray(result)

def transform_x_test(x_read):
    return x_read.values.reshape(-1, 28, 28, 1) / 255.0

def transform_y(y_read):
    y_base = y_read.values.reshape(-1, 1)
    result = list(y_base.copy())
    for i in range(2):
        result.extend(list(y_base))
    return utils.to_categorical(asarray(result))
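transform_x_train triples the training set: the originals plus one copy rotated +10 degrees and one rotated -10 degrees, with transform_y replicating the labels to match. A small shape check under that reading (random pixels, one rotation; apply_affine_transform needs SciPy installed, as in the original cell):

import numpy as np
from tensorflow.keras import preprocessing

x = np.random.rand(4, 28, 28, 1)  # 4 fake images
rotated = np.asarray([
    preprocessing.image.apply_affine_transform(img, channel_axis=2, theta=10, fill_mode='nearest')
    for img in x
])
print(np.concatenate([x, rotated]).shape)  # (8, 28, 28, 1): originals + one rotated copy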
Digit Recognizer
14,318,308
validation.isnull().sum()<count_missing_values>
def predict(model, x): return argmax(model.predict(x, batch_size=1000), axis=1)
Digit Recognizer
14,318,308
test.isnull().sum()<count_missing_values>
def write_csv(result):
    df = DataFrame()
    df['ImageId'] = arange(len(result)) + 1
    df['Label'] = result
    df.to_csv(FILE_NAME_SUBMISSION, index=False)
Digit Recognizer
14,318,308
<categorify><EOS>
logger = get_logger()
logger.debug('Start')
initialize()
model = get_model()
x_read_train = read_csv_x(FOLDER_PATH + FILE_NAME_TRAIN)
y_read_train = read_csv_y(FOLDER_PATH + FILE_NAME_TRAIN)
x_read_test = read_csv_x(FOLDER_PATH + FILE_NAME_TEST)
x_train = transform_x_train(x_read_train)
y_train = transform_y(y_read_train)
x_test = transform_x_test(x_read_test)
train(model, x_train, y_train)
result = predict(model, x_test)
write_csv(result)
terminate()
logger.debug('End')
kill_logger(logger)
Digit Recognizer
14,403,737
<SOS> metric: categorization accuracy Kaggle data source: digit-recognizer<categorify>
train = pd.read_csv(".. /input/digit-recognizer/train.csv") test = pd.read_csv(".. /input/digit-recognizer/test.csv" )
Digit Recognizer
14,403,737
fill(train)
fill(validation)
fill(test)<feature_engineering>
x_train = train.drop('label', axis=1)
y_train = train['label']
x_test = test
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, random_state=42)
x_train = x_train.values.reshape(-1, 28, 28, 1)
x_val = x_val.values.reshape(-1, 28, 28, 1)
x_test = x_test.values.reshape(-1, 28, 28, 1)
Digit Recognizer
14,403,737
def split_category(cate):
    try:
        sub_cate1, sub_cate2, sub_cate3 = cate.split("/")
        return sub_cate1, sub_cate2, sub_cate3
    except:
        return "Missing", "Missing", "Missing"

def creat_category(data):
    data['sub_category1'], data['sub_category2'], data['sub_category3'] = \
        zip(*data['category_name'].apply(lambda x: split_category(x)))<import_modules>
aug = ImageDataGenerator(rotation_range=5, width_shift_range=0.05, height_shift_range=0.05, zoom_range=0.05)
aug.fit(x_train)
Digit Recognizer
14,403,737
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.preprocessing import LabelBinarizer
from scipy.sparse import csr_matrix, hstack<concatenate>
model = Sequential()
model.add(Conv2D(6, 5, padding="same", activation="relu", input_shape=[28, 28, 1]))
model.add(MaxPool2D())
model.add(BatchNormalization())
model.add(Conv2D(16, 5, padding="same", activation="relu"))
model.add(MaxPool2D())
model.add(BatchNormalization())
model.add(Conv2D(120, 5, padding="same", activation="relu"))
model.add(Flatten())
model.add(BatchNormalization())
model.add(Dense(84, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation="softmax"))
model.compile(loss='sparse_categorical_crossentropy', optimizer="sgd", metrics=["accuracy"])
model.summary()
Digit Recognizer
14,403,737
merge: pd.DataFrame = pd.concat([train, validation, test])
submission: pd.DataFrame = test[['test_id']]
n_train = train.shape[0]
n_validation = validation.shape[0]
n_test = test.shape[0]<feature_engineering>
# Note: with metrics=["accuracy"] in newer TF/Keras the monitored key is 'val_accuracy';
# 'val_acc' is the older Keras spelling kept from the original.
early_stop = EarlyStopping(patience=6, restore_best_weights=True, monitor='val_acc')
model.fit(aug.flow(x_train, y_train, batch_size=64),
          steps_per_epoch=len(x_train) / 64,
          epochs=100,
          validation_data=(x_val, y_val),
          callbacks=[early_stop],
          use_multiprocessing=True,
          verbose=0)
Digit Recognizer
14,403,737
NAME_MIN_DF = 10
MAX_FEATURES_ITEM_DESCRIPTION = 30000

def condition2vec(data):
    vectorizer = LabelBinarizer(sparse_output=True)
    return vectorizer.fit_transform(data)

def shipping2vec(data):
    vectorizer = LabelBinarizer(sparse_output=True)
    return vectorizer.fit_transform(data)

def brandname2vec(data):
    vectorizer = LabelBinarizer(sparse_output=True)
    return vectorizer.fit_transform(data)

def name2vec(data):
    vectorizer = CountVectorizer(min_df=NAME_MIN_DF, ngram_range=(1, 2), stop_words='english')
    return vectorizer.fit_transform(data)

def description2vec(data):
    vectorizer = TfidfVectorizer(max_features=MAX_FEATURES_ITEM_DESCRIPTION, ngram_range=(1, 2), stop_words='english')
    return vectorizer.fit_transform(data)

def category2vec(data):
    vectorizer = CountVectorizer()
    data_vec = []
    for x in data:
        category_vec = vectorizer.fit_transform(x)
        data_vec.append(category_vec)
    print('Convert Category ')
    return hstack(data_vec)

def put_all_together(merge):
    res = hstack((
        name2vec(merge['name']),
        condition2vec(merge['item_condition_id']),
        brandname2vec(merge['brand_name']),
        shipping2vec(merge['shipping']),
        description2vec(merge['item_description']),
        category2vec((merge['sub_category1'], merge['sub_category2'], merge['sub_category3'])),
    ))
    return res<compute_test_metric>
y_pred = np.argmax(model.predict(x_test), axis=1)
# "ImageId" matches the spelling used by the other submission cells; the original
# wrote "imageId", which Kaggle's digit-recognizer scorer would reject.
submission = pd.DataFrame({"ImageId": np.arange(1, len(y_pred) + 1), "Label": y_pred})
Digit Recognizer
14,403,737
all_data = all_data.tocsr()
train_X = all_data[:n_train]
validation_X = all_data[n_train:n_train + n_validation]
test_X = all_data[n_train + n_validation:]<prepare_x_and_y>
submission.to_csv("lenet_sub.csv",index=False )
Digit Recognizer
14,542,096
train_Y = np.log1p(train["price"])
validation_Y = np.log1p(validation["price"])<import_modules>
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import itertools
from keras.models import Sequential, load_model
from keras.layers import Flatten, Dense, MaxPool2D, Conv2D, Dropout, BatchNormalization
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint
from keras.utils.np_utils import to_categorical
import tensorflow as tf
Digit Recognizer
14,542,096
from sklearn.linear_model import Ridge<train_model>
train = pd.read_csv('../input/digit-recognizer/train.csv')
test = pd.read_csv('../input/digit-recognizer/test.csv')
Digit Recognizer
14,542,096
model = Ridge(alpha=.5, copy_X=True, fit_intercept=True, max_iter=100,
              normalize=False, random_state=101, solver='auto', tol=0.01)
model.fit(train_X, train_Y)<compute_test_metric>
y_train = train['label']
X_train = train.drop('label', axis=1)
Digit Recognizer
14,542,096
def rmsle(y, y0):
    assert len(y) == len(y0)
    return np.sqrt(np.mean(np.power(np.log1p(y) - np.log1p(y0), 2)))<predict_on_test>
train.isnull().sum()
Digit Recognizer
14,542,096
val_pred = model.predict(validation_X)
val_pred = np.expm1(val_pred)
print(rmsle(val_pred, validation["price"]))<predict_on_test>
train.isnull().sum()
Digit Recognizer
14,542,096
test_pred = model.predict(test_X)
submission['price'] = np.expm1(test_pred)<save_to_csv>
X_train = X_train / 255.0
test = test / 255.0
X_train = X_train.values.reshape(-1, 28, 28, 1)
test = test.values.reshape(-1, 28, 28, 1)
y_train = to_categorical(y_train, num_classes=10)
Digit Recognizer
14,542,096
submission.to_csv("submission.csv", index=False )<import_modules>
X_train, X_test, y_train, y_test = train_test_split(X_train, y_train)
print("X_train shape: ", X_train.shape, " X_test shape: ", X_test.shape,
      " y_train shape: ", y_train.shape, " y_test shape: ", y_test.shape)
Digit Recognizer
14,542,096
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split<load_pretrained>
IMG_SIZE = 28
batch_size = 86
AUTOTUNE = tf.data.experimental.AUTOTUNE
num_classes = 10
Digit Recognizer
14,542,096
!apt-get install p7zip
!p7zip -d -f -k /kaggle/input/mercari-price-suggestion-challenge/train.tsv.7z
!p7zip -d -f -k /kaggle/input/mercari-price-suggestion-challenge/test.tsv.7z
!p7zip -d -f -k /kaggle/input/mercari-price-suggestion-challenge/sample_submission.csv.7z<load_from_csv>
from keras.models import Sequential, load_model
from keras.layers import Flatten, Dense, MaxPool2D, Conv2D, Dropout, BatchNormalization
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers
Digit Recognizer
14,542,096
!unzip /kaggle/input/mercari-price-suggestion-challenge/sample_submission_stg2.csv.zip
!unzip /kaggle/input/mercari-price-suggestion-challenge/test_stg2.tsv.zip<load_from_csv>
def create_model(train_ds, val_ds, epochs):
    model = tf.keras.Sequential([
        layers.Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=(28, 28, 1)),
        layers.Conv2D(32, (3, 3), activation='relu', padding='same'),
        layers.BatchNormalization(),
        layers.MaxPooling2D(2, 2),
        layers.Dropout(0.2),
        layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
        layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
        layers.BatchNormalization(),
        layers.MaxPooling2D(2, 2),
        layers.Dropout(0.2),
        layers.Conv2D(128, (3, 3), activation='relu', padding='same'),
        layers.Conv2D(128, (3, 3), activation='relu', padding='same'),
        layers.BatchNormalization(),
        layers.MaxPooling2D(2, 2),
        layers.Dropout(0.2),
        layers.Flatten(),
        layers.Dense(512, activation='relu'),
        layers.BatchNormalization(),
        layers.Dropout(0.2),
        layers.Dense(256, activation='relu'),
        layers.BatchNormalization(),
        layers.Dropout(0.35),
        layers.Dense(10, activation='softmax'),
    ])
    model.compile(optimizer='adam', loss=tf.keras.losses.CategoricalCrossentropy(), metrics=['accuracy'])
    print(model.summary())
    return model
Digit Recognizer
14,542,096
train_data = pd.read_csv('train.tsv', sep='\t')
train_data.head(5)
cnn_model = create_model(X_train, y_train, 100 )
Digit Recognizer
14,542,096
train_data.isnull().sum()<split>
optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
cnn_model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
Digit Recognizer
14,542,096
train_data = train_data[train_data['price'] > 0].reset_index(drop=True)
train_data, validation_data = train_test_split(train_data, test_size=0.1, random_state=42)
print(train_data.shape)
print(validation_data.shape)<count_missing_values>
learning_rate_reduction = ReduceLROnPlateau(monitor='val_loss', patience=3, verbose=1, factor=0.5, min_lr=0.00001 )
Digit Recognizer
14,542,096
train_data.isnull().sum()<count_missing_values>
datagen = ImageDataGenerator(
    featurewise_center=False,
    samplewise_center=False,
    featurewise_std_normalization=False,
    samplewise_std_normalization=False,
    zca_whitening=False,
    zca_epsilon=1e-06,
    rotation_range=0,
    width_shift_range=0.0,
    height_shift_range=0.0,
    brightness_range=None,
    shear_range=0.0,
    zoom_range=0.0,
    channel_shift_range=0.0,
    fill_mode='nearest',
    cval=0.0,
    horizontal_flip=False,
    vertical_flip=False,
    rescale=None,
    preprocessing_function=None,
    data_format=None,
    validation_split=0.0,
    dtype=None,
)
datagen.fit(X_train)
Digit Recognizer
14,542,096
validation_data.isnull().sum()<create_dataframe>
history = cnn_model.fit(datagen.flow(X_train, y_train, batch_size=batch_size),
                        epochs=28,
                        validation_data=(X_test, y_test),
                        verbose=2,
                        steps_per_epoch=X_train.shape[0] // batch_size,
                        callbacks=[learning_rate_reduction])
Digit Recognizer
14,542,096
train = train_data.copy()
valid = validation_data.copy()<feature_engineering>
prediction = cnn_model.predict(X_test ).argmax(axis=1)
Digit Recognizer
14,542,096
def split_categories(category):
    try:
        sub_category1, sub_category2, sub_category3 = category.split("/")
        return sub_category1, sub_category2, sub_category3
    except:
        return "No label", "No label", "No label"

def create_split_categories(data):
    data['sub_category1'], data['sub_category2'], data['sub_category3'] = \
        zip(*data['category_name'].apply(lambda x: split_categories(x)))<string_transform>
score = cnn_model.evaluate(X_test, y_test, verbose=0)
print("Test Accuracy: ", score[1])
Digit Recognizer
14,542,096
<categorify><EOS>
result = cnn_model.predict(test)
result = np.argmax(result, axis=1)
result = pd.Series(result, name='Label')
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), result], axis=1)
submission.to_csv("submission.csv", index=False)
Digit Recognizer