kernel_id (int64, 24.2k to 23.3M) | prompt (string, lengths 8 to 1.85M) | completion (string, lengths 1 to 182k) | comp_name (string, lengths 5 to 57) |
---|---|---|---|
11,775,677 | def transform_sex(dataframe):
column_name = 'Sex'
genders = {"female": 0, "male": 1}
return dataframe[column_name].map(genders)
<categorify> | auto = tf.data.experimental.AUTOTUNE
def make_dataset(train_input, train_label):
dataset = tf.data.Dataset.from_tensor_slices(
(
train_input,
train_label
)
).repeat().shuffle(batch_size ).batch(batch_size ).prefetch(auto)
return dataset
def xlm_roberta_encode(hypotheses, premises, src_langs, augmentation=False):
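# Encode each premise and hypothesis separately, concatenate the token-id sequences per pair, then pad the ids and attention masks to a rectangular batch.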
num_examples = len(hypotheses)
sentence_1 = [tokenizer.encode(s)for s in premises]
sentence_2 = [tokenizer.encode(s)for s in hypotheses]
input_word_ids = list(map(lambda x: x[0]+x[1], list(zip(sentence_1,sentence_2))))
input_mask = [np.ones_like(x)for x in input_word_ids]
inputs = {
'input_word_ids': tf.keras.preprocessing.sequence.pad_sequences(input_word_ids, padding='post'),
'input_mask': tf.keras.preprocessing.sequence.pad_sequences(input_mask, padding='post')
}
return inputs
train_df, validation_df = train_test_split(df_train, test_size=0.1)
df_train['prediction'] = 0
num_augmentation = 1
train_input = xlm_roberta_encode(train_df.hypothesis.values,train_df.premise.values, train_df.lang_abv.values, augmentation=False)
train_label = train_df.label.values
train_sequence = make_dataset(train_input, train_label)
validation_input = xlm_roberta_encode(validation_df.hypothesis.values, validation_df.premise.values,validation_df.lang_abv.values, augmentation=False)
validation_label = validation_df.label.values
tf.keras.backend.clear_session() | Contradictory, My Dear Watson |
11,775,677 | column_name = 'Sex'
dataframe[column_name] = transform_sex(dataframe)
dataframe.head(10)
<create_dataframe> | train_df, validation_df = train_test_split(df_train, test_size=0.1)
df_train['prediction'] = 0
num_augmentation = 1
train_input = xlm_roberta_encode(train_df.hypothesis.values,train_df.premise.values, train_df.lang_abv.values, augmentation=False)
train_label = train_df.label.values
train_sequence = make_dataset(train_input, train_label)
validation_input = xlm_roberta_encode(validation_df.hypothesis.values, validation_df.premise.values,validation_df.lang_abv.values, augmentation=False)
validation_label = validation_df.label.values
tf.keras.backend.clear_session() | Contradictory, My Dear Watson |
11,775,677 | def transform_embarked(dataframe):
column_name = 'Embarked'
dataframe[column_name] = dataframe[column_name].fillna('S')
dataframe = pd.concat(
[
dataframe,
pd.get_dummies(dataframe[column_name], prefix=column_name)
],
axis=1)
dataframe = dataframe.drop(columns=[column_name, 'Embarked_Q'])
return dataframe
<create_dataframe> | n_steps =(len(train_label)) // batch_size
with strategy.scope() :
history = model.fit(
train_sequence, shuffle=True, steps_per_epoch=n_steps,
validation_data =(validation_input, validation_label), epochs=50, verbose=1,
callbacks=[
tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=10),
tf.keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy', factor=0.1, patience=5),
tf.keras.callbacks.ModelCheckpoint(
'model.h5', monitor='val_accuracy', save_best_only=True,save_weights_only=True)
]
)
model.load_weights('model.h5')
validation_predictions = model.predict(validation_input)
validation_predictions = np.argmax(validation_predictions, axis=-1)
validation_df['predictions'] = validation_predictions
acc = accuracy_score(validation_label, validation_predictions)
print('Accuracy: {}'.format(acc)) | Contradictory, My Dear Watson |
11,775,677 | <categorify><EOS> | if test_input is None:
test_input = xlm_roberta_encode(df_test.hypothesis.values, df_test.premise.values, df_test.lang_abv.values,augmentation=False)
test_split_predictions = model.predict(test_input)
predictions = np.argmax(test_split_predictions, axis=-1)
submission = df_test.id.copy().to_frame()
submission['prediction'] = predictions
submission.head()
submission.to_csv("submission.csv", index = False ) | Contradictory, My Dear Watson |
11,470,784 | <SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<create_dataframe> | import os
import numpy as np
import pandas as pd
from kaggle_datasets import KaggleDatasets
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint
import transformers
from transformers import TFAutoModel, AutoTokenizer
from tqdm.notebook import tqdm
import plotly.express as px | Contradictory, My Dear Watson |
11,470,784 | column_name = 'Age'
dataframe = transform_age(dataframe)
dataframe.head(10)
<create_dataframe> | try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
print('Running on TPU ', tpu.master())
except ValueError:
strategy = tf.distribute.get_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync ) | Contradictory, My Dear Watson |
11,470,784 | def transform_pclass(dataframe):
column_name = 'Pclass'
dataframe = pd.concat(
[
dataframe,
pd.get_dummies(dataframe[column_name], prefix=column_name)
],
axis=1)
dataframe = dataframe.drop(columns=[column_name, 'Pclass_2'])
return dataframe
<prepare_output> | model_name = 'jplu/tf-xlm-roberta-large'
n_epochs = 10
max_len = 80
batch_size = 16 * strategy.num_replicas_in_sync | Contradictory, My Dear Watson |
11,470,784 | column_name = 'Pclass'
dataframe = transform_pclass(dataframe)
dataframe.head(10)
<create_dataframe> | train = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/train.csv')
test = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/test.csv')
submission = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/sample_submission.csv' ) | Contradictory, My Dear Watson |
11,470,784 | def transform_relatives(dataframe):
column_name = 'Relatives'
dataframe[column_name] = dataframe['SibSp'] + dataframe['Parch']
dataframe = dataframe.drop(columns=['SibSp', 'Parch'])
dataframe[column_name] = dataframe[column_name].astype(int)
dataframe.loc[dataframe[column_name] >= 6, column_name] = 6
return dataframe
<create_dataframe> | twitter = pd.read_csv('/kaggle/input/sentiment140/training.1600000.processed.noemoticon.csv',
encoding = 'latin1',
names=['polarity','id','date','flag','user','text'] ) | Contradictory, My Dear Watson |
11,470,784 | column_name = 'Relatives'
dataframe = transform_relatives(dataframe)
dataframe.head(10)
<set_options> | twitter_premise, twitter_hypothesis = train_test_split(twitter['text'], test_size=0.5, random_state=2020 ) | Contradictory, My Dear Watson |
11,470,784 | corr.style.background_gradient()
<prepare_x_and_y> | twitter_df = pd.DataFrame()
twitter_df['premise'] = twitter_premise.values
twitter_df['hypothesis'] = twitter_hypothesis.values
twitter_df.head() | Contradictory, My Dear Watson |
11,470,784 | minimal_features = [
"Sex", "Pclass_3"
]
X = dataframe[minimal_features]
y = dataframe['Survived']
<categorify> | tokenizer = AutoTokenizer.from_pretrained(model_name ) | Contradictory, My Dear Watson |
11,470,784 | def preprocessing_data(source):
columns = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'Fare']
target = source.drop(columns=columns)
column_name = 'Sex'
target[column_name] = transform_sex(target)
column_name = 'Embarked'
target = transform_embarked(target)
column_name = 'Age'
target = transform_age(target)
column_name = 'Pclass'
target = transform_pclass(target)
column_name = 'Relatives'
target = transform_relatives(target)
return target
<train_model> | %%time
train_text = train[['premise', 'hypothesis']].values.tolist()
test_text = test[['premise', 'hypothesis']].values.tolist()
twitter_text = twitter_df[['premise', 'hypothesis']].values.tolist()
train_encoded = tokenizer.batch_encode_plus(
train_text,
pad_to_max_length=True,
max_length=max_len
)
test_encoded = tokenizer.batch_encode_plus(
test_text,
pad_to_max_length=True,
max_length=max_len
)
twitter_encoded = tokenizer.batch_encode_plus(
twitter_text,
pad_to_max_length=True,
max_length=max_len
) | Contradictory, My Dear Watson |
11,470,784 | print('LinearRegression: ')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
linreg = LinearRegression()
linreg.fit(X_train, y_train)
<predict_on_test> | x_train, x_valid, y_train, y_valid = train_test_split(
train_encoded['input_ids'], train.label.values,
test_size=0.2, random_state=2020
)
x_test = test_encoded['input_ids']
x_twitter_train1, x_twitter_train2 = train_test_split(
twitter_encoded['input_ids'],
test_size=0.5, random_state=2020
) | Contradictory, My Dear Watson |
11,470,784 | y_pred = linreg.predict(X_test)
y_pred<categorify> | %%time
auto = tf.data.experimental.AUTOTUNE
train_dataset =(
tf.data.Dataset
.from_tensor_slices(( x_train, y_train))
.repeat()
.shuffle(2048)
.batch(batch_size)
.prefetch(auto)
)
valid_dataset =(
tf.data.Dataset
.from_tensor_slices(( x_valid, y_valid))
.batch(batch_size)
.cache()
.prefetch(auto)
)
test_dataset =(
tf.data.Dataset
.from_tensor_slices(x_test)
.batch(batch_size)
)
train_twitter_dataset1 =(
tf.data.Dataset
.from_tensor_slices(x_twitter_train1)
.batch(batch_size)
)
train_twitter_dataset2 =(
tf.data.Dataset
.from_tensor_slices(x_twitter_train2)
.batch(batch_size)
) | Contradictory, My Dear Watson |
11,470,784 | def transform_predicted(dataframe, threshold = 0.5):
column_name = 'Predicted'
dataframe.loc[dataframe[column_name] >= threshold, column_name] = 1
dataframe.loc[dataframe[column_name] < threshold, column_name] = 0
dataframe[column_name] = dataframe[column_name].astype(int)
return dataframe<predict_on_test> | with strategy.scope() :
transformer_encoder = TFAutoModel.from_pretrained(model_name)
input_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_ids")
last_hidden_states = transformer_encoder(input_ids)[0]
cls_token = last_hidden_states[:, 0, :]
out = Dense(3, activation='softmax' )(cls_token)
model = Model(inputs=input_ids, outputs=out)
model.compile(
Adam(lr=1e-5),
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
model.summary() | Contradictory, My Dear Watson |
11,470,784 | threshold = 0.5
y_pred2 = pd.DataFrame({'Predicted': y_pred})
y_pred2 = transform_predicted(y_pred2, threshold)
y_pred2.head(10)
<prepare_output> | n_steps = len(x_train)// batch_size
train_history1 = model.fit(
train_dataset,
steps_per_epoch=n_steps,
validation_data=valid_dataset,
epochs=n_epochs
) | Contradictory, My Dear Watson |
11,470,784 | y_test2 = pd.DataFrame({'Survived': y_test})
y_test2.head(10)
<compute_test_metric> | test_preds1 = model.predict(test_dataset, verbose=1 ) | Contradictory, My Dear Watson |
11,470,784 | cm = confusion_matrix(y_test2['Survived'], y_pred2['Predicted'])
print(cm)
<train_model> | twitter_preds = model.predict(train_twitter_dataset1, verbose=1)
twitter_pseudo_labels_train1 = twitter_preds.argmax(axis=1)
twitter_preds = model.predict(train_twitter_dataset2, verbose=1)
twitter_pseudo_labels_train2 = twitter_preds.argmax(axis=1 ) | Contradictory, My Dear Watson |
11,470,784 | def display_cm_report(cm):
TN = cm[0][0]
FP = cm[0][1]
FN = cm[1][0]
TP = cm[1][1]
print()
print("TP: {}".format(TP))
print("FP: {}".format(FP))
print("FN: {}".format(FN))
print("TN: {}
".format(TN))
accuracy =(TP + TN)/(TP + TN + FP + FN)
precision = TP /(TP + FP)
recall = TP /(TP + FN)
f1_score = 2 *(precision * recall)/(precision + recall)
print("accuracy: {}".format(accuracy))
print("precision: {}".format(precision))
print("recall: {}".format(recall))
print("f1_score: {}
".format(f1_score))
plt.clf()
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Wistia)
plt.ylabel('True label')
plt.xlabel('Predicted label')
classNames = ['Negative', 'Positive']
tick_marks = np.arange(len(classNames))
plt.xticks(tick_marks, classNames)
plt.yticks(tick_marks, classNames)
s = [['TN', 'FP'], ['FN', 'TP']]
for i in range(2):
for j in range(2):
plt.text(j, i, str(s[i][j])+ " = " + str(cm[i][j]))
plt.show()
<load_from_csv> | twitter_dataset_ssl1 =(
tf.data.Dataset
.from_tensor_slices(( x_twitter_train1, twitter_pseudo_labels_train1))
.repeat()
.shuffle(2048)
.batch(batch_size)
.prefetch(auto)
)
twitter_dataset_ssl2 =(
tf.data.Dataset
.from_tensor_slices(( x_twitter_train2, twitter_pseudo_labels_train2))
.repeat()
.shuffle(2048)
.batch(batch_size)
.prefetch(auto)
) | Contradictory, My Dear Watson |
11,470,784 | test_dataframe = pd.read_csv("../input/test.csv", sep=",")
<create_dataframe> | n_steps = len(x_twitter_train1)// batch_size
n_steps_val = len(x_valid)// batch_size
model.fit(
twitter_dataset_ssl1,
steps_per_epoch=n_steps,
validation_data=valid_dataset,
validation_steps=n_steps_val,
epochs=2
)
test_preds2 = model.predict(test_dataset, verbose=1 ) | Contradictory, My Dear Watson |
11,470,784 | test_csv = pd.DataFrame()
test_csv['PassengerId'] = test_dataframe['PassengerId']
<prepare_output> | n_steps = len(x_twitter_train2)// batch_size
n_steps_val = len(x_valid)// batch_size
model.fit(
twitter_dataset_ssl2,
steps_per_epoch=n_steps,
validation_data=valid_dataset,
validation_steps=n_steps_val,
epochs=2
)
test_preds3 = model.predict(test_dataset, verbose=1 ) | Contradictory, My Dear Watson |
11,470,784 | test_dataframe = preprocessing_data(test_dataframe)
test_dataframe = test_dataframe[minimal_features]
test_dataframe.head(10)
<predict_on_test> | test_preds =(0.92)*test_preds1 +(0.05)*test_preds2 +(0.03)*test_preds3
test_preds = test_preds.argmax(axis=1)
submission['prediction'] = test_preds | Contradictory, My Dear Watson |
11,470,784 | <load_from_csv><EOS> | submission.to_csv('submission.csv', index=False)
submission.head() | Contradictory, My Dear Watson |
11,685,223 | <SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<save_to_csv> | os.environ["WANDB_API_KEY"] = "0"
%matplotlib inline
print(f"Using Tensorflow version: {tf.__version__}")
tfa.register_all(custom_kernels=False)
!pip install -q transformers==3.1.0
print(f"Using Transformers version: {transformers.__version__}")
| Contradictory, My Dear Watson |
11,685,223 | test_csv['Survived'] = y_pred2['Predicted']
test_csv.to_csv('submission_all_lin_0.5.csv', index=False)
<train_model> | DEVICE = 'TPU'
if DEVICE == "TPU":
print("connecting to TPU...")
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print('Running on TPU ', tpu.master())
except ValueError:
print("Could not connect to TPU")
tpu = None
if tpu:
try:
print("initializing TPU...")
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
print("TPU initialized")
except Exception:
print("failed to initialize TPU")
else: DEVICE = "GPU"
if DEVICE != "TPU":
print("Using default strategy for CPU and single GPU")
strategy = tf.distribute.get_strategy()
if DEVICE == "GPU":
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
AUTO = tf.data.experimental.AUTOTUNE
REPLICAS = strategy.num_replicas_in_sync
print('REPLICAS: {}'.format(REPLICAS))
| Contradictory, My Dear Watson |
11,685,223 | print('RandomForestClassifier: ')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
forest_model = RandomForestClassifier(n_estimators=100, criterion='entropy', random_state=1)
forest_model.fit(X_train, y_train)
<predict_on_test> | SEED = 777
DATA_PATH = ".. /input/contradictory-my-dear-watson/"
MODEL = 'jplu/tf-xlm-roberta-large'
TOKENIZER = transformers.AutoTokenizer.from_pretrained(MODEL)
MAX_LEN = 96
BATCH_SIZE = 16 * REPLICAS
EPOCHS= 10
FOLDS = 5
NUM_CLASSES = 3
OPT_TYPE = "Adam"
LR = 1e-5
| Contradictory, My Dear Watson |
11,685,223 | y_pred = forest_model.predict(X_test)
<prepare_output> | train = pd.read_csv(DATA_PATH+"train.csv")
test = pd.read_csv(DATA_PATH+"test.csv" ) | Contradictory, My Dear Watson |
11,685,223 | threshold = 0.5
y_pred2 = pd.DataFrame({'Predicted': y_pred})
y_pred2 = transform_predicted(y_pred2)
y_pred2.head(10)
<prepare_output> | train.loc[:, 'lang_label'] = train.loc[:, ['lang_abv', 'label']].apply(lambda row: row['lang_abv']+'-'+str(row['label']), axis=1 ) | Contradictory, My Dear Watson |
11,685,223 | y_test2 = pd.DataFrame({'Survived': y_test})
y_test2.head(10)
<compute_test_metric> | out = TOKENIZER(train.loc[2,'premise'],train.loc[2,'hypothesis'],add_special_tokens=True, max_length=40, padding='max_length',truncation = True,return_token_type_ids=True)
out | Contradictory, My Dear Watson |
11,685,223 | cm = confusion_matrix(y_test2['Survived'], y_pred2['Predicted'])
print(cm)
<predict_on_test> | def preprocess_input_data(idx, tokenizer, label = False, dataset = 'train',max_len = MAX_LEN):
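# Tokenize the selected train/test rows into input_ids, attention_mask and token_type_ids; optionally attach one-hot labels.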
if dataset == 'train':
enc = TOKENIZER(train.loc[idx,['premise', 'hypothesis']].values.tolist() , max_length=max_len, padding= 'max_length', add_special_tokens=True,truncation = True,return_token_type_ids=True)
elif dataset == 'test':
enc = TOKENIZER(test.loc[idx,['premise', 'hypothesis']].values.tolist() , max_length=max_len, padding= 'max_length', add_special_tokens=True,truncation = True,return_token_type_ids=True)
attention_mask = enc.attention_mask
input_ids = enc.input_ids
token_type_ids = enc.token_type_ids
input_tokens = [tokenizer.convert_ids_to_tokens(enc.input_ids[i])for i in range(len(enc.input_ids)) ]
output_dict = {'token_type_ids': np.array(token_type_ids ).astype('int32'),
'input_ids': np.array(input_ids ).astype('int32'),
'input_tokens': input_tokens,
'attention_mask': np.array(attention_mask ).astype('int32'),
}
if label:
output_dict['labels'] = to_categorical(train.loc[idx, 'label'], num_classes = NUM_CLASSES ).astype('int32')
return output_dict
processed_dict = preprocess_input_data(np.arange(train.shape[0]), tokenizer=TOKENIZER, label=True, dataset = 'train',max_len = MAX_LEN)
input_ids, attention_mask, token_type_ids,labels = processed_dict['input_ids'],processed_dict['attention_mask'], processed_dict['token_type_ids'], processed_dict['labels']
processed_dict = preprocess_input_data(np.arange(test.shape[0]), tokenizer=TOKENIZER, label=False, dataset = 'test',max_len = MAX_LEN)
input_ids_test, attention_mask_test, token_type_ids_test = processed_dict['input_ids'],processed_dict['attention_mask'], processed_dict['token_type_ids'] | Contradictory, My Dear Watson |
11,685,223 | y_pred = forest_model.predict(test_dataframe)
threshold = 0.5
y_pred2 = pd.DataFrame({'Predicted': y_pred})
y_pred2 = transform_predicted(y_pred2)
y_pred2.head(10)
<load_from_csv> | def data_loader(idx,data= 'train', labelled=True, return_ids=False, repeat=True, shuffle=True,cache=True, batch_size = BATCH_SIZE):
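# Build a batched tf.data pipeline over the pre-tokenized arrays; training slices can be labelled, unlabelled, or carry example ids.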
if data == 'train':
if labelled:
dataset = tf.data.Dataset.from_tensor_slices(((input_ids[idx,],attention_mask[idx,],token_type_ids[idx,]),
labels[idx,]))
else:
dataset = tf.data.Dataset.from_tensor_slices(((input_ids[idx,],attention_mask[idx,],token_type_ids[idx,]),))
if return_ids:
dataset = tf.data.Dataset.from_tensor_slices(((input_ids[idx,],attention_mask[idx,],token_type_ids[idx,], train.loc[idx,'id'].values),))
elif data == 'test':
dataset = tf.data.Dataset.from_tensor_slices(((input_ids_test,attention_mask_test,token_type_ids_test),))
if cache:
dataset = dataset.cache()
if shuffle:
dataset = dataset.shuffle(2048)
if repeat:
dataset = dataset.repeat()
dataset = dataset.batch(batch_size ).prefetch(AUTO)
return dataset
for out in data_loader(np.arange(10),batch_size=5 ).unbatch().take(1):
print(out[0], out[1] ) | Contradictory, My Dear Watson |
11,685,223 | y_test = pd.read_csv("../input/gender_submission.csv", sep=",")
cm = confusion_matrix(y_test['Survived'], y_pred2['Predicted'])
print(cm)
<save_to_csv> | def get_lr_callback(PLOT_LR = False):
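# Learning-rate schedule: linear ramp-up for lr_ramp_ep epochs, optional hold at lr_max, then exponential decay towards lr_min.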
lr_start = 1e-5
lr_max = 1.5e-5
lr_min = 1e-5
lr_ramp_ep = 3
lr_sus_ep = 0
lr_decay = 0.8
def lrfn(epoch):
if epoch < lr_ramp_ep:
lr =(lr_max - lr_start)/ lr_ramp_ep * epoch + lr_start
elif epoch < lr_ramp_ep + lr_sus_ep:
lr = lr_max
else:
lr =(lr_max - lr_min)* lr_decay**(epoch - lr_ramp_ep - lr_sus_ep)+ lr_min
return lr
if PLOT_LR:
plt.figure(figsize=(15, 5))
plt.subplot(1, 2, 1)
plt.plot([lrfn(e)for e in range(EPOCHS)]);
plt.xlabel('Epoch'); plt.ylabel('LR');
plt.subplot(1, 2, 2);
plt.plot([lrfn(e)for e in range(EPOCHS)]);
plt.xlabel('Epoch'); plt.ylabel('Log LR');
plt.yscale('log');
lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=False)
return lr_callback
get_lr_callback(PLOT_LR = True)
def display_training_curves(training, validation, title, subplot):
if subplot%10==1:
plt.subplots(figsize=(10,10), facecolor='#F0F0F0')
plt.tight_layout()
ax = plt.subplot(subplot)
ax.set_facecolor('#F8F8F8')
ax.plot(training)
ax.plot(validation)
ax.set_title('model '+ title)
ax.set_ylabel(title)
ax.set_xlabel('epoch')
ax.legend(['train', 'valid.'])
def build_model(opt = OPT_TYPE):
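# XLM-R encoder; the first-token embedding goes through dropout, a 128-unit ReLU layer and a 3-way softmax head.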
ids = tf.keras.layers.Input(( MAX_LEN,), dtype=tf.int32)
att = tf.keras.layers.Input(( MAX_LEN,), dtype=tf.int32)
tok = tf.keras.layers.Input(( MAX_LEN,), dtype=tf.int32)
transformer_embedding = transformers.TFAutoModel.from_pretrained(MODEL)
x = transformer_embedding({'input_ids':ids,'attention_mask': att, 'token_type_ids':tok})[0][:,0,:]
x = tf.keras.layers.Dropout(0.3 )(x)
x = tf.keras.layers.Dense(128, activation="relu" )(x)
x = tf.keras.layers.Dense(3 )(x)
x = tf.keras.layers.Activation('softmax' )(x)
model = tf.keras.Model(inputs = [ids,att,tok], outputs = x)
if opt == 'Adam':
optimizer = tf.keras.optimizers.Adam(learning_rate=LR)
elif OPT_TYPE == "RAdam":
optimizer = tfa.optimizers.RectifiedAdam(lr=LR)
loss_fn = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
model.compile(optimizer = optimizer, loss=loss_fn, metrics = ['accuracy'])
return model | Contradictory, My Dear Watson |
11,685,223 | test_csv['Survived'] = y_pred2['Predicted']
test_csv.to_csv('submission_all_tree_0.5.csv', index=False)
<train_model> | def memory() :
pid = os.getpid()
py = psutil.Process(pid)
memoryUse = py.memory_info() [0]/2.**30
print('memory use:', memoryUse ) | Contradictory, My Dear Watson |
11,685,223 | print('MLPRegressor: ')
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
mlpr_model = MLPRegressor()
mlpr_model.fit(X_train, y_train)
<predict_on_test> | display_training_curve=True
oof_preds = np.zeros(shape =(train.shape[0],3))
ypreds_test = np.zeros(shape =(test.shape[0],3))
val_ids = []
print(f'DEVICE: {DEVICE}')
skf = StratifiedKFold(n_splits=FOLDS,shuffle=True,random_state=SEED)
for fold,(idxT,idxV)in enumerate(skf.split(train,train.lang_label.values)) :
print('#' * 25)
print(f'#### FOLD {fold + 1} ####')
print('#' * 25)
print(f"Training on {len(idxT)} examples with batch size = {BATCH_SIZE}, validate on {len(idxV)} examples")
if DEVICE=='TPU':
if tpu: tf.tpu.experimental.initialize_tpu_system(tpu)
memory()
train_dataset = data_loader(idxT,labelled=True,repeat=True, shuffle=True)
valid_dataset = data_loader(idxV,labelled=True,repeat=False, shuffle=False, cache=False)
K.clear_session()
with strategy.scope() :
model = build_model()
mod_checkpoint = tf.keras.callbacks.ModelCheckpoint("fold{}.h5".format(fold+1), monitor="val_accuracy",
verbose=1, save_best_only=True,
save_weights_only=True, mode='max', save_freq='epoch')
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy', factor=0.5,verbose=1,mode='max',
patience=2, min_lr=5e-6)
history = model.fit(train_dataset, epochs=EPOCHS, batch_size=BATCH_SIZE, verbose=1,
steps_per_epoch= len(idxT)//BATCH_SIZE, callbacks=[mod_checkpoint,get_lr_callback() ],
validation_data=valid_dataset)
print("-"*5 +" Loading model weights from best epoch "+"-"*5)
try:
model.load_weights("fold{}.h5".format(fold+1))
print('Done')
except OSError:
print(f'Unable to load model!')
valid_dataset_unlabelled = data_loader(idxV,labelled=False,return_ids=False,repeat=False, shuffle=False, cache=False)
oof_preds[idxV,] = model.predict(valid_dataset_unlabelled, steps = len(idxV)/BATCH_SIZE)
oof_acc = accuracy_score(np.argmax(oof_preds[idxV,],axis=1), train.label.values[idxV])
print(f' Out-of-fold accuracy score: {oof_acc}')
valid_dataset_unlabelled = data_loader(idxV,labelled=False,return_ids=True,repeat=False, shuffle=False)
val_ids.extend([sample[0][3].numpy().decode('utf-8')for sample in valid_dataset_unlabelled.unbatch() ])
test_dataset = data_loader(_,data='test', labelled=False,repeat=False, shuffle=False, cache=False)
ypreds_test += model.predict(test_dataset, steps = test.shape[0]/BATCH_SIZE)/FOLDS
os.remove("fold{}.h5".format(fold+1))
if display_training_curve:
display_training_curves(history.history['loss'], history.history['val_loss'], 'loss', 211)
display_training_curves(history.history['accuracy'], history.history['val_accuracy'], 'Accuracy', 212)
gc.collect() | Contradictory, My Dear Watson |
11,685,223 | y_pred = mlpr_model.predict(X_test)
<prepare_output> | ypred_oof = np.argmax(oof_preds,axis=1)
print(f"{FOLDS}-fold CV accuracy score = {accuracy_score(ypred_oof, train.label.values)}")
oof_df = pd.DataFrame(list(zip(val_ids,ypred_oof.tolist())) ,columns = ['id','pred'])
oof_df.to_csv('oof.csv', index=False)
oof_df.head() | Contradictory, My Dear Watson |
11,685,223 | <prepare_output><EOS> | ypred_sub = np.argmax(ypreds_test, axis=1)
sub_df = pd.read_csv(".. /input/contradictory-my-dear-watson/sample_submission.csv")
sub_df.loc[:,'prediction'] = ypred_sub
sub_df.to_csv('submission.csv',index=False)
sub_df.head() | Contradictory, My Dear Watson |
11,543,499 | <SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<compute_test_metric> | !pip install --upgrade pip > /dev/null
!pip install --upgrade transformers > /dev/null
!pip install nlp > /dev/null | Contradictory, My Dear Watson |
11,543,499 | cm = confusion_matrix(y_test2['Survived'], y_pred2['Predicted'])
print(cm)
<predict_on_test> | import os
import gc
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import plotly.express as px
from tensorflow.keras.layers import Dense, Input, GlobalAveragePooling1D, GlobalMaxPooling1D
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
import tensorflow as tf
from transformers import TFAutoModel, AutoTokenizer
import nlp | Contradictory, My Dear Watson |
11,543,499 | y_pred = mlpr_model.predict(test_dataframe)
threshold = 0.5
transform_survived = np.vectorize(lambda x: 1 if x >= threshold else 0)
y_pred2 = transform_survived(y_pred)
y_pred2 = pd.DataFrame({'Predicted': y_pred2})
<load_from_csv> | def init_strategy() :
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
print("Init TPU strategy")
except ValueError:
strategy = tf.distribute.get_strategy()
print("Init CPU/GPU strategy")
return strategy
def build_model(model_name, maxlen, head="avg_pooling"):
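# Classification head options over the encoder output: CLS token, global average pooling, or global max pooling, followed by a 3-way softmax.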
input_ids = Input(shape=(maxlen,), dtype=tf.int32, name="input_ids")
encoder = TFAutoModel.from_pretrained(model_name)
encoder_output = encoder(input_ids)[0]
if head == "cls":
features = encoder_output[:, 0, :]
elif head == "avg_pooling":
features = GlobalAveragePooling1D()(encoder_output)
elif head == "max_pooling":
features = GlobalMaxPooling1D()(encoder_output)
else:
raise NotImplementedError
out = Dense(3, activation='softmax' )(features)
model = Model(inputs=input_ids, outputs=out)
model.compile(
Adam(lr=1e-5),
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
return model
def tokenize_dataframe(df, tokenizer, max_length):
text = df[['premise', 'hypothesis']].values.tolist()
encoded = tokenizer.batch_encode_plus(text, padding=True, max_length=max_length, truncation=True)
x = encoded['input_ids']
y = None
if 'label' in df.columns:
y = df.label.values
return x, y
def build_dataset(x, y, mode, batch_size):
if mode == "train":
dataset =(
tf.data.Dataset
.from_tensor_slices(( x, y))
.repeat()
.shuffle(2048)
.batch(batch_size)
.prefetch(auto)
)
elif mode == "valid":
dataset =(
tf.data.Dataset
.from_tensor_slices(( x, y))
.batch(batch_size)
.cache()
.prefetch(auto)
)
elif mode == "test":
dataset =(
tf.data.Dataset
.from_tensor_slices(x)
.batch(batch_size)
)
else:
raise NotImplementedError
return dataset
def load_mnli(use_validation=True):
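# Load GLUE/MNLI via the nlp library and return premise/hypothesis/label rows, all tagged as English.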
result = []
dataset = nlp.load_dataset(path='glue', name='mnli')
keys = ['train', 'validation_matched','validation_mismatched'] if use_validation else ['train']
for k in keys:
for record in dataset[k]:
c1, c2, c3 = record['premise'], record['hypothesis'], record['label']
if c1 and c2 and c3 in {0,1,2}:
result.append(( c1,c2,c3,'en'))
result = pd.DataFrame(result, columns=['premise','hypothesis','label','lang_abv'])
return result
def load_xnli() :
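# Load XNLI and expand each example into one row per language, pairing every translated hypothesis with the premise in the same language.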
result = []
dataset = nlp.load_dataset(path='xnli')
for k in dataset.keys() :
for record in dataset[k]:
hp, pr, lb = record['hypothesis'], record['premise'], record['label']
if hp and pr and lb in {0,1,2}:
for lang, translation in zip(hp['language'], hp['translation']):
pr_lang = pr.get(lang, None)
if pr_lang is None:
continue
result.append(( pr_lang, translation, lb,lang))
result = pd.DataFrame(result, columns=['premise','hypothesis','label','lang_abv'])
return result
| Contradictory, My Dear Watson |
11,543,499 | y_test = pd.read_csv("../input/gender_submission.csv", sep=",")
cm = confusion_matrix(y_test['Survived'], y_pred2['Predicted'])
print(cm)
<save_to_csv> | MODEL = 'jplu/tf-xlm-roberta-large'
MAXLEN = 120
strategy = init_strategy()
BATCH_SIZE = 16 * strategy.num_replicas_in_sync
tokenizer = AutoTokenizer.from_pretrained(MODEL)
auto = tf.data.experimental.AUTOTUNE
def preprocess(df):
return tokenize_dataframe(df, tokenizer, MAXLEN ) | Contradictory, My Dear Watson |
11,543,499 | test_csv['Survived'] = y_pred2['Predicted']
test_csv.to_csv('submission_all_nn_0.5.csv', index=False)
<train_model> | train = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/train.csv')
test = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/test.csv')
submission = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/sample_submission.csv')
x, y = preprocess(train)
x_test, _ = preprocess(test)
test_dataset = build_dataset(x_test, None, "test", BATCH_SIZE)
mnli = load_mnli()
xnli = load_xnli() | Contradictory, My Dear Watson |
11,543,499 | print('Keras NN: ')
nn = Sequential()
nn.add(
Dense(
units=3, kernel_initializer='uniform', activation='relu', input_dim=2))
nn.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
nn.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
nn.fit(X_train, y_train, batch_size=10, epochs=100)
<predict_on_test> | punct = '[' + ''.join([c for c in string.punctuation if c != "'"])+ ']'
def preprocess_query(q):
q = q.lower()
q = re.sub(punct, ' ', q)
q = re.sub('[ ]{2,}', ' ', q)
return q
def search_in_base(q, kb):
q = preprocess_query(q)
return int(q in kb)
| Contradictory, My Dear Watson |
11,543,499 | y_pred = nn.predict(X_test)
<prepare_output> | premises = pd.concat([train[['premise', 'lang_abv']], test[['premise', 'lang_abv']]] ) | Contradictory, My Dear Watson |
11,543,499 | threshold = 0.5
transform_survived = np.vectorize(lambda x: 1 if x >= threshold else 0)
y_pred2 = transform_survived(y_pred)
y_pred2 = pd.DataFrame({'Predicted': y_pred2[:,0]})
y_pred2.head(10)
<prepare_output> | knowledge_base = set(mnli['premise'].apply(preprocess_query))
premises['mnli'] = premises['premise'].apply(lambda q: search_in_base(q, knowledge_base))
print(f"fraction of train set english premises occurence in MNLI = {premises.loc[premises.lang_abv=='en', 'mnli'].mean() * 100}%" ) | Contradictory, My Dear Watson |
11,543,499 | y_test2 = pd.DataFrame({'Survived': y_test})
y_test2.head(10)
<compute_test_metric> | knowledge_base = set(xnli['premise'].apply(preprocess_query))
premises['xnli'] = premises['premise'].apply(lambda q: search_in_base(q, knowledge_base))
print(f"fraction of train set non-english premises occurence in XNLI = {premises.loc[premises.lang_abv!='en', 'xnli'].mean() * 100}%" ) | Contradictory, My Dear Watson |
11,543,499 | <predict_on_test><EOS> | strategy = init_strategy()
with strategy.scope() :
model = build_model(MODEL, MAXLEN)
model.load_weights(".. /input/watson-xlmr-models/XLMR_mnlixnli_ep6.h5")
dataset = build_dataset(x, y, "valid", BATCH_SIZE)
pr = np.argmax(model.predict(dataset), axis=1)
print(f"accuracy {accuracy_score(y, pr):.4f}")
test_preds = model.predict(test_dataset, verbose=0)
submission['prediction'] = test_preds.argmax(axis=1)
submission.to_csv('submission.csv', index=False ) | Contradictory, My Dear Watson |
11,144,692 | <SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<load_from_csv> | !pip install -q nlp
!pip install -q wordcloud | Contradictory, My Dear Watson |
11,144,692 | y_test = pd.read_csv("../input/gender_submission.csv", sep=",")
cm = confusion_matrix(y_test['Survived'], y_pred2['Predicted'])
print(cm)
<save_to_csv> | nltk.download("stopwords")
| Contradictory, My Dear Watson |
11,144,692 | test_csv['Survived'] = y_pred2['Predicted']
test_csv.to_csv('submission_all_keras_nn_0.5.csv', index=False)
<set_options> | try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
except ValueError:
strategy = tf.distribute.get_strategy()
print('Number of replicas:', strategy.num_replicas_in_sync ) | Contradictory, My Dear Watson |
11,144,692 | %matplotlib inline<load_from_csv> | train_csv = pd.read_csv("../input/contradictory-my-dear-watson/train.csv")
test_csv = pd.read_csv("../input/contradictory-my-dear-watson/test.csv" ) | Contradictory, My Dear Watson |
11,144,692 | train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv")
train.head()<count_missing_values> | AUTO = tf.data.experimental.AUTOTUNE
MODEL_NAME = "jplu/tf-xlm-roberta-base"
REPLICAS = strategy.num_replicas_in_sync
TOKENIZER = AutoTokenizer.from_pretrained(MODEL_NAME)
BATCH_SIZE = 16 * REPLICAS
MAX_LEN = 192
EPOCHS = 8
SEED = 48
FONT_DIR = ".. /input/font-dataset/FontScripts/"
np.random.seed(SEED)
random.seed(SEED ) | Contradictory, My Dear Watson |
11,144,692 | print(pd.isnull(train ).sum() )<count_values> | def prepare_input_v2(sentences):
sen_enc = TOKENIZER.batch_encode_plus(sentences,
pad_to_max_length=True,
return_attention_mask=False,
return_token_type_ids=False,
max_length=MAX_LEN)
return np.array(sen_enc["input_ids"])
def get_dataset(features, labels=None, labelled=True, batch_size=8, repeat=True, shuffle=True):
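# Wrap the encoded features (and labels, if any) in a batched tf.data pipeline; repeat/shuffle are only used for training.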
if labelled:
ds = tf.data.Dataset.from_tensor_slices(( features, labels))
else:
ds = tf.data.Dataset.from_tensor_slices(features)
if repeat:
ds = ds.repeat()
if shuffle:
ds = ds.shuffle(2048)
ds = ds.batch(batch_size*REPLICAS)
ds = ds.prefetch(AUTO)
return ds
def build_model() :
encoder = TFAutoModel.from_pretrained(MODEL_NAME)
input_word_ids = tf.keras.layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name="input_word_ids")
embeddings = encoder(input_word_ids)[0]
x = embeddings[:, 0, :]
output = tf.keras.layers.Dense(3, activation="softmax" )(x)
model = tf.keras.models.Model(inputs=input_word_ids, outputs=output)
model.compile(loss=tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.05),
optimizer=tf.keras.optimizers.Adam(lr=1e-5),
metrics=["accuracy"])
return model
def ratio_languages(df):
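# Compute each language's share of the dataframe and draw it as a labelled pie chart.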
languages = np.unique(df.language)
total = df.language.value_counts().sum()
ratios = {}
for e in languages:
ratios[e] = round(( df.language.value_counts().loc[e] / total), 2)*100
ratios = sorted(ratios.items() , key=lambda x:(x[1],x[0]), reverse=True)
languages = []
values = []
for e in ratios:
languages.append(e[0])
values.append(e[1])
_, texts, _ = plt.pie(values, explode=[0.2]*(len(values)) , labels=languages, autopct="%.2i%%", radius=2,
rotatelabels=True)
for e in texts:
e.set_fontsize(15)
e.set_fontfamily('fantasy')
plt.show()
def get_lr_callback(batch_size):
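# Scale the peak learning rate with the batch size, ramp up over lr_ramp_ep epochs, then decay exponentially; returns a LearningRateScheduler callback.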
lr_start = 0.000001
lr_max = 0.00000125 * batch_size
lr_min = 0.00000001
lr_sus_epoch = 0
lr_decay = 0.80
lr_ramp_ep = 5
lr = lr_start
def lrfn(epoch):
if epoch < lr_ramp_ep:
lr =(lr_max- lr_start)/lr_ramp_ep * epoch + lr_start
elif epoch <(lr_ramp_ep + lr_sus_epoch):
lr = lr_max
else:
lr =(lr_max - lr_min)*lr_decay**(epoch - lr_ramp_ep - lr_sus_epoch)+ lr_min
return lr
lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=True)
return lr_callback
def plot_wordcloud(df, col):
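# Draw a 2x2 grid of word clouds for selected languages, choosing a script-specific font file from FONT_DIR where needed.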
words = " "
font_path = None
fig, ax = plt.subplots(nrows=2, ncols=2)
fig.set_size_inches(12, 12)
res = []
for i in range(2):
for j in range(2):
res.append([i,j])
for i,lang in enumerate(["English",
"Hindi",
"Urdu",
"German" ,
]):
for line in df[df.language==lang][col].values:
tokens = line.split()
tokens = [word.lower() for word in tokens]
words += " ".join(tokens)+" "
fig.add_subplot(ax[res[i][0]][res[i][1]])
if lang=="Hindi":
font_path = FONT_DIR + "Hindi.ttf"
if lang=="French":
font_path = FONT_DIR + "French.ttf"
if lang=="Russian":
font_path= FONT_DIR + "Russian.ttf"
if lang=="Arabic":
font_path = FONT_DIR + "Arabic.ttf"
if lang=="Chinese":
font_path = FONT_DIR + "Chinese.otf"
if lang=="Swahili":
font_path = FONT_DIR + "Swahili.ttf"
if lang=="Urdu":
font_path = FONT_DIR + "Urdu.ttf"
if lang=="Vietnamese":
font_path = FONT_DIR + "Vietnamese.ttf"
if lang=="Greek":
font_path = FONT_DIR + "Greek.ttf"
if lang=="Thai":
font_path = FONT_DIR + "Thai.ttf"
if lang=="Spanish":
font_path = FONT_DIR + "Spanish.ttf"
if lang=="German":
font_path = FONT_DIR + "German.ttf"
if lang=="Turkish":
font_path = FONT_DIR + "Turkish.ttf"
if lang=="Bulgarian":
font_path = FONT_DIR + "Bulgarian.ttf"
s_words = STOPWORDS
wordcloud = WordCloud(font_path=font_path, width=800, height=800,
background_color="black",
min_font_size=10,
stopwords=s_words ).generate(words)
ax[res[i][0]][res[i][1]].imshow(wordcloud)
ax[res[i][0]][res[i][1]].axis("off")
ax[res[i][0]][res[i][1]].set_title(f"Language: {lang}", fontsize=14 ) | Contradictory, My Dear Watson |
11,144,692 | print("Percentage of females who survived:", train["Survived"][train["Sex"] == 'female'].value_counts(normalize = True)[1]*100)
print("Percentage of males who survived:", train["Survived"][train["Sex"] == 'male'].value_counts(normalize = True)[1]*100)
<drop_column> | print(train_csv["language"].value_counts())
print()
ratio_languages(train_csv ) | Contradictory, My Dear Watson |
11,144,692 | train = train.drop(['Cabin'], axis = 1)
test = test.drop(['Cabin'], axis = 1)
<drop_column> | xnli = nlp.load_dataset(path="xnli")
buff = {}
buff["premise"] = []
buff["hypothesis"] = []
buff["label"] = []
buff["language"] = []
uniq_lang = set()
for e in xnli["test"]:
for i in e["hypothesis"]["language"]:
uniq_lang.add(i)
language_map = {}
for e in uniq_lang:
language_map[e] = test_csv.loc[test_csv.lang_abv==e, "language"].iloc[0]
for x in xnli['test']:
label = x['label']
for idx, lang in enumerate(x['hypothesis']['language']):
if lang=="en":
continue
hypothesis = x['hypothesis']['translation'][idx]
premise = x['premise'][lang]
buff['premise'].append(premise)
buff['hypothesis'].append(hypothesis)
buff['label'].append(label)
buff['language'].append(language_map[lang])
xnli_df = pd.DataFrame(buff ) | Contradictory, My Dear Watson |
11,144,692 | train = train.drop(['Ticket'], axis = 1)
test = test.drop(['Ticket'], axis = 1 )<categorify> | xnli_df["language"].value_counts() | Contradictory, My Dear Watson |
11,144,692 | print("Number of people embarking in Southampton(S):")
southampton = train[train["Embarked"] == "S"].shape[0]
print(southampton)
print("Number of people embarking in Cherbourg(C):")
cherbourg = train[train["Embarked"] == "C"].shape[0]
print(cherbourg)
print("Number of people embarking in Queenstown(Q):")
queenstown = train[train["Embarked"] == "Q"].shape[0]
print(queenstown)
train = train.fillna({"Embarked": "S"} )<feature_engineering> | new_df = pd.concat([train_df, xnli_df], axis=0)
new_df.sample(5 ) | Contradictory, My Dear Watson |
11,144,692 | for dataset in combine:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Capt', 'Col',
'Don', 'Dr', 'Major', 'Rev', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace(['Countess', 'Lady', 'Sir'], 'Royal')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
train[['Title', 'Survived']].groupby(['Title'], as_index=False ).mean()<categorify> | pd.merge(new_df, test_csv, how="inner" ) | Contradictory, My Dear Watson |
11,144,692 | title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Royal": 5, "Rare": 6}
for dataset in combine:
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
train.head()<categorify> | new_df = new_df.merge(pd.merge(new_df, test_csv, how="inner"), how="left", indicator=True)
new_df = new_df[new_df._merge=="left_only"]
new_df = new_df.drop(["id", "lang_abv", "_merge"], axis=1)
new_df.info()
| Contradictory, My Dear Watson |
11,144,692 | mr_age = train[train["Title"] == 1]["AgeGroup"].mode()
miss_age = train[train["Title"] == 2]["AgeGroup"].mode()
mrs_age = train[train["Title"] == 3]["AgeGroup"].mode()
master_age = train[train["Title"] == 4]["AgeGroup"].mode()
royal_age = train[train["Title"] == 5]["AgeGroup"].mode()
rare_age = train[train["Title"] == 6]["AgeGroup"].mode()
age_title_mapping = {1: "Young Adult", 2: "Student", 3: "Adult", 4: "Baby", 5: "Adult", 6: "Adult"}
for x in range(len(train["AgeGroup"])) :
if train["AgeGroup"][x] == "Unknown":
train["AgeGroup"][x] = age_title_mapping[train["Title"][x]]
for x in range(len(test["AgeGroup"])) :
if test["AgeGroup"][x] == "Unknown":
test["AgeGroup"][x] = age_title_mapping[test["Title"][x]]<categorify> | pd.merge(new_df, test_csv, how="inner" ) | Contradictory, My Dear Watson |
11,144,692 | age_mapping = {'Baby': 1, 'Child': 2, 'Teenager': 3, 'Student': 4, 'Young Adult': 5, 'Adult': 6, 'Senior': 7}
train['AgeGroup'] = train['AgeGroup'].map(age_mapping)
test['AgeGroup'] = test['AgeGroup'].map(age_mapping)
train.head()<categorify> | ratio_languages(new_df ) | Contradictory, My Dear Watson |
11,144,692 | train = train.drop(['Age'], axis = 1)
test = test.drop(['Age'], axis = 1)
train = train.drop(['Name'], axis = 1)
test = test.drop(['Name'], axis = 1)
sex_mapping = {"male": 0, "female": 1}
train['Sex'] = train['Sex'].map(sex_mapping)
test['Sex'] = test['Sex'].map(sex_mapping)
train.head()
<categorify> | new_df.language.value_counts() | Contradictory, My Dear Watson |
11,144,692 | embarked_mapping = {"S": 1, "C": 2, "Q": 3}
train['Embarked'] = train['Embarked'].map(embarked_mapping)
test['Embarked'] = test['Embarked'].map(embarked_mapping)
train.head()
<split> | X, y = new_df[["premise", "hypothesis"]], new_df.label | Contradictory, My Dear Watson |
11,144,692 | predictors = train.drop(['Survived', 'PassengerId'], axis=1)
target = train["Survived"]
x_train, x_val, y_train, y_val = train_test_split(predictors, target, test_size = 0.22, random_state = 0 )<compute_train_metric> | X["language_label"] = new_df.language.astype(str)+ "_" + new_df.label.astype(str ) | Contradictory, My Dear Watson |
11,144,692 | svc = SVC()
svc.fit(x_train, y_train)
y_pred = svc.predict(x_val)
acc_svc = round(accuracy_score(y_pred, y_val)* 100, 2)
print(acc_svc )<save_to_csv> | print("Splitting Data...")
x_train, x_test, y_train, y_test = train_test_split(X, y, stratify=X.language_label, test_size=0.2, random_state=SEED)
y_train = tf.one_hot(y_train, depth=3)
y_test = tf.one_hot(y_test, depth=3)
print("Prepairing Input...")
train_input = prepare_input_v2(x_train[["premise", "hypothesis"]].values.tolist())
valid_input = prepare_input_v2(x_test[["premise", "hypothesis"]].values.tolist())
print("Preparing Dataset...")
train_dataset = get_dataset(train_input, y_train, labelled=True, batch_size=BATCH_SIZE, repeat=True,
shuffle=True)
valid_dataset = get_dataset(valid_input, y_test, labelled=True, batch_size=BATCH_SIZE//REPLICAS, repeat=False,
shuffle=False)
print("Downloading and Building Model...")
with strategy.scope() :
model = build_model()
lr_callback = get_lr_callback(BATCH_SIZE)
checkpoint = tf.keras.callbacks.ModelCheckpoint("XLM-R-base.h5", save_weights_only=True,
save_best_only=True, save_freq="epoch", monitor="val_loss",
mode="min")
print("Training...")
model.fit(train_dataset,
steps_per_epoch= x_train.shape[0]/BATCH_SIZE,
validation_data=valid_dataset,
epochs=EPOCHS,
callbacks=[lr_callback, checkpoint] ) | Contradictory, My Dear Watson |
11,144,692 | ids = test['PassengerId']
predictions = svc.predict(test.drop('PassengerId', axis=1))
output = pd.DataFrame({ 'PassengerId' : ids, 'Survived': predictions })
output.to_csv('submission.csv', index=False )<set_options> | test_input = prepare_input_v2(test_csv[["premise", "hypothesis"]].values.tolist())
test_dataset = get_dataset(test_input, None, labelled=False, batch_size=BATCH_SIZE, repeat=False, shuffle=False ) | Contradictory, My Dear Watson |
11,144,692 | %config InlineBackend.figure_format = 'retina'
%matplotlib inline<load_from_csv> | preds = model.predict(test_dataset ) | Contradictory, My Dear Watson |
11,144,692 | train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv" )<load_from_csv> | preds = preds.argmax(axis=1 ) | Contradictory, My Dear Watson |
11,144,692 | train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv" )<concatenate> | submission = pd.read_csv("../input/contradictory-my-dear-watson/sample_submission.csv")
submission.head() | Contradictory, My Dear Watson |
11,144,692 | all_data = pd.concat(( train.loc[:,'MSSubClass':'SaleCondition'],
test.loc[:,'MSSubClass':'SaleCondition']))<feature_engineering> | submission["prediction"] = preds | Contradictory, My Dear Watson |
11,144,692 | <categorify><EOS> | submission.to_csv("submission.csv", header=True, index=False ) | Contradictory, My Dear Watson |
11,248,074 | <SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<data_type_conversions> | os.environ["WANDB_API_KEY"] = "0"
!pip uninstall -y transformers
!pip install transformers
!pip install nlp
strategy = None
def seed_all(seed=2020):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
seed_all(2020 ) | Contradictory, My Dear Watson |
11,248,074 | all_data = all_data.fillna(all_data.mean() )<prepare_x_and_y> | original_train = pd.read_csv("../input/contradictory-my-dear-watson/train.csv")
original_train = shuffle(original_train)
original_valid = original_train[:len(original_train)// 5]
original_train = original_train[len(original_train)// 5:] | Contradictory, My Dear Watson |
11,248,074 | X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice<compute_train_metric> | print(f"original - training: {len(original_train)} examples")
original_train.head(10 ) | Contradictory, My Dear Watson |
11,248,074 | def rmse_cv(model):
rmse= np.sqrt(-cross_val_score(model, X_train, y, scoring="neg_mean_squared_error", cv = 5))
return(rmse )<choose_model_class> | print(f"original - validation: {len(original_valid)} examples")
original_valid.head(10 ) | Contradictory, My Dear Watson |
11,248,074 | model_ridge = Ridge()<define_search_space> | original_test = pd.read_csv("../input/contradictory-my-dear-watson/test.csv")
print(f"original - test: {len(original_test)} examples")
original_test.head(10 ) | Contradictory, My Dear Watson |
11,248,074 | alphas = [0.05, 0.1, 0.3, 1, 3, 5, 10, 15, 30, 50, 75]
cv_ridge = [rmse_cv(Ridge(alpha = alpha)).mean()
for alpha in alphas]<train_on_grid> | mnli = nlp.load_dataset(path='glue', name='mnli' ) | Contradictory, My Dear Watson |
11,248,074 | model_lasso = LassoCV(alphas = [1, 0.1, 0.001, 0.0005] ).fit(X_train, y )<find_best_params> | print(mnli, '
')
print('The split names in MNLI dataset:')
for k in mnli:
print(' ', k)
print("
mnli['train'] is ", type(mnli['train']))
mnli['train'] | Contradictory, My Dear Watson |
11,248,074 | rmse_cv(model_lasso ).mean()<define_variables> | print('The number of training examples in mnli dataset:', mnli['train'].num_rows)
print('The number of validation examples in mnli dataset - part 1:', mnli['validation_matched'].num_rows)
print('The number of validation examples in mnli dataset - part 2:', mnli['validation_mismatched'].num_rows, '\n')
print('The class names in mnli dataset:', mnli['train'].features['label'].names)
print('The feature names in mnli dataset:', list(mnli['train'].features.keys()), '\n')
for elt in mnli['train']:
print('premise:', elt['premise'])
print('hypothesis:', elt['hypothesis'])
print('label:', elt['label'])
print('label name:', mnli['train'].features['label'].names[elt['label']])
print('idx', elt['idx'])
print('-' * 80)
if elt['idx'] >= 10:
break | Contradictory, My Dear Watson |
11,248,074 | print("Lasso picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(sum(coef == 0)) + " variables" )<concatenate> | snli = nlp.load_dataset(path='snli')
print('The number of training examples in snli dataset:', snli['train'].num_rows)
print('The number of validation examples in snli dataset:', snli['validation'].num_rows, '\n')
print('The class names in snli dataset:', snli['train'].features['label'].names)
print('The feature names in snli dataset:', list(snli['train'].features.keys()), '\n')
for idx, elt in enumerate(snli['train']):
print('premise:', elt['premise'])
print('hypothesis:', elt['hypothesis'])
print('label:', elt['label'])
print('label name:', snli['train'].features['label'].names[elt['label']])
print('-' * 80)
if idx >= 10:
break | Contradictory, My Dear Watson |
11,248,074 | imp_coef = pd.concat([coef.sort_values().head(10),
coef.sort_values().tail(10)] )<import_modules> | xnli = nlp.load_dataset(path='xnli')
print('The number of validation examples in xnli dataset:', xnli['validation'].num_rows, '\n')
print('The class names in xnli dataset:', xnli['validation'].features['label'].names)
print('The feature names in xnli dataset:', list(xnli['validation'].features.keys()), '\n')
for idx, elt in enumerate(xnli['validation']):
print('premise:', elt['premise'])
print('hypothesis:', elt['hypothesis'])
print('label:', elt['label'])
print('label name:', xnli['validation'].features['label'].names[elt['label']])
print('-' * 80)
if idx >= 3:
break | Contradictory, My Dear Watson |
11,248,074 | import xgboost as xgb<train_on_grid> | for k in raw_ds_mapping:
for idx, x in enumerate(get_raw_dataset(k)) :
print(x)
if idx >= 3:
break | Contradictory, My Dear Watson |
11,248,074 | dtrain = xgb.DMatrix(X_train, label = y)
dtest = xgb.DMatrix(X_test)
params = {"max_depth":2, "eta":0.1}
model = xgb.cv(params, dtrain, num_boost_round=500, early_stopping_rounds=100 )<train_model> | def get_unbatched_dataset(ds_names, model_name, max_len=64):
if type(ds_names)== list:
ds_names = {k: None for k in ds_names}
ds_names = {k: v for k, v in ds_names.items() if k in raw_ds_mapping}
tokenizer = transformers.AutoTokenizer.from_pretrained(model_name, use_fast=True)
raw_datasets = [get_raw_dataset(x)for x in ds_names]
nb_examples = 0
sentence_pairs = []
labels = []
for name in ds_names:
raw_ds = get_raw_dataset(name)
nb_examples_to_use = raw_ds_mapping[name][2]
if ds_names[name]:
nb_examples_to_use = min(ds_names[name], nb_examples_to_use)
nb_examples += nb_examples_to_use
n = 0
for x in raw_ds:
sentence_pairs.append(( x['premise'], x['hypothesis']))
labels.append(x['label'])
n += 1
if n >= nb_examples_to_use:
break
r = dict(tokenizer.batch_encode_plus(batch_text_or_text_pairs=sentence_pairs, max_length=max_len, padding='max_length', truncation=True))
dataset = tf.data.Dataset.from_tensor_slices(( r, labels))
return dataset, nb_examples
def get_batched_training_dataset(dataset, nb_examples, batch_size=16, shuffle_buffer_size=1, repeat=False):
if repeat:
dataset = dataset.repeat()
if not shuffle_buffer_size:
shuffle_buffer_size = nb_examples
dataset = dataset.shuffle(shuffle_buffer_size)
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
def get_prediction_dataset(dataset, batch_size=16):
dataset = dataset.batch(batch_size, drop_remainder=False)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset | Contradictory, My Dear Watson |
11,248,074 | model_xgb = xgb.XGBRegressor(n_estimators=360, max_depth=2, learning_rate=0.1)
model_xgb.fit(X_train, y )<predict_on_test> | for k in raw_ds_mapping.keys() :
ds, nb_examples = get_unbatched_dataset({k: 100}, model_name='distilbert-base-uncased')
ds_batched = get_batched_training_dataset(ds, nb_examples, batch_size=16, shuffle_buffer_size=1, repeat=False)
print('{} - select {} examples'.format(k, nb_examples))
for x in ds_batched:
break | Contradictory, My Dear Watson |
11,248,074 | xgb_preds = np.expm1(model_xgb.predict(X_test))
lasso_preds = np.expm1(model_lasso.predict(X_test))<create_dataframe> | class Classifier(tf.keras.Model):
def __init__(self, model_name):
super(Classifier, self ).__init__()
self.transformer = transformers.TFAutoModel.from_pretrained(model_name)
self.dropout = tf.keras.layers.Dropout(rate=0.05)
self.global_pool = tf.keras.layers.GlobalAveragePooling1D()
self.classifier = tf.keras.layers.Dense(3)
def call(self, inputs, training=False):
x = self.transformer(inputs, training=training)[0]
x = self.dropout(x, training=training)
x = self.global_pool(x)
return self.classifier(x)
class Trainer:
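# Sets up the TPU/GPU strategy, builds the batched train/valid/test datasets, and runs a custom distributed training loop with per-epoch validation.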
def __init__(
self, ds_names, model_name, max_len=64,
batch_size_per_replica=16, prediction_batch_size_per_replica=64,
shuffle_buffer_size=1
):
global strategy
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
except ValueError:
strategy = tf.distribute.get_strategy()
print('Number of replicas:', strategy.num_replicas_in_sync)
self.ds_names = ds_names
self.model_name = model_name
self.max_len = max_len
self.batch_size_per_replica = batch_size_per_replica
self.prediction_batch_size_per_replica = prediction_batch_size_per_replica
self.batch_size = batch_size_per_replica * strategy.num_replicas_in_sync
self.prediction_batch_size = prediction_batch_size_per_replica * strategy.num_replicas_in_sync
self.shuffle_buffer_size = shuffle_buffer_size
train_ds, self.nb_examples = get_unbatched_dataset(
ds_names=ds_names, model_name=model_name, max_len=max_len
)
self.train_ds = get_batched_training_dataset(
train_ds, self.nb_examples, batch_size=self.batch_size,
shuffle_buffer_size=self.shuffle_buffer_size, repeat=True
)
valid_ds, self.nb_valid_examples = get_unbatched_dataset(
ds_names=['original valid'], model_name=model_name, max_len=max_len
)
self.valid_ds = get_prediction_dataset(valid_ds, self.prediction_batch_size)
self.valid_labels = next(iter(self.valid_ds.map(lambda inputs, label: label ).unbatch().batch(len(original_valid))))
test_ds, self.nb_test_examples = get_unbatched_dataset(
ds_names=['original test'], model_name=model_name, max_len=max_len
)
self.test_ds = get_prediction_dataset(test_ds, self.prediction_batch_size)
self.steps_per_epoch = self.nb_examples // self.batch_size
def get_model(self, model_name, lr, verbose=False):
with strategy.scope() :
model = Classifier(model_name)
model.trainable = True
dummy = model(tf.constant(1, shape=[1, 64]))
if verbose:
model.summary()
optimizer = tf.keras.optimizers.Adam(lr=lr)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction=tf.keras.losses.Reduction.SUM)
metrics = {
'train loss': tf.keras.metrics.Sum() ,
'train acc': tf.keras.metrics.SparseCategoricalAccuracy()
}
return model, loss_fn, optimizer, metrics
def get_routines(self, model, loss_fn, optimizer, metrics):
def train_1_step(batch):
inputs, labels = batch
with tf.GradientTape() as tape:
logits = model(inputs, training=True)
loss = loss_fn(labels, logits)/ self.batch_size
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
metrics['train loss'].update_state(loss)
metrics['train acc'].update_state(labels, logits)
@tf.function
def dist_train_1_epoch(data_iter):
for _ in tf.range(self.steps_per_epoch):
strategy.run(train_1_step, args=(next(data_iter),))
@tf.function
def predict_step(batch):
inputs, _ = batch
logits = model(inputs, training=False)
return logits
def predict_fn(dist_test_ds):
all_logits = []
for batch in dist_test_ds:
logits = strategy.run(predict_step, args=(batch,))
logits = strategy.experimental_local_results(logits)
logits = tf.concat(logits, axis=0)
all_logits.append(logits)
logits = tf.concat(all_logits, axis=0)
return logits
return dist_train_1_epoch, predict_fn
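# Full run: re-initialise the accelerator, train for the requested number of epochs, checkpoint the best validation accuracy, then predict on test and write a submission CSV.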
def train(self, train_name, model_name, epochs, verbose=False):
global strategy
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
except ValueError:
strategy = tf.distribute.get_strategy()
print('Number of replicas:', strategy.num_replicas_in_sync)
model, loss_fn, optimizer, metrics = self.get_model(model_name, 1e-5, verbose=verbose)
dist_train_1_epoch, predict_fn = self.get_routines(model, loss_fn, optimizer, metrics)
train_dist_ds = strategy.experimental_distribute_dataset(self.train_ds)
train_dist_iter = iter(train_dist_ds)
dist_valid_ds = strategy.experimental_distribute_dataset(self.valid_ds)
dist_test_ds = strategy.experimental_distribute_dataset(self.test_ds)
history = {}
best_acc=0.5
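# Epoch loop: one distributed pass over the training iterator, then evaluate on the validation set and keep the best weights in best.h5.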
for epoch in range(epochs):
s = datetime.datetime.now()
dist_train_1_epoch(train_dist_iter)
train_loss = metrics['train loss'].result() / self.steps_per_epoch
train_acc = metrics['train acc'].result()
metrics['train loss'].reset_states()
metrics['train acc'].reset_states()
print('epoch: {}\n'.format(epoch + 1))
print('train loss: {}'.format(train_loss))
print('train acc: {}\n'.format(train_acc))
e = datetime.datetime.now()
elapsed =(e - s ).total_seconds()
logits = predict_fn(dist_valid_ds)
valid_loss = tf.reduce_mean(tf.keras.losses.sparse_categorical_crossentropy(self.valid_labels, logits, from_logits=True, axis=-1))
valid_acc = tf.reduce_mean(tf.keras.metrics.sparse_categorical_accuracy(self.valid_labels, logits))
if valid_acc>best_acc:
best_acc=valid_acc
model.save_weights('best.h5')
print('valid loss: {}'.format(valid_loss))
print('valid acc: {}\n'.format(valid_acc))
print('train timing: {}\n'.format(elapsed))
history[epoch] = {
'train loss': float(train_loss),
'train acc': float(train_acc),
'valid loss': float(valid_loss),
'valid acc': float(valid_acc),
'train timing': elapsed
}
print('-' * 40)
print('best acc:{}'.format(best_acc))
model.load_weights('best.h5')
logits = predict_fn(dist_test_ds)
preds = tf.math.argmax(logits, axis=-1)
submission = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/sample_submission.csv')
submission['prediction'] = preds.numpy()
submission.to_csv(f'submission-{train_name}.csv', index=False)
return history, submission,logits | Contradictory, My Dear Watson |
11,248,074 | predictions = pd.DataFrame({"xgb":xgb_preds, "lasso":lasso_preds})
predictions.plot(x = "xgb", y = "lasso", kind = "scatter" )<prepare_output> | def print_config(trainer):
print('nb.of training examples used: {}'.format(trainer.nb_examples))
print('nb.of valid examples used: {}'.format(trainer.nb_valid_examples))
print('nb.of test examples used: {}'.format(trainer.nb_test_examples))
print('per replica batch size for training: {}'.format(trainer.batch_size_per_replica))
print('batch size for training: {}'.format(trainer.batch_size))
print('per replica batch size for prediction: {}'.format(trainer.prediction_batch_size_per_replica))
print('batch size for prediction: {}'.format(trainer.prediction_batch_size))
print('steps per epoch: {}'.format(trainer.steps_per_epoch)) | Contradictory, My Dear Watson |
11,248,074 | preds = 0.7*lasso_preds + 0.3*xgb_preds<save_to_csv> | trainer = Trainer(
ds_names={'original train': None, 'xnli valid': None, 'mnli train': 60000, 'mnli valid 1': None, 'mnli valid 2': None}, model_name=model_name,
max_len=208, batch_size_per_replica=16, prediction_batch_size_per_replica=64,
shuffle_buffer_size=None
)
print_config(trainer)
train_name = f'{model_name} + extra-xnli-mnli'.replace('/', '-')
history_3, submission_3,preds = trainer.train(train_name=train_name, model_name=model_name, epochs=epochs, verbose=True ) | Contradictory, My Dear Watson |
11,248,074 | solution = pd.DataFrame({"id":test.Id, "SalePrice":preds})
solution.to_csv("ridge_sol.csv", index = False )<import_modules> | np.savez_compressed('preds',a=preds ) | Contradictory, My Dear Watson |
11,248,074 | <normalization><EOS> | s = pd.read_csv(f'submission-{train_name}.csv')
s.to_csv(f'submission.csv', index=False)
s.head(20 ) | Contradictory, My Dear Watson |
11,162,722 | <SOS> metric: categorizationaccuracy Kaggle data source: contradictory,-my-dear-watson<split> | !pip install -q --upgrade pip
!pip install --use-feature=2020-resolver -q transformers==3.0.2
!pip install -q googletrans
| Contradictory, My Dear Watson |
11,162,722 | X_tr, X_val, y_tr, y_val = train_test_split(X_train, y, random_state = 3 )<choose_model_class> | print(f"TensorFlow version: {tf.__version__}")
print(f"Transformers version: {transformers.__version__}")
warnings.filterwarnings("ignore")
| Contradictory, My Dear Watson |
11,162,722 | model = Sequential()
model.add(Dense(1, input_dim = X_train.shape[1], W_regularizer=l1(0.001)))
model.compile(loss = "mse", optimizer = "adam" )<train_model> | class Configuration() :
def __init__(
self,
model_name,
translation = True,
max_length = 64,
padding = True,
batch_size = 128,
epochs = 5,
learning_rate = 1e-5,
metrics = ["sparse_categorical_accuracy"],
verbose = 1,
train_splits = 5,
accelerator = "TPU",
myluckynumber = 13
):
self.SEED = myluckynumber
self.ACCELERATOR = accelerator
self.PATH_TRAIN = Path("/kaggle/input/contradictory-my-dear-watson/train.csv")
self.PATH_TEST = Path("/kaggle/input/contradictory-my-dear-watson/test.csv")
self.TRAIN_SPLITS = train_splits
self.LANGUAGE_MAP = {
"English" : 0,
"Chinese" : 1,
"Arabic" : 2,
"French" : 3,
"Swahili" : 4,
"Urdu" : 5,
"Vietnamese": 6,
"Russian" : 7,
"Hindi" : 8,
"Greek" : 9,
"Thai" : 10,
"Spanish" : 11,
"German" : 12,
"Turkish" : 13,
"Bulgarian" : 14
}
self.INVERSE_LANGUAGE_MAP = {v: k for k, v in self.LANGUAGE_MAP.items() }
self.MODEL_NAME = model_name
self.TRANSLATION = translation
self.TOKENIZER = AutoTokenizer.from_pretrained(self.MODEL_NAME)
self.MAX_LENGTH = max_length
self.PAD_TO_MAX_LENGTH = padding
self.BATCH_SIZE = batch_size
self.EPOCHS = epochs
self.LEARNING_RATE = learning_rate
self.METRICS = metrics
self.VERBOSE = verbose
self.initialize_accelerator()
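# Try to connect to and initialise a TPU; otherwise fall back to the default CPU/single-GPU strategy.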
def initialize_accelerator(self):
if self.ACCELERATOR == "TPU":
print("Connecting to TPU")
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print(f"Running on TPU {tpu.master() }")
except ValueError:
print("Could not connect to TPU")
tpu = None
if tpu:
try:
print("Initializing TPU")
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
self.strategy = tf.distribute.experimental.TPUStrategy(tpu)
self.tpu = tpu
print("TPU initialized")
except _:
print("Failed to initialize TPU")
else:
print("Unable to initialize TPU")
self.ACCELERATOR = "GPU"
if self.ACCELERATOR != "TPU":
print("Using default strategy for CPU and single GPU")
self.strategy = tf.distribute.get_strategy()
if self.ACCELERATOR == "GPU":
print(f"GPUs Available: {len(tf.config.experimental.list_physical_devices('GPU')) }")
self.AUTO = tf.data.experimental.AUTOTUNE
self.REPLICAS = self.strategy.num_replicas_in_sync
print(f"REPLICAS: {self.REPLICAS}")
| Contradictory, My Dear Watson |
11,162,722 | hist = model.fit(X_tr, y_tr, validation_data =(X_val, y_val))<import_modules> | def translate_text_to_english(text):
translator = Translator()
return translator.translate(text, dest = "en" ).text
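# Jointly tokenize premise/hypothesis pairs, padded to max_len.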
def encode_text(df, tokenizer, max_len, padding):
text = df[["premise", "hypothesis"]].values.tolist()
text_encoded = tokenizer.batch_encode_plus(
text,
pad_to_max_length = padding,
max_length = max_len
)
return text_encoded
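# Wrap the encoded inputs (plus labels when labelled=True) into a batched, prefetched tf.data.Dataset.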
def get_tf_dataset(X, y, auto, labelled = True, repeat = False, shuffle = False, batch_size = 128):
if labelled:
ds =(tf.data.Dataset.from_tensor_slices(( X["input_ids"], y)))
else:
ds =(tf.data.Dataset.from_tensor_slices(X["input_ids"]))
if repeat:
ds = ds.repeat()
if shuffle:
ds = ds.shuffle(2048)
ds = ds.batch(batch_size)
ds = ds.prefetch(auto)
return ds
| Contradictory, My Dear Watson |
11,162,722 | print(os.listdir(".. /input"))
warnings.filterwarnings('ignore')
<load_from_csv> | def build_model(model_name, max_len, learning_rate, metrics):
input_ids = Input(shape =(max_len,), dtype = tf.int32, name = "input_ids")
transformer_model = TFAutoModel.from_pretrained(model_name)
transformer_embeddings = transformer_model(input_ids)[0]
output_values = Dense(3, activation = "softmax" )(transformer_embeddings[:, 0, :])
model = Model(inputs = input_ids, outputs = output_values)
opt = Adam(learning_rate = learning_rate)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits = True)
metrics = metrics
model.compile(optimizer = opt, loss = loss, metrics = metrics)
return model
| Contradictory, My Dear Watson |
11,162,722 | train = pd.read_csv(".. /input/house-prices-advanced-regression-techniques/train.csv")
train.head()<load_from_csv> | def run_model(config):
df_train = pd.read_csv(config.PATH_TRAIN)
df_test = pd.read_csv(config.PATH_TEST)
if config.TRANSLATION:
df_train.loc[df_train.language != "English", "premise"] = df_train[df_train.language != "English"].premise.apply(lambda x: translate_text_to_english(x))
df_test.loc[df_test.language != "English", "premise"] = df_test[df_test.language != "English"].premise.apply(lambda x: translate_text_to_english(x))
df_train.loc[df_train.language != "English", "hypothesis"] = df_train[df_train.language != "English"].hypothesis.apply(lambda x: translate_text_to_english(x))
df_test.loc[df_test.language != "English", "hypothesis"] = df_test[df_test.language != "English"].hypothesis.apply(lambda x: translate_text_to_english(x))
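# Combine language and label into one key so folds are stratified on both.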
df_train["language_label"] = df_train.language.astype(str)+ "_" + df_train.label.astype(str)
skf = StratifiedKFold(n_splits = config.TRAIN_SPLITS, shuffle = True, random_state = config.SEED)
preds_oof = np.zeros(( df_train.shape[0], 3))
preds_test = np.zeros(( df_test.shape[0], 3))
acc_oof = []
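# Stratified K-fold loop: train one model per fold, collect out-of-fold predictions and average the test predictions across folds.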
for(fold,(train_index, valid_index)) in enumerate(skf.split(df_train, df_train.language_label)) :
if config.ACCELERATOR == "TPU":
if config.tpu:
config.initialize_accelerator()
K.clear_session()
with config.strategy.scope() :
model = build_model(config.MODEL_NAME, config.MAX_LENGTH, config.LEARNING_RATE, config.METRICS)
if fold == 0:
print(model.summary())
print("
")
print("
print(f"
print("
X_train = df_train.iloc[train_index]
X_valid = df_train.iloc[valid_index]
y_train = X_train.label.values
y_valid = X_valid.label.values
print("
Tokenizing")
X_train_encoded = encode_text(df = X_train, tokenizer = config.TOKENIZER, max_len = config.MAX_LENGTH, padding = config.PAD_TO_MAX_LENGTH)
X_valid_encoded = encode_text(df = X_valid, tokenizer = config.TOKENIZER, max_len = config.MAX_LENGTH, padding = config.PAD_TO_MAX_LENGTH)
ds_train = get_tf_dataset(X_train_encoded, y_train, config.AUTO, repeat = True, shuffle = True, batch_size = config.BATCH_SIZE * config.REPLICAS)
ds_valid = get_tf_dataset(X_valid_encoded, y_valid, config.AUTO, batch_size = config.BATCH_SIZE * config.REPLICAS * 4)
n_train = X_train.shape[0]
if fold == 0:
X_test_encoded = encode_text(df = df_test, tokenizer = config.TOKENIZER, max_len = config.MAX_LENGTH, padding = config.PAD_TO_MAX_LENGTH)
sv = tf.keras.callbacks.ModelCheckpoint(
"model.h5",
monitor = "val_sparse_categorical_accuracy",
verbose = 0,
save_best_only = True,
save_weights_only = True,
mode = "max",
save_freq = "epoch"
)
print("
Training")
model_history = model.fit(
ds_train,
epochs = config.EPOCHS,
callbacks = [sv],
steps_per_epoch = n_train / config.BATCH_SIZE // config.REPLICAS,
validation_data = ds_valid,
verbose = config.VERBOSE
)
print("
Validating")
model.load_weights("model.h5")
ds_valid = get_tf_dataset(X_valid_encoded, -1, config.AUTO, labelled = False, batch_size = config.BATCH_SIZE * config.REPLICAS * 4)
preds_valid = model.predict(ds_valid, verbose = config.VERBOSE)
acc = accuracy_score(y_valid, np.argmax(preds_valid, axis = 1))
preds_oof[valid_index] = preds_valid
acc_oof.append(acc)
print("
Inferencing")
ds_test = get_tf_dataset(X_test_encoded, -1, config.AUTO, labelled = False, batch_size = config.BATCH_SIZE * config.REPLICAS * 4)
preds_test += model.predict(ds_test, verbose = config.VERBOSE)/ config.TRAIN_SPLITS
print(f"
Fold {fold + 1} Accuracy: {round(acc, 4)}
")
g = gc.collect()
print(f"
CV Mean Accuracy: {round(np.mean(acc_oof), 4)}")
print(f"CV StdDev Accuracy: {round(np.std(acc_oof), 4)}
")
return preds_oof, preds_test
| Contradictory, My Dear Watson |
11,162,722 | test = pd.read_csv(".. /input/house-prices-advanced-regression-techniques/test.csv")
test.head()<count_missing_values> | config_1 = Configuration("jplu/tf-xlm-roberta-large", translation = False, max_length = 84, batch_size = 64, epochs = 16, train_splits = 4)
preds_train_1, preds_test_1 = run_model(config_1)
| Contradictory, My Dear Watson |
11,162,722 | missing_percentage(test )<sort_values> | df_test = pd.read_csv(config_1.PATH_TEST)
df_submission = pd.DataFrame({"id": df_test.id.values, "prediction": np.argmax(preds_test_1, axis = 1)})
df_submission.to_csv("submission.csv", index = False)
df_submission.prediction.value_counts()
| Contradictory, My Dear Watson |
11,104,798 | ( train.corr() **2)["SalePrice"].sort_values(ascending = False)[1:]<create_dataframe> | from transformers import BertTokenizer, TFBertModel
from transformers import RobertaTokenizer, TFRobertaModel
from transformers import XLMRobertaTokenizer, TFXLMRobertaModel
from transformers import AutoTokenizer
import matplotlib.pyplot as plt
import tensorflow as tf
from dask import bag, diagnostics
from sklearn.utils import shuffle | Contradictory, My Dear Watson |
11,104,798 | train = train[train.GrLivArea < 4500]
train.reset_index(drop = True, inplace = True)
previous_train = train.copy()<prepare_x_and_y> | !pip install --quiet googletrans
| Contradictory, My Dear Watson |
11,104,798 | train.drop(columns=['Id'],axis=1, inplace=True)
test.drop(columns=['Id'],axis=1, inplace=True)
y = train['SalePrice'].reset_index(drop=True)
previous_train = train.copy()<drop_column> | submission = pd.read_csv("/kaggle/input/output/submission(2 ).csv")
submission.head() | Contradictory, My Dear Watson |