kernel_id (int64, 24.2k to 23.3M) | prompt (string, length 8 to 1.85M) | completion (string, length 1 to 182k) | comp_name (string, length 5 to 57) |
---|---|---|---|
10,997,559 | all_data["PoolQC"] = all_data["PoolQC"].fillna("None")
all_data["MiscFeature"] = all_data["MiscFeature"].fillna("None")
all_data["Alley"] = all_data["Alley"].fillna("None")
all_data["Fence"] = all_data["Fence"].fillna("None")
all_data["FireplaceQu"] = all_data["FireplaceQu"].fillna("None")
for col in('GarageType', 'GarageFinish', 'GarageQual', 'GarageCond'):
all_data[col] = all_data[col].fillna('None')
for col in('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
all_data[col] = all_data[col].fillna('None')
all_data['MSZoning'] = all_data['MSZoning'].fillna(all_data['MSZoning'].mode() [0])
all_data["MasVnrType"] = all_data["MasVnrType"].fillna("None")
all_data["Functional"] = all_data["Functional"].fillna("Typ")
all_data['Electrical'] = all_data['Electrical'].fillna(all_data['Electrical'].mode() [0])
all_data['KitchenQual'] = all_data['KitchenQual'].fillna(all_data['KitchenQual'].mode() [0])
all_data['Exterior1st'] = all_data['Exterior1st'].fillna(all_data['Exterior1st'].mode() [0])
all_data['Exterior2nd'] = all_data['Exterior2nd'].fillna(all_data['Exterior2nd'].mode() [0])
all_data['SaleType'] = all_data['SaleType'].fillna(all_data['SaleType'].mode() [0])
all_data['MSSubClass'] = all_data['MSSubClass'].fillna("None" )<categorify> | tokenizer = AutoTokenizer.from_pretrained(MODEL ) | Contradictory, My Dear Watson |
10,997,559 | all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform(
lambda x: x.fillna(x.median()))
for col in('GarageYrBlt', 'GarageArea', 'GarageCars'):
all_data[col] = all_data[col].fillna(0)
for col in('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF','TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
all_data[col] = all_data[col].fillna(0)
all_data["MasVnrArea"] = all_data["MasVnrArea"].fillna(0 )<data_type_conversions> | def quick_encode(df,maxlen=100):
values = df[['premise','hypothesis']].values.tolist()
tokens=tokenizer.batch_encode_plus(values,max_length=maxlen,pad_to_max_length=True)
return np.array(tokens['input_ids'])
x_train = quick_encode(train)
x_test = quick_encode(test)
y_train = train.label.values | Contradictory, My Dear Watson |
10,997,559 | all_data['MSSubClass'] = all_data['MSSubClass'].apply(str)
all_data['OverallCond'] = all_data['OverallCond'].astype(str)
all_data['YrSold'] = all_data['YrSold'].astype(str)
all_data['MoSold'] = all_data['MoSold'].astype(str )<feature_engineering> | def create_dist_dataset(X, y,val,batch_size= BATCH_SIZE):
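# create_dist_dataset: shuffle once, then repeat/batch/prefetch for training or just batch/prefetch for validation.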
dataset = tf.data.Dataset.from_tensor_slices(( X,y)).shuffle(len(X))
if not val:
dataset = dataset.repeat().batch(batch_size ).prefetch(AUTO)
else:
dataset = dataset.batch(batch_size ).prefetch(AUTO)
return dataset
test_dataset =(
tf.data.Dataset
.from_tensor_slices(( x_test))
.batch(BATCH_SIZE)
) | Contradictory, My Dear Watson |
10,997,559 | all_data['TotalSF'] = all_data['TotalBsmtSF'] + all_data['1stFlrSF'] + all_data['2ndFlrSF']<feature_engineering> | def build_model(transformer,max_len):
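# build_model: run token ids through the transformer, take the [CLS] position embedding and add a 3-class softmax head.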
input_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_ids")
sequence_output = transformer(input_ids)[0]
cls_token = sequence_output[:, 0, :]
out = Dense(3, activation='softmax' )(cls_token)
model = Model(inputs=input_ids, outputs=out)
model.compile(
Adam(lr=1e-5),
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
return model | Contradictory, My Dear Watson |
10,997,559 | df_train["SalePrice"] = np.log1p(df_train["SalePrice"] )<feature_engineering> | n_steps = len(x_train)// batch_size
train_history = model.fit(
train_dataset,
steps_per_epoch=n_steps,
validation_data=valid_dataset,
epochs=n_epochs
) | Contradictory, My Dear Watson |
10,997,559 | numeric_feats = all_data.dtypes[all_data.dtypes != "object"].index
skewed_feats = all_data[numeric_feats].apply(lambda x: skew(x.dropna()))
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
all_data[skewed_feats] = np.log1p(all_data[skewed_feats] )<categorify> | pred_test=np.zeros(( test.shape[0],3))
skf = StratifiedKFold(n_splits=5,shuffle=True,random_state=777)
val_score=[]
history=[]
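# Stratified 5-fold split, but only the first four folds are trained ("if fold < 4"); test predictions from those folds are averaged.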
for fold,(train_ind,valid_ind)in enumerate(skf.split(x_train,y_train)) :
if fold < 4:
print("fold",fold+1)
tf.tpu.experimental.initialize_tpu_system(tpu)
train_data = create_dist_dataset(x_train[train_ind],y_train[train_ind],val=False)
valid_data = create_dist_dataset(x_train[valid_ind],y_train[valid_ind],val=True)
Checkpoint=tf.keras.callbacks.ModelCheckpoint(f"roberta_base.h5", monitor='val_loss', verbose=0, save_best_only=True,
save_weights_only=True, mode='min')
with strategy.scope() :
transformer_layer = TFAutoModel.from_pretrained(MODEL)
model = build_model(transformer_layer, max_len=MAX_LEN)
n_steps = len(train_ind)//BATCH_SIZE
print("training model {} ".format(fold+1))
train_history = model.fit(
train_data,
steps_per_epoch=n_steps,
validation_data=valid_data,
epochs=EPOCHS,callbacks=[Checkpoint],verbose=0)
print("Loading model...")
model.load_weights(f"roberta_base.h5")
print("fold {} validation acc {}".format(fold+1,np.mean(train_history.history['val_accuracy'])))
print("fold {} validation acc {}".format(fold+1,np.mean(train_history.history['val_loss'])))
history.append(train_history)
val_score.append(np.mean(train_history.history['val_accuracy']))
print('predict on test.... ')
preds=model.predict(test_dataset,verbose=1)
pred_test+=preds/4 | Contradictory, My Dear Watson |
10,997,559 | cols =('FireplaceQu', 'BsmtQual', 'BsmtCond', 'GarageQual', 'GarageCond',
'ExterQual', 'ExterCond','HeatingQC', 'PoolQC', 'KitchenQual', 'BsmtFinType1',
'BsmtFinType2', 'Functional', 'Fence', 'BsmtExposure', 'GarageFinish', 'LandSlope',
'LotShape', 'PavedDrive', 'Street', 'Alley', 'CentralAir', 'MSSubClass', 'OverallCond',
'YrSold', 'MoSold')
for c in cols:
lbl = LabelEncoder()
lbl.fit(list(all_data[c].values))
all_data[c] = lbl.transform(list(all_data[c].values))<categorify> | submission = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/sample_submission.csv')
submission['prediction'] = np.argmax(pred_test,axis=1)
submission.head() | Contradictory, My Dear Watson |
10,997,559 | <prepare_x_and_y><EOS> | submission.to_csv('submission.csv',index=False ) | Contradictory, My Dear Watson |
10,978,416 | <SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<choose_model_class> | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
import re
import plotly.express as px
import plotly.figure_factory as ff
import plotly.graph_objects as go
from tqdm import tqdm
import tensorflow as tf
from tensorflow import keras
from keras import backend as k
from keras.utils import to_categorical
import transformers | Contradictory, My Dear Watson |
10,978,416 | lr = LinearRegression(
n_jobs = -1
)
rd = Ridge(
alpha = 4.84
)
rf = RandomForestRegressor(
n_estimators = 12,
max_depth = 3,
n_jobs = -1
)
gb = GradientBoostingRegressor(
n_estimators = 40,
max_depth = 2
)
nn = MLPRegressor(
hidden_layer_sizes =(90, 90),
alpha = 2.75
)<train_model> | DEVICE = 'TPU' | Contradictory, My Dear Watson |
10,978,416 | model = StackingRegressor(
regressors=[rf, gb, nn, rd],
meta_regressor=lr
)
model.fit(X_train, y )<predict_on_test> | if DEVICE == 'TPU':
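# Resolve and initialise the TPU when available; otherwise fall back to the default CPU/single-GPU distribution strategy.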
print('Connecting to TPU...')
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print('Running on TPU :',tpu.master())
except ValueError:
print('Could not connect to TPU')
tpu = None
if tpu:
try:
print('Initializing TPU...')
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
print('TPU initialized!')
except Exception:
print('Failed to initialize TPU')
else:
DEVICE='GPU'
if DEVICE != 'TPU':
print('Using default strategy for CPU and single GPU')
strategy = tf.distribute.get_strategy()
if DEVICE == 'GPU':
print('Num GPUs available : ',len(tf.config.experimental.list_physical_devices('GPU')))
AUTO = tf.data.experimental.AUTOTUNE
REPLICAS = strategy.num_replicas_in_sync
print('REPLICAS : ',REPLICAS ) | Contradictory, My Dear Watson |
10,978,416 | y_pred = model.predict(X_train)
print(sqrt(mean_squared_error(y, y_pred)) )<predict_on_test> | Batch_size = 16 * strategy.num_replicas_in_sync
epochs = 13
AUTO = tf.data.experimental.AUTOTUNE
MODEL = 'jplu/tf-xlm-roberta-large' | Contradictory, My Dear Watson |
10,978,416 | Y_pred = model.predict(X_test )<save_to_csv> | train = pd.read_csv(r'../input/contradictory-my-dear-watson/train.csv')
test = pd.read_csv(r'../input/contradictory-my-dear-watson/test.csv')
submission = pd.read_csv(r'../input/contradictory-my-dear-watson/sample_submission.csv' ) | Contradictory, My Dear Watson |
10,978,416 | sub = pd.DataFrame()
sub['Id'] = df_test['Id']
sub['SalePrice'] = np.expm1(Y_pred)
sub.to_csv('submission.csv',index=False )<load_from_csv> | test_num_lang = test.groupby('language')['id'].count().sort_values(ascending=False ).reset_index()
test_num_lang = pd.DataFrame(test_num_lang)
test_num_lang['count'] = test_num_lang['id']
test_num_lang = test_num_lang.drop('id',axis=1)
test_num_lang_data = test_num_lang.style.background_gradient(cmap='Oranges')
test_num_lang_data | Contradictory, My Dear Watson |
10,978,416 | print(check_output(["ls", "../input"] ).decode("utf8"))
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
<create_dataframe> | num_words_train_h = [None] * len(train)
for i in range(len(train)) :
num_words_train_h[i] = len(train['hypothesis'][i])
num_words_train_p = [None] * len(train)
for i in range(len(train)) :
num_words_train_p[i] = len(train['premise'][i])
num_words_test_h = [None] * len(test)
for i in range(len(test)) :
num_words_test_h[i] = len(test['hypothesis'][i])
num_words_test_p = [None] * len(test)
for i in range(len(test)) :
num_words_test_p[i] = len(test['premise'][i] ) | Contradictory, My Dear Watson |
10,978,416 | new_age = pd.DataFrame()
new_age['Age'] = train_df.Age.fillna(train_df.Age.mean())
new_age<import_modules> | train['num_words_hypothesis'] = num_words_train_h
train['num_words_premise'] = num_words_train_p
test['num_words_hypothesis'] = num_words_test_h
test['num_words_premise'] = num_words_test_p | Contradictory, My Dear Watson |
10,978,416 | import seaborn as sns
<groupby> | labels = train['label'].sort_values().value_counts().reset_index()
labels = pd.DataFrame(labels)
labels.columns = ['label','count']
labels_ = labels.style.background_gradient(cmap='Blues')
labels_ | Contradictory, My Dear Watson |
10,978,416 | train_df.groupby(['Sex'] ).Survived.sum()
<count_values> | target = train['label']
train = train.drop('label',axis=1)
train_text = [None] * len(train)
test_text = [None] * len(test)
for i in range(len(train)) :
train_text[i] = train['premise'][i] + ' ' + train['hypothesis'][i]
for i in range(len(test)) :
test_text[i] = test['premise'][i] + ' ' + test['hypothesis'][i] | Contradictory, My Dear Watson |
10,978,416 | print(train_df[train_df.Sex == 'female'].Survived.sum() /train_df[train_df.Sex == 'female'].Survived.count())
print(train_df[train_df.Sex == 'male'].Survived.sum() /train_df[train_df.Sex == 'male'].Survived.count() )<feature_engineering> | tokenizer = transformers.AutoTokenizer.from_pretrained(MODEL ) | Contradictory, My Dear Watson |
10,978,416 | data['Age'] = data.Age.fillna(data.Age.median())
data['Fare'] = data.Fare.fillna(data.Fare.median())
data.info()<categorify> | def roberta_encode(texts, maxlen=512):
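# roberta_encode: batch-tokenize the text pairs with padding up to maxlen and return the input-id matrix.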
enc_di = tokenizer.batch_encode_plus(
texts,
pad_to_max_length=True,
max_length=maxlen
)
return np.array(enc_di['input_ids'] ) | Contradictory, My Dear Watson |
10,978,416 | data = pd.get_dummies(data, columns=['Sex'], drop_first=True)
data.head()<prepare_x_and_y> | train_input_ids = roberta_encode(train_text,maxlen=100)
test_input_ids = roberta_encode(test_text,maxlen=100 ) | Contradictory, My Dear Watson |
10,978,416 | data_train = data.iloc[:891]
data_test = data.iloc[891:]
X = data_train.values
test = data_test.values
y = survived_train.values<import_modules> | train_input_ids,validation_input_ids,train_labels,validation_labels = train_test_split(train_input_ids,target,test_size=0.2 ) | Contradictory, My Dear Watson |
10,978,416 | from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV<train_model> | train_dataset =(
tf.data.Dataset
.from_tensor_slices(( train_input_ids,train_labels))
.repeat()
.shuffle(2048)
.batch(Batch_size)
.prefetch(AUTO)
)
validation_dataset =(
tf.data.Dataset
.from_tensor_slices(( validation_input_ids, validation_labels))
.batch(Batch_size)
.cache()
.prefetch(AUTO)
)
test_dataset =(
tf.data.Dataset
.from_tensor_slices(test_input_ids)
.batch(Batch_size)
) | Contradictory, My Dear Watson |
10,978,416 | clf = tree.DecisionTreeClassifier(max_depth=3)
clf.fit(X, y )<save_to_csv> | def create_model(bert_model):
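# create_model: classify the encoder's [CLS] token embedding with a 3-way softmax layer.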
input_ids = tf.keras.Input(shape=(100,),dtype='int32')
output = bert_model(input_ids)[0]
output = output[:,0,:]
output = tf.keras.layers.Dense(3,activation='softmax' )(output)
model = tf.keras.models.Model(inputs = input_ids,outputs = output)
model.compile(optimizer=tf.keras.optimizers.Adam(lr=1e-5), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
return model | Contradictory, My Dear Watson |
10,978,416 | Y_pred = clf.predict(test)
test_df['Survived'] = Y_pred
test_df[['PassengerId', 'Survived']].to_csv('dec_tree.csv', index=False )<set_options> | with strategy.scope() :
bert_model =(
transformers.TFAutoModel
.from_pretrained(MODEL)
)
model = create_model(bert_model ) | Contradictory, My Dear Watson |
10,978,416 | sns.set_palette('deep' )<load_from_csv> | history = model.fit(train_dataset,
validation_data = validation_dataset,
epochs = epochs,
batch_size = Batch_size,
steps_per_epoch = len(train_input_ids)//Batch_size
) | Contradictory, My Dear Watson |
10,978,416 | <concatenate><EOS> | pred = model.predict(test_dataset,verbose=1)
print(len(pred))
pred = pred.argmax(axis=1)
submission.prediction = pred
submission.to_csv('submission.csv',index=False)
submission.head() | Contradictory, My Dear Watson |
10,962,695 | <SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<train_model> | import os
import numpy as np
import pandas as pd
from kaggle_datasets import KaggleDatasets
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint
import transformers
from transformers import TFAutoModel, AutoTokenizer
from tqdm.notebook import tqdm
import plotly.express as px | Contradictory, My Dear Watson |
10,962,695 | def fit(model, X, y, epochs=250, optim='adam', lr=0.001, verbose=0):
valid_optims = ['sgd', 'rmsprop', 'adam']
optim = optim.lower()
if optim.lower() not in valid_optims:
raise ValueError("invalid optimizer got '{0}' and expect one of {1}"
.format(optim, valid_optims))
loss_fn = torch.nn.BCEWithLogitsLoss()
optim = optim.lower()
if optim == 'sgd':
optimizer = torch.optim.SGD(model.parameters() , lr=lr)
elif optim == 'rmsprop':
optimizer = torch.optim.RMSprop(model.parameters() , lr=lr)
elif optim == 'adam':
optimizer = torch.optim.Adam(model.parameters() , lr=lr)
for t in range(epochs):
logits = model(X)
loss = loss_fn(logits, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
pred = torch.sigmoid(logits)
acc = torch.eq(y, pred.round_() ).cpu().float().mean().data[0]
if verbose > 1:
print("Epoch {0:>{2}}/{1}: Loss={3:.4f}, Accuracy={4:.4f}"
.format(t + 1, epochs, len(str(epochs)) , loss.data[0], acc))
if verbose > 0:
print("Training complete! Loss={0:.4f}, Accuracy={1:.4f}".format(loss.data[0], acc))
return {'loss': loss.data[0], 'acc': acc}<compute_train_metric> | try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
print('Running on TPU ', tpu.master())
except ValueError:
strategy = tf.distribute.get_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync ) | Contradictory, My Dear Watson |
10,962,695 | def cross_val_score(model, X, y, cv=3, epochs=250, optim='adam', lr=0.001, use_cuda=True, verbose=0):
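# Manual k-fold evaluation: deep-copy the model per fold, fit it on the fold's training split and record held-out accuracy.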
kfolds = KFold(cv, False ).split(X)
score = []
for fold,(train_idx, val_idx)in enumerate(kfolds):
X_train = X[train_idx]
y_train = y[train_idx]
X_val = X[val_idx]
y_val = y[val_idx]
X_train = Variable(torch.Tensor(X_train), requires_grad=True)
y_train = Variable(torch.Tensor(y_train), requires_grad=False ).unsqueeze_(-1)
X_val = Variable(torch.Tensor(X_val), requires_grad=False)
y_val = Variable(torch.Tensor(y_val), requires_grad=False ).unsqueeze_(-1)
model_train = copy.deepcopy(model)
if(use_cuda):
X_train = X_train.cuda()
y_train = y_train.cuda()
X_val = X_val.cuda()
y_val = y_val.cuda()
model_train = model_train.cuda()
metrics = fit(model_train, X_train, y_train, epochs=epochs, optim=optim,
lr=lr, verbose=0)
y_val_pred = torch.sigmoid(model_train(X_val))
acc = torch.eq(y_val, y_val_pred.round_() ).cpu().float().mean().data[0]
score.append(acc)
if verbose > 1:
print("Fold {0:>{2}}/{1}: Validation accuracy={3:.4f}"
.format(fold + 1, cv, len(str(cv)) , acc))
if verbose > 0:
print("Mean k-fold accuracy: {0:.4f}".format(np.mean(score)))
return score<train_on_grid> | model_name = 'jplu/tf-xlm-roberta-large'
n_epochs = 10
max_len = 80
batch_size = 16 * strategy.num_replicas_in_sync | Contradictory, My Dear Watson |
10,962,695 | def titanic_net_grid_search(X, y, param_grid, cv=3, epochs=250, use_cuda=True, verbose=0):
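# Grid search: build a candidate network for each parameter combination, score it with the manual cross_val_score above and keep the best mean accuracy.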
grid = list(( dict(zip(param_grid, param))
for param in itertools.product(*param_grid.values())))
n_candidates = len(grid)
if verbose > 0:
print("Fitting {0} folds for each of {1} candidates, totaling {2} fits"
.format(n_folds, n_candidates, n_folds * n_candidates))
print()
best_params = None
best_model = None
best_score = 0
for candidate, params in enumerate(grid):
if verbose == 1:
progress = "Candidate {0:>{2}}/{1}".format(candidate + 1, n_candidates,
len(str(n_candidates)))
print(progress, end="\r", flush=True)
elif verbose > 1:
print("Candidate", candidate + 1)
print("Parameters: {}".format(params))
d_in = X_train.shape[-1]
d_hidden = params['d_hidden']
n_hidden = params['n_hidden']
d_out = 1
model = titanic_net(d_in, d_hidden, n_hidden, d_out)
cv_score = cross_val_score(model, X_train, Y_train, cv = n_folds, epochs=epochs,
use_cuda=use_cuda, verbose=0)
cv_mean_acc = np.mean(cv_score)
if verbose > 1:
print("Mean CV accuracy: {0:.4f}".format(cv_mean_acc))
print()
if cv_mean_acc > best_score:
best_params = params
best_model = model
best_score = cv_mean_acc
if verbose > 0:
if verbose == 1:
print()
print("Best model")
print("Parameters: {}".format(best_params))
print("Mean CV accuracy: {0:.4f}".format(best_score))
return {'best_model': best_model, 'best_params': best_params, 'best_score': best_score}<train_model> | train = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/train.csv')
test = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/test.csv')
submission = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/sample_submission.csv' ) | Contradictory, My Dear Watson |
10,962,695 | X_train = np.array(train.drop(columns='Survived'))
Y_train = np.array(train['Survived'].astype(int))
X_test = np.array(test)
print("Training samples shape: {}".format(X_train.shape))
print("Training targets shape: {}".format(Y_train.shape))
print("Test samples shape: {}".format(X_test.shape))<train_on_grid> | tokenizer = AutoTokenizer.from_pretrained(model_name ) | Contradictory, My Dear Watson |
10,962,695 | n_folds = 10
grid = {
'n_hidden': [0, 3, 7, 10, 15],
'd_hidden': [3, 7, 10],
'lr': [0.001, 0.005, 0.01],
'optim': ['Adam']
}
best_candidate = titanic_net_grid_search(X_train, Y_train, grid, cv=n_folds,
epochs=500, verbose=1)
best_model = best_candidate['best_model']<train_model> | train_text = train[['premise', 'hypothesis']].values.tolist()
test_text = test[['premise', 'hypothesis']].values.tolist()
train_encoded = tokenizer.batch_encode_plus(
train_text,
pad_to_max_length=True,
max_length=max_len
)
test_encoded = tokenizer.batch_encode_plus(
test_text,
pad_to_max_length=True,
max_length=max_len
) | Contradictory, My Dear Watson |
10,962,695 | X_train_t = Variable(torch.Tensor(X_train), requires_grad=True)
y_train_t = Variable(torch.Tensor(Y_train), requires_grad=False ).unsqueeze_(-1)
X_test_t = Variable(torch.Tensor(X_test), requires_grad=False)
best_params = best_candidate["best_params"]
_ = fit(best_model, X_train_t, y_train_t, epochs=500,optim=best_params['optim'],
lr=best_params['lr'] )<save_to_csv> | x_train, x_valid, y_train, y_valid = train_test_split(
train_encoded['input_ids'], train.label.values,
test_size=0.2, random_state=2020
)
x_test = test_encoded['input_ids'] | Contradictory, My Dear Watson |
10,962,695 | prediction = torch.sigmoid(best_model(X_test_t)).data.round_().numpy().flatten()
_test = pd.read_csv(".. /input/titanic/test.csv")
submission_df = pd.DataFrame({'PassengerId': _test['PassengerId'], 'Survived': prediction.astype(int)})
submission_df.to_csv("submission.csv", index=False)
train.to_csv("submission_train.csv", index=False)
test.to_csv("submission_test.csv", index=False )<import_modules> | auto = tf.data.experimental.AUTOTUNE
train_dataset =(
tf.data.Dataset
.from_tensor_slices(( x_train, y_train))
.repeat()
.shuffle(2048)
.batch(batch_size)
.prefetch(auto)
)
valid_dataset =(
tf.data.Dataset
.from_tensor_slices(( x_valid, y_valid))
.batch(batch_size)
.cache()
.prefetch(auto)
)
test_dataset =(
tf.data.Dataset
.from_tensor_slices(x_test)
.batch(batch_size)
) | Contradictory, My Dear Watson |
10,962,695 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
<load_from_csv> | with strategy.scope() :
transformer_encoder = TFAutoModel.from_pretrained(model_name)
input_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_ids")
sequence_output = transformer_encoder(input_ids)[0]
cls_token = sequence_output[:, 0, :]
out = Dense(3, activation='softmax' )(cls_token)
model = Model(inputs=input_ids, outputs=out)
model.compile(
Adam(lr=1e-5),
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
model.summary() | Contradictory, My Dear Watson |
10,962,695 | train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
print('The size of the training set: ', train.shape)
print('The size of the test set is: ' ,test.shape )<define_variables> | n_steps = len(x_train)// batch_size
train_history = model.fit(
train_dataset,
steps_per_epoch=n_steps,
validation_data=valid_dataset,
epochs=n_epochs
) | Contradictory, My Dear Watson |
10,962,695 | Passenger_Id = test['PassengerId']<define_variables> | test_preds = model.predict(test_dataset, verbose=1)
submission['prediction'] = test_preds.argmax(axis=1 ) | Contradictory, My Dear Watson |
10,962,695 | <count_values><EOS> | submission.to_csv('submission.csv', index=False)
submission.head() | Contradictory, My Dear Watson |
10,973,244 | <SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<count_values> | !pip install transformers==3.0.2
10,973,244 | test.dtypes.value_counts()<drop_column> | import tensorflow as tf
import transformers
from sklearn.model_selection import KFold | Contradictory, My Dear Watson |
10,973,244 | data = pd.concat(( train, test))
data.drop('Survived', axis = 1, inplace = True)
data.drop('PassengerId', axis = 1, inplace = True )<count_values> | transformers.__version__ | Contradictory, My Dear Watson |
10,973,244 | data.Sex.value_counts(normalize = True )<count_values> | from transformers import AutoTokenizer, TFAutoModel | Contradictory, My Dear Watson |
10,973,244 | embarked_counts = data.Embarked.value_counts(normalize = True)
embarked_counts<sort_values> | train_df = pd.read_csv("/kaggle/input/contradictory-my-dear-watson/train.csv")
test_df = pd.read_csv("/kaggle/input/contradictory-my-dear-watson/test.csv" ) | Contradictory, My Dear Watson |
10,973,244 | null_values = data.columns[data.isnull().any() ]
null_features = data[null_values].isnull().sum().sort_values(ascending = False)
missing_data = pd.DataFrame({'No of Nulls' :null_features})
missing_data<count_missing_values> | epochs = 3
maxlen = 50
model_name = "jplu/tf-xlm-roberta-large"
batch_size = 16 * strategy.num_replicas_in_sync | Contradictory, My Dear Watson |
10,973,244 | test.isnull().sum()<count_missing_values> | tokenizer = AutoTokenizer.from_pretrained(model_name ) | Contradictory, My Dear Watson |
10,973,244 | test.isnull().sum()<count_missing_values> | %%time
train_encode = tokenizer(list(train_df.premise.values), list(train_df.hypothesis.values),
max_length=maxlen, return_tensors="np", padding=True,
return_token_type_ids=True, return_attention_mask=True ) | Contradictory, My Dear Watson |
10,973,244 | train.isnull().sum()<count_missing_values> | def get_model(maxlen=50):
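# get_model: the encoder consumes token ids, attention mask and token type ids; its [CLS] embedding feeds a 3-way softmax head.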
base_model = TFAutoModel.from_pretrained(model_name)
input_ids = tf.keras.layers.Input(shape =(maxlen,), dtype=tf.int32, name="input_ids")
input_type = tf.keras.layers.Input(shape =(maxlen,), dtype=tf.int32, name="token_type_ids")
input_mask = tf.keras.layers.Input(shape =(maxlen,), dtype=tf.int32, name="attention_mask")
embedding = base_model([input_ids, input_mask, input_type])[0]
print(embedding.shape)
output = tf.keras.layers.Dense(3, activation="softmax" )(embedding[:, 0, :])
model = tf.keras.models.Model(inputs=[input_ids, input_mask, input_type], outputs = output)
model.compile(tf.keras.optimizers.Adam(1e-5), "sparse_categorical_crossentropy", ["accuracy"])
return model | Contradictory, My Dear Watson |
10,973,244 | train.isnull().sum()<data_type_conversions> | %%time
ps = cls_model([train_encode['input_ids'][:10], train_encode['attention_mask'][:10], train_encode['token_type_ids'][:10]] ) | Contradictory, My Dear Watson |
10,973,244 | data["Embarked"] = data["Embarked"].fillna('S' )<filter> | fold = KFold(n_splits=3, shuffle=True, random_state=108 ) | Contradictory, My Dear Watson |
10,973,244 | data[data['Fare'].isnull() ]<data_type_conversions> | %%time
hists = []
models = []
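# 3-fold loop: slice the encoded ids, attention masks and token type ids per fold and fine-tune the classifier on each split.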
for i,(train_idx, val_idx)in enumerate(fold.split(np.arange(train_df.label.shape[0]))):
print(f"----FOLD: {i+1}----
",train_idx, val_idx)
x_train = [train_encode['input_ids'][train_idx],
train_encode['attention_mask'][train_idx],
train_encode['token_type_ids'][train_idx]]
y_train = train_df.label.values[train_idx]
x_val = [train_encode['input_ids'][val_idx],
train_encode['attention_mask'][val_idx],
train_encode['token_type_ids'][val_idx]]
y_val = train_df.label.values[val_idx]
hist=cls_model.fit(x_train, y_train,
epochs=epochs,
batch_size = batch_size,
validation_data =(x_val, y_val),
)
hists.append(hist)
gc.collect() | Contradictory, My Dear Watson |
10,973,244 | def fill_missing_fare(df):
median_fare=df[(df['Pclass'] == 3)&(df['Embarked'] == 'S')]['Fare'].median()
df["Fare"] = df["Fare"].fillna(median_fare)
return df
data=fill_missing_fare(data )<define_search_space> | gc.collect() | Contradictory, My Dear Watson |
10,973,244 | sizeof_null = data["Age"].isnull().sum()
rand_age = np.random.randint(0, 80, size = sizeof_null )<data_type_conversions> | %%time
test_encode = tokenizer(list(test_df.premise.values), list(test_df.hypothesis.values),
max_length=maxlen, return_tensors="tf", padding=True,
return_token_type_ids=True, return_attention_mask=True ) | Contradictory, My Dear Watson |
10,973,244 | age_slice = data["Age"].copy()
age_slice[np.isnan(age_slice)] = rand_age
data["Age"] = age_slice
data["Age"] = data["Age"].astype(int )<feature_engineering> | ps = cls_model.predict([test_encode['input_ids'], test_encode['attention_mask'], test_encode['token_type_ids']],
verbose=1, batch_size=batch_size ) | Contradictory, My Dear Watson |
10,973,244 | data['Age'] = data['Age'].astype(int)
data.loc[ data['Age'] <= 18, 'Age'] = 0
data.loc[(data['Age'] > 18)&(data['Age'] <= 35), 'Age'] = 1
data.loc[(data['Age'] > 35)&(data['Age'] <= 60), 'Age'] = 2
data.loc[(data['Age'] > 60)&(data['Age'] <= 80), 'Age'] = 3
data['Age'].value_counts()<data_type_conversions> | submission = test_df.id.copy().to_frame()
submission['prediction'] = np.argmax(ps, 1 ) | Contradictory, My Dear Watson |
10,973,244 | <count_values><EOS> | submission.to_csv("submission.csv", index = False ) | Contradictory, My Dear Watson |
13,666,218 | <SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<feature_engineering> | !pip install -q transformers==3.0.2
!pip install -q nlp | Contradictory, My Dear Watson |
13,666,218 | data.loc[ data['FamilySize'] == 1, 'FSize'] = 'Single family'
data.loc[(data['FamilySize'] > 1)&(data['FamilySize'] <= 5), 'FSize'] = 'Small Family'
data.loc[(data['FamilySize'] > 5), 'FSize'] = ' Extended Family'<categorify> | np.random.seed(12345 ) | Contradictory, My Dear Watson |
13,666,218 | le = LabelEncoder()
data['Sex'] = le.fit_transform(data['Sex'])
data['Embarked'] = le.fit_transform(data['Embarked'])
data['Deck'] = le.fit_transform(data['Deck'])
data['FSize'] = le.fit_transform(data['FSize'] )<count_values> | pd.set_option('display.max_colwidth', 100)
os.environ["WANDB_API_KEY"] = "0"
sns.set_context("talk", font_scale=1.05 ) | Contradictory, My Dear Watson |
13,666,218 | data.dtypes.value_counts()<drop_column> | train = pd.read_csv("../input/contradictory-my-dear-watson/train.csv")
test = pd.read_csv("../input/contradictory-my-dear-watson/test.csv" ) | Contradictory, My Dear Watson |
13,666,218 | data = data.drop(['Name', 'Ticket','Cabin',], axis = 1 )<feature_engineering> | print("Any duplicate rows(train or test): ",
max(train['id'].nunique() != train.shape[0], test['id'].nunique() != test.shape[0])) | Contradictory, My Dear Watson |
13,666,218 | skewed = skew_cols.index.tolist()
data[skewed] = data[skewed].apply(np.log1p )<import_modules> | print("Train and test datasets have unique, non-overlapping ids: ",
pd.merge(train['id'], test['id'], on = 'id', how = 'inner' ).shape[0] == 0 ) | Contradictory, My Dear Watson |
13,666,218 | from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from sklearn.ensemble import VotingClassifier
<count_values> | print("% distribution by language - training dataset")
train['language'].value_counts(normalize=True) * 100. | Contradictory, My Dear Watson |
13,666,218 | train_new.dtypes.value_counts()<count_values> | print("% distribution by language - test dataset")
test['language'].value_counts(normalize=True) * 100. | Contradictory, My Dear Watson |
13,666,218 | test_new.dtypes.value_counts()<choose_model_class> | train['label_str'] = train['label'].map({0 : "entailment", 1 : "neutral", 2 : "contradiction"} ) | Contradictory, My Dear Watson |
13,666,218 | n_folds = 5
kf = KFold(n_folds, shuffle = True, random_state = 42 ).get_n_splits(train_new)
<compute_train_metric> | def _get_word_count(snt):
return len(str(snt ).split() ) | Contradictory, My Dear Watson |
13,666,218 | y_train = train.Survived
n_folds = 5
def f1_score(model):
kf = KFold(n_folds, shuffle = True, random_state = 42 ).get_n_splits(train_new)
rmse = np.sqrt(cross_val_score(model, train_new, y_train, scoring = 'f1', cv = kf))
return(rmse )<define_search_model> | train['premise_len'] = train['premise'].apply(lambda x: _get_word_count(x))
train['hypothesis_len'] = train['hypothesis'].apply(lambda x: _get_word_count(x))
train['relative_diff'] =(train['hypothesis_len'] - train['premise_len'])* 1./ train['premise_len'] | Contradictory, My Dear Watson |
13,666,218 | logreg = LogisticRegression()
rf = RandomForestClassifier()
gboost = GradientBoostingClassifier()
xgb = XGBClassifier()
lgbm = LGBMClassifier()<import_modules> | snli = load_dataset(path='snli' ) | Contradictory, My Dear Watson |
13,666,218 |
<compute_test_metric> | result_snli = []
for k in ['train', 'validation']:
for record in snli[k]:
c1, c2, c3 = record['premise'], record['hypothesis'], record['label']
if c1 and c2 and c3 in {0,1,2}:
result_snli.append(( c1,c2,'en','English',c3))
snli_df = pd.DataFrame(result_snli, columns=['premise','hypothesis','lang_abv', 'language', 'label'] ) | Contradictory, My Dear Watson |
13,666,218 | score = f1_score(logreg)
print("
Logistic regression score: mean = {:.4f} std =({:.4f})
".format(score.mean() , score.std()))<compute_test_metric> | pd.merge(train['premise'], snli_df['premise'], on = 'premise', how='inner' ).shape[0] != 0 | Contradictory, My Dear Watson |
13,666,218 | score = f1_score(rf)
print("
Random Forest score: mean = {:.4f} std =({:.4f})
".format(score.mean() , score.std()))<compute_test_metric> | final_df = shuffle(train.drop(columns=['id', 'label_str', 'premise_len', 'hypothesis_len', 'relative_diff'])).reset_index(drop = True ) | Contradictory, My Dear Watson |
13,666,218 | score = f1_score(gboost)
print("
Gradient Boosting Classifier score: mean = {:.4f} std =({:.4f})
".format(score.mean() , score.std()))<compute_test_metric> | X, y = final_df[['premise', 'hypothesis']].values.tolist() , final_df['label']
x_train, x_valid, y_train, y_valid = train_test_split(X, y, test_size=0.25, random_state=12345 ) | Contradictory, My Dear Watson |
13,666,218 | score = f1_score(xgb)
print("
eXtreme Gradient BOOSTing score: mean = {:.4f} std =({:.4f})
".format(score.mean() , score.std()))<compute_test_metric> | del snli
gc.collect() | Contradictory, My Dear Watson |
13,666,218 | score = f1_score(lgbm)
print("
Light Gradient Boosting score: mean = {:.4f} std =({:.4f})
".format(score.mean() , score.std()))<train_model> | encoder_handle = 'jplu/tf-xlm-roberta-large' | Contradictory, My Dear Watson |
13,666,218 | all_classifier = VotingClassifier(estimators=[('logreg', logreg),('rf', rf),
('gboost', gboost),('xgb', xgb),
('lgbm', lgbm)], voting='soft')
VC = all_classifier.fit(train_new, y_train )<compute_test_metric> | tokenizer = AutoTokenizer.from_pretrained(encoder_handle ) | Contradictory, My Dear Watson |
13,666,218 | score = f1_score(VC)
print("
Voting Classifier score: mean = {:.4f} std =({:.4f})
".format(score.mean() , score.std()))<predict_on_test> | max_len = 64
random_seed = 11887
learning_rate = 1e-5
epochs = 5
batch_size = 16 * strategy.num_replicas_in_sync
loss = 'sparse_categorical_crossentropy'
metrics = ['accuracy']
steps_per_epoch = 1000
auto = tf.data.experimental.AUTOTUNE | Contradictory, My Dear Watson |
13,666,218 | prediction = VC.predict(test_new )<save_to_csv> | def encode_sentence(s, tokenizer):
tokens = list(tokenizer.tokenize(s))
tokens.append(tokenizer.sep_token)
return tokenizer.convert_tokens_to_ids(tokens)
def tokenize(data, tokenizer, max_len):
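# tokenize: build [CLS] + premise + [SEP] + hypothesis + [SEP] id sequences, pad/truncate to max_len and return word ids, attention mask and type ids.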
PAD_ID = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)
tokens1 = tf.ragged.constant([encode_sentence(s[0], tokenizer)for s in data], dtype=tf.int32)
tokens2 = tf.ragged.constant([encode_sentence(s[1], tokenizer)for s in data], dtype=tf.int32)
cls_label = [tokenizer.convert_tokens_to_ids([tokenizer.cls_token])]*tokens1.shape[0]
tokens = tf.concat([cls_label, tokens1, tokens2], axis=-1)
tokens = tokens[:, :max_len]
tokens = tokens.to_tensor(default_value=PAD_ID)
pad = max_len - tf.shape(tokens)[1]
tokens = tf.pad(tokens, [[0, 0], [0, pad]], constant_values=PAD_ID)
input_word_ids = tf.reshape(tokens, [-1, max_len])
input_mask = tf.cast(input_word_ids != PAD_ID, tf.int32)
input_mask = tf.reshape(input_mask, [-1, max_len])
input_type_ids = tf.concat([tf.zeros_like(cls_label), tf.zeros_like(tokens1), tf.ones_like(tokens2)], axis=-1 ).to_tensor()
inputs = {
'input_word_ids': input_word_ids,
'input_mask': input_mask,
'input_type_ids': input_type_ids}
return inputs | Contradictory, My Dear Watson |
13,666,218 | titanic_submission = pd.DataFrame({"PassengerId": test["PassengerId"],
"Survived": prediction})
titanic_submission.to_csv('Titanic_Submission.csv', index = False)
titanic_submission.sample(10)
<import_modules> | def build_dataset(x, y, mode, batch_size):
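# build_dataset: "train" repeats, shuffles, batches and prefetches; "valid" batches, caches and prefetches; "test" only batches.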
if mode == "train":
dataset =(
tf.data.Dataset
.from_tensor_slices(( x, y))
.repeat()
.shuffle(5678)
.batch(batch_size)
.prefetch(auto)
)
elif mode == "valid":
dataset =(
tf.data.Dataset
.from_tensor_slices(( x, y))
.batch(batch_size)
.cache()
.prefetch(auto)
)
elif mode == "test":
dataset =(
tf.data.Dataset
.from_tensor_slices(x)
.batch(batch_size)
)
else:
raise NotImplementedError
return dataset | Contradictory, My Dear Watson |
13,666,218 | import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier<load_from_csv> | x_train_ = tokenize(x_train, tokenizer, max_len)
x_valid_ = tokenize(x_valid, tokenizer, max_len ) | Contradictory, My Dear Watson |
13,666,218 | train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
full_data = [train, test]<count_missing_values> | train_dataset = build_dataset(x_train_, y_train, "train", batch_size)
valid_dataset = build_dataset(x_valid_, y_valid, "valid", batch_size ) | Contradictory, My Dear Watson |
13,666,218 | print(train.isnull().sum())
print('_'*40)
print(test.isnull().sum() )<feature_engineering> | def build_model(encoder_handle, random_seed, learning_rate, loss, metrics, max_len):
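# build_model: average-pool the XLM-R token outputs and classify with a 3-way softmax head, all built inside the TPU strategy scope.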
tf.keras.backend.clear_session()
tf.random.set_seed(random_seed)
with strategy.scope() :
input_word_ids = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids")
input_mask = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="input_mask")
roberta = TFXLMRobertaModel.from_pretrained(encoder_handle)
roberta = roberta([input_word_ids, input_mask])[0]
out = GlobalAveragePooling1D()(roberta)
out = Dense(3, activation='softmax' )(out)
model = Model(inputs=[input_word_ids, input_mask], outputs = out)
model.compile(optimizer=Adam(lr=learning_rate), loss=loss, metrics=metrics)
model.summary()
return model | Contradictory, My Dear Watson |
13,666,218 | PassengerId = test['PassengerId']
for dataset in full_data:
dataset['Has_Cabin'] = dataset["Cabin"].apply(lambda x: 0 if type(x)== float else 1)
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
dataset['IsAlone'] = 0
dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
dataset['Sex'] = dataset['Sex'].replace(['male', 'female'], [0, 1])
master_ave = dataset.loc[dataset.Name.str.contains('Master'), 'Age'].mean()
mr_ave = dataset.loc[dataset.Name.str.contains('Mr'), 'Age'].mean()
miss_ave = dataset.loc[dataset.Name.str.contains('Miss'), 'Age'].mean()
mrs_ave = dataset.loc[dataset.Name.str.contains('Mrs'), 'Age'].mean()
dataset.loc[dataset.Name.str.contains('Master')& dataset.Age.isnull() , 'Age'] = master_ave
dataset.loc[dataset.Name.str.contains('Mr')& dataset.Age.isnull() , 'Age'] = mr_ave
dataset.loc[dataset.Name.str.contains('Miss')& dataset.Age.isnull() , 'Age'] = miss_ave
dataset.loc[dataset.Name.str.contains('Mrs')& dataset.Age.isnull() , 'Age'] = mrs_ave
dataset['Age'] = dataset['Age'].fillna(dataset['Age'].mean())
dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16)&(dataset['Age'] <= 32), 'Age'] = 1
dataset.loc[(dataset['Age'] > 32)&(dataset['Age'] <= 48), 'Age'] = 2
dataset.loc[(dataset['Age'] > 48)&(dataset['Age'] <= 64), 'Age'] = 3
dataset.loc[ dataset['Age'] > 64, 'Age'] = 4
dataset['Age'] = dataset['Age'].astype(int)
dataset['Fare'] = dataset['Fare'].fillna(dataset['Fare'].mean())
dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
dataset.loc[(dataset['Fare'] > 7.91)&(dataset['Fare'] <= 14.454), 'Fare'] = 1
dataset.loc[(dataset['Fare'] > 14.454)&(dataset['Fare'] <= 31), 'Fare'] = 2
dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
dataset['Fare'] = dataset['Fare'].astype(int)
dataset['Embarked'] = dataset['Embarked'].fillna('S')
dataset['Embarked'] = dataset['Embarked'].map({'S': 0, 'C': 1, 'Q': 2} ).astype(int)
dataset.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1, inplace=True )<count_missing_values> | model = build_model(encoder_handle, random_seed, learning_rate, loss, metrics, max_len ) | Contradictory, My Dear Watson |
13,666,218 | print(train.isnull().sum())
print('_'*40)
print(test.isnull().sum() )<prepare_x_and_y> | early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
verbose=1,
patience=2,
mode='min',
restore_best_weights=True ) | Contradictory, My Dear Watson |
13,666,218 | trn_x = train.drop(['Survived'], axis=1, inplace=False)
trn_y = train['Survived']<train_model> | x_small_train, y_small_train = X[:batch_size], y[:batch_size] | Contradictory, My Dear Watson |
13,666,218 | pred = np.zeros(( test.shape[0], 10))
for i in range(10):
model = RandomForestClassifier(
max_depth = 10,
max_features =10,
min_samples_split = 15,
n_estimators = 10,
n_jobs = -1,
random_state = i)
model.fit(trn_x, trn_y)
pred[:,i] = model.predict_proba(test)[:,1].reshape(test.shape[0],)
output = pred.mean(axis=1)
output[output >= 0.5] = 1
output[output < 0.5] = 0
output = output.astype(int )<save_to_csv> | x_small_train = tokenize(x_small_train, tokenizer, max_len)
small_train_dataset = build_dataset(x_small_train, y_small_train, "train", batch_size ) | Contradictory, My Dear Watson |
13,666,218 | df_out = pd.DataFrame({ 'PassengerId': PassengerId,
'Survived': output })
df_out.to_csv("submission.csv", index=False )<set_options> | history_small_train = model.fit(small_train_dataset,
steps_per_epoch=100,
epochs=3 ) | Contradictory, My Dear Watson |
13,666,218 | warnings.filterwarnings("ignore" )<load_from_csv> | model = build_model(encoder_handle, random_seed, learning_rate, loss, metrics, max_len ) | Contradictory, My Dear Watson |
13,666,218 | traindata = pd.read_csv('/kaggle/input/titanic/train.csv')
testdata = pd.read_csv('/kaggle/input/titanic/test.csv' )<categorify> | history = model.fit(train_dataset,
validation_data=valid_dataset,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
callbacks=[early_stopping] ) | Contradictory, My Dear Watson |
13,666,218 | le = LabelEncoder()
traindata['Sex'] = le.fit_transform(traindata['Sex'])
testdata['Sex'] = le.fit_transform(testdata['Sex'])
traindata['Embarked'] = le.fit_transform(traindata['Embarked'].astype(str))
testdata['Embarked'] = le.fit_transform(testdata['Embarked'].astype(str))<prepare_x_and_y> | x_test = tokenize(test[['premise', 'hypothesis']].values.tolist() , tokenizer, max_len)
test_dataset = build_dataset(x_test, None, "test", batch_size ) | Contradictory, My Dear Watson |
13,666,218 | X = traindata[['Pclass','Sex','Age','Fare','Parch','SibSp','Embarked']].values
y = traindata.iloc[:,1].values
X_real_test = testdata[['Pclass','Sex','Age','Fare','Parch','SibSp','Embarked']].values<count_missing_values> | predictions_prob = model.predict(test_dataset)
final = predictions_prob.argmax(axis=-1)
submission = pd.DataFrame()
submission['id'] = test['id']
submission['prediction'] = final.astype(np.int32 ) | Contradictory, My Dear Watson |
13,666,218 | <categorify><EOS> | submission.to_csv("submission.csv", index = False ) | Contradictory, My Dear Watson |
13,511,536 | <SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<split> | !pip install sentence-transformers
!pip install pandas
!pip install torch==1.7.1+cu101 torchvision==0.8.2+cu101 torchaudio==0.7.2 -f https://download.pytorch.org/whl/torch_stable.html | Contradictory, My Dear Watson |
13,511,536 | <normalization><EOS> |
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=logging.INFO,
handlers=[LoggingHandler() ])
dataset_path = '../input/contradictory-my-dear-watson/train.csv'
logging.info("Read AllNLI train dataset")
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
train_samples = []
dev_samples = []
train_data = pandas.read_csv(dataset_path)
train_data['label'] = train_data['label'].replace([0, 2], [2, 0])
for id, row in train_data.iterrows() :
label_id = int(row['label'])
train_samples.append(InputExample(texts=[row['premise'], row['hypothesis']], label=label_id))
train_batch_size = 16
num_epochs = 20
model_save_path = './training_allnli-' + datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
model = CrossEncoder('sentence-transformers/xlm-r-100langs-bert-base-nli-mean-tokens', num_labels=len(label2int))
train_dataloader = DataLoader(train_samples, shuffle=True, batch_size=train_batch_size)
evaluator = CESoftmaxAccuracyEvaluator.from_input_examples(dev_samples, name='AllNLI-dev')
warmup_steps = math.ceil(len(train_dataloader)* num_epochs * 0.1)
logging.info("Warmup-steps: {}".format(warmup_steps))
model.fit(train_dataloader=train_dataloader,
epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path)
test_dataset = '../input/contradictory-my-dear-watson/test.csv'
df = pandas.read_csv(test_dataset)
sentence_pairs = []
ids = []
for id, row in df.iterrows() :
label_id = 0
ids.append(row['id'])
sentence_pairs.append([row['premise'], row['hypothesis']])
pred_scores = model.predict(sentence_pairs, convert_to_numpy=True, show_progress_bar=False,
batch_size=train_batch_size)
pred_labels = np.argmax(pred_scores, axis=1)
out_df = pandas.DataFrame([ids, pred_labels] ).transpose()
out_df = out_df.rename(columns={0: 'id', 1: 'prediction'})
out_df['prediction'] = out_df['prediction'].replace([0, 2],[2, 0])
out_df.to_csv('submission.csv', index=False)
| Contradictory, My Dear Watson |
11,775,677 | <SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<predict_on_test> | !pip install googletrans textAugment
11,775,677 | classifier = SVC(kernel='rbf',random_state=0)
classifier.fit(X_train,y_train)
y_pred = classifier.predict(X_test)
cm = confusion_matrix(y_test,y_pred)
print(cm )<save_to_csv> | os.environ["WANDB_API_KEY"] = "0"
| Contradictory, My Dear Watson |
11,775,677 | y_pred_test = classifier.predict(X_real_test)
pid = testdata[['PassengerId']].values
res = np.expand_dims(y_pred_test,axis=1)
f = np.hstack(( pid,res))
df = pd.DataFrame(f, columns = ['PassengerId', 'Survived'])
df.to_csv('gender_submission.csv', index=False )<set_options> | try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
print('Running on TPU ', tpu.master())
except ValueError:
strategy = tf.distribute.get_strategy()
print('Number of replicas:', strategy.num_replicas_in_sync ) | Contradictory, My Dear Watson |
11,775,677 | pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.3f}'.format
<load_from_csv> | df_train = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/train.csv')
df_test = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/test.csv' ) | Contradictory, My Dear Watson |
11,775,677 | dataframe = pd.read_csv(".. /input/train.csv", sep=",")
dataframe.head(10)
<create_dataframe> | def clean_word(value):
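# clean_word: lower-case every sentence; for English rows also repair '??' artefacts and normalise ellipses, slashes, dashes and soft hyphens.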
language = value[0]
word = value[1]
if language != 'English':
word = word.lower()
return word
word = word.lower()
word = re.sub(r'\?\?', 'e', word)
word = re.sub('\.\.\.', '.', word)
word = re.sub('\/', ' ', word)
word = re.sub('--', ' ', word)
word = re.sub('/\xad', '', word)
word = word.strip(' ')
return word
df_train['premise'] = df_train[['language', 'premise']].apply(lambda v: clean_word(v), axis=1)
df_train['hypothesis'] = df_train[['language', 'hypothesis']].apply(lambda v: clean_word(v), axis=1)
df_test['premise'] = df_test[['language', 'premise']].apply(lambda v: clean_word(v), axis=1)
df_test['hypothesis'] = df_test[['language', 'hypothesis']].apply(lambda v: clean_word(v), axis=1)
languages = [ 'zh-cn' if lang == 'zh' else lang for lang in df_train['lang_abv'].unique() ] | Contradictory, My Dear Watson |
11,775,677 | print('null value summary:')
dataframe.isnull().sum().sort_values(ascending=False)
<create_dataframe> | seed = 42
tf.random.set_seed(seed)
model_name = 'jplu/tf-xlm-roberta-large'
tokenizer = XLMRobertaTokenizer.from_pretrained(model_name)
def build_model() :
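# build_model: XLM-R embeddings pass through dropout, global average pooling and two ReLU dense layers before the 3-way softmax output.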
with strategy.scope() :
bert_encoder = TFXLMRobertaModel.from_pretrained(model_name)
input_word_ids = tf.keras.Input(shape=(None,), dtype=tf.int32, name="input_word_ids")
input_mask = tf.keras.Input(shape=(None,), dtype=tf.int32, name="input_mask")
embedding = bert_encoder([input_word_ids, input_mask])[0]
output_layer = tf.keras.layers.Dropout(0.25 )(embedding)
output_layer = tf.keras.layers.GlobalAveragePooling1D()(output_layer)
output_dense_layer = tf.keras.layers.Dense(64, activation='relu' )(output_layer)
output_dense_layer = tf.keras.layers.Dense(32, activation='relu' )(output_dense_layer)
output = tf.keras.layers.Dense(3, activation='softmax' )(output_dense_layer)
model = tf.keras.Model(inputs=[input_word_ids, input_mask], outputs=output)
model.compile(tf.keras.optimizers.Adam(lr=1e-5), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
return model
with strategy.scope() :
model = build_model()
model.summary() | Contradictory, My Dear Watson |
11,775,677 | dataframe = dataframe.drop(columns=['PassengerId', 'Name', 'Ticket', 'Cabin', 'Fare'])
<categorify> | batch_size = 8 * strategy.num_replicas_in_sync
num_splits = 5
test_input = None | Contradictory, My Dear Watson |