kernel_id (int64, 24.2k–23.3M) | prompt (string, lengths 8–1.85M) | completion (string, lengths 1–182k) | comp_name (string, lengths 5–57) |
---|---|---|---|
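Each row below pairs a kernel_id with a prompt snippet, its completion, and the competition it came from (comp_name). As a minimal sketch of how a table with this schema could be loaded and inspected, assuming it has been exported to a local Parquet file (the file name below is hypothetical):

```python
import pandas as pd

# Hypothetical local export of the table below; adjust the path to your copy.
df = pd.read_parquet("kernel_prompt_completions.parquet")

# Columns as described in the header: kernel_id, prompt, completion, comp_name.
print(df.dtypes)

# Length statistics for the text columns, mirroring the header summary.
print(df["prompt"].str.len().describe())
print(df["completion"].str.len().describe())

# Peek at one prompt/completion pair from the competition shown in this preview.
row = df[df["comp_name"] == "Contradictory, My Dear Watson"].iloc[0]
print(row["prompt"][:200])
print(row["completion"][:200])
```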
11,148,955 | n_test_df = pd.get_dummies(n_test_df, drop_first=True )<split> | with strategy.scope() :
x_input = tf.keras.Input(shape=(MAX_LEN,), dtype=tf.int32, name="input_ids")
x = backbone(x_input)[0]
x = x[:, 0, :]
x = tf.keras.layers.Dense(3, activation='softmax' )(x)
model = tf.keras.models.Model(inputs=x_input, outputs=x ) | Contradictory, My Dear Watson |
11,148,955 | X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42 )<define_search_space> | model.compile(
tf.keras.optimizers.Adam(lr=RATE),
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
) | Contradictory, My Dear Watson |
11,148,955 | parameters = {"learning_rate": [0.1, 0.01, 0.001],
"gamma" : [0.01, 0.1, 0.3, 0.5, 1, 1.5, 2],
"max_depth": [2, 4, 7, 10],
"colsample_bytree": [0.3, 0.6, 0.8, 1.0],
"subsample": [0.2, 0.4, 0.5, 0.6, 0.7],
"reg_alpha": [0, 0.5, 1],
"reg_lambda": [1, 1.5, 2, 3, 4.5],
"min_child_weight": [1, 3, 5, 7],
"n_estimators": [100, 250, 500, 1000]}
xgb_rscv = RandomizedSearchCV(XGBClassifier() , param_distributions = parameters, cv = 7, verbose = 3, random_state = 40)
model_rscv = xgb_rscv.fit(X_train, y_train)
model_rscv.best_params_<train_model> | steps = len(x_train)// BATCH_SIZE
history = model.fit(
train_dataset,
validation_data=valid_dataset,
epochs=EPOCHS,
steps_per_epoch=steps,
) | Contradictory, My Dear Watson |
11,148,955 | tuned_model = XGBClassifier(booster='gbtree', subsample= 0.7,
reg_lambda= 3,
reg_alpha= 1,
n_estimators= 100,
min_child_weight= 3,
max_depth= 10,
learning_rate = 0.001,
gamma= 0.01,
colsample_bytree= 0.6)
tuned_model.fit(X_train, y_train )<compute_test_metric> | test_preds = model.predict(test_dataset, verbose=1)
submission['prediction'] = test_preds.argmax(axis=1 ) | Contradictory, My Dear Watson |
11,148,955 | tuned_model.score(X_test, y_test )<save_to_csv> | submission.to_csv('submission.csv', index=False ) | Contradictory, My Dear Watson |
11,148,955 | predictions = tuned_model.predict(n_test_df)
output = pd.DataFrame({'PassengerId': test_df.PassengerId, 'Survived': predictions})
output.to_csv('my_submission.csv', index=False)
print("Your submission was successfully saved!" )<install_modules> | submission.to_csv('submission.csv', index=False ) | Contradictory, My Dear Watson |
11,164,561 | %%time
!cp ../input/rapids/rapids.0.13.0 /opt/conda/envs/rapids.tar.gz
!cd /opt/conda/envs/ && tar -xzvf rapids.tar.gz
sys.path = ["/opt/conda/envs/rapids/lib"] + ["/opt/conda/envs/rapids/lib/python3.6"] + ["/opt/conda/envs/rapids/lib/python3.6/site-packages"] + sys.path
!cp /opt/conda/envs/rapids/lib/libxgboost.so /opt/conda/lib/<import_modules> | os.environ["WANDB_API_KEY"] = "0" | Contradictory, My Dear Watson |
11,164,561 | import cupy, cudf, cuml
from cuml.linear_model import LogisticRegression
from cuml.ensemble import RandomForestClassifier
from cuml.svm import SVC
import os<load_from_csv> | SEED=42
max_len = 48
EPOCHS = 3
BATCH_SIZE = 64
LR = 1e-5 | Contradictory, My Dear Watson |
11,164,561 | train = cudf.read_csv('/kaggle/input/titanic/train.csv')
test = cudf.read_csv('/kaggle/input/titanic/test.csv' )<drop_column> | from transformers import BertTokenizer, TFBertModel,TFAutoModel,TFXLMRobertaModel, AutoTokenizer
import matplotlib.pyplot as plt
import tensorflow as tf | Contradictory, My Dear Watson |
11,164,561 | train = train.drop(columns= ['Name','Ticket','Cabin'])
test = test.drop(columns=['Name','Ticket','Cabin'])<data_type_conversions> | train = pd.read_csv("../input/contradictory-my-dear-watson/train.csv") | Contradictory, My Dear Watson |
11,164,561 | train['Embarked_S'] =(train['Embarked'] == 'S' ).astype(int)
train['Embarked_C'] =(train['Embarked'] == 'C' ).astype(int)
train['Embarked_Q'] =(train['Embarked'] == 'Q' ).astype(int)
train['Gender'] =(train['Sex'] == 'male' ).astype(int )<data_type_conversions> | model_name = 'jplu/tf-xlm-roberta-large'
tokenizer = AutoTokenizer.from_pretrained(model_name)
| Contradictory, My Dear Watson |
11,164,561 | test['Embarked_S'] =(test['Embarked'] == 'S' ).astype(int)
test['Embarked_C'] =(test['Embarked'] == 'C' ).astype(int)
test['Embarked_Q'] =(test['Embarked'] == 'Q' ).astype(int)
test['Gender'] =(test['Sex'] == 'male' ).astype(int )<drop_column> | def encode_sentence(s):
tokens = list(tokenizer.tokenize(s))
tokens.append('[SEP]')
return tokenizer.convert_tokens_to_ids(tokens ) | Contradictory, My Dear Watson |
11,164,561 | train = train.drop(columns= ['Embarked','Sex',])
test = test.drop(columns= ['Embarked','Sex',])
<train_model> | encode_sentence("I love machine learning" ) | Contradictory, My Dear Watson |
11,164,561 | train.fillna(0,inplace=True)
test.fillna(0,inplace=True )<prepare_x_and_y> | def bert_encode(hypotheses, premises, tokenizer):
num_examples = len(hypotheses)
sentence1 = tf.ragged.constant([
encode_sentence(s)
for s in np.array(hypotheses)])
sentence2 = tf.ragged.constant([
encode_sentence(s)
for s in np.array(premises)])
cls = [tokenizer.convert_tokens_to_ids(['[CLS]'])]*sentence1.shape[0]
input_word_ids = tf.concat([cls, sentence1, sentence2], axis=-1)
input_mask = tf.ones_like(input_word_ids ).to_tensor()
type_cls = tf.zeros_like(cls)
type_s1 = tf.zeros_like(sentence1)
type_s2 = tf.ones_like(sentence2)
input_type_ids = tf.concat(
[type_cls, type_s1, type_s2], axis=-1 ).to_tensor()
inputs = {
'input_word_ids': input_word_ids.to_tensor() ,
'input_mask': input_mask,
'input_type_ids': input_type_ids}
return inputs | Contradictory, My Dear Watson |
11,164,561 | X = train.drop(columns = ['Survived'])
y = train['Survived'].astype('int32' )<train_model> | train_input = bert_encode(train.premise.values, train.hypothesis.values, tokenizer ) | Contradictory, My Dear Watson |
11,164,561 | model = RandomForestClassifier(n_estimators = 100, max_depth = 6)
model.fit(X, y )<predict_on_test> | def build_model() :
bert_encoder = TFXLMRobertaModel.from_pretrained(model_name)
input_word_ids = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="input_word_ids")
input_mask = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="input_mask")
input_type_ids = tf.keras.Input(shape=(max_len,), dtype=tf.int32, name="input_type_ids")
embedding = bert_encoder([input_word_ids, input_mask, input_type_ids])[0]
output = tf.keras.layers.Dense(3, activation='softmax' )(embedding[:,0,:])
model = tf.keras.Model(inputs=[input_word_ids, input_mask, input_type_ids], outputs=output)
model.compile(tf.keras.optimizers.Adam(lr=LR), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
return model | Contradictory, My Dear Watson |
11,164,561 | yhat_train = model.predict(X, predict_model = 'CPU')
yhat_test = model.predict(test, predict_model = 'CPU' )<count_values> | eas = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=3,
verbose=1, mode='min', baseline=None, restore_best_weights=True ) | Contradictory, My Dear Watson |
11,164,561 | print(sum(y == yhat_train)/ len(y))<save_to_csv> | model.fit(train_input, train.label.values, epochs = EPOCHS, verbose = 1, batch_size = BATCH_SIZE, validation_split = 0.2, callbacks = [eas])
| Contradictory, My Dear Watson |
11,164,561 | submission = cudf.DataFrame({'PassengerId': test.PassengerId, 'Survived': yhat_test})
submission.to_csv('submission.csv', index=False)<load_from_csv> | test = pd.read_csv("../input/contradictory-my-dear-watson/test.csv")
test_input = bert_encode(test.premise.values, test.hypothesis.values, tokenizer ) | Contradictory, My Dear Watson |
11,164,561 | train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head()
<load_from_csv> | predictions = [np.argmax(i)for i in model.predict(test_input)]
| Contradictory, My Dear Watson |
11,164,561 | test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head(20 )<feature_engineering> | submission = test.id.copy().to_frame()
submission['prediction'] = predictions | Contradictory, My Dear Watson |
11,164,561 | <feature_engineering><EOS> | submission.to_csv("submission.csv", index = False ) | Contradictory, My Dear Watson |
11,093,125 | <SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<categorify> | os.environ["WANDB_API_KEY"] = "0"
!pip uninstall -y transformers
!pip install transformers
!pip install nlp
strategy = None | Contradictory, My Dear Watson |
11,093,125 | def impute_age(cols):
Age = cols[0]
Pclass = cols[1]
if pd.isnull(Age):
if Pclass == 1:
return 37
elif Pclass == 2:
return 29
else:
return 24
else:
return Age<feature_engineering> | original_train = pd.read_csv("../input/contradictory-my-dear-watson/train.csv")
original_train = sklearn.utils.shuffle(original_train)
original_train = sklearn.utils.shuffle(original_train)
validation_ratio = 0.2
nb_valid_examples = max(1, int(len(original_train)* validation_ratio))
original_valid = original_train[:nb_valid_examples]
original_train = original_train[nb_valid_examples:] | Contradictory, My Dear Watson |
11,093,125 | train_data['Age'] = train_data[['Age','Pclass']].apply(impute_age,axis=1 )<feature_engineering> | print(f"original - training: {len(original_train)} examples")
original_train.head(10 ) | Contradictory, My Dear Watson |
11,093,125 | test_data['Age'] = test_data[['Age','Pclass']].apply(impute_age,axis=1 )<save_to_csv> | print(f"original - validation: {len(original_valid)} examples")
original_valid.head(10 ) | Contradictory, My Dear Watson |
11,093,125 | y = train_data["Survived"]
features = ["Pclass","Sex","Age", "Family_tot", "Fare"]
X = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X, y)
predictions = model.predict(X_test)
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
output.to_csv('my_submission.csv', index=False)
print("Your submission was successfully saved!" )<set_options> | original_test = pd.read_csv(".. /input/contradictory-my-dear-watson/test.csv")
print(f"original - test: {len(original_test)} examples")
original_test.head(10 ) | Contradictory, My Dear Watson |
11,093,125 | if not sys.warnoptions:
warnings.simplefilter("ignore")
warnings.filterwarnings("ignore")
<compute_test_metric> | mnli = nlp.load_dataset(path='glue', name='mnli' ) | Contradictory, My Dear Watson |
11,093,125 | def RMSLE(pred,actual):
return np.sqrt(np.mean(np.power((np.log(pred+1) - np.log(actual+1)), 2)))<load_from_csv> | print(mnli, '\n')
print('The split names in MNLI dataset:')
for k in mnli:
print(' ', k)
print("
mnli['train'] is ", type(mnli['train']))
mnli['train'] | Contradictory, My Dear Watson |
11,093,125 | pd.set_option('mode.chained_assignment', None)
test = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-3/test.csv")
train = pd.read_csv("/kaggle/input/covid19-global-forecasting-week-3/train.csv")
train['Province_State'].fillna('', inplace=True)
test['Province_State'].fillna('', inplace=True)
train['Date'] = pd.to_datetime(train['Date'])
test['Date'] = pd.to_datetime(test['Date'])
train = train.sort_values(['Country_Region','Province_State','Date'])
test = test.sort_values(['Country_Region','Province_State','Date'] )<feature_engineering> | print('The number of training examples in mnli dataset:', mnli['train'].num_rows)
print('The number of validation examples in mnli dataset - part 1:', mnli['validation_matched'].num_rows)
print('The number of validation examples in mnli dataset - part 2:', mnli['validation_mismatched'].num_rows, '\n')
print('The class names in mnli dataset:', mnli['train'].features['label'].names)
print('The feature names in mnli dataset:', list(mnli['train'].features.keys()), '\n')
for elt in mnli['train']:
print('premise:', elt['premise'])
print('hypothesis:', elt['hypothesis'])
print('label:', elt['label'])
print('label name:', mnli['train'].features['label'].names[elt['label']])
print('idx', elt['idx'])
print('-' * 80)
if elt['idx'] >= 10:
break | Contradictory, My Dear Watson |
11,093,125 | feature_day = [1,20,50,100,200,500,1000]
def CreateInput(data):
feature = []
for day in feature_day:
data.loc[:,'Number day from ' + str(day)+ ' case'] = 0
if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0):
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max()
else:
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min()
for i in range(0, len(data)) :
if(data['Date'].iloc[i] > fromday):
day_denta = data['Date'].iloc[i] - fromday
data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days
feature = feature + ['Number day from ' + str(day)+ ' case']
return data[feature]
pred_data_all = pd.DataFrame()
for country in train['Country_Region'].unique() :
for province in train[(train['Country_Region'] == country)]['Province_State'].unique() :
df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]
df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
X_train = CreateInput(df_train)
y_train_confirmed = df_train['ConfirmedCases'].ravel()
y_train_fatalities = df_train['Fatalities'].ravel()
X_pred = CreateInput(df_test)
for day in sorted(feature_day,reverse = True):
feature_use = 'Number day from ' + str(day)+ ' case'
idx = X_train[X_train[feature_use] == 0].shape[0]
if(X_train[X_train[feature_use] > 0].shape[0] >= 20):
break
adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
adjusted_y_train_confirmed = y_train_confirmed[idx:]
adjusted_y_train_fatalities = y_train_fatalities[idx:]
idx = X_pred[X_pred[feature_use] == 0].shape[0]
adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1)
pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
max_train_date = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].max()
min_test_date = pred_data['Date'].min()
model = ExponentialSmoothing(adjusted_y_train_confirmed, trend = 'additive' ).fit()
y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_confirmed = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['ConfirmedCases'].values
y_hat_confirmed = np.concatenate(( y_train_confirmed,y_hat_confirmed), axis = 0)
model = ExponentialSmoothing(adjusted_y_train_fatalities, trend = 'additive' ).fit()
y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_fatalities = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['Fatalities'].values
y_hat_fatalities = np.concatenate(( y_train_fatalities,y_hat_fatalities), axis = 0)
pred_data['ConfirmedCases_hat'] = y_hat_confirmed
pred_data['Fatalities_hat'] = y_hat_fatalities
pred_data_all = pred_data_all.append(pred_data)
df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0
df_val_1 = df_val.copy()<feature_engineering> | mnli_train_df = pd.DataFrame(mnli['train'])
mnli_valid_1_df = pd.DataFrame(mnli['validation_matched'])
mnli_valid_2_df = pd.DataFrame(mnli['validation_mismatched'])
mnli_train_df = mnli_train_df[['premise', 'hypothesis', 'label']]
mnli_valid_1_df = mnli_valid_1_df[['premise', 'hypothesis', 'label']]
mnli_valid_2_df = mnli_valid_2_df[['premise', 'hypothesis', 'label']]
mnli_train_df['lang_abv'] = 'en'
mnli_valid_1_df['lang_abv'] = 'en'
mnli_valid_2_df['lang_abv'] = 'en' | Contradictory, My Dear Watson |
11,093,125 | feature_day = [1,20,50,100,200,500,1000]
def CreateInput(data):
feature = []
for day in feature_day:
data.loc[:,'Number day from ' + str(day)+ ' case'] = 0
if(train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].count() > 0):
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['ConfirmedCases'] < day)]['Date'].max()
else:
fromday = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].min()
for i in range(0, len(data)) :
if(data['Date'].iloc[i] > fromday):
day_denta = data['Date'].iloc[i] - fromday
data['Number day from ' + str(day)+ ' case'].iloc[i] = day_denta.days
feature = feature + ['Number day from ' + str(day)+ ' case']
return data[feature]
pred_data_all = pd.DataFrame()
for country in train['Country_Region'].unique() :
for province in train[(train['Country_Region'] == country)]['Province_State'].unique() :
df_train = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]
df_test = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
X_train = CreateInput(df_train)
y_train_confirmed = df_train['ConfirmedCases'].ravel()
y_train_fatalities = df_train['Fatalities'].ravel()
X_pred = CreateInput(df_test)
for day in sorted(feature_day,reverse = True):
feature_use = 'Number day from ' + str(day)+ ' case'
idx = X_train[X_train[feature_use] == 0].shape[0]
if(X_train[X_train[feature_use] > 0].shape[0] >= 20):
break
adjusted_X_train = X_train[idx:][feature_use].values.reshape(-1, 1)
adjusted_y_train_confirmed = y_train_confirmed[idx:]
adjusted_y_train_fatalities = y_train_fatalities[idx:]
idx = X_pred[X_pred[feature_use] == 0].shape[0]
adjusted_X_pred = X_pred[idx:][feature_use].values.reshape(-1, 1)
pred_data = test[(test['Country_Region'] == country)&(test['Province_State'] == province)]
max_train_date = train[(train['Country_Region'] == country)&(train['Province_State'] == province)]['Date'].max()
min_test_date = pred_data['Date'].min()
model = SARIMAX(adjusted_y_train_confirmed, order=(1,1,0),
measurement_error=True ).fit(disp=False)
y_hat_confirmed = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_confirmed = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['ConfirmedCases'].values
y_hat_confirmed = np.concatenate(( y_train_confirmed,y_hat_confirmed), axis = 0)
model = SARIMAX(adjusted_y_train_fatalities, order=(1,1,0),
measurement_error=True ).fit(disp=False)
y_hat_fatalities = model.forecast(pred_data[pred_data['Date'] > max_train_date].shape[0])
y_train_fatalities = train[(train['Country_Region'] == country)&(train['Province_State'] == province)&(train['Date'] >= min_test_date)]['Fatalities'].values
y_hat_fatalities = np.concatenate(( y_train_fatalities,y_hat_fatalities), axis = 0)
pred_data['ConfirmedCases_hat'] = y_hat_confirmed
pred_data['Fatalities_hat'] = y_hat_fatalities
pred_data_all = pred_data_all.append(pred_data)
df_val = pd.merge(pred_data_all,train[['Date','Country_Region','Province_State','ConfirmedCases','Fatalities']],on=['Date','Country_Region','Province_State'], how='left')
df_val.loc[df_val['Fatalities_hat'] < 0,'Fatalities_hat'] = 0
df_val.loc[df_val['ConfirmedCases_hat'] < 0,'ConfirmedCases_hat'] = 0
df_val_2 = df_val.copy()<compute_test_metric> | snli = nlp.load_dataset(path='snli')
print('The number of training examples in snli dataset:', snli['train'].num_rows)
print('The number of validation examples in snli dataset:', snli['validation'].num_rows, '\n')
print('The class names in snli dataset:', snli['train'].features['label'].names)
print('The feature names in snli dataset:', list(snli['train'].features.keys()), '\n')
for idx, elt in enumerate(snli['train']):
print('premise:', elt['premise'])
print('hypothesis:', elt['hypothesis'])
print('label:', elt['label'])
print('label name:', snli['train'].features['label'].names[elt['label']])
print('-' * 80)
if idx >= 10:
break | Contradictory, My Dear Watson |
11,093,125 | method_list = ['Exponential Smoothing','SARIMA']
method_val = [df_val_1,df_val_2]
for i in range(0,2):
df_val = method_val[i]
method_score = [method_list[i]] + [RMSLE(df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases'].values,df_val[(df_val['ConfirmedCases'].isnull() == False)]['ConfirmedCases_hat'].values)] + [RMSLE(df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities'].values,df_val[(df_val['Fatalities'].isnull() == False)]['Fatalities_hat'].values)]
print(method_score )<save_to_csv> | snli_train_df = pd.DataFrame(snli['train'])
snli_valid_df = pd.DataFrame(snli['validation'])
snli_train_df = snli_train_df[['premise', 'hypothesis', 'label']]
snli_valid_df = snli_valid_df[['premise', 'hypothesis', 'label']]
snli_train_df['lang_abv'] = 'en'
snli_valid_df['lang_abv'] = 'en' | Contradictory, My Dear Watson |
11,093,125 | df_val = df_val_2
submission = df_val[['ForecastId','ConfirmedCases_hat','Fatalities_hat']]
submission.columns = ['ForecastId','ConfirmedCases','Fatalities']
submission.to_csv('submission.csv', index=False)
submission<set_options> | xnli = nlp.load_dataset(path='xnli')
print('The number of validation examples in xnli dataset:', xnli['validation'].num_rows, '\n')
print('The class names in xnli dataset:', xnli['validation'].features['label'].names)
print('The feature names in xnli dataset:', list(xnli['validation'].features.keys()), '\n')
for idx, elt in enumerate(xnli['validation']):
print('premise:', elt['premise'])
print('hypothesis:', elt['hypothesis'])
print('label:', elt['label'])
print('label name:', xnli['validation'].features['label'].names[elt['label']])
print('-' * 80)
if idx >= 3:
break | Contradictory, My Dear Watson |
11,093,125 | <load_from_csv><EOS> | buffer = {
'premise': [],
'hypothesis': [],
'label': [],
'lang_abv': []
}
for x in xnli['validation']:
label = x['label']
for idx, lang in enumerate(x['hypothesis']['language']):
hypothesis = x['hypothesis']['translation'][idx]
premise = x['premise'][lang]
buffer['premise'].append(premise)
buffer['hypothesis'].append(hypothesis)
buffer['label'].append(label)
buffer['lang_abv'].append(lang)
xnli_valid_df = pd.DataFrame(buffer)
xnli_valid_df = xnli_valid_df[['premise', 'hypothesis', 'label', 'lang_abv']] | Contradictory, My Dear Watson |
10,974,778 | <SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<drop_column> | !pip install transformers==3.0.2
!pip install nlp | Contradictory, My Dear Watson |
10,974,778 | df = df.drop(["Name","Ticket"], axis=1 )<filter> | mnli = load_dataset(path='glue', name='mnli')
| Contradictory, My Dear Watson |
10,974,778 | missing_columns = df.columns[df.isnull().any() ]
df.isnull().sum(axis=0).loc[missing_columns]<drop_column> | xnli = pd.read_csv('../input/xnli-organized/xnli_df.csv')
xnli = xnli.rename(columns = {'Unnamed: 0': 'lang_abv', '0' : 'premise', '1': 'hypothesis', '0.1': 'label' })
xnli.head() | Contradictory, My Dear Watson |
10,974,778 | df = df.drop(["Cabin"], axis=1 )<load_from_csv> | train_df = pd.read_csv('.. /input/contradictory-my-dear-watson/train.csv')
print('Training Data, the size of the dataset is: {}\n'.format(train_df.shape))
test_df = pd.read_csv('../input/contradictory-my-dear-watson/test.csv') | Contradictory, My Dear Watson |
10,974,778 | test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
X_test = test_data.copy()
X_test = X_test.drop(["Name","Ticket","Cabin","PassengerId"], axis=1)
X_test.head()<categorify> | train_df = pd.concat([train_df, xnli, mnli_df])
train_df = train_df[train_df['label'] != -1]
mnli_df = None
snli_df = None
print('the shape of the whole DF to be used is: ' + str(train_df.shape)) | Contradictory, My Dear Watson |
10,974,778 | y = df.Survived
X = df.drop(['Survived'], axis=1)
numerical_cols = [cname for cname in X if X[cname].dtype in ['int64', 'float64']]
categorical_cols = [cname for cname in X if X[cname].dtype == "object"]
numerical_transformer = SimpleImputer(strategy='mean')
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='most_frequent')) ,
('onehot', OneHotEncoder(handle_unknown='ignore', sparse=False))
])
preprocessor_temp = ColumnTransformer(
transformers=[
('num', numerical_transformer, numerical_cols),
('cat', categorical_transformer, categorical_cols)
])
preprocessor = Pipeline(steps=[
('preprocessor_temp', preprocessor_temp),
('scaler', StandardScaler())])
preprocessor.fit(X )<filter> | train_df = train_df[train_df.duplicated() == False]
print('the shape of the whole DF to be used is: ' + str(train_df.shape)) | Contradictory, My Dear Watson |
10,974,778 | X.iloc[0,:]<normalization> | np.random.seed(123)
max_len = 50
Bert_model = "bert-large-uncased"
Bert_tokenizer = BertTokenizer.from_pretrained(Bert_model)
def tokeniZer(dataset,tokenizer):
encoded_list = []
type_id_list = np.zeros(( dataset.shape[0], max_len))
mask_list = np.zeros(( dataset.shape[0], max_len))
for i in range(dataset.shape[0]):
datapoint = '[CLS] ' + dataset['premise'][i] + ' [SEP]' + dataset['hypothesis'][i] + ' [SEP]'
datapoint = tokenizer.tokenize(datapoint)
datapoint = tokenizer.convert_tokens_to_ids(datapoint)
encoded_list.append(datapoint)
encoded_list = pad_sequences(encoded_list, maxlen = max_len, padding = 'post')
for i in range(encoded_list.shape[0]):
flag = 0
a = encoded_list[i]
for j in range(len(a)) :
if flag == 0:
type_id_list[i,j] = 0
else:
type_id_list[i,j] = 1
if encoded_list[i,j] == 102:
flag = 1
if encoded_list[i,j] == 0:
mask_list[i,j] = 0
else:
mask_list[i,j] = 1
return encoded_list,mask_list,type_id_list
| Contradictory, My Dear Watson |
10,974,778 | dim_trans = preprocessor.transform(X ).shape[1]
preprocessor.transform(X)[0,:]<train_on_grid> | def mish(x):
return x*tanh(softplus(x))
get_custom_objects() ["mish"] = Activation(mish ) | Contradictory, My Dear Watson |
10,974,778 | params_knn = {"kNN__n_neighbors": np.arange(3,10), "kNN__weights":["uniform", "distance"]}
kNN = KNeighborsClassifier()
pipe = Pipeline(steps=[('preprocessor', preprocessor),('kNN', kNN)])
knnpipe = GridSearchCV(pipe, params_knn, n_jobs=-1)
knnpipe.fit(X, y)
print("Best parameter(CV score=%0.3f):" % knnpipe.best_score_)
print(knnpipe.best_params_)
print()
print("Accuracy training set %0.3f" % accuracy_score(y, knnpipe.predict(X)) )<train_on_grid> | tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu ) | Contradictory, My Dear Watson |
10,974,778 | params_logit = {"logit__penalty": ["l1","l2"], "logit__C":np.arange(0.5,1.5,0.1)}
logit = LogisticRegression()
pipe = Pipeline(steps=[('preprocessor', preprocessor),('logit', logit)])
logitpipe = GridSearchCV(pipe, params_logit, n_jobs=-1)
logitpipe.fit(X, y)
print("Best parameter(CV score=%0.3f):" % logitpipe.best_score_)
print(logitpipe.best_params_)
print()
print("Accuracy training set %0.3f" % accuracy_score(y, logitpipe.predict(X)) )<train_on_grid> | def create_BERT(random_seed):
tf.random.set_seed(random_seed)
with tpu_strategy.scope() :
transformer_encoder = TFAutoModel.from_pretrained(Bert_model)
input_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_layer")
input_masks = Input(shape =(max_len,), dtype = tf.int32, name = 'input_mask')
input_type_id = Input(shape =(max_len,), dtype = tf.int32, name = 'input_type_id')
sequence_output = transformer_encoder([input_ids, input_masks, input_type_id])[0]
cls_token = sequence_output[:, 0, :]
output_layer = Dense(3, activation='softmax' )(cls_token)
model = Model(inputs=[input_ids, input_masks, input_type_id], outputs = output_layer)
model.summary()
model.compile(Adam(lr=1e-5),
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
return model | Contradictory, My Dear Watson |
10,974,778 | params_svm = {"svm__kernel": ["linear","poly","rbf"], "svm__C":np.arange(0.5,1.5,0.1)}
svm = SVC()
pipe = Pipeline(steps=[('preprocessor', preprocessor),('svm', svm)])
svmpipe = GridSearchCV(pipe, params_svm, n_jobs=-1)
svmpipe.fit(X, y)
print("Best parameter(CV score=%0.3f):" % svmpipe.best_score_)
print(svmpipe.best_params_)
print()
print("Accuracy training set %0.3f" % accuracy_score(y, svmpipe.predict(X)) )<train_model> | callbacks = [tf.keras.callbacks.EarlyStopping(patience = 2, monitor = 'val_loss', \
restore_best_weights = True, mode = 'min')]
shuffled_data = shuffle(train_df ).reset_index(drop = True)
train_df = None
batch_size = 128
| Contradictory, My Dear Watson |
10,974,778 | X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, random_state = 1)
preprocessor.fit(X_train)
X_train = preprocessor.transform(X_train)
X_valid = preprocessor.transform(X_valid)
network = Sequential([
Dense(16, activation='relu', input_shape=(dim_trans,)) ,
Dense(16, activation='relu'),
Dense(1, activation='sigmoid'),
])
network.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
es = EarlyStopping(monitor='val_accuracy', mode='max', verbose=1, patience=10)
mc = ModelCheckpoint('best_model.h5', monitor='val_accuracy', mode='max', verbose=1, save_best_only=True)
history = network.fit(X_train,
y_train,
epochs=1000,
verbose=1,
validation_data=(X_valid,y_valid),
callbacks=[es, mc])
saved_model = load_model('best_model.h5')
print("Accuracy training set %0.3f" % accuracy_score(y, saved_model.predict(preprocessor.transform(X)) >0.5))<train_model> | XLM_model = "jplu/tf-xlm-roberta-large"
xlm_tokenizer = AutoTokenizer.from_pretrained(XLM_model)
X_train_ids, X_train_masks, _ = tokeniZer(shuffled_data,xlm_tokenizer ) | Contradictory, My Dear Watson |
10,974,778 | X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, random_state = 1)
preprocessor.fit(X_train)
X_train = preprocessor.transform(X_train)
X_valid = preprocessor.transform(X_valid)
xgb = XGBClassifier(n_estimators=1000, learning_rate=0.05, n_jobs=-1)
xgb.fit(X_train, y_train,
early_stopping_rounds=5,
eval_set=[(X_valid, y_valid)],
verbose=True)
print("Accuracy training set %0.3f" % accuracy_score(y, xgb.predict(preprocessor.transform(X),ntree_limit=xgb.best_ntree_limit)) )<save_to_csv> | def create_xlm(transformer_layer, random_seed, learning_rate = 1e-5):
tf.keras.backend.clear_session()
tf.random.set_seed(random_seed)
with tpu_strategy.scope() :
input_ids = Input(shape =(max_len,), dtype = tf.int32)
input_masks = Input(shape =(max_len,), dtype = tf.int32)
roberta = TFAutoModel.from_pretrained(transformer_layer)
roberta = roberta([input_ids, input_masks])[0]
out = GlobalAveragePooling1D()(roberta)
out = Dense(3, activation = 'softmax' )(out)
model = Model(inputs = [input_ids, input_masks], outputs = out)
model.compile(
optimizer = Adam(lr = learning_rate),
loss = 'sparse_categorical_crossentropy',
metrics = ['accuracy'])
model.summary()
return model
Xlm = create_xlm(XLM_model ,123443334, 1e-5 ) | Contradictory, My Dear Watson |
10,974,778 | predictions = xgb.predict(preprocessor.transform(X_test),ntree_limit=xgb.best_ntree_limit)
output = pd.DataFrame({'PassengerId': test_data.PassengerId, 'Survived': predictions})
output.to_csv('my_submission.csv', index=False)
print("Your submission was successfully saved!" )<import_modules> | history_xlm = Xlm.fit([X_train_ids, X_train_masks], shuffled_data['label'],
batch_size = batch_size,
validation_split = 0.2,
epochs = 39, callbacks = callbacks ) | Contradictory, My Dear Watson |
10,974,778 | <load_from_csv><EOS> | input_ids_test_xml, input_masks_test_xml, _ = tokeniZer(test_df, xlm_tokenizer)
predictions_xlm = Xlm.predict([input_ids_test_xml, input_masks_test_xml])
predictions = predictions_xlm
final = np.argmax(predictions, axis = 1)
submission = pd.DataFrame()
submission['id'] = test_df['id']
submission['prediction'] = final.astype(np.int32)
submission.to_csv('submission.csv', index = False ) | Contradictory, My Dear Watson |
11,037,852 | <SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<prepare_x_and_y> | !pip install --quiet googletrans
warnings.filterwarnings('ignore')
print("Currently using Tensorflow version " + tf.__version__ ) | Contradictory, My Dear Watson |
11,037,852 | y_train = train.iloc[:, 1].values<drop_column> | SEED = 34
def seed_everything(seed):
os.environ['PYTHONHASHSEED']=str(seed)
tf.random.set_seed(seed)
np.random.seed(seed)
random.seed(seed)
seed_everything(SEED ) | Contradictory, My Dear Watson |
11,037,852 | extra_eda_cols = ['SibSp', 'Parch', 'Family_Size', 'Fare_Range', 'Alone']
train = train.drop(extra_eda_cols, axis = 1, inplace = False)
train.head()<drop_column> | DEVICE = 'TPU'
if DEVICE == "TPU":
print("connecting to TPU...")
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print('Running on TPU ', tpu.master())
except ValueError:
print("Could not connect to TPU")
tpu = None
if tpu:
try:
print("initializing TPU...")
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
print("TPU initialized")
except _:
print("failed to initialize TPU")
else:
DEVICE = "GPU"
if DEVICE != "TPU":
print("Using default strategy for CPU and single GPU")
strategy = tf.distribute.get_strategy()
if DEVICE == "GPU":
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
AUTO = tf.data.experimental.AUTOTUNE
REPLICAS = strategy.num_replicas_in_sync
print(f'REPLICAS: {REPLICAS}')
BATCH_SIZE = 16 * REPLICAS | Contradictory, My Dear Watson |
11,037,852 | extra_cols = ['PassengerId', 'Name', 'Ticket', 'Fare', 'Cabin']
train = train.drop(extra_cols, axis = 1, inplace = False)
train.head()<drop_column> | train = pd.read_csv("../input/contradictory-my-dear-watson/train.csv")
test = pd.read_csv("../input/contradictory-my-dear-watson/test.csv")
print(f'Train shape: {train.shape}')
train.head() | Contradictory, My Dear Watson |
11,037,852 | x_train = train.drop('Survived', axis = 1, inplace = False)
print(x_train )<count_missing_values> | print(f"Premise: {train['premise'].values[0]}")
print(f"Hypothesis: {train['hypothesis'].values[0]}")
print(f"Label: {train['label'].values[0]}" ) | Contradictory, My Dear Watson |
11,037,852 | train.isnull().sum()<count_missing_values> | print(f"Premise: {train['premise'].values[1]}")
print(f"Hypothesis: {train['hypothesis'].values[1]}")
print(f"Label: {train['label'].values[1]}" ) | Contradictory, My Dear Watson |
11,037,852 | train.isnull().sum()<categorify> | def back_translate(sequence, PROB = 1):
languages = ['en', 'fr', 'th', 'tr', 'ur', 'ru', 'bg', 'de', 'ar', 'zh-cn', 'hi',
'sw', 'vi', 'es', 'el']
translator = Translator()
org_lang = translator.detect(sequence ).lang
random_lang = np.random.choice([lang for lang in languages if lang is not org_lang])
if org_lang in languages:
translated = translator.translate(sequence, dest = random_lang ).text
translated_back = translator.translate(translated, dest = org_lang ).text
if np.random.uniform(0, 1)<= PROB:
output_sequence = translated_back
else:
output_sequence = sequence
else:
output_sequence = sequence
return output_sequence
for i in range(5):
output = back_translate('I genuinely have no idea what the output of this sequence of words will be')
print(output ) | Contradictory, My Dear Watson |
11,037,852 | imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
imputer.fit(x_train[['Age']])
x_train[['Age']]= imputer.transform(x_train[['Age']])
imputers = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
imputers.fit(x_train[['Embarked']])
x_train[['Embarked']] = imputers.transform(x_train[['Embarked']])<count_missing_values> | train_aug = pd.read_csv('../input/contradictorywatsontwicetranslatedaug/translation_aug_train.csv')
train_aug.head() | Contradictory, My Dear Watson |
x_train.isnull().sum().any()<categorify> | train_twice_aug = pd.read_csv('../input/contradictorywatsontwicetranslatedaug/twice_translated_aug_train.csv')
train_twice_aug.head() | Contradictory, My Dear Watson |
11,037,852 | label_encoder = preprocessing.LabelEncoder()
x_train['Sex']= label_encoder.fit_transform(x_train['Sex'])
x_train['Embarked'] = label_encoder.fit_transform(x_train['Embarked'])<normalization> | train_vi = pd.read_csv("../input/contradictorytranslatedtrain/train_vi.csv")
train_hi = pd.read_csv("../input/contradictorytranslatedtrain/train_hi.csv")
train_bg = pd.read_csv("../input/contradictorytranslatedtrain/train_bg.csv") | Contradictory, My Dear Watson |
11,037,852 | sc = StandardScaler()
x_train = sc.fit_transform(x_train )<count_missing_values> | !pip install --quiet transformers
roberta_base = "jplu/tf-xlm-roberta-base"
roberta_large = 'jplu/tf-xlm-roberta-large' | Contradictory, My Dear Watson |
test.isnull().sum().any()<categorify> | test_bt = pd.read_csv('../input/contradictorywatsontwicetranslatedaug/translation_aug_test.csv')
test_bt_twice = pd.read_csv('../input/contradictorywatsontwicetranslatedaug/translation_aug_test.csv') | Contradictory, My Dear Watson |
11,037,852 | imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
imputer.fit(test[['Age']])
test[['Age']]= imputer.transform(test[['Age']])
imputers = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
imputers.fit(test[['Embarked']])
test[['Embarked']]= imputers.transform(test[['Embarked']] )<drop_column> | TOKENIZER = AutoTokenizer.from_pretrained(roberta_large)
def to_tf_dataset(dataset, max_len, repeat = False, shuffle = False, labeled = True, batch_size = BATCH_SIZE):
dataset_text = dataset[['premise', 'hypothesis']].values.tolist()
dataset_enc = TOKENIZER.batch_encode_plus(dataset_text, pad_to_max_length = True, max_length = max_len)
if labeled:
tf_dataset = tf.data.Dataset.from_tensor_slices(( dataset_enc['input_ids'], dataset['label']))
else:
tf_dataset = tf.data.Dataset.from_tensor_slices(( dataset_enc['input_ids']))
if repeat: tf_dataset = tf_dataset.repeat()
if shuffle:
tf_dataset = tf_dataset.shuffle(2048)
opt = tf.data.Options()
opt.experimental_deterministic = False
tf_dataset = tf_dataset.with_options(opt)
tf_dataset = tf_dataset.batch(batch_size)
tf_dataset = tf_dataset.prefetch(AUTO)
return tf_dataset | Contradictory, My Dear Watson |
11,037,852 | extra_cols_test = ['PassengerId', 'Name', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin']
test = test.drop(extra_cols_test, axis = 1, inplace = False)
test.head()<categorify> | LR_START = 1e-6
LR_MAX = 1e-6 * 8
LR_MIN = 1e-6
LR_RAMPUP_EPOCHS = 2
LR_SUSTAIN_EPOCHS = 0
LR_DECAY =.8
def lrfn_step(epoch):
if epoch < LR_RAMPUP_EPOCHS:
lr =(LR_MAX - LR_START)/ LR_RAMPUP_EPOCHS * epoch + LR_START
elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
lr = LR_MAX
else:
lr = LR_MAX * LR_DECAY**(( epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS)//2)
return lr
def lrfn_smooth(epoch):
if epoch < LR_RAMPUP_EPOCHS:
lr =(LR_MAX - LR_START)/ LR_RAMPUP_EPOCHS * epoch + LR_START
elif epoch < LR_RAMPUP_EPOCHS + LR_SUSTAIN_EPOCHS:
lr = LR_MAX
else:
lr =(LR_MAX - LR_MIN)* LR_DECAY**(epoch - LR_RAMPUP_EPOCHS - LR_SUSTAIN_EPOCHS)+ LR_MIN
return lr
lr_callback_step = tf.keras.callbacks.LearningRateScheduler(lrfn_step, verbose = True)
lr_callback_smooth = tf.keras.callbacks.LearningRateScheduler(lrfn_smooth, verbose = True)
rng = [i for i in range(25)]
y1 = [lrfn_step(x)for x in rng]
y2 = [lrfn_smooth(x)for x in rng]
fix, ax = plt.subplots(1,2, figsize =(15, 5))
ax[0].plot(rng, y1)
ax[1].plot(rng, y2)
plt.tight_layout()
print("Learning rate schedule for step schedule: {:.3g} to {:.3g} to {:.3g}".format(y1[0], max(y1), y1[-1]))
print("Learning rate schedule for smooth schedule: {:.3g} to {:.3g} to {:.3g}".format(y2[0], max(y2), y2[-1])) | Contradictory, My Dear Watson |
11,037,852 | label_encoder = preprocessing.LabelEncoder()
test['Sex']= label_encoder.fit_transform(test['Sex'])
test['Embarked']= label_encoder.fit_transform(test['Embarked'] )<normalization> | def build_model(transformer_layer, max_len, learning_rate):
with strategy.scope() :
input_ids = tf.keras.Input(shape =(max_len,), dtype = tf.int32)
roberta = TFAutoModel.from_pretrained(transformer_layer)
roberta = roberta(input_ids)[0]
out = roberta[:, 0, :]
out = tf.keras.layers.Dense(3, activation = 'softmax' )(out)
model = tf.keras.Model(inputs = input_ids, outputs = out)
model.compile(
optimizer = tf.keras.optimizers.Adam(lr = learning_rate),
loss = 'sparse_categorical_crossentropy',
metrics = ['accuracy'])
return model | Contradictory, My Dear Watson |
11,037,852 | sc_x = StandardScaler()
test = sc_x.fit_transform(test )<train_model> | LR_RATE = 1e-5
EPOCHS = 10
FOLDS = 4
MAX_LEN = 85
STEPS_PER_EPOCH = len(train)// BATCH_SIZE
TTA = 3
VERBOSE = 2
preds = np.zeros(( len(test), 3))
preds_tta = np.zeros(( len(test), 3))
skf = KFold(n_splits=FOLDS,shuffle=True,random_state=SEED)
for fold,(train_index,val_index)in enumerate(skf.split(train)) :
if DEVICE=='TPU':
if tpu: tf.tpu.experimental.initialize_tpu_system(tpu)
K.clear_session()
model = build_model(roberta_large, max_len = MAX_LEN, learning_rate = LR_RATE)
sv = tf.keras.callbacks.ModelCheckpoint(f'fold-{fold}.h5', monitor = 'val_loss', verbose = 0,
save_best_only = True, save_weights_only = True, mode = 'min')
train_ds = to_tf_dataset(train.loc[train_index], labeled = True, shuffle = True, repeat = True, max_len = MAX_LEN)
val_ds = to_tf_dataset(train.loc[val_index], labeled = True, shuffle = False, repeat = False, max_len = MAX_LEN)
print('\n')
print('Training...'); print('')
history = model.fit(train_ds, validation_data = val_ds, callbacks = [sv],
epochs = EPOCHS, steps_per_epoch = STEPS_PER_EPOCH,
verbose = VERBOSE); print('')
print('Loading best model...')
model.load_weights(f'fold-{fold}.h5')
print('Predicting validation with TTA...')
val_df = train.loc[val_index]
val_df_bt = train_aug.loc[val_index]
val_df_bt_twice = train_twice_aug.loc[val_index]
val_tta1 = to_tf_dataset(val_df, shuffle = False, labeled = False, repeat = False, max_len = MAX_LEN)
val_tta2 = to_tf_dataset(val_df_bt, shuffle = False, labeled = False, repeat = False, max_len = MAX_LEN)
val_tta3 = to_tf_dataset(val_df_bt_twice, shuffle = False, labeled = False, repeat = False, max_len = MAX_LEN)
val_pred1 = model.predict(val_tta1, verbose = VERBOSE)
val_pred2 = model.predict(val_tta2, verbose = VERBOSE)
val_pred3 = model.predict(val_tta3, verbose = VERBOSE)
val_preds =(val_pred1 + val_pred2 + val_pred3)/ TTA
print(f"Without TTA: {accuracy_score(val_pred1.argmax(axis = 1), val_df['label'])}")
print(f"With TTA: {accuracy_score(val_preds.argmax(axis = 1), val_df['label'])}")
print('')
print('Predicting OOF with TTA...')
test_tta1 = to_tf_dataset(test, shuffle = False, labeled = False, repeat = False, max_len = MAX_LEN)
test_tta2 = to_tf_dataset(test_bt, shuffle = False, labeled = False, repeat = False, max_len = MAX_LEN)
test_tta3 = to_tf_dataset(test_bt_twice, shuffle = False, labeled = False, repeat = False, max_len = MAX_LEN)
pred1 = model.predict(test_tta1, verbose = VERBOSE)
pred2 = model.predict(test_tta2, verbose = VERBOSE)
pred3 = model.predict(test_tta3, verbose = VERBOSE)
preds_tta +=(pred1 + pred2 + pred3)/ TTA / FOLDS
preds += pred1 / FOLDS
os.remove(f"/kaggle/working/fold-{fold}.h5")
del model ; z = gc.collect() | Contradictory, My Dear Watson |
11,037,852 | classifier = LogisticRegression(random_state = 0)
classifier.fit(x_train, y_train )<predict_on_test> | USE_TTA = False | Contradictory, My Dear Watson |
11,037,852 | y_pred = classifier.predict(test)
acc_Tree = cross_val_score(classifier, x_train, y_train, cv=10, scoring='accuracy' ).mean()
acc_Tree<train_model> | if USE_TTA:
submission = pd.DataFrame()
submission['id'] = test['id']
submission['prediction'] = preds_tta.argmax(axis = 1)
else:
submission = pd.DataFrame()
submission['id'] = test['id']
submission['prediction'] = preds.argmax(axis = 1)
submission.head() | Contradictory, My Dear Watson |
11,037,852 | <predict_on_test><EOS> | submission.to_csv('submission.csv', index = False)
print('Submission saved' ) | Contradictory, My Dear Watson |
11,038,328 | <SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<choose_model_class> | import os
import numpy as np
import pandas as pd
from kaggle_datasets import KaggleDatasets
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint
import transformers
from transformers import TFAutoModel, AutoTokenizer
from tqdm.notebook import tqdm
import plotly.express as px | Contradictory, My Dear Watson |
11,038,328 | classifier = DecisionTreeClassifier()
classifier.fit(x_train, y_train)
y_pred = classifier.predict(test )<predict_on_test> | try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
print('Running on TPU ', tpu.master())
except ValueError:
strategy = tf.distribute.get_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync ) | Contradictory, My Dear Watson |
11,038,328 | y_pred = classifier.predict(test)
acc_Tree = cross_val_score(classifier, x_train, y_train, cv=10, scoring='accuracy' ).mean()
acc_Tree<train_model> | n_epochs = 250
max_len = 80
batch_size = 16 * strategy.num_replicas_in_sync | Contradictory, My Dear Watson |
11,038,328 | classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
classifier.fit(x_train, y_train )<predict_on_test> | train = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/train.csv')
test = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/test.csv')
submission = pd.read_csv('/kaggle/input/contradictory-my-dear-watson/sample_submission.csv' ) | Contradictory, My Dear Watson |
11,038,328 | y_pred = classifier.predict(test)
acc_Tree = cross_val_score(classifier, x_train, y_train, cv=10, scoring='accuracy' ).mean()
acc_Tree<predict_on_test> | tokenizer = AutoTokenizer.from_pretrained('jplu/tf-xlm-roberta-large' ) | Contradictory, My Dear Watson |
11,038,328 | classifier = SVC()
classifier.fit(x_train, y_train)
y_pred = classifier.predict(test )<predict_on_test> | train_text = train[['premise', 'hypothesis']].values.tolist()
test_text = test[['premise', 'hypothesis']].values.tolist()
train_encoded = tokenizer.batch_encode_plus(
train_text,
pad_to_max_length=True,
max_length=max_len
)
test_encoded = tokenizer.batch_encode_plus(
test_text,
pad_to_max_length=True,
max_length=max_len
) | Contradictory, My Dear Watson |
11,038,328 | y_pred = classifier.predict(test)
acc_Tree = cross_val_score(classifier, x_train, y_train, cv=10, scoring='accuracy' ).mean()
acc_Tree<prepare_output> | x_train, x_valid, y_train, y_valid = train_test_split(
train_encoded['input_ids'], train.label.values,
test_size=0.20, random_state=2020
)
x_test = test_encoded['input_ids'] | Contradictory, My Dear Watson |
11,038,328 | accuracy = {'Model' : ['Logistic Regression', 'K- Nearest Neighbor', 'SVC', 'Decision Tree', 'Random Forest'],
'Accuracy' : [0.7890, 0.8047, 0.8226, 0.7935, 0.8037]
}
all_cross_val_scores = pd.DataFrame(accuracy, columns = ['Model', 'Accuracy'])
all_cross_val_scores.head()<save_to_csv> | auto = tf.data.experimental.AUTOTUNE
train_dataset =(
tf.data.Dataset
.from_tensor_slices(( x_train, y_train))
.repeat()
.shuffle(2048)
.batch(batch_size)
.prefetch(auto)
)
valid_dataset =(
tf.data.Dataset
.from_tensor_slices(( x_valid, y_valid))
.batch(batch_size)
.cache()
.prefetch(auto)
)
test_dataset =(
tf.data.Dataset
.from_tensor_slices(x_test)
.batch(batch_size)
) | Contradictory, My Dear Watson |
test_df = pd.read_csv('../input/titanic/test.csv')
submission = pd.DataFrame({
'PassengerId': test_df['PassengerId'],
'Survived': y_pred
})
submission.to_csv('titanic_prediction.csv', index=False)
print('File Saved' )<import_modules> | with strategy.scope() :
transformer_encoder = TFAutoModel.from_pretrained('jplu/tf-xlm-roberta-large')
input_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_ids")
sequence_output = transformer_encoder(input_ids)[0]
cls_token = sequence_output[:, 0, :]
out = Dense(3, activation='softmax' )(cls_token)
model = Model(inputs=input_ids, outputs=out)
model.compile(
Adam(lr=1e-3),
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
model.summary() | Contradictory, My Dear Watson |
11,038,328 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns<load_from_csv> | n_steps = len(x_train)// batch_size
train_history = model.fit(
train_dataset,
steps_per_epoch=n_steps,
validation_data=valid_dataset,
epochs=n_epochs
) | Contradictory, My Dear Watson |
11,038,328 | train = pd.read_csv('../input/titanic/train.csv')
train.head()<define_variables> | test_preds = model.predict(test_dataset, verbose=1)
submission['prediction'] = test_preds.argmax(axis=1 ) | Contradictory, My Dear Watson |
11,038,328 | <feature_engineering><EOS> | submission.to_csv('submission.csv', index=False)
| Contradictory, My Dear Watson |
11,008,485 | <SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<split> | plt.style.use('fivethirtyeight')
warnings.filterwarnings('ignore')
| Contradictory, My Dear Watson |
11,008,485 | mask =(train[target] == 1)
t_result = []
for num in numerical:
t_stat, p_val = ttest_ind(( train[mask])[num],
(train[~mask])[num],
equal_var = True,
nan_policy = 'omit')
t_result.append({
'group_1' : 'Survived',
'group_2' : 'Died',
'variable' : num,
't_stat' : t_stat,
'p_value' : p_val
})
t_result = pd.DataFrame(t_result )<feature_engineering> | df_train=pd.read_csv(os.path.join(path,"train.csv"))
df_test=pd.read_csv(os.path.join(path,"test.csv")) | Contradictory, My Dear Watson |
11,008,485 | def get_titles(data):
title_re = re.compile(r'(?:^.+),(\w+)')
titles = []
for name in data['Name']:
titles.append(title_re.findall(name)[0])
data['Title'] = titles
for i, title in enumerate(data['Title']):
if title in ['Miss', 'Ms', 'Mlle', 'Mrs', 'Mme']:
cat = 'Mrs_Ms'
elif title in ['Mr', 'Don']:
cat = 'Mr'
elif title in ['Master']:
cat = 'Master'
else:
cat = 'Honorable'
data.loc[i, 'Title'] = cat<create_dataframe> | try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
print('Running on TPU ', tpu.master())
except ValueError:
strategy = tf.distribute.get_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync ) | Contradictory, My Dear Watson |
11,008,485 | chi2_result = []
for cat in categorical:
crosstab = pd.crosstab(train[cat], train[target])
chi2_stat, p_val, dof, ex = chi2_contingency(crosstab)
chi2_result.append({
'var_1' : cat,
'var_2' : target,
'chi2' : chi2_stat,
'dof' : dof,
'p_value' : p_val
})
chi2_result = pd.DataFrame(chi2_result )<count_missing_values> | MODEL = 'jplu/tf-xlm-roberta-large'
EPOCHS = 10
MAX_LEN = 96
BATCH_SIZE= 16 * strategy.num_replicas_in_sync
AUTO = tf.data.experimental.AUTOTUNE
tokenizer = AutoTokenizer.from_pretrained(MODEL ) | Contradictory, My Dear Watson |
11,008,485 | train.isnull().sum()<count_missing_values> | def quick_encode(df,maxlen=100):
values = df[['premise','hypothesis']].values.tolist()
tokens=tokenizer.batch_encode_plus(values,max_length=maxlen,pad_to_max_length=True)
return np.array(tokens['input_ids'])
x_train = quick_encode(df_train)
x_test = quick_encode(df_test)
y_train = df_train.label.values
| Contradictory, My Dear Watson |
11,008,485 | train.isnull().sum()<prepare_x_and_y> | def create_dist_dataset(X, y,val,batch_size= BATCH_SIZE):
dataset = tf.data.Dataset.from_tensor_slices(( X,y)).shuffle(len(X))
if not val:
dataset = dataset.repeat().batch(batch_size ).prefetch(AUTO)
else:
dataset = dataset.batch(batch_size ).prefetch(AUTO)
return dataset
test_dataset =(
tf.data.Dataset
.from_tensor_slices(( x_test))
.batch(BATCH_SIZE)
)
| Contradictory, My Dear Watson |
11,008,485 | X = train.copy()
y = X.pop(target )<prepare_x_and_y> | def build_model(transformer,max_len):
input_ids = Input(shape=(max_len,), dtype=tf.int32, name="input_ids")
sequence_output = transformer(input_ids)[0]
cls_token = sequence_output[:, 0, :]
cls_token = Dropout(0.4 )(cls_token)
cls_token = Dense(32,activation='relu' )(cls_token)
cls_token = Dropout(0.4 )(cls_token)
out = Dense(3, activation='softmax' )(cls_token)
model = Model(inputs=input_ids, outputs=out)
model.compile(
Adam(lr=1e-5),
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
return model
| Contradictory, My Dear Watson |
11,008,485 | median_imputer = SimpleImputer(strategy = 'median')
mode_imputer = SimpleImputer(strategy = 'most_frequent')
missing_transformer = ColumnTransformer([('num', median_imputer, numerical),
('cat', mode_imputer, categorical)])
missing_transformer.fit(X)
X_imp = pd.DataFrame(missing_transformer.transform(X))
X_imp.columns = numerical + categorical
X.drop(numerical + categorical, axis = 1, inplace = True)
X = pd.concat([X, X_imp], axis = 1 )<categorify> | pred_test=np.zeros(( df_test.shape[0],3))
skf = StratifiedKFold(n_splits=5,shuffle=True,random_state=777)
val_score=[]
history=[]
for fold,(train_ind,valid_ind)in enumerate(skf.split(x_train,y_train)) :
if fold < 4:
print("fold",fold+1)
tf.tpu.experimental.initialize_tpu_system(tpu)
train_data = create_dist_dataset(x_train[train_ind],y_train[train_ind],val=False)
valid_data = create_dist_dataset(x_train[valid_ind],y_train[valid_ind],val=True)
Checkpoint=tf.keras.callbacks.ModelCheckpoint(f"roberta_base.h5", monitor='val_loss', verbose=0, save_best_only=True,
save_weights_only=True, mode='min')
with strategy.scope() :
transformer_layer = TFAutoModel.from_pretrained(MODEL)
model = build_model(transformer_layer, max_len=MAX_LEN)
n_steps = len(train_ind)//BATCH_SIZE
print("training model {} ".format(fold+1))
train_history = model.fit(
train_data,
steps_per_epoch=n_steps,
validation_data=valid_data,
epochs=EPOCHS,callbacks=[Checkpoint],verbose=1)
print("Loading model...")
model.load_weights(f"roberta_base.h5")
print("fold {} validation accuracy {}".format(fold+1,np.mean(train_history.history['val_accuracy'])))
print("fold {} validation loss {}".format(fold+1,np.mean(train_history.history['val_loss'])))
history.append(train_history)
val_score.append(np.mean(train_history.history['val_accuracy']))
print('predict on test.... ')
preds=model.predict(test_dataset,verbose=1)
pred_test+=preds/4
| Contradictory, My Dear Watson |
11,008,485 | ordinal_encoder = OrdinalEncoder()
X_enc = pd.DataFrame(ordinal_encoder.fit_transform(X[categorical]))
X_enc.columns = categorical
X.drop(categorical, axis = 1, inplace = True)
X = pd.concat([X, X_enc], axis = 1)
X = pd.get_dummies(X, columns = categorical, dtype = np.int64, drop_first = True )<compute_train_metric> | submission = pd.read_csv(os.path.join(path,'sample_submission.csv'))
submission['prediction'] = np.argmax(pred_test,axis=1)
submission.head() | Contradictory, My Dear Watson |
11,008,485 | <split><EOS> | submission.to_csv('submission.csv',index=False ) | Contradictory, My Dear Watson |
10,966,859 | <SOS> metric: categorization accuracy Kaggle data source: contradictory,-my-dear-watson<define_variables> | try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print('Running on TPU ', tpu.cluster_spec().as_dict() ['worker'])
except ValueError:
tpu = None
if tpu:
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
strategy = tf.distribute.get_strategy() | Contradictory, My Dear Watson |
10,966,859 | features = ['Title_2.0', 'Sex_1.0', 'Fare']<import_modules> | !pip install nlp
!curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py
!python pytorch-xla-env-setup.py --version nightly --apt-packages libomp5 libopenblas-dev | Contradictory, My Dear Watson |
10,966,859 | clfs = {
'tree' : DecisionTreeClassifier() ,
'rf' : RandomForestClassifier() ,
'extra' : ExtraTreesClassifier() ,
'grad' : GradientBoostingClassifier() ,
}<find_best_model_class> | %%time
%autosave 60
os.environ['XLA_USE_BF16'] = "1"
os.environ['XLA_TENSOR_ALLOCATOR_MAXSIZE'] = '100000000'
gc.enable()
from transformers import (XLMRobertaTokenizer,
    XLMRobertaModel,
    get_cosine_schedule_with_warmup)
warnings.filterwarnings("ignore")
print('PYTORCH:', xv.__torch_gitrev__)
print('XLA:', xv.__xla_gitrev__ ) | Contradictory, My Dear Watson |
10,966,859 | clf_result = []
for model_name, model in clfs.items() :
model.fit(X_train[features], y_train)
preds = model.predict(X_test[features])
cv_score = cross_val_score(model, X_train[features], y_train, scoring = 'accuracy', cv = 5)
clf_result.append({
'model' : model_name,
'mean acc' : cv_score.mean() ,
'std acc' : cv_score.std()
})
clf_result = pd.DataFrame(clf_result)<train_on_grid> | train = pd.read_csv('../input/contradictory-my-dear-watson/train.csv')
test = pd.read_csv('../input/contradictory-my-dear-watson/test.csv')
sample_submission = pd.read_csv('../input/contradictory-my-dear-watson/sample_submission.csv') | Contradictory, My Dear Watson |
10,966,859 | params = {
'n_estimators' : [50, 100, 150, 200],
'learning_rate' : [0.01, 0.03, 0.05, 0.07, 0.1, 0.3],
'subsample' : [0.4, 0.45, 0.5, 0.55, 0.7],
'max_features' : ['auto', 'sqrt'],
'n_iter_no_change' : [0, 5, 10],
'max_depth' : [2, 3, 4],
}
base_clf = GradientBoostingClassifier(criterion = 'mse', validation_fraction = 0.25, random_state = 0)
grid_search = RandomizedSearchCV(base_clf, params, scoring = 'accuracy', random_state = 0, n_iter = 40)
grid_search.fit(X_train[features], y_train)
print('40 random searches complete' )<create_dataframe> | TRAIN_BATCH_SIZE = 16
VALID_BATCH_SIZE = 16
EPOCHS = 4
MAX_LEN = 80
LR = 2e-5 * xm.xrt_world_size()
METRICS_DEBUG = True
tokenizer = XLMRobertaTokenizer.from_pretrained('xlm-roberta-large' ) | Contradictory, My Dear Watson |
10,966,859 | grid_search_result = pd.DataFrame(grid_search.cv_results_)
grid_search_result.loc[grid_search_result['rank_test_score'] < 5].sort_values('rank_test_score' )<choose_model_class> | mnli = nlp.load_dataset(path='glue', name='mnli', split='train[:5%]')
xnli = nlp.load_dataset(path='xnli')
xnli = nlp.concatenate_datasets([xnli['test'], xnli['validation']])
snli = nlp.load_dataset(path='snli', split='train[:5%]' ) | Contradictory, My Dear Watson |
10,966,859 | clf_1 = GradientBoostingClassifier(n_estimators = 200,
subsample = 0.45,
max_features = 'auto',
criterion = 'mse',
n_iter_no_change = 10,
learning_rate = 0.03,
validation_fraction = 0.25,
max_depth = 3,
random_state = 0)
clf_2 = GradientBoostingClassifier(n_estimators = 50,
subsample = 0.7,
max_features = 'sqrt',
criterion = 'mse',
n_iter_no_change = 10,
learning_rate = 0.03,
validation_fraction = 0.25,
max_depth = 4,
random_state = 0)
clf_3 = GradientBoostingClassifier(n_estimators = 150,
subsample = 0.7,
max_features = 'auto',
criterion = 'mse',
n_iter_no_change = 5,
learning_rate = 0.1,
validation_fraction = 0.25,
max_depth = 2,
random_state = 0 )<choose_model_class> | def convert_to_features(batch):
input_pairs = list(zip(batch['premise'], batch['hypothesis']))
encodings = tokenizer.batch_encode_plus(input_pairs,
add_special_tokens=True,
pad_to_max_length=True,
max_length=MAX_LEN,
truncation=True,
return_attention_mask=True,
return_token_type_ids=True)
return encodings | Contradictory, My Dear Watson |
10,966,859 | final_clf = GradientBoostingClassifier(n_estimators = 150,
subsample = 0.5,
max_features = 'auto',
criterion = 'mse',
n_iter_no_change = 5,
learning_rate = 0.07,
validation_fraction = 0.25,
max_depth = 3,
random_state = 0)
plot_learning_curve(final_clf )<train_model> | def preprocess_xnli(example):
premise_output = []
hypothesis_output = []
label_output = []
for prem, hyp, lab in zip(example['premise'], example['hypothesis'], example["label"]):
label = lab
langs = hyp['language']
translations = hyp['translation']
hypothesis = {k: v for k, v in zip(langs, translations)}
for lang in prem:
if lang in hypothesis:
premise_output += [prem[lang]]
hypothesis_output += [hypothesis[lang]]
label_output += [label]
return {'premise':premise_output, 'hypothesis':hypothesis_output, 'label':label_output} | Contradictory, My Dear Watson |
10,966,859 | final_clf.fit(X_train[features], y_train)
preds = final_clf.predict(X_val[features])
print('Number of estimators after early stopping: {}'.format(final_clf.n_estimators_))
print('Accuracy: {}'.format(accuracy_score(preds, y_val)))
print('F1: {}'.format(f1_score(preds, y_val)) )<load_from_csv> | mnli_encoded = mnli.map(convert_to_features, batched=True, remove_columns=['idx', 'premise', 'hypothesis'])
mnli_encoded.set_format("torch", columns=['attention_mask', 'input_ids', 'token_type_ids', 'label'] ) | Contradictory, My Dear Watson |