path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value) |
---|---|---|---|
34118365/cell_3 | [
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | from glob import glob
from itertools import chain
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
all_xray_df = pd.read_csv('/kaggle/input/data/Data_Entry_2017.csv')
all_image_paths = {os.path.basename(x): x for x in glob(os.path.join('/kaggle/input/data', 'images*', '*', '*.png'))}
all_xray_df['path'] = all_xray_df['Image Index'].map(all_image_paths.get)
all_xray_df.sample(3)
all_labels = np.unique(list(chain(*all_xray_df['Finding Labels'].map(lambda x: x.split('|')).tolist())))
all_labels = [x for x in all_labels if len(x) > 0]
print('All Labels ({}): {}'.format(len(all_labels), all_labels))
for c_label in all_labels:
if len(c_label) > 1:
all_xray_df[c_label] = all_xray_df['Finding Labels'].map(lambda finding: 1.0 if c_label in finding else 0)
all_xray_df['pneumonia_class'] = all_xray_df['Pneumonia']
all_xray_df.sample(3) | code |
34118365/cell_17 | [
"text_plain_output_1.png"
] | from glob import glob
from itertools import chain
from keras.applications.resnet_v2 import ResNet50V2
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, TensorBoard, ReduceLROnPlateau
from keras.layers import Conv2D, SeparableConv2D, MaxPool2D, LeakyReLU, Activation
from keras.layers import GlobalAveragePooling2D, MaxPooling2D, Reshape
from keras.layers import Input, Dense, Flatten, Dropout, BatchNormalization, AveragePooling2D
from keras.models import Sequential, Model
from keras.models import model_from_json
from keras.optimizers import Adam, RMSprop
from keras.preprocessing.image import ImageDataGenerator
from random import sample
from sklearn.metrics import roc_curve, auc, precision_recall_curve, average_precision_score, plot_precision_recall_curve, f1_score, confusion_matrix, accuracy_score
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sklearn.model_selection as skl
all_xray_df = pd.read_csv('/kaggle/input/data/Data_Entry_2017.csv')
all_image_paths = {os.path.basename(x): x for x in glob(os.path.join('/kaggle/input/data', 'images*', '*', '*.png'))}
all_xray_df['path'] = all_xray_df['Image Index'].map(all_image_paths.get)
all_xray_df.sample(3)
all_labels = np.unique(list(chain(*all_xray_df['Finding Labels'].map(lambda x: x.split('|')).tolist())))
all_labels = [x for x in all_labels if len(x) > 0]
for c_label in all_labels:
if len(c_label) > 1:
all_xray_df[c_label] = all_xray_df['Finding Labels'].map(lambda finding: 1.0 if c_label in finding else 0)
all_xray_df['pneumonia_class'] = all_xray_df['Pneumonia']
all_xray_df.sample(3)
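# create_splits: stratified train/validation split on the target column, then re-balance by
# undersampling negatives -- the training set to 1:1 and the validation set to 1:4 positive:negative.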
def create_splits(df, test_size, column_name):
train_df, valid_df = skl.train_test_split(df, test_size=test_size, stratify=df[column_name])
p_inds = train_df[train_df[column_name] == 1].index.tolist()
np_inds = train_df[train_df[column_name] == 0].index.tolist()
np_sample = sample(np_inds, len(p_inds))
train_df = train_df.loc[p_inds + np_sample]
p_inds = valid_df[valid_df[column_name] == 1].index.tolist()
np_inds = valid_df[valid_df[column_name] == 0].index.tolist()
np_sample = sample(np_inds, 4 * len(p_inds))
valid_df = valid_df.loc[p_inds + np_sample]
return (train_df, valid_df)
train_df, valid_df = create_splits(all_xray_df, 0.2, 'pneumonia_class')
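# Training-time augmentation: rescale pixels to [0, 1] plus mild shifts, rotation, shear and zoom;
# only horizontal flips are enabled (vertical flips are disabled).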
def my_image_augmentation():
my_idg = ImageDataGenerator(rescale=1.0 / 255.0, horizontal_flip=True, vertical_flip=False, height_shift_range=0.1, width_shift_range=0.1, rotation_range=20, shear_range=0.1, zoom_range=0.1)
return my_idg
def make_train_gen(train_df, img_size, batch_size):
idg = my_image_augmentation()
train_gen = idg.flow_from_dataframe(dataframe=train_df, directory=None, x_col='path', y_col='pneumonia_class', class_mode='raw', target_size=img_size, batch_size=batch_size)
return train_gen
def make_val_gen(valid_df, img_size, batch_size):
val_idg = ImageDataGenerator(rescale=1.0 / 255.0)
val_gen = val_idg.flow_from_dataframe(dataframe=valid_df, directory=None, x_col='path', y_col='pneumonia_class', class_mode='raw', target_size=img_size, batch_size=batch_size)
return val_gen
batch_size = 64
img_size = (224, 224)
train_gen = make_train_gen(train_df, img_size, batch_size)
val_gen = make_val_gen(valid_df, img_size, batch_size)
## May want to look at some examples of our augmented training data.
## This is helpful for understanding the extent to which data is being manipulated prior to training,
## and can be compared with how the raw data look prior to augmentation
t_x, t_y = next(train_gen)
fig, m_axs = plt.subplots(4, 4, figsize = (16, 16))
for (c_x, c_y, c_ax) in zip(t_x, t_y, m_axs.flatten()):
c_ax.imshow(c_x[:,:,0], cmap = 'bone')
if c_y == 1:
c_ax.set_title('Pneumonia')
else:
c_ax.set_title('No Pneumonia')
c_ax.axis('off')
def load_pretrained_model():
"""
model = VGG16(include_top=True, weights='imagenet')
transfer_layer = model.get_layer('block5_pool')
vgg_model = Model(inputs = model.input, outputs = transfer_layer.output)
for layer in vgg_model.layers[0:17]:
layer.trainable = False
"""
model = ResNet50V2(include_top=False, weights='imagenet')
resnet_model = Model(inputs=model.input, outputs=model.output, name='Resnet')
return resnet_model
def build_my_model():
"""
# my_model = Sequential()
# ....add your pre-trained model, and then whatever additional layers you think you might
# want for fine-tuning (Flatteen, Dense, Dropout, etc.)
# if you want to compile your model within this function, consider which layers of your pre-trained model,
# you want to freeze before you compile
# also make sure you set your optimizer, loss function, and metrics to monitor
# Todo
my_model = Sequential()
vgg_model = load_pretrained_model()
# Add the convolutional part of the VGG16 model from above.
my_model.add(vgg_model)
# Flatten the output of the VGG16 model because it is from a
# convolutional layer.
my_model.add(Flatten())
# Add a dropout-layer which may prevent overfitting and
# improve generalization ability to unseen data e.g. the test-set.
my_model.add(Dropout(0.5))
# Add a dense (aka. fully-connected) layer.
# This is for combining features that the VGG16 model has
# recognized in the image.
my_model.add(Dense(1024, activation='relu'))
# Add a dropout-layer which may prevent overfitting and
# improve generalization ability to unseen data e.g. the test-set.
my_model.add(Dropout(0.5))
# Add a dense (aka. fully-connected) layer.
# This is for combining features that the VGG16 model has
# recognized in the image.
my_model.add(Dense(512, activation='relu'))
# Add a dropout-layer which may prevent overfitting and
# improve generalization ability to unseen data e.g. the test-set.
my_model.add(Dropout(0.5))
# Add a dense (aka. fully-connected) layer.
# This is for combining features that the VGG16 model has
# recognized in the image.
my_model.add(Dense(256, activation='relu'))
# Add a dropout-layer which may prevent overfitting and
# improve generalization ability to unseen data e.g. the test-set.
my_model.add(Dropout(0.5))
# Add a dense (aka. fully-connected) layer.
# This is for combining features that the VGG16 model has
# recognized in the image.
my_model.add(Dense(1, activation='sigmoid'))
"""
resnet_model = load_pretrained_model()
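# Classification head on the ImageNet-pretrained ResNet50V2 feature maps: 1x1 convolutions act as
# per-location dense layers, 7x7 average pooling collapses the spatial grid, and a final 1x1 sigmoid
# conv + Reshape yields one pneumonia probability per image.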
my_model = Sequential([resnet_model, BatchNormalization(), Conv2D(1024, 1, activation='relu'), Dropout(0.5), BatchNormalization(), Conv2D(256, 1, activation='relu'), Dropout(0.5), AveragePooling2D((7, 7)), BatchNormalization(), Conv2D(1, 1, activation='sigmoid'), Reshape((-1,))])
return my_model
my_model = build_my_model()
my_model.summary()
weight_path = '{}_my_model.best.hdf5'.format('xray_class')
checkpoint = ModelCheckpoint(weight_path, monitor='val_binary_accuracy', verbose=1, save_best_only=True, mode='auto', save_weights_only=True)
early = EarlyStopping(monitor='val_binary_accuracy', mode='auto', patience=5)
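# Exponential learning-rate decay: keep the initial rate for the first epoch,
# then multiply it by exp(-0.1) (about 0.90) every subsequent epoch.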
def scheduler(epoch, lr):
if epoch < 1:
return lr
else:
return lr * np.exp(-0.1)
lr_scheduler = LearningRateScheduler(scheduler)
callbacks_list = [checkpoint, early, lr_scheduler]
from keras.models import model_from_json
model_path = '/kaggle/input/model-and-weights/my_model2.json'
weight_path = '/kaggle/input/model-and-weights/xray_class_my_model2.best.hdf5'
json_file = open(model_path, 'r')
loaded_model_json = json_file.read()
json_file.close()
my_model = model_from_json(loaded_model_json)
my_model.load_weights(weight_path)
optimizer = RMSprop(learning_rate=0.0001)
loss = 'binary_crossentropy'
metrics = ['binary_accuracy']
my_model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
# NOTE: valX/valY are not defined in this cell; assuming a fixed validation batch drawn from val_gen.
valX, valY = next(val_gen)
history = my_model.fit_generator(train_gen, validation_data=(valX, valY), epochs=10, callbacks=callbacks_list)
weight_path = 'xray_class_my_model.best.hdf5'
my_model.load_weights(weight_path)
pred_Y = my_model.predict(valX, batch_size=100, verbose=True)
def plot_auc(t_y, p_y):
fpr, tpr, threshold = roc_curve(t_y, p_y)
roc_auc = auc(fpr, tpr)
plt.xlim([0, 1])
plt.ylim([0, 1])
return
def plot_prec_rec(val_Y, pred_Y):
prec, rec, threshold = precision_recall_curve(val_Y, pred_Y)
plt.xlim([0, 1])
plt.ylim([0, 1])
def plot_history(history):
n = len(history.history['loss'])
return
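# optimize_accuracy: sweep classification thresholds from 0.5 to 0.9 (step 0.1) and keep the one
# that maximizes plain accuracy of the predictions against the true labels.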
def optimize_accuracy(t_y, p_y):
best_threshold = None
best_accuracy = 0.0
for t in np.arange(0.5, 1, 0.1):
pred = (p_y.reshape(-1) > t) * 1.0
accuracy = np.mean(pred == t_y)
if accuracy > best_accuracy:
best_threshold = t
best_accuracy = accuracy
return (best_threshold, best_accuracy)
best_threshold, best_accuracy = optimize_accuracy(valY, pred_Y)
print('Threshold of %.2f gives best accuracy at %.4f' % (best_threshold, best_accuracy))
pred_Y_class = pred_Y > best_threshold
f1_score(valY, pred_Y_class) | code |
34118365/cell_14 | [
"text_plain_output_1.png"
] | from glob import glob
from itertools import chain
from keras.applications.resnet_v2 import ResNet50V2
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, TensorBoard, ReduceLROnPlateau
from keras.layers import Conv2D, SeparableConv2D, MaxPool2D, LeakyReLU, Activation
from keras.layers import GlobalAveragePooling2D, MaxPooling2D, Reshape
from keras.layers import Input, Dense, Flatten, Dropout, BatchNormalization, AveragePooling2D
from keras.models import Sequential, Model
from keras.models import model_from_json
from keras.optimizers import Adam, RMSprop
from keras.preprocessing.image import ImageDataGenerator
from random import sample
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sklearn.model_selection as skl
all_xray_df = pd.read_csv('/kaggle/input/data/Data_Entry_2017.csv')
all_image_paths = {os.path.basename(x): x for x in glob(os.path.join('/kaggle/input/data', 'images*', '*', '*.png'))}
all_xray_df['path'] = all_xray_df['Image Index'].map(all_image_paths.get)
all_xray_df.sample(3)
all_labels = np.unique(list(chain(*all_xray_df['Finding Labels'].map(lambda x: x.split('|')).tolist())))
all_labels = [x for x in all_labels if len(x) > 0]
for c_label in all_labels:
if len(c_label) > 1:
all_xray_df[c_label] = all_xray_df['Finding Labels'].map(lambda finding: 1.0 if c_label in finding else 0)
all_xray_df['pneumonia_class'] = all_xray_df['Pneumonia']
all_xray_df.sample(3)
def create_splits(df, test_size, column_name):
train_df, valid_df = skl.train_test_split(df, test_size=test_size, stratify=df[column_name])
p_inds = train_df[train_df[column_name] == 1].index.tolist()
np_inds = train_df[train_df[column_name] == 0].index.tolist()
np_sample = sample(np_inds, len(p_inds))
train_df = train_df.loc[p_inds + np_sample]
p_inds = valid_df[valid_df[column_name] == 1].index.tolist()
np_inds = valid_df[valid_df[column_name] == 0].index.tolist()
np_sample = sample(np_inds, 4 * len(p_inds))
valid_df = valid_df.loc[p_inds + np_sample]
return (train_df, valid_df)
train_df, valid_df = create_splits(all_xray_df, 0.2, 'pneumonia_class')
def my_image_augmentation():
my_idg = ImageDataGenerator(rescale=1.0 / 255.0, horizontal_flip=True, vertical_flip=False, height_shift_range=0.1, width_shift_range=0.1, rotation_range=20, shear_range=0.1, zoom_range=0.1)
return my_idg
def make_train_gen(train_df, img_size, batch_size):
idg = my_image_augmentation()
train_gen = idg.flow_from_dataframe(dataframe=train_df, directory=None, x_col='path', y_col='pneumonia_class', class_mode='raw', target_size=img_size, batch_size=batch_size)
return train_gen
def make_val_gen(valid_df, img_size, batch_size):
val_idg = ImageDataGenerator(rescale=1.0 / 255.0)
val_gen = val_idg.flow_from_dataframe(dataframe=valid_df, directory=None, x_col='path', y_col='pneumonia_class', class_mode='raw', target_size=img_size, batch_size=batch_size)
return val_gen
batch_size = 64
img_size = (224, 224)
train_gen = make_train_gen(train_df, img_size, batch_size)
val_gen = make_val_gen(valid_df, img_size, batch_size)
def load_pretrained_model():
"""
model = VGG16(include_top=True, weights='imagenet')
transfer_layer = model.get_layer('block5_pool')
vgg_model = Model(inputs = model.input, outputs = transfer_layer.output)
for layer in vgg_model.layers[0:17]:
layer.trainable = False
"""
model = ResNet50V2(include_top=False, weights='imagenet')
resnet_model = Model(inputs=model.input, outputs=model.output, name='Resnet')
return resnet_model
def build_my_model():
"""
# my_model = Sequential()
# ....add your pre-trained model, and then whatever additional layers you think you might
# want for fine-tuning (Flatteen, Dense, Dropout, etc.)
# if you want to compile your model within this function, consider which layers of your pre-trained model,
# you want to freeze before you compile
# also make sure you set your optimizer, loss function, and metrics to monitor
# Todo
my_model = Sequential()
vgg_model = load_pretrained_model()
# Add the convolutional part of the VGG16 model from above.
my_model.add(vgg_model)
# Flatten the output of the VGG16 model because it is from a
# convolutional layer.
my_model.add(Flatten())
# Add a dropout-layer which may prevent overfitting and
# improve generalization ability to unseen data e.g. the test-set.
my_model.add(Dropout(0.5))
# Add a dense (aka. fully-connected) layer.
# This is for combining features that the VGG16 model has
# recognized in the image.
my_model.add(Dense(1024, activation='relu'))
# Add a dropout-layer which may prevent overfitting and
# improve generalization ability to unseen data e.g. the test-set.
my_model.add(Dropout(0.5))
# Add a dense (aka. fully-connected) layer.
# This is for combining features that the VGG16 model has
# recognized in the image.
my_model.add(Dense(512, activation='relu'))
# Add a dropout-layer which may prevent overfitting and
# improve generalization ability to unseen data e.g. the test-set.
my_model.add(Dropout(0.5))
# Add a dense (aka. fully-connected) layer.
# This is for combining features that the VGG16 model has
# recognized in the image.
my_model.add(Dense(256, activation='relu'))
# Add a dropout-layer which may prevent overfitting and
# improve generalization ability to unseen data e.g. the test-set.
my_model.add(Dropout(0.5))
# Add a dense (aka. fully-connected) layer.
# This is for combining features that the VGG16 model has
# recognized in the image.
my_model.add(Dense(1, activation='sigmoid'))
"""
resnet_model = load_pretrained_model()
my_model = Sequential([resnet_model, BatchNormalization(), Conv2D(1024, 1, activation='relu'), Dropout(0.5), BatchNormalization(), Conv2D(256, 1, activation='relu'), Dropout(0.5), AveragePooling2D((7, 7)), BatchNormalization(), Conv2D(1, 1, activation='sigmoid'), Reshape((-1,))])
return my_model
my_model = build_my_model()
my_model.summary()
weight_path = '{}_my_model.best.hdf5'.format('xray_class')
checkpoint = ModelCheckpoint(weight_path, monitor='val_binary_accuracy', verbose=1, save_best_only=True, mode='auto', save_weights_only=True)
early = EarlyStopping(monitor='val_binary_accuracy', mode='auto', patience=5)
def scheduler(epoch, lr):
if epoch < 1:
return lr
else:
return lr * np.exp(-0.1)
lr_scheduler = LearningRateScheduler(scheduler)
callbacks_list = [checkpoint, early, lr_scheduler]
from keras.models import model_from_json
model_path = '/kaggle/input/model-and-weights/my_model2.json'
weight_path = '/kaggle/input/model-and-weights/xray_class_my_model2.best.hdf5'
json_file = open(model_path, 'r')
loaded_model_json = json_file.read()
json_file.close()
my_model = model_from_json(loaded_model_json)
my_model.load_weights(weight_path)
optimizer = RMSprop(learning_rate=0.0001)
loss = 'binary_crossentropy'
metrics = ['binary_accuracy']
my_model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
# NOTE: valX/valY are not defined in this cell; assuming a fixed validation batch drawn from val_gen.
valX, valY = next(val_gen)
history = my_model.fit_generator(train_gen, validation_data=(valX, valY), epochs=10, callbacks=callbacks_list)
weight_path = 'xray_class_my_model.best.hdf5'
my_model.load_weights(weight_path)
pred_Y = my_model.predict(valX, batch_size=100, verbose=True) | code |
34118365/cell_12 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import multiprocessing as mp
import multiprocessing as mp
cpu_count = mp.cpu_count()
cpu_count | code |
34118365/cell_5 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from glob import glob
from itertools import chain
from keras.preprocessing.image import ImageDataGenerator
from random import sample
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sklearn.model_selection as skl
all_xray_df = pd.read_csv('/kaggle/input/data/Data_Entry_2017.csv')
all_image_paths = {os.path.basename(x): x for x in glob(os.path.join('/kaggle/input/data', 'images*', '*', '*.png'))}
all_xray_df['path'] = all_xray_df['Image Index'].map(all_image_paths.get)
all_xray_df.sample(3)
all_labels = np.unique(list(chain(*all_xray_df['Finding Labels'].map(lambda x: x.split('|')).tolist())))
all_labels = [x for x in all_labels if len(x) > 0]
for c_label in all_labels:
if len(c_label) > 1:
all_xray_df[c_label] = all_xray_df['Finding Labels'].map(lambda finding: 1.0 if c_label in finding else 0)
all_xray_df['pneumonia_class'] = all_xray_df['Pneumonia']
all_xray_df.sample(3)
def create_splits(df, test_size, column_name):
train_df, valid_df = skl.train_test_split(df, test_size=test_size, stratify=df[column_name])
p_inds = train_df[train_df[column_name] == 1].index.tolist()
np_inds = train_df[train_df[column_name] == 0].index.tolist()
np_sample = sample(np_inds, len(p_inds))
train_df = train_df.loc[p_inds + np_sample]
p_inds = valid_df[valid_df[column_name] == 1].index.tolist()
np_inds = valid_df[valid_df[column_name] == 0].index.tolist()
np_sample = sample(np_inds, 4 * len(p_inds))
valid_df = valid_df.loc[p_inds + np_sample]
return (train_df, valid_df)
train_df, valid_df = create_splits(all_xray_df, 0.2, 'pneumonia_class')
def my_image_augmentation():
my_idg = ImageDataGenerator(rescale=1.0 / 255.0, horizontal_flip=True, vertical_flip=False, height_shift_range=0.1, width_shift_range=0.1, rotation_range=20, shear_range=0.1, zoom_range=0.1)
return my_idg
def make_train_gen(train_df, img_size, batch_size):
idg = my_image_augmentation()
train_gen = idg.flow_from_dataframe(dataframe=train_df, directory=None, x_col='path', y_col='pneumonia_class', class_mode='raw', target_size=img_size, batch_size=batch_size)
return train_gen
def make_val_gen(valid_df, img_size, batch_size):
val_idg = ImageDataGenerator(rescale=1.0 / 255.0)
val_gen = val_idg.flow_from_dataframe(dataframe=valid_df, directory=None, x_col='path', y_col='pneumonia_class', class_mode='raw', target_size=img_size, batch_size=batch_size)
return val_gen
batch_size = 64
img_size = (224, 224)
train_gen = make_train_gen(train_df, img_size, batch_size)
val_gen = make_val_gen(valid_df, img_size, batch_size) | code |
18131743/cell_21 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score,confusion_matrix
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/data.csv')
import seaborn as sns
from pandas.plotting import scatter_matrix
import matplotlib.cm as cm
import matplotlib.pyplot as plt
data['diagnosis'] = data['diagnosis'].map({'M': 1, 'B': 0})
y = pd.DataFrame(data=data['diagnosis'])
list = ['Unnamed: 32', 'id', 'diagnosis']
x = data.drop(list, axis=1)
#correlation map
f,ax = plt.subplots(figsize=(18, 18))
sns.heatmap(x.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
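# Features that the correlation heatmap shows are highly correlated with other kept features;
# drop them to reduce redundancy before modelling.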
rem = ['perimeter_mean','radius_mean','compactness_mean','concave points_mean','radius_se','perimeter_se','radius_worst','perimeter_worst','compactness_worst','concave points_worst','compactness_se','concave points_se','texture_worst','area_worst']
data_new = x.drop(rem, axis=1)
f,ax = plt.subplots(figsize=(14, 14))
sns.heatmap(data_new.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
data_new['diagnosis'] = y
corrmat = data_new.corr().abs()
top_corr_features = corrmat.index[abs(corrmat["diagnosis"])>0.1]
plt.figure(figsize=(10,10))
g = sns.heatmap(data_new[top_corr_features].corr(),annot=True,cmap="RdYlGn")
req = ['texture_mean', 'area_mean', 'smoothness_mean', 'concavity_mean', 'symmetry_mean', 'area_se', 'concavity_se', 'smoothness_worst', 'concavity_worst', 'symmetry_worst', 'fractal_dimension_worst']
data_new2 = data_new[req]
data_new = data_new.drop(['diagnosis'], axis=1)
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score, confusion_matrix
from sklearn.metrics import accuracy_score
x_train, x_test, y_train, y_test = train_test_split(data_new2, y, test_size=0.3, random_state=42)
clf_rf = RandomForestClassifier(random_state=43)
clr_rf = clf_rf.fit(x_train, y_train)
ac = accuracy_score(y_test, clf_rf.predict(x_test))
cm = confusion_matrix(y_test, clf_rf.predict(x_test))
data_final = pd.read_csv('../input/data.csv')
y_final = data_final['diagnosis'].map({'M': 0, 'B': 1})
data_final = data_final[req]
from sklearn.model_selection import train_test_split
x_final = data_final
x_train, x_test, y_train, y_test = train_test_split(x_final, y_final, test_size=0.2, random_state=42)
x_train.shape
clf_rf = RandomForestClassifier(random_state=43, n_estimators=20, min_samples_split=2, min_samples_leaf=2, max_features='sqrt', max_depth=21, bootstrap=False)
clr_rf = clf_rf.fit(x_train, y_train)
print(r2_score(y_test, clf_rf.predict(x_test)))
print(mean_squared_error(y_test, clf_rf.predict(x_test))) | code |
18131743/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/data.csv')
import seaborn as sns
from pandas.plotting import scatter_matrix
import matplotlib.cm as cm
import matplotlib.pyplot as plt
data['diagnosis'] = data['diagnosis'].map({'M': 1, 'B': 0})
y = pd.DataFrame(data=data['diagnosis'])
list = ['Unnamed: 32', 'id', 'diagnosis']
x = data.drop(list, axis=1)
#correlation map
f,ax = plt.subplots(figsize=(18, 18))
sns.heatmap(x.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
rem = ['perimeter_mean', 'radius_mean', 'compactness_mean', 'concave points_mean', 'radius_se', 'perimeter_se', 'radius_worst', 'perimeter_worst', 'compactness_worst', 'concave points_worst', 'compactness_se', 'concave points_se', 'texture_worst', 'area_worst']
data_new = x.drop(rem, axis=1)
f, ax = plt.subplots(figsize=(14, 14))
sns.heatmap(data_new.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax) | code |
18131743/cell_25 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/data.csv')
import seaborn as sns
from pandas.plotting import scatter_matrix
import matplotlib.cm as cm
import matplotlib.pyplot as plt
data['diagnosis'] = data['diagnosis'].map({'M': 1, 'B': 0})
y = pd.DataFrame(data=data['diagnosis'])
list = ['Unnamed: 32', 'id', 'diagnosis']
x = data.drop(list, axis=1)
#correlation map
f,ax = plt.subplots(figsize=(18, 18))
sns.heatmap(x.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
rem = ['perimeter_mean','radius_mean','compactness_mean','concave points_mean','radius_se','perimeter_se','radius_worst','perimeter_worst','compactness_worst','concave points_worst','compactness_se','concave points_se','texture_worst','area_worst']
data_new = x.drop(rem, axis=1)
f,ax = plt.subplots(figsize=(14, 14))
sns.heatmap(data_new.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
data_new['diagnosis'] = y
corrmat = data_new.corr().abs()
top_corr_features = corrmat.index[abs(corrmat["diagnosis"])>0.1]
plt.figure(figsize=(10,10))
g = sns.heatmap(data_new[top_corr_features].corr(),annot=True,cmap="RdYlGn")
req = ['texture_mean', 'area_mean', 'smoothness_mean', 'concavity_mean', 'symmetry_mean', 'area_se', 'concavity_se', 'smoothness_worst', 'concavity_worst', 'symmetry_worst', 'fractal_dimension_worst']
data_new2 = data_new[req]
data_new = data_new.drop(['diagnosis'], axis=1)
data_final = pd.read_csv('../input/data.csv')
y_final = data_final['diagnosis'].map({'M': 0, 'B': 1})
data_final = data_final[req]
from sklearn.model_selection import train_test_split
x_final = data_final
x_train, x_test, y_train, y_test = train_test_split(x_final, y_final, test_size=0.2, random_state=42)
x_train.shape
from sklearn.tree import DecisionTreeClassifier
dtc_test = DecisionTreeClassifier(random_state=43, min_samples_leaf=8)
dtc_test = dtc_test.fit(x_train, y_train)
print(r2_score(y_test, dtc_test.predict(x_test)))
print(mean_squared_error(y_test, dtc_test.predict(x_test))) | code |
18131743/cell_23 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import ExtraTreesRegressor
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/data.csv')
import seaborn as sns
from pandas.plotting import scatter_matrix
import matplotlib.cm as cm
import matplotlib.pyplot as plt
data['diagnosis'] = data['diagnosis'].map({'M': 1, 'B': 0})
y = pd.DataFrame(data=data['diagnosis'])
list = ['Unnamed: 32', 'id', 'diagnosis']
x = data.drop(list, axis=1)
#correlation map
f,ax = plt.subplots(figsize=(18, 18))
sns.heatmap(x.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
rem = ['perimeter_mean','radius_mean','compactness_mean','concave points_mean','radius_se','perimeter_se','radius_worst','perimeter_worst','compactness_worst','concave points_worst','compactness_se','concave points_se','texture_worst','area_worst']
data_new = x.drop(rem, axis=1)
f,ax = plt.subplots(figsize=(14, 14))
sns.heatmap(data_new.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
data_new['diagnosis'] = y
corrmat = data_new.corr().abs()
top_corr_features = corrmat.index[abs(corrmat["diagnosis"])>0.1]
plt.figure(figsize=(10,10))
g = sns.heatmap(data_new[top_corr_features].corr(),annot=True,cmap="RdYlGn")
req = ['texture_mean', 'area_mean', 'smoothness_mean', 'concavity_mean', 'symmetry_mean', 'area_se', 'concavity_se', 'smoothness_worst', 'concavity_worst', 'symmetry_worst', 'fractal_dimension_worst']
data_new2 = data_new[req]
data_new = data_new.drop(['diagnosis'], axis=1)
data_final = pd.read_csv('../input/data.csv')
y_final = data_final['diagnosis'].map({'M': 0, 'B': 1})
data_final = data_final[req]
from sklearn.model_selection import train_test_split
x_final = data_final
x_train, x_test, y_train, y_test = train_test_split(x_final, y_final, test_size=0.2, random_state=42)
x_train.shape
from sklearn.ensemble import ExtraTreesRegressor
etr_test = ExtraTreesRegressor(random_state=43, n_estimators=100, min_samples_split=2, min_samples_leaf=1, max_features='auto', max_depth=None, bootstrap=False)
etr_test = etr_test.fit(x_train, y_train)
print(r2_score(y_test, etr_test.predict(x_test)))
print(mean_squared_error(y_test, etr_test.predict(x_test))) | code |
18131743/cell_20 | [
"image_output_1.png"
] | from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/data.csv')
import seaborn as sns
from pandas.plotting import scatter_matrix
import matplotlib.cm as cm
import matplotlib.pyplot as plt
data['diagnosis'] = data['diagnosis'].map({'M': 1, 'B': 0})
y = pd.DataFrame(data=data['diagnosis'])
list = ['Unnamed: 32', 'id', 'diagnosis']
x = data.drop(list, axis=1)
#correlation map
f,ax = plt.subplots(figsize=(18, 18))
sns.heatmap(x.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
rem = ['perimeter_mean','radius_mean','compactness_mean','concave points_mean','radius_se','perimeter_se','radius_worst','perimeter_worst','compactness_worst','concave points_worst','compactness_se','concave points_se','texture_worst','area_worst']
data_new = x.drop(rem, axis=1)
f,ax = plt.subplots(figsize=(14, 14))
sns.heatmap(data_new.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
data_new['diagnosis'] = y
corrmat = data_new.corr().abs()
top_corr_features = corrmat.index[abs(corrmat["diagnosis"])>0.1]
plt.figure(figsize=(10,10))
g = sns.heatmap(data_new[top_corr_features].corr(),annot=True,cmap="RdYlGn")
req = ['texture_mean', 'area_mean', 'smoothness_mean', 'concavity_mean', 'symmetry_mean', 'area_se', 'concavity_se', 'smoothness_worst', 'concavity_worst', 'symmetry_worst', 'fractal_dimension_worst']
data_new2 = data_new[req]
data_new = data_new.drop(['diagnosis'], axis=1)
data_final = pd.read_csv('../input/data.csv')
y_final = data_final['diagnosis'].map({'M': 0, 'B': 1})
data_final = data_final[req]
from sklearn.model_selection import train_test_split
x_final = data_final
x_train, x_test, y_train, y_test = train_test_split(x_final, y_final, test_size=0.2, random_state=42)
x_train.shape
from sklearn.model_selection import RandomizedSearchCV
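# Hyperparameter search space for a DecisionTreeRegressor, explored with
# RandomizedSearchCV (100 random draws, 3-fold cross-validation).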
criterion = ['mse', 'friedman_mse', 'mae']
splitter = ['best', 'random']
max_depth = [None, 1, 11, 21, 31]
min_samples_split = [2, 5, 10]
min_samples_leaf = [1, 2, 4]
max_features = ['auto', 'sqrt', 'log2']
random_grid = {'max_features': max_features, 'max_depth': max_depth, 'min_samples_split': min_samples_split, 'min_samples_leaf': min_samples_leaf, 'criterion': criterion, 'splitter': splitter}
print(random_grid)
model_test = DecisionTreeRegressor(random_state=43)
model_random = RandomizedSearchCV(estimator=model_test, param_distributions=random_grid, n_iter=100, cv=3, random_state=42, n_jobs=-1)
model_random.fit(x_train, y_train)
model_random.best_params_ | code |
18131743/cell_29 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from mlxtend.regressor import StackingCVRegressor
from sklearn import ensemble
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/data.csv')
import seaborn as sns
from pandas.plotting import scatter_matrix
import matplotlib.cm as cm
import matplotlib.pyplot as plt
data['diagnosis'] = data['diagnosis'].map({'M': 1, 'B': 0})
y = pd.DataFrame(data=data['diagnosis'])
list = ['Unnamed: 32', 'id', 'diagnosis']
x = data.drop(list, axis=1)
#correlation map
f,ax = plt.subplots(figsize=(18, 18))
sns.heatmap(x.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
rem = ['perimeter_mean','radius_mean','compactness_mean','concave points_mean','radius_se','perimeter_se','radius_worst','perimeter_worst','compactness_worst','concave points_worst','compactness_se','concave points_se','texture_worst','area_worst']
data_new = x.drop(rem, axis=1)
f,ax = plt.subplots(figsize=(14, 14))
sns.heatmap(data_new.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
data_new['diagnosis'] = y
corrmat = data_new.corr().abs()
top_corr_features = corrmat.index[abs(corrmat["diagnosis"])>0.1]
plt.figure(figsize=(10,10))
g = sns.heatmap(data_new[top_corr_features].corr(),annot=True,cmap="RdYlGn")
req = ['texture_mean', 'area_mean', 'smoothness_mean', 'concavity_mean', 'symmetry_mean', 'area_se', 'concavity_se', 'smoothness_worst', 'concavity_worst', 'symmetry_worst', 'fractal_dimension_worst']
data_new2 = data_new[req]
data_new = data_new.drop(['diagnosis'], axis=1)
data_final = pd.read_csv('../input/data.csv')
y_final = data_final['diagnosis'].map({'M': 0, 'B': 1})
data_final = data_final[req]
from sklearn.model_selection import train_test_split
x_final = data_final
x_train, x_test, y_train, y_test = train_test_split(x_final, y_final, test_size=0.2, random_state=42)
x_train.shape
from sklearn import ensemble
gbr = ensemble.GradientBoostingRegressor(n_estimators=400, max_depth=7, min_samples_split=8, learning_rate=0.1, loss='ls')
gbr = gbr.fit(x_train, y_train)
from mlxtend.regressor import StackingCVRegressor
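# Base learners with previously tuned hyperparameters; StackingCVRegressor trains them on
# cross-validated folds and fits a DecisionTreeRegressor meta-model on their out-of-fold
# predictions (use_features_in_secondary=True also passes the raw features to the meta-model).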
rfc = RandomForestClassifier(random_state=43, n_estimators=20, min_samples_split=2, min_samples_leaf=2, max_features='sqrt', max_depth=21, bootstrap=False)
etc = ExtraTreesClassifier(random_state=43, n_estimators=300, min_samples_split=2, min_samples_leaf=1, max_features='sqrt', max_depth=31, bootstrap=True)
etr = ExtraTreesRegressor(random_state=43, n_estimators=100, min_samples_split=2, min_samples_leaf=1, max_features='auto', max_depth=None, bootstrap=False)
rfr = RandomForestRegressor(random_state=43, n_estimators=200, min_samples_split=2, min_samples_leaf=1, max_features='log2', max_depth=11, bootstrap=False)
dtc = DecisionTreeClassifier(random_state=43, min_samples_leaf=8)
dtr = DecisionTreeRegressor(random_state=43, splitter='best', min_samples_split=2, min_samples_leaf=8, max_features='auto', max_depth=11, criterion='mse')
gbr = ensemble.GradientBoostingRegressor(n_estimators=400, max_depth=7, min_samples_split=8, learning_rate=0.1, loss='ls')
stack_gen = StackingCVRegressor(regressors=(rfc, etr, rfr, dtc, dtr), meta_regressor=dtr, use_features_in_secondary=True)
stack_gen_model = stack_gen.fit(x_train, y_train)
print(r2_score(y_test, stack_gen_model.predict(x_test)))
print(mean_squared_error(y_test, stack_gen_model.predict(x_test))) | code |
18131743/cell_26 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/data.csv')
import seaborn as sns
from pandas.plotting import scatter_matrix
import matplotlib.cm as cm
import matplotlib.pyplot as plt
data['diagnosis'] = data['diagnosis'].map({'M': 1, 'B': 0})
y = pd.DataFrame(data=data['diagnosis'])
list = ['Unnamed: 32', 'id', 'diagnosis']
x = data.drop(list, axis=1)
#correlation map
f,ax = plt.subplots(figsize=(18, 18))
sns.heatmap(x.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
rem = ['perimeter_mean','radius_mean','compactness_mean','concave points_mean','radius_se','perimeter_se','radius_worst','perimeter_worst','compactness_worst','concave points_worst','compactness_se','concave points_se','texture_worst','area_worst']
data_new = x.drop(rem, axis=1)
f,ax = plt.subplots(figsize=(14, 14))
sns.heatmap(data_new.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
data_new['diagnosis'] = y
corrmat = data_new.corr().abs()
top_corr_features = corrmat.index[abs(corrmat["diagnosis"])>0.1]
plt.figure(figsize=(10,10))
g = sns.heatmap(data_new[top_corr_features].corr(),annot=True,cmap="RdYlGn")
req = ['texture_mean', 'area_mean', 'smoothness_mean', 'concavity_mean', 'symmetry_mean', 'area_se', 'concavity_se', 'smoothness_worst', 'concavity_worst', 'symmetry_worst', 'fractal_dimension_worst']
data_new2 = data_new[req]
data_new = data_new.drop(['diagnosis'], axis=1)
data_final = pd.read_csv('../input/data.csv')
y_final = data_final['diagnosis'].map({'M': 0, 'B': 1})
data_final = data_final[req]
from sklearn.model_selection import train_test_split
x_final = data_final
x_train, x_test, y_train, y_test = train_test_split(x_final, y_final, test_size=0.2, random_state=42)
x_train.shape
from sklearn.tree import DecisionTreeRegressor
dtr_test = DecisionTreeRegressor(random_state=43, splitter='best', min_samples_split=2, min_samples_leaf=8, max_features='auto', max_depth=11, criterion='mse')
dtr_test = dtr_test.fit(x_train, y_train)
print(r2_score(y_test, dtr_test.predict(x_test)))
print(mean_squared_error(y_test, dtr_test.predict(x_test))) | code |
18131743/cell_2 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input')) | code |
18131743/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/data.csv')
import seaborn as sns
from pandas.plotting import scatter_matrix
import matplotlib.cm as cm
import matplotlib.pyplot as plt
data['diagnosis'] = data['diagnosis'].map({'M': 1, 'B': 0})
y = pd.DataFrame(data=data['diagnosis'])
list = ['Unnamed: 32', 'id', 'diagnosis']
x = data.drop(list, axis=1)
#correlation map
f,ax = plt.subplots(figsize=(18, 18))
sns.heatmap(x.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
rem = ['perimeter_mean','radius_mean','compactness_mean','concave points_mean','radius_se','perimeter_se','radius_worst','perimeter_worst','compactness_worst','concave points_worst','compactness_se','concave points_se','texture_worst','area_worst']
data_new = x.drop(rem, axis=1)
f,ax = plt.subplots(figsize=(14, 14))
sns.heatmap(data_new.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
data_new['diagnosis'] = y
corrmat = data_new.corr().abs()
top_corr_features = corrmat.index[abs(corrmat['diagnosis']) > 0.1]
plt.figure(figsize=(10, 10))
g = sns.heatmap(data_new[top_corr_features].corr(), annot=True, cmap='RdYlGn') | code |
18131743/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/data.csv')
import seaborn as sns
from pandas.plotting import scatter_matrix
import matplotlib.cm as cm
import matplotlib.pyplot as plt
data['diagnosis'] = data['diagnosis'].map({'M': 1, 'B': 0})
y = pd.DataFrame(data=data['diagnosis'])
list = ['Unnamed: 32', 'id', 'diagnosis']
x = data.drop(list, axis=1)
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(x.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax) | code |
18131743/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/data.csv')
import seaborn as sns
from pandas.plotting import scatter_matrix
import matplotlib.cm as cm
import matplotlib.pyplot as plt
data['diagnosis'] = data['diagnosis'].map({'M': 1, 'B': 0})
y = pd.DataFrame(data=data['diagnosis'])
list = ['Unnamed: 32', 'id', 'diagnosis']
x = data.drop(list, axis=1)
#correlation map
f,ax = plt.subplots(figsize=(18, 18))
sns.heatmap(x.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
rem = ['perimeter_mean','radius_mean','compactness_mean','concave points_mean','radius_se','perimeter_se','radius_worst','perimeter_worst','compactness_worst','concave points_worst','compactness_se','concave points_se','texture_worst','area_worst']
data_new = x.drop(rem, axis=1)
f,ax = plt.subplots(figsize=(14, 14))
sns.heatmap(data_new.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
data_new['diagnosis'] = y
corrmat = data_new.corr().abs()
top_corr_features = corrmat.index[abs(corrmat["diagnosis"])>0.1]
plt.figure(figsize=(10,10))
g = sns.heatmap(data_new[top_corr_features].corr(),annot=True,cmap="RdYlGn")
req = ['texture_mean', 'area_mean', 'smoothness_mean', 'concavity_mean', 'symmetry_mean', 'area_se', 'concavity_se', 'smoothness_worst', 'concavity_worst', 'symmetry_worst', 'fractal_dimension_worst']
data_new2 = data_new[req]
data_new = data_new.drop(['diagnosis'], axis=1)
data_final = pd.read_csv('../input/data.csv')
y_final = data_final['diagnosis'].map({'M': 0, 'B': 1})
data_final = data_final[req]
from sklearn.model_selection import train_test_split
x_final = data_final
x_train, x_test, y_train, y_test = train_test_split(x_final, y_final, test_size=0.2, random_state=42)
x_train.shape | code |
18131743/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score,confusion_matrix
from sklearn.model_selection import train_test_split
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/data.csv')
import seaborn as sns
from pandas.plotting import scatter_matrix
import matplotlib.cm as cm
import matplotlib.pyplot as plt
data['diagnosis'] = data['diagnosis'].map({'M': 1, 'B': 0})
y = pd.DataFrame(data=data['diagnosis'])
list = ['Unnamed: 32', 'id', 'diagnosis']
x = data.drop(list, axis=1)
#correlation map
f,ax = plt.subplots(figsize=(18, 18))
sns.heatmap(x.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
rem = ['perimeter_mean','radius_mean','compactness_mean','concave points_mean','radius_se','perimeter_se','radius_worst','perimeter_worst','compactness_worst','concave points_worst','compactness_se','concave points_se','texture_worst','area_worst']
data_new = x.drop(rem, axis=1)
f,ax = plt.subplots(figsize=(14, 14))
sns.heatmap(data_new.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
data_new['diagnosis'] = y
corrmat = data_new.corr().abs()
top_corr_features = corrmat.index[abs(corrmat["diagnosis"])>0.1]
plt.figure(figsize=(10,10))
g = sns.heatmap(data_new[top_corr_features].corr(),annot=True,cmap="RdYlGn")
req = ['texture_mean', 'area_mean', 'smoothness_mean', 'concavity_mean', 'symmetry_mean', 'area_se', 'concavity_se', 'smoothness_worst', 'concavity_worst', 'symmetry_worst', 'fractal_dimension_worst']
data_new2 = data_new[req]
data_new = data_new.drop(['diagnosis'], axis=1)
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score, confusion_matrix
from sklearn.metrics import accuracy_score
x_train, x_test, y_train, y_test = train_test_split(data_new2, y, test_size=0.3, random_state=42)
clf_rf = RandomForestClassifier(random_state=43)
clr_rf = clf_rf.fit(x_train, y_train)
ac = accuracy_score(y_test, clf_rf.predict(x_test))
print('Accuracy is: ', ac)
cm = confusion_matrix(y_test, clf_rf.predict(x_test))
sns.heatmap(cm, annot=True, fmt='d') | code |
18131743/cell_3 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/data.csv')
data.info() | code |
18131743/cell_17 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/data.csv')
import seaborn as sns
from pandas.plotting import scatter_matrix
import matplotlib.cm as cm
import matplotlib.pyplot as plt
data['diagnosis'] = data['diagnosis'].map({'M': 1, 'B': 0})
y = pd.DataFrame(data=data['diagnosis'])
list = ['Unnamed: 32', 'id', 'diagnosis']
x = data.drop(list, axis=1)
#correlation map
f,ax = plt.subplots(figsize=(18, 18))
sns.heatmap(x.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
rem = ['perimeter_mean','radius_mean','compactness_mean','concave points_mean','radius_se','perimeter_se','radius_worst','perimeter_worst','compactness_worst','concave points_worst','compactness_se','concave points_se','texture_worst','area_worst']
data_new = x.drop(rem, axis=1)
f,ax = plt.subplots(figsize=(14, 14))
sns.heatmap(data_new.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
data_new['diagnosis'] = y
corrmat = data_new.corr().abs()
top_corr_features = corrmat.index[abs(corrmat["diagnosis"])>0.1]
plt.figure(figsize=(10,10))
g = sns.heatmap(data_new[top_corr_features].corr(),annot=True,cmap="RdYlGn")
req = ['texture_mean', 'area_mean', 'smoothness_mean', 'concavity_mean', 'symmetry_mean', 'area_se', 'concavity_se', 'smoothness_worst', 'concavity_worst', 'symmetry_worst', 'fractal_dimension_worst']
data_new2 = data_new[req]
data_new = data_new.drop(['diagnosis'], axis=1)
data_final = pd.read_csv('../input/data.csv')
y_final = data_final['diagnosis'].map({'M': 0, 'B': 1})
data_final = data_final[req]
data_final.info() | code |
18131743/cell_31 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from mlxtend.regressor import StackingCVRegressor
from sklearn import ensemble
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/data.csv')
import seaborn as sns
from pandas.plotting import scatter_matrix
import matplotlib.cm as cm
import matplotlib.pyplot as plt
data['diagnosis'] = data['diagnosis'].map({'M': 1, 'B': 0})
y = pd.DataFrame(data=data['diagnosis'])
list = ['Unnamed: 32', 'id', 'diagnosis']
x = data.drop(list, axis=1)
#correlation map
f,ax = plt.subplots(figsize=(18, 18))
sns.heatmap(x.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
rem = ['perimeter_mean','radius_mean','compactness_mean','concave points_mean','radius_se','perimeter_se','radius_worst','perimeter_worst','compactness_worst','concave points_worst','compactness_se','concave points_se','texture_worst','area_worst']
data_new = x.drop(rem, axis=1)
f,ax = plt.subplots(figsize=(14, 14))
sns.heatmap(data_new.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
data_new['diagnosis'] = y
corrmat = data_new.corr().abs()
top_corr_features = corrmat.index[abs(corrmat["diagnosis"])>0.1]
plt.figure(figsize=(10,10))
g = sns.heatmap(data_new[top_corr_features].corr(),annot=True,cmap="RdYlGn")
req = ['texture_mean', 'area_mean', 'smoothness_mean', 'concavity_mean', 'symmetry_mean', 'area_se', 'concavity_se', 'smoothness_worst', 'concavity_worst', 'symmetry_worst', 'fractal_dimension_worst']
data_new2 = data_new[req]
data_new = data_new.drop(['diagnosis'], axis=1)
data_final = pd.read_csv('../input/data.csv')
y_final = data_final['diagnosis'].map({'M': 0, 'B': 1})
data_final = data_final[req]
from sklearn.model_selection import train_test_split
x_final = data_final
x_train, x_test, y_train, y_test = train_test_split(x_final, y_final, test_size=0.2, random_state=42)
x_train.shape
from sklearn import ensemble
gbr = ensemble.GradientBoostingRegressor(n_estimators=400, max_depth=7, min_samples_split=8, learning_rate=0.1, loss='ls')
gbr = gbr.fit(x_train, y_train)
from mlxtend.regressor import StackingCVRegressor
rfc = RandomForestClassifier(random_state=43, n_estimators=20, min_samples_split=2, min_samples_leaf=2, max_features='sqrt', max_depth=21, bootstrap=False)
etc = ExtraTreesClassifier(random_state=43, n_estimators=300, min_samples_split=2, min_samples_leaf=1, max_features='sqrt', max_depth=31, bootstrap=True)
etr = ExtraTreesRegressor(random_state=43, n_estimators=100, min_samples_split=2, min_samples_leaf=1, max_features='auto', max_depth=None, bootstrap=False)
rfr = RandomForestRegressor(random_state=43, n_estimators=200, min_samples_split=2, min_samples_leaf=1, max_features='log2', max_depth=11, bootstrap=False)
dtc = DecisionTreeClassifier(random_state=43, min_samples_leaf=8)
dtr = DecisionTreeRegressor(random_state=43, splitter='best', min_samples_split=2, min_samples_leaf=8, max_features='auto', max_depth=11, criterion='mse')
gbr = ensemble.GradientBoostingRegressor(n_estimators=400, max_depth=7, min_samples_split=8, learning_rate=0.1, loss='ls')
stack_gen = StackingCVRegressor(regressors=(rfc, etr, rfr, dtc, dtr), meta_regressor=dtr, use_features_in_secondary=True)
stack_gen_model = stack_gen.fit(x_train, y_train)
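# Blend: weighted average of the individual models and the stacked model;
# the weights (0.05 + 0.05 + 0.1 + 0.1 + 0.1 + 0.2 + 0.1 + 0.3) sum to 1.0.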
def blend_models_predict(X):
return 0.05 * etc.predict(X) + 0.05 * gbr.predict(X) + 0.1 * rfc.predict(X) + 0.1 * rfr.predict(X) + 0.1 * dtc.predict(X) + 0.2 * dtr.predict(X) + 0.1 * etr.predict(X) + 0.3 * stack_gen_model.predict(np.array(X))
etc_model = etc.fit(x_train, y_train)
gbr_model = gbr.fit(x_train, y_train)
rfc_model = rfc.fit(x_train, y_train)
rfr_model = rfr.fit(x_train, y_train)
dtc_model = dtc.fit(x_train, y_train)
dtr_model = dtr.fit(x_train, y_train)
etr_model = etr.fit(x_train, y_train)
print(r2_score(y_test, blend_models_predict(x_test)))
print(mean_squared_error(y_test, blend_models_predict(x_test))) | code |
18131743/cell_24 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/data.csv')
import seaborn as sns
from pandas.plotting import scatter_matrix
import matplotlib.cm as cm
import matplotlib.pyplot as plt
data['diagnosis'] = data['diagnosis'].map({'M': 1, 'B': 0})
y = pd.DataFrame(data=data['diagnosis'])
list = ['Unnamed: 32', 'id', 'diagnosis']
x = data.drop(list, axis=1)
#correlation map
f,ax = plt.subplots(figsize=(18, 18))
sns.heatmap(x.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
rem = ['perimeter_mean','radius_mean','compactness_mean','concave points_mean','radius_se','perimeter_se','radius_worst','perimeter_worst','compactness_worst','concave points_worst','compactness_se','concave points_se','texture_worst','area_worst']
data_new = x.drop(rem, axis=1)
f,ax = plt.subplots(figsize=(14, 14))
sns.heatmap(data_new.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
data_new['diagnosis'] = y
corrmat = data_new.corr().abs()
top_corr_features = corrmat.index[abs(corrmat["diagnosis"])>0.1]
plt.figure(figsize=(10,10))
g = sns.heatmap(data_new[top_corr_features].corr(),annot=True,cmap="RdYlGn")
req = ['texture_mean', 'area_mean', 'smoothness_mean', 'concavity_mean', 'symmetry_mean', 'area_se', 'concavity_se', 'smoothness_worst', 'concavity_worst', 'symmetry_worst', 'fractal_dimension_worst']
data_new2 = data_new[req]
data_new = data_new.drop(['diagnosis'], axis=1)
data_final = pd.read_csv('../input/data.csv')
y_final = data_final['diagnosis'].map({'M': 0, 'B': 1})
data_final = data_final[req]
from sklearn.model_selection import train_test_split
x_final = data_final
x_train, x_test, y_train, y_test = train_test_split(x_final, y_final, test_size=0.2, random_state=42)
x_train.shape
from sklearn.ensemble import RandomForestRegressor
rfr_test = RandomForestRegressor(random_state=43, n_estimators=200, min_samples_split=2, min_samples_leaf=1, max_features='log2', max_depth=11, bootstrap=False)
rfr_test = rfr_test.fit(x_train, y_train)
print(r2_score(y_test, rfr_test.predict(x_test)))
print(mean_squared_error(y_test, rfr_test.predict(x_test))) | code |
18131743/cell_22 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/data.csv')
import seaborn as sns
from pandas.plotting import scatter_matrix
import matplotlib.cm as cm
import matplotlib.pyplot as plt
data['diagnosis'] = data['diagnosis'].map({'M': 1, 'B': 0})
y = pd.DataFrame(data=data['diagnosis'])
list = ['Unnamed: 32', 'id', 'diagnosis']
x = data.drop(list, axis=1)
#correlation map
f,ax = plt.subplots(figsize=(18, 18))
sns.heatmap(x.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
rem = ['perimeter_mean','radius_mean','compactness_mean','concave points_mean','radius_se','perimeter_se','radius_worst','perimeter_worst','compactness_worst','concave points_worst','compactness_se','concave points_se','texture_worst','area_worst']
data_new = x.drop(rem, axis=1)
f,ax = plt.subplots(figsize=(14, 14))
sns.heatmap(data_new.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
data_new['diagnosis'] = y
corrmat = data_new.corr().abs()
top_corr_features = corrmat.index[abs(corrmat["diagnosis"])>0.1]
plt.figure(figsize=(10,10))
g = sns.heatmap(data_new[top_corr_features].corr(),annot=True,cmap="RdYlGn")
req = ['texture_mean', 'area_mean', 'smoothness_mean', 'concavity_mean', 'symmetry_mean', 'area_se', 'concavity_se', 'smoothness_worst', 'concavity_worst', 'symmetry_worst', 'fractal_dimension_worst']
data_new2 = data_new[req]
data_new = data_new.drop(['diagnosis'], axis=1)
data_final = pd.read_csv('../input/data.csv')
y_final = data_final['diagnosis'].map({'M': 0, 'B': 1})
data_final = data_final[req]
from sklearn.model_selection import train_test_split
x_final = data_final
x_train, x_test, y_train, y_test = train_test_split(x_final, y_final, test_size=0.2, random_state=42)
x_train.shape
from sklearn.ensemble import ExtraTreesClassifier
etc_test = ExtraTreesClassifier(random_state=43, n_estimators=300, min_samples_split=2, min_samples_leaf=1, max_features='sqrt', max_depth=31, bootstrap=True)
etc_test = etc_test.fit(x_train, y_train)
print(r2_score(y_test, etc_test.predict(x_test)))
print(mean_squared_error(y_test, etc_test.predict(x_test))) | code |
18131743/cell_27 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn import ensemble
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('../input/data.csv')
import seaborn as sns
from pandas.plotting import scatter_matrix
import matplotlib.cm as cm
import matplotlib.pyplot as plt
data['diagnosis'] = data['diagnosis'].map({'M': 1, 'B': 0})
y = pd.DataFrame(data=data['diagnosis'])
list = ['Unnamed: 32', 'id', 'diagnosis']
x = data.drop(list, axis=1)
#correlation map
f,ax = plt.subplots(figsize=(18, 18))
sns.heatmap(x.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
rem = ['perimeter_mean','radius_mean','compactness_mean','concave points_mean','radius_se','perimeter_se','radius_worst','perimeter_worst','compactness_worst','concave points_worst','compactness_se','concave points_se','texture_worst','area_worst']
data_new = x.drop(rem, axis=1)
f,ax = plt.subplots(figsize=(14, 14))
sns.heatmap(data_new.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax)
data_new['diagnosis'] = y
corrmat = data_new.corr().abs()
top_corr_features = corrmat.index[abs(corrmat["diagnosis"])>0.1]
plt.figure(figsize=(10,10))
g = sns.heatmap(data_new[top_corr_features].corr(),annot=True,cmap="RdYlGn")
req = ['texture_mean', 'area_mean', 'smoothness_mean', 'concavity_mean', 'symmetry_mean', 'area_se', 'concavity_se', 'smoothness_worst', 'concavity_worst', 'symmetry_worst', 'fractal_dimension_worst']
data_new2 = data_new[req]
data_new = data_new.drop(['diagnosis'], axis=1)
data_final = pd.read_csv('../input/data.csv')
y_final = data_final['diagnosis'].map({'M': 0, 'B': 1})
data_final = data_final[req]
from sklearn.model_selection import train_test_split
x_final = data_final
x_train, x_test, y_train, y_test = train_test_split(x_final, y_final, test_size=0.2, random_state=42)
x_train.shape
from sklearn import ensemble
gbr = ensemble.GradientBoostingRegressor(n_estimators=400, max_depth=7, min_samples_split=8, learning_rate=0.1, loss='ls')
gbr = gbr.fit(x_train, y_train)
from sklearn.metrics import r2_score, mean_squared_error
print(r2_score(y_test, gbr.predict(x_test)))
print(mean_squared_error(y_test, gbr.predict(x_test))) | code |
18131743/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/data.csv')
import seaborn as sns
from pandas.plotting import scatter_matrix
import matplotlib.cm as cm
import matplotlib.pyplot as plt
data['diagnosis'] = data['diagnosis'].map({'M': 1, 'B': 0})
y = pd.DataFrame(data=data['diagnosis'])
drop_cols = ['Unnamed: 32', 'id', 'diagnosis']
x = data.drop(drop_cols, axis=1)
x.head() | code |
106194784/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
athlete_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/athlete_events.csv')
region_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/noc_regions.csv')
data = pd.merge(athlete_df, region_df, how='left', on='NOC')
data | code |
106194784/cell_30 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
athlete_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/athlete_events.csv')
region_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/noc_regions.csv')
data = pd.merge(athlete_df, region_df, how='left', on='NOC')
data.rename(columns={'region': 'Region', 'notes': 'Notes'}, inplace=True)
data.isnull().sum().sort_values(ascending=False)
data.query('Team == "Egypt"')
data.query('Team == "Egypt" and Medal == "Gold"')
data[data.Age == data.Age.max()]
goldMedals = data[data.Medal == 'Gold']
goldMedals = goldMedals[np.isfinite(goldMedals['Age'])]
plt.tight_layout()
masters = goldMedals['Sport'][goldMedals['Age'] > 50]
plt.tight_layout()
# top 20 countries participating
top_10_countries = data.Team.value_counts().sort_values(ascending = False).head(20)
plt.style.use('fivethirtyeight')
fig = plt.figure(figsize=(18,8))
plt.title('Top 10 countries participating in the Olympics', size=25)
top_10_countries.plot(kind = 'barh')
gold_medals = data.query('Medal == "Gold"')
total_gold_medals = gold_medals.Region.value_counts().head(15)
plt.style.use('fivethirtyeight')
fig = plt.figure(figsize=(18, 8))
total_gold_medals.plot(kind='bar')
plt.title('Top 10 countries with most gold medals', size=25)
plt.xlabel('gold medals') | code |
106194784/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
athlete_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/athlete_events.csv')
region_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/noc_regions.csv')
region_df | code |
106194784/cell_29 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
athlete_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/athlete_events.csv')
region_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/noc_regions.csv')
data = pd.merge(athlete_df, region_df, how='left', on='NOC')
data.rename(columns={'region': 'Region', 'notes': 'Notes'}, inplace=True)
data.isnull().sum().sort_values(ascending=False)
data.query('Team == "Egypt"')
data.query('Team == "Egypt" and Medal == "Gold"')
data[data.Age == data.Age.max()]
goldMedals = data[data.Medal == 'Gold']
goldMedals = goldMedals[np.isfinite(goldMedals['Age'])]
plt.tight_layout()
masters = goldMedals['Sport'][goldMedals['Age'] > 50]
plt.tight_layout()
top_10_countries = data.Team.value_counts().sort_values(ascending=False).head(20)
plt.style.use('fivethirtyeight')
fig = plt.figure(figsize=(18, 8))
plt.title('Top 10 countries participating in the Olympics', size=25)
top_10_countries.plot(kind='barh') | code |
106194784/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
athlete_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/athlete_events.csv')
region_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/noc_regions.csv')
data = pd.merge(athlete_df, region_df, how='left', on='NOC')
data.describe() | code |
106194784/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
athlete_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/athlete_events.csv')
region_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/noc_regions.csv')
data = pd.merge(athlete_df, region_df, how='left', on='NOC')
data.rename(columns={'region': 'Region', 'notes': 'Notes'}, inplace=True)
data.isnull().sum().sort_values(ascending=False)
data.query('Team == "Egypt"')
data.query('Team == "Egypt" and Medal == "Gold"')
data[data.Age == data.Age.max()] | code |
106194784/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
athlete_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/athlete_events.csv')
region_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/noc_regions.csv')
data = pd.merge(athlete_df, region_df, how='left', on='NOC')
data.rename(columns={'region': 'Region', 'notes': 'Notes'}, inplace=True)
data.isnull().sum().sort_values(ascending=False)
data.query('Team == "Egypt"') | code |
106194784/cell_17 | [
"text_html_output_1.png"
] | import pandas as pd
athlete_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/athlete_events.csv')
region_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/noc_regions.csv')
data = pd.merge(athlete_df, region_df, how='left', on='NOC')
data.rename(columns={'region': 'Region', 'notes': 'Notes'}, inplace=True)
data.isnull().sum().sort_values(ascending=False)
data.query('Team == "Egypt"')
data.query('Team == "Egypt" and Medal == "Gold"') | code |
106194784/cell_24 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
athlete_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/athlete_events.csv')
region_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/noc_regions.csv')
data = pd.merge(athlete_df, region_df, how='left', on='NOC')
data.rename(columns={'region': 'Region', 'notes': 'Notes'}, inplace=True)
data.isnull().sum().sort_values(ascending=False)
data.query('Team == "Egypt"')
data.query('Team == "Egypt" and Medal == "Gold"')
data[data.Age == data.Age.max()]
goldMedals = data[data.Medal == 'Gold']
goldMedals = goldMedals[np.isfinite(goldMedals['Age'])]
goldMedals[goldMedals['Age'] > 50]['ID'].count() | code |
106194784/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
athlete_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/athlete_events.csv')
region_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/noc_regions.csv')
data = pd.merge(athlete_df, region_df, how='left', on='NOC')
data.rename(columns={'region': 'Region', 'notes': 'Notes'}, inplace=True)
data.isnull().sum().sort_values(ascending=False) | code |
106194784/cell_22 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
athlete_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/athlete_events.csv')
region_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/noc_regions.csv')
data = pd.merge(athlete_df, region_df, how='left', on='NOC')
data.rename(columns={'region': 'Region', 'notes': 'Notes'}, inplace=True)
data.isnull().sum().sort_values(ascending=False)
data.query('Team == "Egypt"')
data.query('Team == "Egypt" and Medal == "Gold"')
data[data.Age == data.Age.max()]
goldMedals = data[data.Medal == 'Gold']
goldMedals = goldMedals[np.isfinite(goldMedals['Age'])]
plt.figure(figsize=(26, 18))
plt.tight_layout()
sns.countplot(goldMedals['Age'])
plt.title('Distribution of Gold Medals') | code |
106194784/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
athlete_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/athlete_events.csv')
region_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/noc_regions.csv')
data = pd.merge(athlete_df, region_df, how='left', on='NOC')
data.info() | code |
106194784/cell_27 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
athlete_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/athlete_events.csv')
region_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/noc_regions.csv')
data = pd.merge(athlete_df, region_df, how='left', on='NOC')
data.rename(columns={'region': 'Region', 'notes': 'Notes'}, inplace=True)
data.isnull().sum().sort_values(ascending=False)
data.query('Team == "Egypt"')
data.query('Team == "Egypt" and Medal == "Gold"')
data[data.Age == data.Age.max()]
goldMedals = data[data.Medal == 'Gold']
goldMedals = goldMedals[np.isfinite(goldMedals['Age'])]
plt.tight_layout()
masters = goldMedals['Sport'][goldMedals['Age'] > 50]
plt.figure(figsize=(20, 10))
plt.tight_layout()
sns.countplot(masters)
plt.title('Gold Medals for Athletes Over 50') | code |
106194784/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
athlete_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/athlete_events.csv')
region_df = pd.read_csv('../input/120-years-of-olympic-history-athletes-and-results/noc_regions.csv')
athlete_df | code |
106202299/cell_13 | [
"text_html_output_2.png",
"text_html_output_1.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import holidays
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.model_selection import train_test_split, GroupKFold
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder, MinMaxScaler, StandardScaler
from sklearn.pipeline import make_pipeline
import dateutil.easter as easter
import holidays
from tqdm import tqdm
import warnings
warnings.filterwarnings('ignore')
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 6)
pd.set_option('display.float_format', '{:.2f}'.format)
plt.style.use('seaborn-whitegrid')
plt.rc('figure', autolayout=True, titlesize=18, titleweight='bold')
plt.rc('axes', labelweight='bold', labelsize='large', titlesize=8, titlepad=2)
train_df = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv', parse_dates=['date'], index_col='row_id')
test_df = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv', parse_dates=['date'], index_col='row_id')
sub_df = pd.read_csv('../input/tabular-playground-series-sep-2022/sample_submission.csv')
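# feature_engineer: derive calendar features (year offset from 2016, quarter, cyclical month,
# day, day of week, week number, weekend flag) and keep only a fixed list of "important"
# days of the year (all other days collapse to 0), dropping the raw date column.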
def feature_engineer(df):
new_df = df.copy()
new_df['year'] = df['date'].dt.year - 2016
new_df['quarter'] = df['date'].dt.quarter
new_df['month'] = df['date'].dt.month
new_df['month_sin'] = np.sin(new_df['month'] * (2 * np.pi / 12))
new_df['day'] = df['date'].dt.day
new_df['day_of_week'] = df['date'].dt.dayofweek
new_df['day_of_year'] = df['date'].dt.dayofyear
new_df['week'] = df['date'].dt.week
new_df['is_weekend'] = new_df.apply(lambda x: 1 if x['day_of_week'] >= 5 else 0, axis=1)
new_df = new_df.drop('date', axis=1)
important_dates = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 124, 125, 126, 127, 140, 141, 167, 168, 169, 170, 171, 173, 174, 175, 176, 177, 178, 179, 180, 181, 203, 230, 231, 232, 233, 234, 282, 289, 290, 307, 308, 309, 310, 311, 312, 313, 317, 318, 319, 320, 360, 361, 362, 363, 364, 365]
new_df['important_dates'] = new_df['day_of_year'].apply(lambda x: x if x in important_dates else 0)
new_df = new_df.drop(['day_of_year', 'month'], axis=1)
return new_df
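# get_holidays: flag national public holidays per country using the holidays package
# (country name mapped to its two-letter code) and keep only the binary is_holiday column.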
def get_holidays(df, country_name):
years_list = [2017, 2018, 2019, 2020, 2021]
country_map = {'Belgium': 'BE', 'France': 'FR', 'Germany': 'DE', 'Italy': 'IT', 'Poland': 'PL', 'Spain': 'ES'}
holiday_ = holidays.CountryHoliday(country_map[country_name], years=years_list)
df['holiday_name'] = df['date'].map(holiday_)
df['is_holiday'] = np.where(df['holiday_name'].notnull(), 1, 0)
df = df.drop('holiday_name', axis=1)
return df
sub_df['num_sold'] = np.array(test_df['num_sold'])
sub_df.to_csv('submission.csv', index=False)
sub_df | code |
106202299/cell_11 | [
"text_html_output_2.png",
"text_plain_output_3.png",
"text_html_output_1.png",
"text_plain_output_2.png",
"text_plain_output_1.png",
"text_html_output_3.png"
] | _, ax = plt.subplots(12, 4, figsize=(14, 50))
test_df['num_sold'] = 0
oh_cols = ['day', 'day_of_week', 'week', 'quarter', 'important_dates']
encoder = OneHotEncoder(sparse=False)
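# Fit one Ridge trend model per (country, store, product) series: add holiday and calendar
# features, one-hot encode the day-level columns, plot actual vs fitted vs forecasted values,
# and write the forecast back into test_df.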
for country, i in zip(train_df['country'].unique(), range(6)):
for store, k in zip(train_df['store'].unique(), range(2)):
for product, j in zip(train_df['product'].unique(), range(4)):
temp_df = None
temp_roll = None
temp_df = train_df.loc[(train_df['country'] == country) & (train_df['store'] == store) & (train_df['product'] == product), ['date', 'num_sold']]
temp_test_df = test_df.loc[(test_df['country'] == country) & (test_df['store'] == store) & (test_df['product'] == product), ['date', 'num_sold']]
temp_df_all = get_holidays(temp_df, country)
temp_test_df_all = get_holidays(temp_test_df, country)
temp_df_all = feature_engineer(temp_df_all)
temp_test_df_all = feature_engineer(temp_test_df_all)
X = temp_df_all.drop(['num_sold'], axis=1)
X_temp = None
X_temp = pd.concat([X[[col for col in X.columns if col not in oh_cols]], pd.DataFrame(encoder.fit_transform(X[oh_cols]), index=X.index)], axis=1)
X = X_temp
y = temp_df['num_sold']
X_fore = temp_test_df_all.drop(['num_sold'], axis=1)
X_fore_temp = None
X_fore_temp = pd.concat([X_fore[[col for col in X_fore.columns if col not in oh_cols]], pd.DataFrame(encoder.transform(X_fore[oh_cols]), index=X_fore.index)], axis=1)
X_fore = X_fore_temp
model = Ridge(tol=0.01, max_iter=1000000, random_state=0)
model.fit(X, y)
y_pred = pd.Series(model.predict(X), index=X.index)
y_fore = pd.Series(model.predict(X_fore), index=X_fore.index)
y.plot(ax=ax[i * 2 + k, j], color='0.95', style='.-', markerfacecolor='0.25', markersize=10, title=f'{country} {store} {product}', label='Actual Values', legend=True, xlabel='')
y_pred.plot(ax=ax[i * 2 + k, j], linewidth=0.4, label='Trend fitted', legend=True, xlabel='')
y_fore.plot(ax=ax[i * 2 + k, j], linewidth=0.3, label='Trend Forecasted', color='C3', legend=True, xlabel='')
test_df.loc[(test_df['country'] == country) & (test_df['store'] == store) & (test_df['product'] == product), ['num_sold']] = y_fore | code |
106202299/cell_7 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.model_selection import train_test_split, GroupKFold
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder, MinMaxScaler, StandardScaler
from sklearn.pipeline import make_pipeline
import dateutil.easter as easter
import holidays
from tqdm import tqdm
import warnings
warnings.filterwarnings('ignore')
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 6)
pd.set_option('display.float_format', '{:.2f}'.format)
plt.style.use('seaborn-whitegrid')
plt.rc('figure', autolayout=True, titlesize=18, titleweight='bold')
plt.rc('axes', labelweight='bold', labelsize='large', titlesize=8, titlepad=2)
train_df = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv', parse_dates=['date'], index_col='row_id')
test_df = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv', parse_dates=['date'], index_col='row_id')
sub_df = pd.read_csv('../input/tabular-playground-series-sep-2022/sample_submission.csv')
print('description of train data:')
display(train_df.describe(include='object'))
print('description of test data:')
display(test_df.describe(include='object')) | code |
106202299/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.model_selection import train_test_split, GroupKFold
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder, MinMaxScaler, StandardScaler
from sklearn.pipeline import make_pipeline
import dateutil.easter as easter
import holidays
from tqdm import tqdm
import warnings
warnings.filterwarnings('ignore')
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 6)
pd.set_option('display.float_format', '{:.2f}'.format)
plt.style.use('seaborn-whitegrid')
plt.rc('figure', autolayout=True, titlesize=18, titleweight='bold')
plt.rc('axes', labelweight='bold', labelsize='large', titlesize=8, titlepad=2)
train_df = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv', parse_dates=['date'], index_col='row_id')
test_df = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv', parse_dates=['date'], index_col='row_id')
sub_df = pd.read_csv('../input/tabular-playground-series-sep-2022/sample_submission.csv')
print('shape of train data: ', train_df.shape)
display(train_df)
print('shape of test data', test_df.shape)
display(test_df)
print('shape of sample submission data: ', sub_df.shape)
display(sub_df) | code |
49119038/cell_21 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from dask.diagnostics import ProgressBar
import dask.dataframe as dd
import datetime
import math
dask_data = dd.read_csv('./train.csv')
dask_data.columns
dask_data.compute().shape
len(dask_data.columns)
dask_data.isnull().sum().compute()
dask_data.fare_amount.mean().compute()
missing_values = dask_data.isnull().sum().compute()
missing_values
mysize = dask_data.index.size.compute()
missing_count = missing_values / mysize * 100
missing_count
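# haversine_dist: great-circle distance in kilometres between pickup and dropoff
# coordinates (degrees converted to radians; 6371 is the Earth's mean radius in km).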
def haversine_dist(long_pickup, long_dropoff, lat_pickup, lat_dropoff):
distance = []
for i in range(len(long_pickup)):
long1, long2, lat1, lat2 = map(math.radians, (long_pickup[i], long_dropoff[i], lat_pickup[i], lat_dropoff[i]))
dlat = lat2 - lat1
dlong = long2 - long1
a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlong / 2) ** 2
distance.append(2 * math.asin(math.sqrt(a)) * 6371)
return distance
dask_data.columns
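# map_partitions runs haversine_dist on each underlying pandas partition, computing
# the trip distance for every row of the Dask dataframe.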
dist_km_interim = dask_data.map_partitions(lambda df: haversine_dist(df['pickup_longitude'], df['dropoff_longitude'], df['pickup_latitude'], df['dropoff_latitude']))
dask_data['dist_km'] = dist_km_interim | code |
49119038/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from dask.diagnostics import ProgressBar
import dask.dataframe as dd
import datetime
dask_data = dd.read_csv('./train.csv')
dask_data.columns
with ProgressBar():
dask_data.head() | code |
49119038/cell_25 | [
"text_plain_output_1.png"
] | from dask.diagnostics import ProgressBar
import dask.dataframe as dd
import datetime
import math
dask_data = dd.read_csv('./train.csv')
dask_data.columns
dask_data.compute().shape
len(dask_data.columns)
dask_data.isnull().sum().compute()
dask_data.fare_amount.mean().compute()
missing_values = dask_data.isnull().sum().compute()
missing_values
mysize = dask_data.index.size.compute()
missing_count = missing_values / mysize * 100
missing_count
def haversine_dist(long_pickup, long_dropoff, lat_pickup, lat_dropoff):
distance = []
for i in range(len(long_pickup)):
long1, long2, lat1, lat2 = map(math.radians, (long_pickup[i], long_dropoff[i], lat_pickup[i], lat_dropoff[i]))
dlat = lat2 - lat1
dlong = long2 - long1
a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlong / 2) ** 2
distance.append(2 * math.asin(math.sqrt(a)) * 6371)
return distance
dask_data.columns
dist_km_interim = dask_data.map_partitions(lambda df: haversine_dist(df['pickup_longitude'], df['dropoff_longitude'], df['pickup_latitude'], df['dropoff_latitude']))
dask_data_new = dask_data.assign(dist_km=dist_km_interim)
dask_data['dist_km'] = haversine_dist(dask_data['pickup_longitude'], dask_data['dropoff_longitude'], dask_data['pickup_latitude'], dask_data['dropoff_latitude'])
dask_data_test['dist_km'] = haversine_dist(dask_data_test['pickup_longitude'], dask_data_test['dropoff_longitude'], dask_data_test['pickup_latitude'], dask_data_test['dropoff_latitude'])
dask_data.head(5) | code |
49119038/cell_23 | [
"text_plain_output_1.png"
] | from dask.diagnostics import ProgressBar
import dask.dataframe as dd
import datetime
import math
dask_data = dd.read_csv('./train.csv')
dask_data.columns
dask_data.compute().shape
len(dask_data.columns)
dask_data.isnull().sum().compute()
dask_data.fare_amount.mean().compute()
missing_values = dask_data.isnull().sum().compute()
missing_values
mysize = dask_data.index.size.compute()
missing_count = missing_values / mysize * 100
missing_count
def haversine_dist(long_pickup, long_dropoff, lat_pickup, lat_dropoff):
distance = []
for i in range(len(long_pickup)):
long1, long2, lat1, lat2 = map(math.radians, (long_pickup[i], long_dropoff[i], lat_pickup[i], lat_dropoff[i]))
dlat = lat2 - lat1
dlong = long2 - long1
a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlong / 2) ** 2
distance.append(2 * math.asin(math.sqrt(a)) * 6371)
return distance
dask_data.columns
dist_km_interim = dask_data.map_partitions(lambda df: haversine_dist(df['pickup_longitude'], df['dropoff_longitude'], df['pickup_latitude'], df['dropoff_latitude']))
dask_data_new = dask_data.assign(dist_km=dist_km_interim)
with ProgressBar():
dask_data_new.head() | code |
49119038/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import dask.dataframe as dd
import datetime
print('Start of Dask Read:', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
dask_data = dd.read_csv('./train.csv')
print('End of Dask Read:', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')) | code |
49119038/cell_19 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_3.png",
"text_html_output_1.png",
"text_plain_output_1.png"
] | from dask.diagnostics import ProgressBar
import dask.dataframe as dd
import datetime
dask_data = dd.read_csv('./train.csv')
dask_data.columns
dask_data.compute().shape
len(dask_data.columns)
dask_data.isnull().sum().compute()
dask_data.fare_amount.mean().compute()
missing_values = dask_data.isnull().sum().compute()
missing_values
mysize = dask_data.index.size.compute()
missing_count = missing_values / mysize * 100
missing_count
dask_data.columns | code |
49119038/cell_8 | [
"text_html_output_1.png"
] | import dask.dataframe as dd
import datetime
dask_data = dd.read_csv('./train.csv')
dask_data.columns | code |
49119038/cell_15 | [
"text_plain_output_1.png"
] | from dask.diagnostics import ProgressBar
import dask.dataframe as dd
import datetime
dask_data = dd.read_csv('./train.csv')
dask_data.columns
dask_data.compute().shape
len(dask_data.columns)
dask_data.isnull().sum().compute()
dask_data.fare_amount.mean().compute()
missing_values = dask_data.isnull().sum().compute()
missing_values | code |
49119038/cell_16 | [
"text_html_output_1.png"
] | from dask.diagnostics import ProgressBar
import dask.dataframe as dd
import datetime
dask_data = dd.read_csv('./train.csv')
dask_data.columns
dask_data.compute().shape
len(dask_data.columns)
dask_data.isnull().sum().compute()
dask_data.fare_amount.mean().compute()
missing_values = dask_data.isnull().sum().compute()
missing_values
mysize = dask_data.index.size.compute()
missing_count = missing_values / mysize * 100
missing_count | code |
49119038/cell_3 | [
"text_plain_output_1.png"
] | from dask.diagnostics import ProgressBar
from dask.distributed import progress
from distributed import Client
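# Client() starts a local Dask distributed cluster (scheduler plus workers) and makes it
# the default scheduler for the Dask computations that follow.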
client = Client()
client
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import math
import pandas_profiling
import dask
import dask.dataframe as dd
import datetime
import warnings
warnings.filterwarnings('ignore')
import os
print(os.listdir('../input/new-york-city-taxi-fare-prediction')) | code |
49119038/cell_10 | [
"text_plain_output_1.png"
] | from dask.diagnostics import ProgressBar
import dask.dataframe as dd
import datetime
dask_data = dd.read_csv('./train.csv')
dask_data.columns
display(dask_data.head(2))
print('Information:')
dask_data.compute().info()
print('Shape:')
dask_data.compute().shape
print('Describe:')
dask_data.describe().compute()
print('Columns:')
len(dask_data.columns)
print('Empty Values:')
dask_data.isnull().sum().compute()
print('Taxi fare Mean Value:')
dask_data.fare_amount.mean().compute() | code |
49119038/cell_12 | [
"text_plain_output_1.png"
] | pandas_data.shape
pandas_data.head(2)
pandas_data.describe
pandas_data['fare_amount'].unique()
pandas_data.isnull().sum()
pandas_data.isna().sum() | code |
121152199/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/airbnb-user-pathways/airbnb.csv')
df.drop('id_visitor', axis=1, inplace=True)
df.drop('id_session', axis=1, inplace=True)
df.drop('next_id_session', axis=1, inplace=True)
df.drop('dim_user_agent', axis=1, inplace=True)
df.isnull().sum()
df.corr() * 100
sending = df.groupby('dim_device_app_combo').sum()
sending.sort_values('sent_message', ascending=False, inplace=True)
sending.plot.bar(y='sent_message')
sending | code |
121152199/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/airbnb-user-pathways/airbnb.csv')
df.drop('id_visitor', axis=1, inplace=True)
df.drop('id_session', axis=1, inplace=True)
df.drop('next_id_session', axis=1, inplace=True)
df.drop('dim_user_agent', axis=1, inplace=True)
df.isnull().sum() | code |
121152199/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/airbnb-user-pathways/airbnb.csv')
df.drop('id_visitor', axis=1, inplace=True)
df.drop('id_session', axis=1, inplace=True)
df.drop('next_id_session', axis=1, inplace=True)
df.drop('dim_user_agent', axis=1, inplace=True)
df.isnull().sum()
df.corr() * 100 | code |
121152199/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/airbnb-user-pathways/airbnb.csv')
df.drop('id_visitor', axis=1, inplace=True)
df.drop('id_session', axis=1, inplace=True)
df.drop('next_id_session', axis=1, inplace=True)
df.drop('dim_user_agent', axis=1, inplace=True)
df.head() | code |
121152199/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/airbnb-user-pathways/airbnb.csv')
df.drop('id_visitor', axis=1, inplace=True)
df.drop('id_session', axis=1, inplace=True)
df.drop('next_id_session', axis=1, inplace=True)
df.drop('dim_user_agent', axis=1, inplace=True)
df.isnull().sum()
df.corr() * 100
sending = df.groupby('dim_device_app_combo').sum()
sending.sort_values('sent_message', ascending=False, inplace=True)
sending
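# Session length per row: last-seen minus first-seen timestamp converted to seconds,
# then averaged per device/app combination for the bar chart below.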
df['time_spend'] = df['ts_max'] - df['ts_min']
df['time_spend'] = df['time_spend'].dt.total_seconds()
time_spend = sending = df.groupby('dim_device_app_combo').mean().sort_values('time_spend')
time_spend.plot.bar(y='time_spend') | code |
121152199/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
121152199/cell_7 | [
"text_html_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/airbnb-user-pathways/airbnb.csv')
df.drop('id_visitor', axis=1, inplace=True)
df.drop('id_session', axis=1, inplace=True)
df.drop('next_id_session', axis=1, inplace=True)
df.drop('dim_user_agent', axis=1, inplace=True)
df.isnull().sum()
df.corr() * 100
device = df['dim_device_app_combo'].value_counts()
device.plot.bar(x='device', y='val', rot=90) | code |
121152199/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/airbnb-user-pathways/airbnb.csv')
df.drop('id_visitor', axis=1, inplace=True)
df.drop('id_session', axis=1, inplace=True)
df.drop('next_id_session', axis=1, inplace=True)
df.drop('dim_user_agent', axis=1, inplace=True)
df.isnull().sum()
df.corr() * 100
device = df['dim_device_app_combo'].value_counts()
device.plot.pie() | code |
121152199/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/airbnb-user-pathways/airbnb.csv')
df.drop('id_visitor', axis=1, inplace=True)
df.drop('id_session', axis=1, inplace=True)
df.drop('next_id_session', axis=1, inplace=True)
df.drop('dim_user_agent', axis=1, inplace=True)
df.describe() | code |
121152199/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/airbnb-user-pathways/airbnb.csv')
df.drop('id_visitor', axis=1, inplace=True)
df.drop('id_session', axis=1, inplace=True)
df.drop('next_id_session', axis=1, inplace=True)
df.drop('dim_user_agent', axis=1, inplace=True)
df.isnull().sum()
df.corr() * 100
sending = df.groupby('dim_device_app_combo').sum()
sending.sort_values('sent_message', ascending=False, inplace=True)
sending
df['time_spend'] = df['ts_max'] - df['ts_min']
df['time_spend'] = df['time_spend'].dt.total_seconds()
time_spend = sending = df.groupby('dim_device_app_combo').mean().sort_values('time_spend')
cout = df['sent_message'].sum()
sent_message = df.groupby(['sent_message', 'sent_booking_request']).count()
sent_message | code |
88093824/cell_13 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix
lr = LogisticRegression(solver='liblinear')
lr.fit(X_train, y_train)
y_pred = lr.predict(X_test)
from sklearn.metrics import recall_score
recall_score(y_test, y_pred) | code |
88093824/cell_19 | [
"text_plain_output_1.png"
] | from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from xgboost import XGBRegressor
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
data.isnull().sum()
data.dtypes
X = data.drop('Class', axis=1)
y = data['Class']
from xgboost import XGBRegressor
xgb = XGBRegressor()
xgb.fit(X_train, y_train)
xgb_pred = xgb.predict(X_test)
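# Helper metrics: binarise the regressor's continuous predictions at the given threshold
# before scoring them with F1 / recall against the true class labels.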
def xgb_f1(y, t, threshold=0.5):
y_bin = (y > threshold).astype(int)
return ('f1', f1_score(t, y_bin))
def xgb_recall(y, t, threshold=0.5):
y_bin = (y > threshold).astype(int)
return ('recall score', recall_score(t, y_bin))
xgb_recall(xgb_pred, y_test) | code |
88093824/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
88093824/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
data.isnull().sum()
data.dtypes | code |
88093824/cell_18 | [
"text_plain_output_1.png"
] | from sklearn.metrics import f1_score
from xgboost import XGBRegressor
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
data.isnull().sum()
data.dtypes
X = data.drop('Class', axis=1)
y = data['Class']
from xgboost import XGBRegressor
xgb = XGBRegressor()
xgb.fit(X_train, y_train)
xgb_pred = xgb.predict(X_test)
def xgb_f1(y, t, threshold=0.5):
y_bin = (y > threshold).astype(int)
return ('f1', f1_score(t, y_bin))
xgb_f1(xgb_pred, y_test) | code |
88093824/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
data.isnull().sum()
data.dtypes
data['Class'].value_counts() | code |
88093824/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
data.head() | code |
88093824/cell_12 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix
lr = LogisticRegression(solver='liblinear')
lr.fit(X_train, y_train)
y_pred = lr.predict(X_test)
print(f'F1 score is {f1_score(y_test, y_pred)}') | code |
88093824/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
data.isnull().sum() | code |
50223616/cell_9 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum()
df.isnull().values.any()
categorial_col = df.select_dtypes(include='object')
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
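# Label-encode every object-dtype column in place, mapping each category to an integer code.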
lr = LabelEncoder()
for i in categorial_col:
df[i] = lr.fit_transform(df[i])
df[categorial_col.columns].head() | code |
50223616/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum() | code |
50223616/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum()
df.isnull().values.any()
df.describe() | code |
50223616/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df.head() | code |
50223616/cell_11 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum()
df.isnull().values.any()
categorial_col = df.select_dtypes(include='object')
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
X = df.drop('Attrition', axis=1)
y = df.Attrition
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
X_train.shape
from sklearn.tree import DecisionTreeClassifier
dtc = DecisionTreeClassifier()
dtc.fit(X_train, y_train)
print_score(dtc, X_train, y_train, X_test, y_test, train=True)
print_score(dtc, X_train, y_train, X_test, y_test, train=False) | code |
50223616/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
50223616/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum()
df.isnull().values.any()
categorial_col = df.select_dtypes(include='object')
categorial_col.head() | code |
50223616/cell_8 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum()
df.isnull().values.any()
categorial_col = df.select_dtypes(include='object')
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize=(30, 30))
sns.heatmap(df.corr(), annot=True, cmap='RdYlGn', annot_kws={'size': 15}) | code |
50223616/cell_10 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum()
df.isnull().values.any()
categorial_col = df.select_dtypes(include='object')
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
X = df.drop('Attrition', axis=1)
y = df.Attrition
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
X_train.shape | code |
50223616/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv')
df = df.drop(['EmployeeNumber', 'EmployeeCount', 'StandardHours'], axis=1)
df.isna().sum()
df.isnull().values.any() | code |
122251150/cell_21 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
predict = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
train.dtypes.value_counts()
missing_vals_train = pd.DataFrame(train.isna().sum(), columns=['Sum'])
missing_vals_train = missing_vals_train.sort_values(by='Sum', ascending=False)
missing_vals_train = missing_vals_train[missing_vals_train['Sum'] > 0]
missing_vals_train['Percent'] = missing_vals_train['Sum'] / 8693 * 100
missing_vals_train
# categorical variable: CryoSleep
fig, ax = plt.subplots(1,2, figsize =(10, 5))
sns.countplot(data=train, x='CryoSleep', ax=ax[0])
sns.countplot(data=train, x='CryoSleep', hue='Transported', ax=ax[1])
fig, axs = plt.subplots(3, 3, figsize=(17, 10))
sns.countplot(data=train, x='HomePlanet', hue='CryoSleep', ax=axs[0, 0])
sns.violinplot(train, x='CryoSleep', y='Age', ax=axs[0, 1])
sns.countplot(data=train, x='VIP', hue='CryoSleep', ax=axs[0, 2])
sns.violinplot(train, x='CryoSleep', y='FoodCourt', ax=axs[1, 0])
sns.violinplot(train, x='CryoSleep', y='ShoppingMall', ax=axs[1, 1])
sns.violinplot(train, x='CryoSleep', y='Spa', ax=axs[1, 2])
sns.violinplot(train, x='CryoSleep', y='VRDeck', ax=axs[2, 0])
sns.violinplot(train, x='CryoSleep', y='RoomService', ax=axs[2, 1])
sns.countplot(data=train, x='Destination', hue='CryoSleep', ax=axs[2, 2]) | code |
122251150/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
predict = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
train.dtypes.value_counts() | code |
122251150/cell_34 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
predict = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
train.dtypes.value_counts()
missing_vals_train = pd.DataFrame(train.isna().sum(), columns=['Sum'])
missing_vals_train = missing_vals_train.sort_values(by='Sum', ascending=False)
missing_vals_train = missing_vals_train[missing_vals_train['Sum'] > 0]
missing_vals_train['Percent'] = missing_vals_train['Sum'] / 8693 * 100
missing_vals_train
train['VRDeck'].isnull().sum() | code |
122251150/cell_26 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
predict = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
train.dtypes.value_counts()
missing_vals_train = pd.DataFrame(train.isna().sum(), columns=['Sum'])
missing_vals_train = missing_vals_train.sort_values(by='Sum', ascending=False)
missing_vals_train = missing_vals_train[missing_vals_train['Sum'] > 0]
missing_vals_train['Percent'] = missing_vals_train['Sum'] / 8693 * 100
missing_vals_train
# categorical variable: CryoSleep
fig, ax = plt.subplots(1,2, figsize =(10, 5))
sns.countplot(data=train, x='CryoSleep', ax=ax[0])
sns.countplot(data=train, x='CryoSleep', hue='Transported', ax=ax[1])
# categorical variable: CryoSleep
fig, axs = plt.subplots(3,3, figsize =(17, 10))
sns.countplot(data=train, x='HomePlanet', hue='CryoSleep', ax=axs[0,0])
sns.violinplot(train, x = 'CryoSleep', y='Age', ax=axs[0,1])
sns.countplot(data=train, x='VIP', hue='CryoSleep', ax=axs[0,2])
sns.violinplot(train, x = 'CryoSleep', y='FoodCourt', ax=axs[1,0])
sns.violinplot(train, x = 'CryoSleep', y='ShoppingMall', ax=axs[1,1])
sns.violinplot(train, x = 'CryoSleep', y='Spa', ax=axs[1,2])
sns.violinplot(train, x = 'CryoSleep', y='VRDeck', ax=axs[2,0])
sns.violinplot(train, x = 'CryoSleep', y='RoomService', ax=axs[2,1])
sns.countplot(data=train, x='Destination', hue='CryoSleep', ax=axs[2,2])
fig, axs = plt.subplots(2, 3, figsize=(17, 10))
sns.distplot(train['RoomService'], ax=axs[0, 0])
sns.distplot(train['FoodCourt'], ax=axs[0, 1])
sns.distplot(train['ShoppingMall'], ax=axs[0, 2])
sns.distplot(train['Spa'], ax=axs[1, 0])
sns.distplot(train['VRDeck'], ax=axs[1, 1]) | code |
122251150/cell_19 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
predict = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
train.dtypes.value_counts()
missing_vals_train = pd.DataFrame(train.isna().sum(), columns=['Sum'])
missing_vals_train = missing_vals_train.sort_values(by='Sum', ascending=False)
missing_vals_train = missing_vals_train[missing_vals_train['Sum'] > 0]
missing_vals_train['Percent'] = missing_vals_train['Sum'] / 8693 * 100
missing_vals_train
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
sns.countplot(data=train, x='CryoSleep', ax=ax[0])
sns.countplot(data=train, x='CryoSleep', hue='Transported', ax=ax[1]) | code |
122251150/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
122251150/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
predict = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
train.info() | code |
122251150/cell_24 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
predict = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
train.dtypes.value_counts()
missing_vals_train = pd.DataFrame(train.isna().sum(), columns=['Sum'])
missing_vals_train = missing_vals_train.sort_values(by='Sum', ascending=False)
missing_vals_train = missing_vals_train[missing_vals_train['Sum'] > 0]
missing_vals_train['Percent'] = missing_vals_train['Sum'] / 8693 * 100
missing_vals_train
train['CryoSleep'].isnull().sum() | code |
122251150/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
predict = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
train.dtypes.value_counts()
missing_vals_train = pd.DataFrame(train.isna().sum(), columns=['Sum'])
missing_vals_train = missing_vals_train.sort_values(by='Sum', ascending=False)
missing_vals_train = missing_vals_train[missing_vals_train['Sum'] > 0]
missing_vals_train['Percent'] = missing_vals_train['Sum'] / 8693 * 100
missing_vals_train
missing_vals_predict = pd.DataFrame(predict.isna().sum(), columns=['Sum'])
missing_vals_predict = missing_vals_predict.sort_values(by='Sum', ascending=False)
missing_vals_predict = missing_vals_predict[missing_vals_predict['Sum'] > 0]
missing_vals_predict['Percent'] = missing_vals_predict['Sum'] / 8693 * 100
missing_vals_predict | code |
122251150/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
predict = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
train.dtypes.value_counts()
train.head() | code |
122251150/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
predict = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
train.dtypes.value_counts()
missing_vals_train = pd.DataFrame(train.isna().sum(), columns=['Sum'])
missing_vals_train = missing_vals_train.sort_values(by='Sum', ascending=False)
missing_vals_train = missing_vals_train[missing_vals_train['Sum'] > 0]
missing_vals_train['Percent'] = missing_vals_train['Sum'] / 8693 * 100
missing_vals_train | code |
122251150/cell_36 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
predict = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
train.dtypes.value_counts()
missing_vals_train = pd.DataFrame(train.isna().sum(), columns=['Sum'])
missing_vals_train = missing_vals_train.sort_values(by='Sum', ascending=False)
missing_vals_train = missing_vals_train[missing_vals_train['Sum'] > 0]
missing_vals_train['Percent'] = missing_vals_train['Sum'] / 8693 * 100
missing_vals_train
# categorical variable: CryoSleep
fig, ax = plt.subplots(1,2, figsize =(10, 5))
sns.countplot(data=train, x='CryoSleep', ax=ax[0])
sns.countplot(data=train, x='CryoSleep', hue='Transported', ax=ax[1])
# categorical variable: CryoSleep
fig, axs = plt.subplots(3,3, figsize =(17, 10))
sns.countplot(data=train, x='HomePlanet', hue='CryoSleep', ax=axs[0,0])
sns.violinplot(train, x = 'CryoSleep', y='Age', ax=axs[0,1])
sns.countplot(data=train, x='VIP', hue='CryoSleep', ax=axs[0,2])
sns.violinplot(train, x = 'CryoSleep', y='FoodCourt', ax=axs[1,0])
sns.violinplot(train, x = 'CryoSleep', y='ShoppingMall', ax=axs[1,1])
sns.violinplot(train, x = 'CryoSleep', y='Spa', ax=axs[1,2])
sns.violinplot(train, x = 'CryoSleep', y='VRDeck', ax=axs[2,0])
sns.violinplot(train, x = 'CryoSleep', y='RoomService', ax=axs[2,1])
sns.countplot(data=train, x='Destination', hue='CryoSleep', ax=axs[2,2])
fig, axs = plt.subplots(2,3, figsize =(17, 10))
sns.distplot(train['RoomService'], ax=axs[0,0])
sns.distplot(train['FoodCourt'], ax=axs[0,1])
sns.distplot(train['ShoppingMall'], ax=axs[0,2])
sns.distplot(train['Spa'], ax=axs[1,0])
sns.distplot(train['VRDeck'], ax=axs[1,1])
fig, axs = plt.subplots(3, 3, figsize=(17, 10))
sns.countplot(data=train, x='HomePlanet', hue='VIP', ax=axs[0, 0])
sns.violinplot(train, x='VIP', y='Age', ax=axs[0, 1])
sns.countplot(data=train, x='CryoSleep', hue='VIP', ax=axs[0, 2])
sns.violinplot(train, x='VIP', y='FoodCourt', ax=axs[1, 0])
sns.violinplot(train, x='VIP', y='ShoppingMall', ax=axs[1, 1])
sns.violinplot(train, x='VIP', y='Spa', ax=axs[1, 2])
sns.violinplot(train, x='VIP', y='VRDeck', ax=axs[2, 0])
sns.violinplot(train, x='VIP', y='RoomService', ax=axs[2, 1])
sns.countplot(data=train, x='Destination', hue='VIP', ax=axs[2, 2]) | code |
72107386/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
train.head() | code |
72107386/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
test.head() | code |
72107386/cell_15 | [
"text_html_output_1.png"
] | from catboost import Pool, CatBoostRegressor
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
X_train = train.drop(['id', 'target'], axis=1)
y_train = train['target']
X_test = test.drop(['id'], axis=1)
cat_features = [i for i, col in enumerate(X_train.columns) if 'cat' in col]
cat_features
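# Pool bundles the feature matrix with the categorical column indices so CatBoost can
# apply its native categorical-feature handling without manual encoding.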
train_pool = Pool(X_train, y_train, cat_features=cat_features)
test_pool = Pool(X_test, cat_features=cat_features)
model = CatBoostRegressor(verbose=False)
model.fit(train_pool) | code |
72107386/cell_17 | [
"text_html_output_1.png"
] | from catboost import Pool, CatBoostRegressor
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
X_train = train.drop(['id', 'target'], axis=1)
y_train = train['target']
X_test = test.drop(['id'], axis=1)
cat_features = [i for i, col in enumerate(X_train.columns) if 'cat' in col]
cat_features
train_pool = Pool(X_train, y_train, cat_features=cat_features)
test_pool = Pool(X_test, cat_features=cat_features)
model = CatBoostRegressor(verbose=False)
model.fit(train_pool)
model.get_best_score() | code |
72107386/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
X_train = train.drop(['id', 'target'], axis=1)
y_train = train['target']
X_test = test.drop(['id'], axis=1)
cat_features = [i for i, col in enumerate(X_train.columns) if 'cat' in col]
cat_features | code |