path: string (13 to 17 chars)
screenshot_names: sequence of strings (1 to 873 items)
code: string (0 to 40.4k chars)
cell_type: string (1 class: "code")
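Each record below pairs one replayed Kaggle notebook cell (path is <kernel id>/cell_<n>) with the file names of its rendered outputs (screenshot_names) and the accumulated source replayed up to that cell (code). A minimal sketch of iterating such records, assuming a JSON-lines export named cells.jsonl; both the file name and the layout are illustrative assumptions, not part of this dump:

import json

# Assumed layout: one JSON object per line with the four fields described above.
with open('cells.jsonl') as f:
    for line in f:
        record = json.loads(line)
        kernel_id, cell_name = record['path'].split('/')  # e.g. '106208751/cell_12'
        print(kernel_id, cell_name, len(record['screenshot_names']), record['cell_type'])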
106208751/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/video-game-sales-with-ratings/Video_Games_Sales_as_at_22_Dec_2016.csv')
col_remove = ['Critic_Score', 'Critic_Count', 'User_Score', 'User_Count', 'Developer', 'Rating']
data_clear = data.drop(labels=col_remove, axis=1)
data_clear.isnull().sum()
data_clear = data_clear.dropna(axis=0)
data_clear.head()
code
106208751/cell_5
[ "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/video-game-sales-with-ratings/Video_Games_Sales_as_at_22_Dec_2016.csv')
data.info()
data.head()
code
89139202/cell_9
[ "image_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
df_train['time'] = pd.to_datetime(df_train['time'])
df_train.drop(['row_id'], axis=1, inplace=True)
df_test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv')
# 'day' and 'month' are derived in an earlier cell of the original notebook; a plausible reconstruction:
df_train['day'] = df_train['time'].dt.dayofweek
df_train['month'] = df_train['time'].dt.month
print(df_train['day'].value_counts())
print(df_train['month'].value_counts())
code
89139202/cell_4
[ "image_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
df_train['time'] = pd.to_datetime(df_train['time'])
df_train.drop(['row_id'], axis=1, inplace=True)
df_test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv')
print(df_train.isnull().sum())
print(df_train.isna().sum())
print(f'Duplicates: {df_train.duplicated().sum()}')
code
89139202/cell_2
[ "image_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
df_train['time'] = pd.to_datetime(df_train['time'])
print(df_train.head())
df_train.drop(['row_id'], axis=1, inplace=True)
print(df_train.head())
df_test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv')
code
89139202/cell_15
[ "text_plain_output_1.png" ]
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df_train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
df_train['time'] = pd.to_datetime(df_train['time'])
df_train.drop(['row_id'], axis=1, inplace=True)
df_test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv')
# 'day' and 'month' are derived in an earlier cell of the original notebook; a plausible reconstruction:
df_train['day'] = df_train['time'].dt.dayofweek
df_train['month'] = df_train['time'].dt.month

fig, ax = plt.subplots(2, 2, figsize=(15, 15))
ax[0, 0].hist(df_train['congestion'], bins=100)
ax[0, 0].set_xlabel('Congestion')
ax[0, 1].hist(df_train['x'])
ax[0, 1].set_xlabel('x')
ax[1, 0].hist(df_train['y'])
ax[1, 0].set_xlabel('y')
ax[1, 1].hist(df_train['direction'], bins=df_train['direction'].nunique(), align='right')
ax[1, 1].set_xlabel('direction')
fig.show()

fig, ax = plt.subplots(1, 2, figsize=(15, 7))
ax[0].hist2d(df_train['congestion'], df_train['day'], bins=[50, 7], cmap=mpl.cm.Blues)
ax[0].set_xlabel('Congestion')
ax[0].set_ylabel('Weekday')
ax[1].hist2d(df_train['congestion'], df_train['month'], bins=[50, 12], cmap=mpl.cm.Blues, range=([0, 100], [0, 11]))
ax[1].set_xlabel('Congestion')
ax[1].set_ylabel('Month')
plt.show()

fig, ax = plt.subplots(3, 3, figsize=(30, 30))
unique_directions = df_train['direction'].unique()
title_map = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}
ax = ax.ravel()
for i in range(7):
    day_view = df_train[df_train['day'] == i]
    for direction in unique_directions:
        direction_view = day_view[day_view['direction'] == direction]
        ax[i].hist(direction_view['congestion'], label=direction, bins=50)
    ax[i].legend(loc='best')
    ax[i].set_xlabel('Congestion')
    ax[i].set_ylabel('Count/bin')
    ax[i].set_title(title_map[i])
ax = ax.reshape(3, 3)
plt.show()

fig, ax = plt.subplots(1, 1, figsize=(10, 10))
sns.heatmap(df_train.corr(), cmap=sns.color_palette('vlag', as_cmap=True), square=True, ax=ax, annot=True)
plt.show()
code
89139202/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
df_train['time'] = pd.to_datetime(df_train['time'])
df_train.drop(['row_id'], axis=1, inplace=True)
df_test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv')
df_train.describe()
code
89139202/cell_10
[ "text_plain_output_1.png" ]
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd

df_train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
df_train['time'] = pd.to_datetime(df_train['time'])
df_train.drop(['row_id'], axis=1, inplace=True)
df_test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv')
# 'day' and 'month' are derived in an earlier cell of the original notebook; a plausible reconstruction:
df_train['day'] = df_train['time'].dt.dayofweek
df_train['month'] = df_train['time'].dt.month

fig, ax = plt.subplots(2, 2, figsize=(15, 15))
ax[0, 0].hist(df_train['congestion'], bins=100)
ax[0, 0].set_xlabel('Congestion')
ax[0, 1].hist(df_train['x'])
ax[0, 1].set_xlabel('x')
ax[1, 0].hist(df_train['y'])
ax[1, 0].set_xlabel('y')
ax[1, 1].hist(df_train['direction'], bins=df_train['direction'].nunique(), align='right')
ax[1, 1].set_xlabel('direction')
fig.show()

fig, ax = plt.subplots(1, 2, figsize=(15, 7))
ax[0].hist2d(df_train['congestion'], df_train['day'], bins=[50, 7], cmap=mpl.cm.Blues)
ax[0].set_xlabel('Congestion')
ax[0].set_ylabel('Weekday')
ax[1].hist2d(df_train['congestion'], df_train['month'], bins=[50, 12], cmap=mpl.cm.Blues, range=([0, 100], [0, 11]))
ax[1].set_xlabel('Congestion')
ax[1].set_ylabel('Month')
plt.show()
code
89139202/cell_12
[ "text_html_output_1.png" ]
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd

df_train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
df_train['time'] = pd.to_datetime(df_train['time'])
df_train.drop(['row_id'], axis=1, inplace=True)
df_test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv')
# 'day' and 'month' are derived in an earlier cell of the original notebook; a plausible reconstruction:
df_train['day'] = df_train['time'].dt.dayofweek
df_train['month'] = df_train['time'].dt.month

fig, ax = plt.subplots(2, 2, figsize=(15, 15))
ax[0, 0].hist(df_train['congestion'], bins=100)
ax[0, 0].set_xlabel('Congestion')
ax[0, 1].hist(df_train['x'])
ax[0, 1].set_xlabel('x')
ax[1, 0].hist(df_train['y'])
ax[1, 0].set_xlabel('y')
ax[1, 1].hist(df_train['direction'], bins=df_train['direction'].nunique(), align='right')
ax[1, 1].set_xlabel('direction')
fig.show()

fig, ax = plt.subplots(1, 2, figsize=(15, 7))
ax[0].hist2d(df_train['congestion'], df_train['day'], bins=[50, 7], cmap=mpl.cm.Blues)
ax[0].set_xlabel('Congestion')
ax[0].set_ylabel('Weekday')
ax[1].hist2d(df_train['congestion'], df_train['month'], bins=[50, 12], cmap=mpl.cm.Blues, range=([0, 100], [0, 11]))
ax[1].set_xlabel('Congestion')
ax[1].set_ylabel('Month')
plt.show()

fig, ax = plt.subplots(3, 3, figsize=(30, 30))
unique_directions = df_train['direction'].unique()
title_map = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}
ax = ax.ravel()
for i in range(7):
    day_view = df_train[df_train['day'] == i]
    for direction in unique_directions:
        direction_view = day_view[day_view['direction'] == direction]
        ax[i].hist(direction_view['congestion'], label=direction, bins=50)
    ax[i].legend(loc='best')
    ax[i].set_xlabel('Congestion')
    ax[i].set_ylabel('Count/bin')
    ax[i].set_title(title_map[i])
ax = ax.reshape(3, 3)
plt.show()
code
89139202/cell_5
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

df_train = pd.read_csv('../input/tabular-playground-series-mar-2022/train.csv')
df_train['time'] = pd.to_datetime(df_train['time'])
df_train.drop(['row_id'], axis=1, inplace=True)
df_test = pd.read_csv('../input/tabular-playground-series-mar-2022/test.csv')

fig, ax = plt.subplots(2, 2, figsize=(15, 15))
ax[0, 0].hist(df_train['congestion'], bins=100)
ax[0, 0].set_xlabel('Congestion')
ax[0, 1].hist(df_train['x'])
ax[0, 1].set_xlabel('x')
ax[1, 0].hist(df_train['y'])
ax[1, 0].set_xlabel('y')
ax[1, 1].hist(df_train['direction'], bins=df_train['direction'].nunique(), align='right')
ax[1, 1].set_xlabel('direction')
fig.show()
code
74044709/cell_6
[ "text_plain_output_1.png" ]
!pip install openai
code
74044709/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
74044709/cell_8
[ "text_plain_output_1.png" ]
"""prompt=text t=True while t: person=str(input('Morty:')) prompt+='Morty:'+person+' ' prompt+='Rick:' output=model(prompt) prompt+=output+' ' print('Rick:',output) if person=='bey': print('Rick:ok I'm done, go away') t=False """
code
74044709/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/rickmorty-scripts/RickAndMortyScripts.csv')
data.head()
code
74044709/cell_12
[ "text_html_output_1.png" ]
"""prompt=newtext t=True while t: person=str(input('Morty:')) prompt+='Morty:'+person+' ' prompt+='Rick:' output=model(prompt) prompt+=output+' ' print('Rick:',output) if person=='bey': print('Rick:ok I'm done, go away') t=False"""
code
90143425/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
train[['Deck', 'Cabin_Num', 'Side']] = train.Cabin.str.split('/', expand=True)
test[['Deck', 'Cabin_Num', 'Side']] = test.Cabin.str.split('/', expand=True)
train[['Group', 'Group_Num']] = train.PassengerId.str.split('_', expand=True)
test[['Group', 'Group_Num']] = test.PassengerId.str.split('_', expand=True)
for i in train.columns:
    print(f'{i}: {train[i].nunique()}')
code
90143425/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
train[['Deck', 'Cabin_Num', 'Side']] = train.Cabin.str.split('/', expand=True)
test[['Deck', 'Cabin_Num', 'Side']] = test.Cabin.str.split('/', expand=True)
train[['Group', 'Group_Num']] = train.PassengerId.str.split('_', expand=True)
test[['Group', 'Group_Num']] = test.PassengerId.str.split('_', expand=True)
train.head()
code
90143425/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
train[['Deck', 'Cabin_Num', 'Side']] = train.Cabin.str.split('/', expand=True)
test[['Deck', 'Cabin_Num', 'Side']] = test.Cabin.str.split('/', expand=True)
train[['Group', 'Group_Num']] = train.PassengerId.str.split('_', expand=True)
test[['Group', 'Group_Num']] = test.PassengerId.str.split('_', expand=True)

mode_list = ['HomePlanet', 'CryoSleep', 'Destination', 'VIP', 'Deck', 'Side']
for i in mode_list:
    train[i] = train[i].fillna(train[i].mode()[0])
    test[i] = test[i].fillna(train[i].mode()[0])

median_list = ['Age', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']
for i in median_list:
    train[i] = train[i].fillna(train[i].median())
    test[i] = test[i].fillna(train[i].median())

for i in train.columns:
    if train[i].isna().sum() > 0:
        print(f'{i}: {train[i].isna().sum()}')
code
90143425/cell_1
[ "text_plain_output_1.png" ]
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90143425/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
train[['Deck', 'Cabin_Num', 'Side']] = train.Cabin.str.split('/', expand=True)
test[['Deck', 'Cabin_Num', 'Side']] = test.Cabin.str.split('/', expand=True)
train[['Group', 'Group_Num']] = train.PassengerId.str.split('_', expand=True)
test[['Group', 'Group_Num']] = test.PassengerId.str.split('_', expand=True)

for i in train.columns:
    if train[i].isna().sum() > 0:
        print(f'{i}: {train[i].isna().sum()}')
print('\n', 'test NaNs', '\n')
for i in test.columns:
    if test[i].isna().sum() > 0:
        print(f'{i}: {test[i].isna().sum()}')
code
90143425/cell_16
[ "text_html_output_1.png" ]
from category_encoders.ordinal import OrdinalEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

oe = OrdinalEncoder()
scaler = StandardScaler()
logit = LogisticRegression()
pipe = Pipeline([('Encoder', oe), ('Scaler', scaler), ('Logistic Regression', logit)])

# X_train, X_test, y_train, y_test come from a train/test split in an earlier cell of the original notebook.
pipe.fit(X_train, y_train)
y_pred = pipe.predict(X_test)
print(classification_report(y_test, y_pred))
code
90143425/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
train.info()
code
90143425/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
train[['Deck', 'Cabin_Num', 'Side']] = train.Cabin.str.split('/', expand=True)
test[['Deck', 'Cabin_Num', 'Side']] = test.Cabin.str.split('/', expand=True)
train[['Group', 'Group_Num']] = train.PassengerId.str.split('_', expand=True)
test[['Group', 'Group_Num']] = test.PassengerId.str.split('_', expand=True)

mode_list = ['HomePlanet', 'CryoSleep', 'Destination', 'VIP', 'Deck', 'Side']
for i in mode_list:
    train[i] = train[i].fillna(train[i].mode()[0])
    test[i] = test[i].fillna(train[i].mode()[0])

for i in train.columns:
    if train[i].isna().sum() > 0:
        print(f'{i}: {train[i].isna().sum()}')
code
90143425/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/spaceship-titanic/train.csv')
test = pd.read_csv('/kaggle/input/spaceship-titanic/test.csv')
train[['Deck', 'Cabin_Num', 'Side']] = train.Cabin.str.split('/', expand=True)
test[['Deck', 'Cabin_Num', 'Side']] = test.Cabin.str.split('/', expand=True)
train.head()
code
106198899/cell_4
[ "text_plain_output_1.png" ]
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras import layers

pre_trained_model = InceptionV3(input_shape=(256, 256, 3), include_top=False, weights=None)
for layer in pre_trained_model.layers:
    layer.trainable = False
code
106198899/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras import layers

pre_trained_model = InceptionV3(input_shape=(256, 256, 3), include_top=False, weights=None)
for layer in pre_trained_model.layers:
    layer.trainable = False
pre_trained_model.summary()
last_layer = pre_trained_model.get_layer('mixed7')
print('last layer output shape: ', last_layer.output_shape)
last_output = last_layer.output
code
106198899/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/mayo-clinic-strip-ai/train.csv')
test_df = pd.read_csv('../input/mayo-clinic-strip-ai/test.csv')
train_df.head()
code
106198899/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

"import os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n    for filename in filenames:\n        print(os.path.join(dirname, filename))"
code
106198899/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
from tensorflow.keras import Model
from tensorflow.keras import layers
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.optimizers import RMSprop

pre_trained_model = InceptionV3(input_shape=(256, 256, 3), include_top=False, weights=None)
for layer in pre_trained_model.layers:
    layer.trainable = False
pre_trained_model.summary()
last_layer = pre_trained_model.get_layer('mixed7')
last_output = last_layer.output

x = layers.Flatten()(last_output)
x = layers.Dense(1024, activation='relu')(x)
x = layers.Dropout(0.2)(x)
x = layers.Dense(1, activation='sigmoid')(x)
model = Model(pre_trained_model.input, x)
model.summary()
code
106198899/cell_15
[ "text_plain_output_1.png" ]
from openslide import OpenSlide
from sklearn.model_selection import train_test_split  # used below; missing from the recorded cell
from tqdm import tqdm
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf

train_df = pd.read_csv('../input/mayo-clinic-strip-ai/train.csv')
test_df = pd.read_csv('../input/mayo-clinic-strip-ai/test.csv')
train_df_sample = train_df.iloc[:754].copy()

def preprocess(image_path):
    slide = OpenSlide(image_path)
    region = (1000, 1000)
    size = (5000, 5000)
    image = slide.read_region(region, 0, size)
    image = image.convert('RGB')
    image = np.array(image)  # tf.image.resize expects an array/tensor, not a PIL image
    image = tf.image.resize(image, (256, 256))
    image = np.array(image)
    return image

# 'file_path' is built from the image ids in an earlier cell of the original notebook.
x_train = []
for i in tqdm(train_df_sample['file_path']):
    x1 = preprocess(i)
    x_train.append(x1)
x_train = np.array(x_train)
y_train = np.array(train_df_sample['target'])
X_train, X_test, Y_train, Y_test = train_test_split(x_train, y_train, test_size=0.2)
code
106198899/cell_3
[ "text_plain_output_1.png" ]
"""!wget --no-check-certificate https://storage.googleapis.com/mledu-datasets/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5 -O /tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5"""
code
106198899/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
from openslide import OpenSlide
from tqdm import tqdm
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf

train_df = pd.read_csv('../input/mayo-clinic-strip-ai/train.csv')
test_df = pd.read_csv('../input/mayo-clinic-strip-ai/test.csv')
train_df_sample = train_df.iloc[:754].copy()

def preprocess(image_path):
    slide = OpenSlide(image_path)
    region = (1000, 1000)
    size = (5000, 5000)
    image = slide.read_region(region, 0, size)
    image = image.convert('RGB')
    image = np.array(image)  # tf.image.resize expects an array/tensor, not a PIL image
    image = tf.image.resize(image, (256, 256))
    image = np.array(image)
    return image

# 'file_path' is built from the image ids in an earlier cell of the original notebook.
x_train = []
for i in tqdm(train_df_sample['file_path']):
    x1 = preprocess(i)
    x_train.append(x1)
code
106198899/cell_5
[ "text_html_output_1.png" ]
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras import layers

pre_trained_model = InceptionV3(input_shape=(256, 256, 3), include_top=False, weights=None)
for layer in pre_trained_model.layers:
    layer.trainable = False
pre_trained_model.summary()
code
106211900/cell_11
[ "text_plain_output_1.png" ]
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision import transforms
import numpy as np
import pandas as pd

train_set = pd.read_csv('../input/digit-recognizer/train.csv')
test_set = pd.read_csv('../input/digit-recognizer/test.csv')

# Hold out 10% of the training rows for validation.
valid_size = 0.1
data_nums = len(train_set)
indices = list(range(data_nums))
np.random.shuffle(indices)
splits = int(np.floor(valid_size * data_nums))
train_ind, valid_ind = (indices[splits:], indices[:splits])
train_s = SubsetRandomSampler(train_ind)
valid_s = SubsetRandomSampler(valid_ind)

transforms_dic = {
    'train': transforms.Compose([transforms.ToPILImage(), transforms.RandomAffine(8, translate=(0, 0.1), scale=(0.8, 1.2)), transforms.ToTensor()]),
    'valid': transforms.Compose([transforms.ToTensor()]),
    'test': transforms.Compose([transforms.ToTensor()]),
}

# Datamnist (a Dataset subclass) and Runmodel (the training wrapper) are defined in earlier cells of the original notebook.
train = Datamnist(train_set, transforms_dic['train'])
valid = Datamnist(train_set, transforms_dic['valid'])
test = Datamnist(test_set, transforms_dic['test'], labels=False)
train_loader = DataLoader(train, batch_size=128, sampler=train_s)
valid_loader = DataLoader(valid, sampler=valid_s)
test_loader = DataLoader(test)

Trainmodel = Runmodel(train_loader, valid_loader)
Trainmodel.train()
Trainmodel.predict(test_loader)
print('ok')
code
106211900/cell_10
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision import transforms
import numpy as np
import pandas as pd

train_set = pd.read_csv('../input/digit-recognizer/train.csv')
test_set = pd.read_csv('../input/digit-recognizer/test.csv')

# Hold out 10% of the training rows for validation.
valid_size = 0.1
data_nums = len(train_set)
indices = list(range(data_nums))
np.random.shuffle(indices)
splits = int(np.floor(valid_size * data_nums))
train_ind, valid_ind = (indices[splits:], indices[:splits])
train_s = SubsetRandomSampler(train_ind)
valid_s = SubsetRandomSampler(valid_ind)

transforms_dic = {
    'train': transforms.Compose([transforms.ToPILImage(), transforms.RandomAffine(8, translate=(0, 0.1), scale=(0.8, 1.2)), transforms.ToTensor()]),
    'valid': transforms.Compose([transforms.ToTensor()]),
    'test': transforms.Compose([transforms.ToTensor()]),
}

# Datamnist (a Dataset subclass) and Runmodel (the training wrapper) are defined in earlier cells of the original notebook.
train = Datamnist(train_set, transforms_dic['train'])
valid = Datamnist(train_set, transforms_dic['valid'])
test = Datamnist(test_set, transforms_dic['test'], labels=False)
train_loader = DataLoader(train, batch_size=128, sampler=train_s)
valid_loader = DataLoader(valid, sampler=valid_s)
test_loader = DataLoader(test)

Trainmodel = Runmodel(train_loader, valid_loader)
Trainmodel.train()
code
1004711/cell_4
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]
train_df[['SibSp', 'Survived']].groupby(['SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
1004711/cell_2
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]
train_df[['Pclass', 'Survived']].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
1004711/cell_3
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]
train_df[['Sex', 'Survived']].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
1004711/cell_5
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
combine = [train_df, test_df]
train_df[['Parch', 'Survived']].groupby(['Parch'], as_index=False).mean().sort_values(by='Survived', ascending=False)
code
105197876/cell_9
[ "text_html_output_1.png" ]
from matplotlib.offsetbox import AnchoredText
from scipy.signal import periodogram
from sklearn.model_selection import train_test_split
import math
import matplotlib.pyplot as plt
import numpy as np
import optuna
import pandas as pd
import seaborn as sns
import xgboost as xgb

def seasonal_plot(X, y, period, freq, ax=None):
    if ax is None:
        _, ax = plt.subplots()
    palette = sns.color_palette("husl", n_colors=X[period].nunique())
    ax = sns.lineplot(x=freq, y=y, hue=period, data=X, ci=False, ax=ax, palette=palette, legend=False)
    ax.set_title(f"Seasonal Plot ({period}/{freq})")
    for line, name in zip(ax.lines, X[period].unique()):
        y_ = line.get_ydata()[-1]
        ax.annotate(name, xy=(1, y_), xytext=(6, 0), color=line.get_color(),
                    xycoords=ax.get_yaxis_transform(), textcoords="offset points",
                    size=14, va="center")
    return ax

def plot_periodogram(ts, detrend='linear', ax=None):
    fs = pd.Timedelta("1Y") / pd.Timedelta("1D")
    frequencies, spectrum = periodogram(ts, fs=fs, detrend=detrend, window="boxcar", scaling='spectrum')
    if ax is None:
        _, ax = plt.subplots()
    ax.step(frequencies, spectrum, color="purple")
    ax.set_xscale("log")
    ax.set_xticks([1, 2, 4, 6, 12, 26, 52, 104])
    ax.set_xticklabels(["Annual (1)", "Semiannual (2)", "Quarterly (4)", "Bimonthly (6)",
                        "Monthly (12)", "Biweekly (26)", "Weekly (52)", "Semiweekly (104)"],
                       rotation=30)
    ax.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
    ax.set_ylabel("Variance")
    ax.set_title("Periodogram")
    return ax

def lagplot(x, y=None, lag=1, standardize=False, ax=None, **kwargs):
    x_ = x.shift(lag)
    if standardize:
        x_ = (x_ - x_.mean()) / x_.std()
    if y is not None:
        y_ = (y - y.mean()) / y.std() if standardize else y
    else:
        y_ = x
    corr = y_.corr(x_)
    if ax is None:
        fig, ax = plt.subplots()
    scatter_kws = dict(alpha=0.75, s=3)
    line_kws = dict(color='C3')
    ax = sns.regplot(x=x_, y=y_, scatter_kws=scatter_kws, line_kws=line_kws, lowess=True, ax=ax, **kwargs)
    at = AnchoredText(f"{corr:.2f}", prop=dict(size="large"), frameon=True, loc="upper left")
    at.patch.set_boxstyle("square, pad=0.0")
    ax.add_artist(at)
    ax.set(title=f"Lag {lag}", xlabel=x_.name, ylabel=y_.name)
    return ax

def plot_lags(x, y=None, lags=6, nrows=1, lagplot_kwargs={}, **kwargs):
    kwargs.setdefault('nrows', nrows)
    kwargs.setdefault('ncols', math.ceil(lags / nrows))
    kwargs.setdefault('figsize', (kwargs['ncols'] * 2, nrows * 2 + 0.5))
    fig, axs = plt.subplots(sharex=True, sharey=True, squeeze=False, **kwargs)
    for ax, k in zip(fig.get_axes(), range(kwargs['nrows'] * kwargs['ncols'])):
        if k + 1 <= lags:
            ax = lagplot(x, y, lag=k + 1, ax=ax, **lagplot_kwargs)
            ax.set_title(f"Lag {k + 1}", fontdict=dict(fontsize=14))
            ax.set(xlabel="", ylabel="")
        else:
            ax.axis('off')
    plt.setp(axs[-1, :], xlabel=x.name)
    plt.setp(axs[:, 0], ylabel=y.name if y is not None else x.name)
    fig.tight_layout(w_pad=0.1, h_pad=0.1)
    return fig

train_df = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv', index_col='date', parse_dates=['date']).drop('row_id', axis=1).to_period(freq='D')
test_df = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv', index_col='date', parse_dates=['date']).drop('row_id', axis=1).to_period(freq='D')
sub_df = pd.read_csv('../input/tabular-playground-series-sep-2022/sample_submission.csv')

# 'num_sold_predicted' is the per-series linear trend fitted in an earlier cell (cell_5); model the residual with XGBoost.
train_df_xgb = train_df.copy()
train_df_xgb['num_sold'] = np.array(train_df['num_sold']) - np.array(train_df['num_sold_predicted'])
train_df_xgb['day'] = train_df_xgb.index.dayofweek
train_df_xgb['week'] = train_df_xgb.index.week
train_df_xgb['dayofyear'] = train_df_xgb.index.dayofyear
train_df_xgb['year'] = train_df_xgb.index.year
train_df_xgb['month'] = train_df_xgb.index.month
train_df_xgb['amount_time'] = train_df_xgb['month'] * 100 + train_df_xgb['day']
train_df_xgb['special_days'] = train_df_xgb['amount_time'].isin([101, 1228, 1229, 1230, 1231]).astype(int)
train_df_xgb['month'] = np.cos(0.5236 * train_df_xgb['month'])

test_df['day'] = test_df.index.dayofweek
test_df['week'] = test_df.index.week
test_df['dayofyear'] = test_df.index.dayofyear
test_df['year'] = test_df.index.year
test_df['month'] = test_df.index.month
test_df['amount_time'] = test_df['month'] * 100 + test_df['day']
test_df['special_days'] = test_df['amount_time'].isin([101, 1228, 1229, 1230, 1231]).astype(int)
test_df['month'] = np.cos(0.5236 * test_df['month'])

def xgb_model(train_data, test_data):
    X = train_data.copy()
    target = X.pop('num_sold')
    X_train, X_valid, y_train, y_valid = train_test_split(X, target)
    xgb_train = xgb.DMatrix(X_train, label=y_train)
    xgb_eval = xgb.DMatrix(X_valid, label=y_valid)

    def objective(trial, xgb_train, xgb_eval):
        params = {
            'tree_method': trial.suggest_categorical('tree_method', ['gpu_hist']),
            'objective': trial.suggest_categorical('objective', ['reg:squarederror']),
            'eta': trial.suggest_float('eta', 0.001, 0.3, log=True),
            'gamma': trial.suggest_float('gamma', 0.001, 1000, log=True),
            'max_depth': trial.suggest_int('max_depth', 3, 12),
            'grow_policy': trial.suggest_categorical('grow_policy', ['depthwise', 'lossguide']),
            'min_child_weight': trial.suggest_float('min_child_weight', 0.001, 1000, log=True),
            'lambda': trial.suggest_float('lambda', 0.001, 100, log=True),
            'alpha': trial.suggest_float('alpha', 0.001, 100, log=True),
            'colsample_bytree': trial.suggest_float('colsample_bytree', 0.9, 1.0, step=0.05),
            'colsample_bylevel': trial.suggest_float('colsample_bylevel', 0.8, 1.0, step=0.05),
            'colsample_bynode': trial.suggest_float('colsample_bynode', 0.7, 1.0, step=0.05),
            'subsample': trial.suggest_float('subsample', 0.5, 1.0, step=0.05),
            'eval_metric': trial.suggest_categorical('eval_metric', ['rmse']),
        }
        num_round = 1000
        evallist = [(xgb_eval, 'eval')]
        model = xgb.train(params, xgb_train, num_round, evallist, early_stopping_rounds=10, verbose_eval=1000)
        return model.best_score

    study = optuna.create_study(direction='minimize', study_name='Xgboost')
    func = lambda trial: objective(trial, xgb_train, xgb_eval)
    study.optimize(func, n_trials=20)
    best_params = study.best_params
    evallist = [(xgb_eval, 'eval')]
    best_model = xgb.train(best_params, xgb_train, 2000, evallist, early_stopping_rounds=100, verbose_eval=2000)
    xgb_test = xgb.DMatrix(test_data)
    test_preds = best_model.predict(xgb_test)
    y_preds = best_model.predict(xgb.DMatrix(X, label=target))
    return (test_preds, y_preds)

test_preds, y_preds = xgb_model(pd.get_dummies(train_df_xgb.drop('num_sold_predicted', axis=1)), pd.get_dummies(test_df.drop('num_sold', axis=1)))
xgb_y_pred = pd.Series(y_preds, index=train_df.index)
xgb_y_fore = pd.Series(test_preds, index=test_df.index)

_, ax = plt.subplots(figsize=(14, 6))
xgb_y_pred.groupby('date').mean().plot(ax=ax, label='Seasonal Learned by model')
xgb_y_fore.groupby('date').mean().plot(ax=ax, label='Seasonal Forecast by model', color='C3')
train_df_xgb.groupby('date').num_sold.mean().plot(ax=ax, label='Actual Seasonal')
_ = ax.legend()
code
105197876/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
from matplotlib.offsetbox import AnchoredText
from scipy.signal import periodogram
import math
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

def seasonal_plot(X, y, period, freq, ax=None):
    if ax is None:
        _, ax = plt.subplots()
    palette = sns.color_palette("husl", n_colors=X[period].nunique())
    ax = sns.lineplot(x=freq, y=y, hue=period, data=X, ci=False, ax=ax, palette=palette, legend=False)
    ax.set_title(f"Seasonal Plot ({period}/{freq})")
    for line, name in zip(ax.lines, X[period].unique()):
        y_ = line.get_ydata()[-1]
        ax.annotate(name, xy=(1, y_), xytext=(6, 0), color=line.get_color(),
                    xycoords=ax.get_yaxis_transform(), textcoords="offset points",
                    size=14, va="center")
    return ax

def plot_periodogram(ts, detrend='linear', ax=None):
    fs = pd.Timedelta("1Y") / pd.Timedelta("1D")
    frequencies, spectrum = periodogram(ts, fs=fs, detrend=detrend, window="boxcar", scaling='spectrum')
    if ax is None:
        _, ax = plt.subplots()
    ax.step(frequencies, spectrum, color="purple")
    ax.set_xscale("log")
    ax.set_xticks([1, 2, 4, 6, 12, 26, 52, 104])
    ax.set_xticklabels(["Annual (1)", "Semiannual (2)", "Quarterly (4)", "Bimonthly (6)",
                        "Monthly (12)", "Biweekly (26)", "Weekly (52)", "Semiweekly (104)"],
                       rotation=30)
    ax.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
    ax.set_ylabel("Variance")
    ax.set_title("Periodogram")
    return ax

def lagplot(x, y=None, lag=1, standardize=False, ax=None, **kwargs):
    x_ = x.shift(lag)
    if standardize:
        x_ = (x_ - x_.mean()) / x_.std()
    if y is not None:
        y_ = (y - y.mean()) / y.std() if standardize else y
    else:
        y_ = x
    corr = y_.corr(x_)
    if ax is None:
        fig, ax = plt.subplots()
    scatter_kws = dict(alpha=0.75, s=3)
    line_kws = dict(color='C3')
    ax = sns.regplot(x=x_, y=y_, scatter_kws=scatter_kws, line_kws=line_kws, lowess=True, ax=ax, **kwargs)
    at = AnchoredText(f"{corr:.2f}", prop=dict(size="large"), frameon=True, loc="upper left")
    at.patch.set_boxstyle("square, pad=0.0")
    ax.add_artist(at)
    ax.set(title=f"Lag {lag}", xlabel=x_.name, ylabel=y_.name)
    return ax

def plot_lags(x, y=None, lags=6, nrows=1, lagplot_kwargs={}, **kwargs):
    kwargs.setdefault('nrows', nrows)
    kwargs.setdefault('ncols', math.ceil(lags / nrows))
    kwargs.setdefault('figsize', (kwargs['ncols'] * 2, nrows * 2 + 0.5))
    fig, axs = plt.subplots(sharex=True, sharey=True, squeeze=False, **kwargs)
    for ax, k in zip(fig.get_axes(), range(kwargs['nrows'] * kwargs['ncols'])):
        if k + 1 <= lags:
            ax = lagplot(x, y, lag=k + 1, ax=ax, **lagplot_kwargs)
            ax.set_title(f"Lag {k + 1}", fontdict=dict(fontsize=14))
            ax.set(xlabel="", ylabel="")
        else:
            ax.axis('off')
    plt.setp(axs[-1, :], xlabel=x.name)
    plt.setp(axs[:, 0], ylabel=y.name if y is not None else x.name)
    fig.tight_layout(w_pad=0.1, h_pad=0.1)
    return fig

train_df = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv', index_col='date', parse_dates=['date']).drop('row_id', axis=1).to_period(freq='D')
test_df = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv', index_col='date', parse_dates=['date']).drop('row_id', axis=1).to_period(freq='D')
sub_df = pd.read_csv('../input/tabular-playground-series-sep-2022/sample_submission.csv')

print('description of train data:')
display(train_df.describe(include='object'))
print('description of test data:')
display(test_df.describe(include='object'))
code
105197876/cell_3
[ "text_html_output_2.png", "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from matplotlib.offsetbox import AnchoredText
from scipy.signal import periodogram
import math
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

def seasonal_plot(X, y, period, freq, ax=None):
    if ax is None:
        _, ax = plt.subplots()
    palette = sns.color_palette("husl", n_colors=X[period].nunique())
    ax = sns.lineplot(x=freq, y=y, hue=period, data=X, ci=False, ax=ax, palette=palette, legend=False)
    ax.set_title(f"Seasonal Plot ({period}/{freq})")
    for line, name in zip(ax.lines, X[period].unique()):
        y_ = line.get_ydata()[-1]
        ax.annotate(name, xy=(1, y_), xytext=(6, 0), color=line.get_color(),
                    xycoords=ax.get_yaxis_transform(), textcoords="offset points",
                    size=14, va="center")
    return ax

def plot_periodogram(ts, detrend='linear', ax=None):
    fs = pd.Timedelta("1Y") / pd.Timedelta("1D")
    frequencies, spectrum = periodogram(ts, fs=fs, detrend=detrend, window="boxcar", scaling='spectrum')
    if ax is None:
        _, ax = plt.subplots()
    ax.step(frequencies, spectrum, color="purple")
    ax.set_xscale("log")
    ax.set_xticks([1, 2, 4, 6, 12, 26, 52, 104])
    ax.set_xticklabels(["Annual (1)", "Semiannual (2)", "Quarterly (4)", "Bimonthly (6)",
                        "Monthly (12)", "Biweekly (26)", "Weekly (52)", "Semiweekly (104)"],
                       rotation=30)
    ax.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
    ax.set_ylabel("Variance")
    ax.set_title("Periodogram")
    return ax

def lagplot(x, y=None, lag=1, standardize=False, ax=None, **kwargs):
    x_ = x.shift(lag)
    if standardize:
        x_ = (x_ - x_.mean()) / x_.std()
    if y is not None:
        y_ = (y - y.mean()) / y.std() if standardize else y
    else:
        y_ = x
    corr = y_.corr(x_)
    if ax is None:
        fig, ax = plt.subplots()
    scatter_kws = dict(alpha=0.75, s=3)
    line_kws = dict(color='C3')
    ax = sns.regplot(x=x_, y=y_, scatter_kws=scatter_kws, line_kws=line_kws, lowess=True, ax=ax, **kwargs)
    at = AnchoredText(f"{corr:.2f}", prop=dict(size="large"), frameon=True, loc="upper left")
    at.patch.set_boxstyle("square, pad=0.0")
    ax.add_artist(at)
    ax.set(title=f"Lag {lag}", xlabel=x_.name, ylabel=y_.name)
    return ax

def plot_lags(x, y=None, lags=6, nrows=1, lagplot_kwargs={}, **kwargs):
    kwargs.setdefault('nrows', nrows)
    kwargs.setdefault('ncols', math.ceil(lags / nrows))
    kwargs.setdefault('figsize', (kwargs['ncols'] * 2, nrows * 2 + 0.5))
    fig, axs = plt.subplots(sharex=True, sharey=True, squeeze=False, **kwargs)
    for ax, k in zip(fig.get_axes(), range(kwargs['nrows'] * kwargs['ncols'])):
        if k + 1 <= lags:
            ax = lagplot(x, y, lag=k + 1, ax=ax, **lagplot_kwargs)
            ax.set_title(f"Lag {k + 1}", fontdict=dict(fontsize=14))
            ax.set(xlabel="", ylabel="")
        else:
            ax.axis('off')
    plt.setp(axs[-1, :], xlabel=x.name)
    plt.setp(axs[:, 0], ylabel=y.name if y is not None else x.name)
    fig.tight_layout(w_pad=0.1, h_pad=0.1)
    return fig

train_df = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv', index_col='date', parse_dates=['date']).drop('row_id', axis=1).to_period(freq='D')
test_df = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv', index_col='date', parse_dates=['date']).drop('row_id', axis=1).to_period(freq='D')
sub_df = pd.read_csv('../input/tabular-playground-series-sep-2022/sample_submission.csv')

print('shape of train data: ', train_df.shape)
display(train_df)
print('shape of test data: ', test_df.shape)
display(test_df)
print('shape of sample submission data: ', sub_df.shape)
display(sub_df)
code
105197876/cell_10
[ "text_html_output_2.png", "text_plain_output_3.png", "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png", "text_html_output_3.png" ]
from matplotlib.offsetbox import AnchoredText
from scipy.signal import periodogram
from sklearn.model_selection import train_test_split
import math
import matplotlib.pyplot as plt
import numpy as np
import optuna
import pandas as pd
import seaborn as sns
import xgboost as xgb

def seasonal_plot(X, y, period, freq, ax=None):
    if ax is None:
        _, ax = plt.subplots()
    palette = sns.color_palette("husl", n_colors=X[period].nunique())
    ax = sns.lineplot(x=freq, y=y, hue=period, data=X, ci=False, ax=ax, palette=palette, legend=False)
    ax.set_title(f"Seasonal Plot ({period}/{freq})")
    for line, name in zip(ax.lines, X[period].unique()):
        y_ = line.get_ydata()[-1]
        ax.annotate(name, xy=(1, y_), xytext=(6, 0), color=line.get_color(),
                    xycoords=ax.get_yaxis_transform(), textcoords="offset points",
                    size=14, va="center")
    return ax

def plot_periodogram(ts, detrend='linear', ax=None):
    fs = pd.Timedelta("1Y") / pd.Timedelta("1D")
    frequencies, spectrum = periodogram(ts, fs=fs, detrend=detrend, window="boxcar", scaling='spectrum')
    if ax is None:
        _, ax = plt.subplots()
    ax.step(frequencies, spectrum, color="purple")
    ax.set_xscale("log")
    ax.set_xticks([1, 2, 4, 6, 12, 26, 52, 104])
    ax.set_xticklabels(["Annual (1)", "Semiannual (2)", "Quarterly (4)", "Bimonthly (6)",
                        "Monthly (12)", "Biweekly (26)", "Weekly (52)", "Semiweekly (104)"],
                       rotation=30)
    ax.ticklabel_format(axis="y", style="sci", scilimits=(0, 0))
    ax.set_ylabel("Variance")
    ax.set_title("Periodogram")
    return ax

def lagplot(x, y=None, lag=1, standardize=False, ax=None, **kwargs):
    x_ = x.shift(lag)
    if standardize:
        x_ = (x_ - x_.mean()) / x_.std()
    if y is not None:
        y_ = (y - y.mean()) / y.std() if standardize else y
    else:
        y_ = x
    corr = y_.corr(x_)
    if ax is None:
        fig, ax = plt.subplots()
    scatter_kws = dict(alpha=0.75, s=3)
    line_kws = dict(color='C3')
    ax = sns.regplot(x=x_, y=y_, scatter_kws=scatter_kws, line_kws=line_kws, lowess=True, ax=ax, **kwargs)
    at = AnchoredText(f"{corr:.2f}", prop=dict(size="large"), frameon=True, loc="upper left")
    at.patch.set_boxstyle("square, pad=0.0")
    ax.add_artist(at)
    ax.set(title=f"Lag {lag}", xlabel=x_.name, ylabel=y_.name)
    return ax

def plot_lags(x, y=None, lags=6, nrows=1, lagplot_kwargs={}, **kwargs):
    kwargs.setdefault('nrows', nrows)
    kwargs.setdefault('ncols', math.ceil(lags / nrows))
    kwargs.setdefault('figsize', (kwargs['ncols'] * 2, nrows * 2 + 0.5))
    fig, axs = plt.subplots(sharex=True, sharey=True, squeeze=False, **kwargs)
    for ax, k in zip(fig.get_axes(), range(kwargs['nrows'] * kwargs['ncols'])):
        if k + 1 <= lags:
            ax = lagplot(x, y, lag=k + 1, ax=ax, **lagplot_kwargs)
            ax.set_title(f"Lag {k + 1}", fontdict=dict(fontsize=14))
            ax.set(xlabel="", ylabel="")
        else:
            ax.axis('off')
    plt.setp(axs[-1, :], xlabel=x.name)
    plt.setp(axs[:, 0], ylabel=y.name if y is not None else x.name)
    fig.tight_layout(w_pad=0.1, h_pad=0.1)
    return fig

train_df = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv', index_col='date', parse_dates=['date']).drop('row_id', axis=1).to_period(freq='D')
test_df = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv', index_col='date', parse_dates=['date']).drop('row_id', axis=1).to_period(freq='D')
sub_df = pd.read_csv('../input/tabular-playground-series-sep-2022/sample_submission.csv')

# 'num_sold_predicted' is the per-series linear trend fitted in an earlier cell (cell_5); model the residual with XGBoost.
train_df_xgb = train_df.copy()
train_df_xgb['num_sold'] = np.array(train_df['num_sold']) - np.array(train_df['num_sold_predicted'])
train_df_xgb['day'] = train_df_xgb.index.dayofweek
train_df_xgb['week'] = train_df_xgb.index.week
train_df_xgb['dayofyear'] = train_df_xgb.index.dayofyear
train_df_xgb['year'] = train_df_xgb.index.year
train_df_xgb['month'] = train_df_xgb.index.month
train_df_xgb['amount_time'] = train_df_xgb['month'] * 100 + train_df_xgb['day']
train_df_xgb['special_days'] = train_df_xgb['amount_time'].isin([101, 1228, 1229, 1230, 1231]).astype(int)
train_df_xgb['month'] = np.cos(0.5236 * train_df_xgb['month'])

test_df['day'] = test_df.index.dayofweek
test_df['week'] = test_df.index.week
test_df['dayofyear'] = test_df.index.dayofyear
test_df['year'] = test_df.index.year
test_df['month'] = test_df.index.month
test_df['amount_time'] = test_df['month'] * 100 + test_df['day']
test_df['special_days'] = test_df['amount_time'].isin([101, 1228, 1229, 1230, 1231]).astype(int)
test_df['month'] = np.cos(0.5236 * test_df['month'])

def xgb_model(train_data, test_data):
    X = train_data.copy()
    target = X.pop('num_sold')
    X_train, X_valid, y_train, y_valid = train_test_split(X, target)
    xgb_train = xgb.DMatrix(X_train, label=y_train)
    xgb_eval = xgb.DMatrix(X_valid, label=y_valid)

    def objective(trial, xgb_train, xgb_eval):
        params = {
            'tree_method': trial.suggest_categorical('tree_method', ['gpu_hist']),
            'objective': trial.suggest_categorical('objective', ['reg:squarederror']),
            'eta': trial.suggest_float('eta', 0.001, 0.3, log=True),
            'gamma': trial.suggest_float('gamma', 0.001, 1000, log=True),
            'max_depth': trial.suggest_int('max_depth', 3, 12),
            'grow_policy': trial.suggest_categorical('grow_policy', ['depthwise', 'lossguide']),
            'min_child_weight': trial.suggest_float('min_child_weight', 0.001, 1000, log=True),
            'lambda': trial.suggest_float('lambda', 0.001, 100, log=True),
            'alpha': trial.suggest_float('alpha', 0.001, 100, log=True),
            'colsample_bytree': trial.suggest_float('colsample_bytree', 0.9, 1.0, step=0.05),
            'colsample_bylevel': trial.suggest_float('colsample_bylevel', 0.8, 1.0, step=0.05),
            'colsample_bynode': trial.suggest_float('colsample_bynode', 0.7, 1.0, step=0.05),
            'subsample': trial.suggest_float('subsample', 0.5, 1.0, step=0.05),
            'eval_metric': trial.suggest_categorical('eval_metric', ['rmse']),
        }
        num_round = 1000
        evallist = [(xgb_eval, 'eval')]
        model = xgb.train(params, xgb_train, num_round, evallist, early_stopping_rounds=10, verbose_eval=1000)
        return model.best_score

    study = optuna.create_study(direction='minimize', study_name='Xgboost')
    func = lambda trial: objective(trial, xgb_train, xgb_eval)
    study.optimize(func, n_trials=20)
    best_params = study.best_params
    evallist = [(xgb_eval, 'eval')]
    best_model = xgb.train(best_params, xgb_train, 2000, evallist, early_stopping_rounds=100, verbose_eval=2000)
    xgb_test = xgb.DMatrix(test_data)
    test_preds = best_model.predict(xgb_test)
    y_preds = best_model.predict(xgb.DMatrix(X, label=target))
    return (test_preds, y_preds)

test_preds, y_preds = xgb_model(pd.get_dummies(train_df_xgb.drop('num_sold_predicted', axis=1)), pd.get_dummies(test_df.drop('num_sold', axis=1)))
xgb_y_pred = pd.Series(y_preds, index=train_df.index)
xgb_y_fore = pd.Series(test_preds, index=test_df.index)

_, ax = plt.subplots(figsize=(14, 6))
# ax = y.plot(color='0.25', style='.', title="Num Sold - Seasonal Forecast")
xgb_y_pred.groupby('date').mean().plot(ax=ax, label="Seasonal Learned by model")
xgb_y_fore.groupby('date').mean().plot(ax=ax, label="Seasonal Forecast by model", color='C3')
train_df_xgb.groupby('date').num_sold.mean().plot(ax=ax, label='Actual Seasonal')
_ = ax.legend()

sub_df['num_sold'] = np.array(test_df['num_sold']) + np.array(xgb_y_fore)
sub_df.to_csv('submission.csv', index=False)
sub_df
code
105197876/cell_5
[ "application_vnd.jupyter.stderr_output_27.png", "application_vnd.jupyter.stderr_output_35.png", "application_vnd.jupyter.stderr_output_9.png", "text_plain_output_30.png", "application_vnd.jupyter.stderr_output_7.png", "application_vnd.jupyter.stderr_output_11.png", "text_plain_output_40.png", "text_plain_output_20.png", "application_vnd.jupyter.stderr_output_31.png", "text_plain_output_4.png", "application_vnd.jupyter.stderr_output_33.png", "application_vnd.jupyter.stderr_output_25.png", "text_plain_output_14.png", "text_plain_output_32.png", "text_plain_output_10.png", "text_plain_output_6.png", "text_plain_output_24.png", "application_vnd.jupyter.stderr_output_23.png", "text_plain_output_18.png", "text_plain_output_36.png", "application_vnd.jupyter.stderr_output_19.png", "application_vnd.jupyter.stderr_output_13.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_22.png", "application_vnd.jupyter.stderr_output_5.png", "text_plain_output_38.png", "text_plain_output_16.png", "application_vnd.jupyter.stderr_output_15.png", "text_plain_output_8.png", "application_vnd.jupyter.stderr_output_17.png", "text_plain_output_26.png", "text_plain_output_34.png", "application_vnd.jupyter.stderr_output_41.png", "text_plain_output_42.png", "text_plain_output_28.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_29.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png", "text_plain_output_12.png", "application_vnd.jupyter.stderr_output_39.png", "application_vnd.jupyter.stderr_output_21.png", "application_vnd.jupyter.stderr_output_37.png" ]
# Missing from the recorded cell; these are the standard locations for the classes used below.
from sklearn.linear_model import LinearRegression
from statsmodels.tsa.deterministic import CalendarFourier, DeterministicProcess
import matplotlib.pyplot as plt
import pandas as pd

# train_df and test_df are the period-indexed frames loaded in the earlier cells of this notebook.
# Fit a linear trend plus annual Fourier seasonality for each (country, store, product) series.
_, ax = plt.subplots(12, 4, figsize=(14, 50))
test_df['num_sold'] = 0
train_df['num_sold_predicted'] = 0
for country, i in zip(train_df['country'].unique(), range(6)):
    for store, k in zip(train_df['store'].unique(), range(2)):
        for product, j in zip(train_df['product'].unique(), range(4)):
            temp_df = None
            temp_roll = None
            temp_df = train_df.loc[(train_df['country'] == country) & (train_df['store'] == store) & (train_df['product'] == product), ['num_sold']]
            temp_roll = temp_df.rolling(window=365, center=True, min_periods=183).mean()
            temp_df.plot(style='.', color='0.5', ax=ax[i * 2 + k, j], title=f'{country} {store} {product}')
            temp_roll.plot(ax=ax[i * 2 + k, j], linewidth=3, label='rolling')
            fourier = CalendarFourier(freq='A', order=10)
            dp = DeterministicProcess(index=temp_df.index, constant=True, order=1, seasonal=True, additional_terms=[fourier], drop=True)
            X = dp.in_sample()
            y = temp_df['num_sold']
            model = LinearRegression(fit_intercept=False)
            model.fit(X, y)
            y_pred = pd.Series(model.predict(X), index=X.index)
            y_pred.plot(ax=ax[i * 2 + k, j], linewidth=0.5, label='Trend fitted')
            X_fore = dp.out_of_sample(steps=365)
            y_fore = pd.Series(model.predict(X_fore), index=X_fore.index)
            y_fore.plot(ax=ax[i * 2 + k, j], linewidth=0.5, label='Trend Forecast', color='C3')
            test_df.loc[(test_df['country'] == country) & (test_df['store'] == store) & (test_df['product'] == product), ['num_sold']] = y_fore
            train_df.loc[(train_df['country'] == country) & (train_df['store'] == store) & (train_df['product'] == product), ['num_sold_predicted']] = y_pred
code
105193549/cell_42
[ "text_html_output_1.png" ]
import ast
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
a = df_titles['genres'].values
a
ast.literal_eval(a[0])
type(ast.literal_eval(a[0]))
df_titles['genres2'] = df_titles['genres'].apply(ast.literal_eval)
df_titles
genres2_length = list(map(len, df_titles['genres2'].values))
genres2_length
df_titles_normalised = pd.DataFrame({'title': np.repeat(df_titles['title'], genres2_length), 'genres3': np.concatenate(df_titles['genres2'].values)})
df_titles_normalised
np.concatenate(df_titles['genres2'].values)
np.repeat(df_titles['title'], genres2_length)
df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5).loc[:5, ['title', 'genres']]
df_titles
df_titles_normalised = pd.DataFrame({'title': np.repeat(df_titles['title'], list(map(len, df_titles['genres'].apply(ast.literal_eval).values))), 'genres': np.concatenate(df_titles['genres'].apply(ast.literal_eval).values)})
df_titles_normalised
df_titles_normalised.loc[df_titles_normalised['genres'] == 'drama', :]
df_titles_normalised.loc[df_titles_normalised['genres'] == 'war', :]
df_titles_normalised.loc[df_titles_normalised['genres'].isin(['war', 'drama']), :]
code
105193549/cell_9
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
a = df_titles['genres'].values
a
code
105193549/cell_25
[ "text_plain_output_1.png" ]
import ast
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
# 'genres2' (genres parsed from string to list) is built in an earlier cell of the notebook; reconstructed so this cell runs:
df_titles['genres2'] = df_titles['genres'].apply(ast.literal_eval)
genres2_length = list(map(len, df_titles['genres2'].values))
genres2_length
df_titles_normalised = pd.DataFrame({'title': np.repeat(df_titles['title'], genres2_length), 'genres3': np.concatenate(df_titles['genres2'].values)})
df_titles_normalised
code
105193549/cell_23
[ "text_html_output_1.png" ]
import ast
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
# 'genres2' is built in an earlier cell of the notebook; reconstructed so this cell runs:
df_titles['genres2'] = df_titles['genres'].apply(ast.literal_eval)
genres2_length = list(map(len, df_titles['genres2'].values))
genres2_length
code
105193549/cell_33
[ "text_plain_output_1.png" ]
import ast
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
# 'genres2' is built in an earlier cell of the notebook; reconstructed so this cell runs:
df_titles['genres2'] = df_titles['genres'].apply(ast.literal_eval)
genres2_length = list(map(len, df_titles['genres2'].values))
genres2_length
df_titles_normalised = pd.DataFrame({'title': np.repeat(df_titles['title'], genres2_length), 'genres3': np.concatenate(df_titles['genres2'].values)})
df_titles_normalised
np.concatenate(df_titles['genres2'].values)
np.repeat(df_titles['title'], genres2_length)
code
105193549/cell_20
[ "text_plain_output_1.png" ]
import ast
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
# 'genres2' is built in an earlier cell of the notebook; reconstructed so this cell runs:
df_titles['genres2'] = df_titles['genres'].apply(ast.literal_eval)
df_titles['genres2'].values
code
105193549/cell_40
[ "text_plain_output_1.png" ]
import ast
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
a = df_titles['genres'].values
a
ast.literal_eval(a[0])
type(ast.literal_eval(a[0]))
df_titles['genres2'] = df_titles['genres'].apply(ast.literal_eval)
df_titles
genres2_length = list(map(len, df_titles['genres2'].values))
genres2_length
df_titles_normalised = pd.DataFrame({'title': np.repeat(df_titles['title'], genres2_length), 'genres3': np.concatenate(df_titles['genres2'].values)})
df_titles_normalised
np.concatenate(df_titles['genres2'].values)
np.repeat(df_titles['title'], genres2_length)
df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5).loc[:5, ['title', 'genres']]
df_titles
df_titles_normalised = pd.DataFrame({'title': np.repeat(df_titles['title'], list(map(len, df_titles['genres'].apply(ast.literal_eval).values))), 'genres': np.concatenate(df_titles['genres'].apply(ast.literal_eval).values)})
df_titles_normalised
df_titles_normalised.loc[df_titles_normalised['genres'] == 'drama', :]
code
105193549/cell_29
[ "text_plain_output_1.png" ]
import ast
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
# 'genres2' is built in an earlier cell of the notebook; reconstructed so this cell runs:
df_titles['genres2'] = df_titles['genres'].apply(ast.literal_eval)
genres2_length = list(map(len, df_titles['genres2'].values))
genres2_length
df_titles_normalised = pd.DataFrame({'title': np.repeat(df_titles['title'], genres2_length), 'genres3': np.concatenate(df_titles['genres2'].values)})
df_titles_normalised
np.concatenate(df_titles['genres2'].values)
code
105193549/cell_41
[ "text_html_output_1.png" ]
import ast
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
a = df_titles['genres'].values
a
ast.literal_eval(a[0])
type(ast.literal_eval(a[0]))
df_titles['genres2'] = df_titles['genres'].apply(ast.literal_eval)
df_titles
genres2_length = list(map(len, df_titles['genres2'].values))
genres2_length
df_titles_normalised = pd.DataFrame({'title': np.repeat(df_titles['title'], genres2_length), 'genres3': np.concatenate(df_titles['genres2'].values)})
df_titles_normalised
np.concatenate(df_titles['genres2'].values)
np.repeat(df_titles['title'], genres2_length)
df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5).loc[:5, ['title', 'genres']]
df_titles
df_titles_normalised = pd.DataFrame({'title': np.repeat(df_titles['title'], list(map(len, df_titles['genres'].apply(ast.literal_eval).values))), 'genres': np.concatenate(df_titles['genres'].apply(ast.literal_eval).values)})
df_titles_normalised
df_titles_normalised.loc[df_titles_normalised['genres'] == 'drama', :]
df_titles_normalised.loc[df_titles_normalised['genres'] == 'war', :]
code
105193549/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
a = df_titles['genres'].values
a
type(a[0])
code
105193549/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
df_titles['genres'].values
code
105193549/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
code
105193549/cell_18
[ "text_plain_output_1.png" ]
import ast
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
a = df_titles['genres'].values
a
ast.literal_eval(a[0])
type(ast.literal_eval(a[0]))
df_titles['genres2'] = df_titles['genres'].apply(ast.literal_eval)
df_titles
code
105193549/cell_32
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
df_titles['title']
code
105193549/cell_28
[ "text_plain_output_1.png" ]
import ast
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
# 'genres2' is built in an earlier cell of the notebook; reconstructed so this cell runs:
df_titles['genres2'] = df_titles['genres'].apply(ast.literal_eval)
df_titles['genres2'].values
code
105193549/cell_15
[ "text_plain_output_1.png" ]
import ast
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
a = df_titles['genres'].values
a
ast.literal_eval(a[0])
code
105193549/cell_16
[ "text_plain_output_1.png" ]
import ast
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
a = df_titles['genres'].values
a
ast.literal_eval(a[0])
type(ast.literal_eval(a[0]))
code
105193549/cell_38
[ "text_plain_output_1.png" ]
import ast
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
a = df_titles['genres'].values
a
ast.literal_eval(a[0])
type(ast.literal_eval(a[0]))
df_titles['genres2'] = df_titles['genres'].apply(ast.literal_eval)
df_titles
genres2_length = list(map(len, df_titles['genres2'].values))
genres2_length
df_titles_normalised = pd.DataFrame({'title': np.repeat(df_titles['title'], genres2_length), 'genres3': np.concatenate(df_titles['genres2'].values)})
df_titles_normalised
np.concatenate(df_titles['genres2'].values)
np.repeat(df_titles['title'], genres2_length)
df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5).loc[:5, ['title', 'genres']]
df_titles
df_titles_normalised = pd.DataFrame({'title': np.repeat(df_titles['title'], list(map(len, df_titles['genres'].apply(ast.literal_eval).values))), 'genres': np.concatenate(df_titles['genres'].apply(ast.literal_eval).values)})
df_titles_normalised
code
105193549/cell_3
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105193549/cell_31
[ "text_html_output_1.png" ]
import ast
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
df_titles['genres2'] = df_titles['genres'].apply(ast.literal_eval) # restored from cell_18 of this notebook; 'genres2' was referenced below but never defined here
genres2_length = list(map(len, df_titles['genres2'].values))
genres2_length
genres2_length
code
105193549/cell_14
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
a = df_titles['genres'].values
a
a[0]
code
105193549/cell_22
[ "text_plain_output_1.png" ]
import ast
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
df_titles['genres2'] = df_titles['genres'].apply(ast.literal_eval) # restored from cell_18 of this notebook; 'genres2' was referenced below but never defined here
df_titles['genres2'].values
code
105193549/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
a = df_titles['genres'].values
a
a[0]
code
105193549/cell_5
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
code
105193549/cell_36
[ "text_plain_output_1.png" ]
import ast # restored; needed for the 'genres2' line below
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5)
df_titles
df_titles = df_titles.loc[:5, ['title', 'genres']]
df_titles
df_titles['genres2'] = df_titles['genres'].apply(ast.literal_eval) # restored from cell_18 of this notebook; 'genres2' was referenced below but never defined here
genres2_length = list(map(len, df_titles['genres2'].values))
genres2_length
df_titles_normalised = pd.DataFrame({'title': np.repeat(df_titles['title'], genres2_length), 'genres3': np.concatenate(df_titles['genres2'].values)})
df_titles_normalised
df_titles = pd.read_csv('../input/netflix-tv-shows-and-movies/titles.csv', nrows=5).loc[:5, ['title', 'genres']]
df_titles
code
48162408/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sample = pd.read_csv('../input/lish-moa/sample_submission.csv')
test_f = pd.read_csv('../input/lish-moa/test_features.csv')
train_f = pd.read_csv('../input/lish-moa/train_features.csv')
drug = pd.read_csv('../input/lish-moa/train_drug.csv')
targ_nscore = pd.read_csv('../input/lish-moa/train_targets_nonscored.csv')
targ_score = pd.read_csv('../input/lish-moa/train_targets_scored.csv')
train = train_f.merge(targ_score, on='sig_id', how='left')
train = train.merge(targ_nscore, on='sig_id', how='left')
train_f.shape
train_f.columns
code
48162408/cell_25
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sample = pd.read_csv('../input/lish-moa/sample_submission.csv')
test_f = pd.read_csv('../input/lish-moa/test_features.csv')
train_f = pd.read_csv('../input/lish-moa/train_features.csv')
drug = pd.read_csv('../input/lish-moa/train_drug.csv')
targ_nscore = pd.read_csv('../input/lish-moa/train_targets_nonscored.csv')
targ_score = pd.read_csv('../input/lish-moa/train_targets_scored.csv')
test_f.shape
test_f.head()
code
48162408/cell_23
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
sample = pd.read_csv('../input/lish-moa/sample_submission.csv')
test_f = pd.read_csv('../input/lish-moa/test_features.csv')
train_f = pd.read_csv('../input/lish-moa/train_features.csv')
drug = pd.read_csv('../input/lish-moa/train_drug.csv')
targ_nscore = pd.read_csv('../input/lish-moa/train_targets_nonscored.csv')
targ_score = pd.read_csv('../input/lish-moa/train_targets_scored.csv')
train = train_f.merge(targ_score, on='sig_id', how='left')
train = train.merge(targ_nscore, on='sig_id', how='left')
stargs_name = list(targ_score.columns[1:])
scored_targets = train[list(targ_score.columns[1:])].sum(axis = 1)
nscored_targets = train[list(targ_nscore.columns[1:])].sum(axis = 1)
fig,axes = plt.subplots(figsize = (32,8),ncols = 2)
sns.countplot(scored_targets,ax = axes[0])
sns.countplot(nscored_targets,ax = axes[1])
# scored_targets
for i in range(2):
    axes[i].tick_params(axis = 'x',labelsize =20)
    axes[i].tick_params(axis = 'y', labelsize = 20)
axes[0].set_title(f'Training set unique scored per sample',size = 22 , pad = 22)
axes[1].set_title(f'Training set unique not scored per sample',size = 22 , pad = 22)
plt.show()
targ_score.shape
code
48162408/cell_29
[ "text_plain_output_1.png" ]
from sklearn.metrics import log_loss
from sklearn.model_selection import KFold
from xgboost import XGBRegressor
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
sample = pd.read_csv('../input/lish-moa/sample_submission.csv')
test_f = pd.read_csv('../input/lish-moa/test_features.csv')
train_f = pd.read_csv('../input/lish-moa/train_features.csv')
drug = pd.read_csv('../input/lish-moa/train_drug.csv')
targ_nscore = pd.read_csv('../input/lish-moa/train_targets_nonscored.csv')
targ_score = pd.read_csv('../input/lish-moa/train_targets_scored.csv')
train = train_f.merge(targ_score, on='sig_id', how='left')
train = train.merge(targ_nscore, on='sig_id', how='left')
stargs_name = list(targ_score.columns[1:])
scored_targets = train[list(targ_score.columns[1:])].sum(axis = 1)
nscored_targets = train[list(targ_nscore.columns[1:])].sum(axis = 1)
fig,axes = plt.subplots(figsize = (32,8),ncols = 2)
sns.countplot(scored_targets,ax = axes[0])
sns.countplot(nscored_targets,ax = axes[1])
# scored_targets
for i in range(2):
    axes[i].tick_params(axis = 'x',labelsize =20)
    axes[i].tick_params(axis = 'y', labelsize = 20)
axes[0].set_title(f'Training set unique scored per sample',size = 22 , pad = 22)
axes[1].set_title(f'Training set unique not scored per sample',size = 22 , pad = 22)
plt.show()
test_f.shape
train_f.shape
train_f.columns
def preprocess(df):
    df = df.copy()
    df.loc[:, 'cp_type'] = df.loc[:, 'cp_type'].map({'trt_cp': 0, 'ctl_vehicle': 1})
    df.loc[:, 'cp_dose'] = df.loc[:, 'cp_dose'].map({'D1': 0, 'D2': 1})
    del df['sig_id']
    return df
train = preprocess(train_f)
test = preprocess(test_f)
del targ_score['sig_id']
targ_score.shape
def metric(y_true, y_pred):
    metrics = []
    metrics.append(log_loss(y_true, y_pred.astype(float), labels=[0, 1]))
    return np.mean(metrics)
cols = targ_score.columns
submission = sample.copy()
submission.loc[:, cols] = 0
submission
N_splits = 5
off_loss = 0
for c, columns in enumerate(cols, 1):
    y = targ_score[columns]
    total_loss = 0
    for fn, (trn_idx, val_idx) in enumerate(KFold(n_splits=N_splits, shuffle=True).split(train)):
        X_train, X_val = (train.iloc[trn_idx], train.iloc[val_idx])
        y_train, y_val = (y.iloc[trn_idx], y.iloc[val_idx])
        model = XGBRegressor(tree_method='gpu_hist', min_child_weight=1, learning_rate=0.015, colsample_bytree=0.65, gamma=3.69, max_delta_step=2.07, max_depth=10, n_estimators=207, subsample=1)
        model.fit(X_train, y_train)
        pred = model.predict(X_val)
        loss = metric(y_val, pred)
        total_loss += loss
        predictions = model.predict(test)
        submission[columns] += predictions / N_splits
    off_loss += total_loss / N_splits
submission
code
48162408/cell_11
[ "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sample = pd.read_csv('../input/lish-moa/sample_submission.csv')
test_f = pd.read_csv('../input/lish-moa/test_features.csv')
train_f = pd.read_csv('../input/lish-moa/train_features.csv')
drug = pd.read_csv('../input/lish-moa/train_drug.csv')
targ_nscore = pd.read_csv('../input/lish-moa/train_targets_nonscored.csv')
targ_score = pd.read_csv('../input/lish-moa/train_targets_scored.csv')
test_f.shape
code
48162408/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
48162408/cell_7
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
sample = pd.read_csv('../input/lish-moa/sample_submission.csv')
test_f = pd.read_csv('../input/lish-moa/test_features.csv')
train_f = pd.read_csv('../input/lish-moa/train_features.csv')
drug = pd.read_csv('../input/lish-moa/train_drug.csv')
targ_nscore = pd.read_csv('../input/lish-moa/train_targets_nonscored.csv')
targ_score = pd.read_csv('../input/lish-moa/train_targets_scored.csv')
train = train_f.merge(targ_score, on='sig_id', how='left')
train = train.merge(targ_nscore, on='sig_id', how='left')
stargs_name = list(targ_score.columns[1:])
scored_targets = train[list(targ_score.columns[1:])].sum(axis=1)
nscored_targets = train[list(targ_nscore.columns[1:])].sum(axis=1)
fig, axes = plt.subplots(figsize=(32, 8), ncols=2)
sns.countplot(scored_targets, ax=axes[0])
sns.countplot(nscored_targets, ax=axes[1])
for i in range(2):
    axes[i].tick_params(axis='x', labelsize=20)
    axes[i].tick_params(axis='y', labelsize=20)
axes[0].set_title(f'Training set unique scored per sample', size=22, pad=22)
axes[1].set_title(f'Training set unique not scored per sample', size=22, pad=22)
plt.show()
code
48162408/cell_28
[ "text_html_output_1.png" ]
from sklearn.metrics import log_loss
from sklearn.model_selection import KFold
from xgboost import XGBRegressor
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
sample = pd.read_csv('../input/lish-moa/sample_submission.csv')
test_f = pd.read_csv('../input/lish-moa/test_features.csv')
train_f = pd.read_csv('../input/lish-moa/train_features.csv')
drug = pd.read_csv('../input/lish-moa/train_drug.csv')
targ_nscore = pd.read_csv('../input/lish-moa/train_targets_nonscored.csv')
targ_score = pd.read_csv('../input/lish-moa/train_targets_scored.csv')
train = train_f.merge(targ_score, on='sig_id', how='left')
train = train.merge(targ_nscore, on='sig_id', how='left')
stargs_name = list(targ_score.columns[1:])
scored_targets = train[list(targ_score.columns[1:])].sum(axis = 1)
nscored_targets = train[list(targ_nscore.columns[1:])].sum(axis = 1)
fig,axes = plt.subplots(figsize = (32,8),ncols = 2)
sns.countplot(scored_targets,ax = axes[0])
sns.countplot(nscored_targets,ax = axes[1])
# scored_targets
for i in range(2):
    axes[i].tick_params(axis = 'x',labelsize =20)
    axes[i].tick_params(axis = 'y', labelsize = 20)
axes[0].set_title(f'Training set unique scored per sample',size = 22 , pad = 22)
axes[1].set_title(f'Training set unique not scored per sample',size = 22 , pad = 22)
plt.show()
test_f.shape
train_f.shape
train_f.columns
def preprocess(df):
    df = df.copy()
    df.loc[:, 'cp_type'] = df.loc[:, 'cp_type'].map({'trt_cp': 0, 'ctl_vehicle': 1})
    df.loc[:, 'cp_dose'] = df.loc[:, 'cp_dose'].map({'D1': 0, 'D2': 1})
    del df['sig_id']
    return df
train = preprocess(train_f)
test = preprocess(test_f)
del targ_score['sig_id']
targ_score.shape
def metric(y_true, y_pred):
    metrics = []
    metrics.append(log_loss(y_true, y_pred.astype(float), labels=[0, 1]))
    return np.mean(metrics)
cols = targ_score.columns
submission = sample.copy()
submission.loc[:, cols] = 0
submission
N_splits = 5
off_loss = 0
for c, columns in enumerate(cols, 1):
    y = targ_score[columns]
    total_loss = 0
    for fn, (trn_idx, val_idx) in enumerate(KFold(n_splits=N_splits, shuffle=True).split(train)):
        X_train, X_val = (train.iloc[trn_idx], train.iloc[val_idx])
        y_train, y_val = (y.iloc[trn_idx], y.iloc[val_idx])
        model = XGBRegressor(tree_method='gpu_hist', min_child_weight=1, learning_rate=0.015, colsample_bytree=0.65, gamma=3.69, max_delta_step=2.07, max_depth=10, n_estimators=207, subsample=1)
        model.fit(X_train, y_train)
        pred = model.predict(X_val)
        loss = metric(y_val, pred)
        total_loss += loss
        predictions = model.predict(test)
        submission[columns] += predictions / N_splits
    off_loss += total_loss / N_splits
off_loss / 100
code
48162408/cell_8
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
sample = pd.read_csv('../input/lish-moa/sample_submission.csv')
test_f = pd.read_csv('../input/lish-moa/test_features.csv')
train_f = pd.read_csv('../input/lish-moa/train_features.csv')
drug = pd.read_csv('../input/lish-moa/train_drug.csv')
targ_nscore = pd.read_csv('../input/lish-moa/train_targets_nonscored.csv')
targ_score = pd.read_csv('../input/lish-moa/train_targets_scored.csv')
train = train_f.merge(targ_score, on='sig_id', how='left')
train = train.merge(targ_nscore, on='sig_id', how='left')
stargs_name = list(targ_score.columns[1:])
scored_targets = train[list(targ_score.columns[1:])].sum(axis = 1)
nscored_targets = train[list(targ_nscore.columns[1:])].sum(axis = 1)
fig,axes = plt.subplots(figsize = (32,8),ncols = 2)
sns.countplot(scored_targets,ax = axes[0])
sns.countplot(nscored_targets,ax = axes[1])
# scored_targets
for i in range(2):
    axes[i].tick_params(axis = 'x',labelsize =20)
    axes[i].tick_params(axis = 'y', labelsize = 20)
axes[0].set_title(f'Training set unique scored per sample',size = 22 , pad = 22)
axes[1].set_title(f'Training set unique not scored per sample',size = 22 , pad = 22)
plt.show()
fig, axes = plt.subplots(figsize=(24, 24), nrows=3, ncols=2)
sns.countplot(train_f['cp_type'], ax=axes[0][0])
sns.countplot(test_f['cp_type'], ax=axes[0][1])
sns.countplot(train_f['cp_time'], ax=axes[1][0])
sns.countplot(test_f['cp_time'], ax=axes[1][1])
sns.countplot(train_f['cp_dose'], ax=axes[2][0])
sns.countplot(test_f['cp_dose'], ax=axes[2][1])
for i, f in enumerate(['cp_type', 'cp_time', 'cp_dose']):
    for j, d in enumerate(['training', 'test']):
        axes[i][j].set_title(f'{d} Set {f} Distribution', size=20, pad=15)
code
48162408/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sample = pd.read_csv('../input/lish-moa/sample_submission.csv')
test_f = pd.read_csv('../input/lish-moa/test_features.csv')
train_f = pd.read_csv('../input/lish-moa/train_features.csv')
drug = pd.read_csv('../input/lish-moa/train_drug.csv')
targ_nscore = pd.read_csv('../input/lish-moa/train_targets_nonscored.csv')
targ_score = pd.read_csv('../input/lish-moa/train_targets_scored.csv')
train = train_f.merge(targ_score, on='sig_id', how='left')
train = train.merge(targ_nscore, on='sig_id', how='left')
train.head()
code
48162408/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sample = pd.read_csv('../input/lish-moa/sample_submission.csv')
test_f = pd.read_csv('../input/lish-moa/test_features.csv')
train_f = pd.read_csv('../input/lish-moa/train_features.csv')
drug = pd.read_csv('../input/lish-moa/train_drug.csv')
targ_nscore = pd.read_csv('../input/lish-moa/train_targets_nonscored.csv')
targ_score = pd.read_csv('../input/lish-moa/train_targets_scored.csv')
drug.head()
code
48162408/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
sample = pd.read_csv('../input/lish-moa/sample_submission.csv')
test_f = pd.read_csv('../input/lish-moa/test_features.csv')
train_f = pd.read_csv('../input/lish-moa/train_features.csv')
drug = pd.read_csv('../input/lish-moa/train_drug.csv')
targ_nscore = pd.read_csv('../input/lish-moa/train_targets_nonscored.csv')
targ_score = pd.read_csv('../input/lish-moa/train_targets_scored.csv')
train = train_f.merge(targ_score, on='sig_id', how='left')
train = train.merge(targ_nscore, on='sig_id', how='left')
stargs_name = list(targ_score.columns[1:])
scored_targets = train[list(targ_score.columns[1:])].sum(axis = 1)
nscored_targets = train[list(targ_nscore.columns[1:])].sum(axis = 1)
fig,axes = plt.subplots(figsize = (32,8),ncols = 2)
sns.countplot(scored_targets,ax = axes[0])
sns.countplot(nscored_targets,ax = axes[1])
# scored_targets
for i in range(2):
    axes[i].tick_params(axis = 'x',labelsize =20)
    axes[i].tick_params(axis = 'y', labelsize = 20)
axes[0].set_title(f'Training set unique scored per sample',size = 22 , pad = 22)
axes[1].set_title(f'Training set unique not scored per sample',size = 22 , pad = 22)
plt.show()
len(targ_score)
code
48162408/cell_10
[ "image_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sample = pd.read_csv('../input/lish-moa/sample_submission.csv')
test_f = pd.read_csv('../input/lish-moa/test_features.csv')
train_f = pd.read_csv('../input/lish-moa/train_features.csv')
drug = pd.read_csv('../input/lish-moa/train_drug.csv')
targ_nscore = pd.read_csv('../input/lish-moa/train_targets_nonscored.csv')
targ_score = pd.read_csv('../input/lish-moa/train_targets_scored.csv')
train = train_f.merge(targ_score, on='sig_id', how='left')
train = train.merge(targ_nscore, on='sig_id', how='left')
len(train_f) - len(test_f)
code
48162408/cell_27
[ "text_plain_output_1.png" ]
from sklearn.metrics import log_loss
from sklearn.model_selection import KFold
from xgboost import XGBRegressor
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
sample = pd.read_csv('../input/lish-moa/sample_submission.csv')
test_f = pd.read_csv('../input/lish-moa/test_features.csv')
train_f = pd.read_csv('../input/lish-moa/train_features.csv')
drug = pd.read_csv('../input/lish-moa/train_drug.csv')
targ_nscore = pd.read_csv('../input/lish-moa/train_targets_nonscored.csv')
targ_score = pd.read_csv('../input/lish-moa/train_targets_scored.csv')
train = train_f.merge(targ_score, on='sig_id', how='left')
train = train.merge(targ_nscore, on='sig_id', how='left')
stargs_name = list(targ_score.columns[1:])
scored_targets = train[list(targ_score.columns[1:])].sum(axis = 1)
nscored_targets = train[list(targ_nscore.columns[1:])].sum(axis = 1)
fig,axes = plt.subplots(figsize = (32,8),ncols = 2)
sns.countplot(scored_targets,ax = axes[0])
sns.countplot(nscored_targets,ax = axes[1])
# scored_targets
for i in range(2):
    axes[i].tick_params(axis = 'x',labelsize =20)
    axes[i].tick_params(axis = 'y', labelsize = 20)
axes[0].set_title(f'Training set unique scored per sample',size = 22 , pad = 22)
axes[1].set_title(f'Training set unique not scored per sample',size = 22 , pad = 22)
plt.show()
test_f.shape
train_f.shape
train_f.columns
def preprocess(df):
    df = df.copy()
    df.loc[:, 'cp_type'] = df.loc[:, 'cp_type'].map({'trt_cp': 0, 'ctl_vehicle': 1})
    df.loc[:, 'cp_dose'] = df.loc[:, 'cp_dose'].map({'D1': 0, 'D2': 1})
    del df['sig_id']
    return df
train = preprocess(train_f)
test = preprocess(test_f)
del targ_score['sig_id']
targ_score.shape
def metric(y_true, y_pred):
    metrics = []
    metrics.append(log_loss(y_true, y_pred.astype(float), labels=[0, 1]))
    return np.mean(metrics)
cols = targ_score.columns
submission = sample.copy()
submission.loc[:, cols] = 0
submission
N_splits = 5
off_loss = 0
for c, columns in enumerate(cols, 1):
    y = targ_score[columns]
    total_loss = 0
    for fn, (trn_idx, val_idx) in enumerate(KFold(n_splits=N_splits, shuffle=True).split(train)):
        print('Fold :', fn + 1)
        X_train, X_val = (train.iloc[trn_idx], train.iloc[val_idx])
        y_train, y_val = (y.iloc[trn_idx], y.iloc[val_idx])
        model = XGBRegressor(tree_method='gpu_hist', min_child_weight=1, learning_rate=0.015, colsample_bytree=0.65, gamma=3.69, max_delta_step=2.07, max_depth=10, n_estimators=207, subsample=1)
        model.fit(X_train, y_train)
        pred = model.predict(X_val)
        loss = metric(y_val, pred)
        total_loss += loss
        predictions = model.predict(test)
        submission[columns] += predictions / N_splits
    off_loss += total_loss / N_splits
    print('Model ' + str(c) + ':Loss = ' + str(total_loss / N_splits))
code
48162408/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sample = pd.read_csv('../input/lish-moa/sample_submission.csv')
test_f = pd.read_csv('../input/lish-moa/test_features.csv')
train_f = pd.read_csv('../input/lish-moa/train_features.csv')
drug = pd.read_csv('../input/lish-moa/train_drug.csv')
targ_nscore = pd.read_csv('../input/lish-moa/train_targets_nonscored.csv')
targ_score = pd.read_csv('../input/lish-moa/train_targets_scored.csv')
train = train_f.merge(targ_score, on='sig_id', how='left')
train = train.merge(targ_nscore, on='sig_id', how='left')
train_f.shape
code
1004150/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
poke = pd.read_csv('../input/Pokemon.csv')
poke[poke['Dual'] == 0]
code
1004150/cell_2
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sn
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
1004150/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
poke = pd.read_csv('../input/Pokemon.csv')
poke['Type 2'].fillna('No Type')
code
1004150/cell_8
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
poke = pd.read_csv('../input/Pokemon.csv')
poke.head(10)
code
1004150/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
poke = pd.read_csv('../input/Pokemon.csv')
print(poke.sample(20))
print('===============================================')
print(poke.dtypes)
code
1004150/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
poke = pd.read_csv('../input/Pokemon.csv')
print('%0.2f percent of the Pokemon dont have a secondary type' % (poke['Type 2'].isnull().sum() / len(poke) * 100))
print('Roughly %0.2f percent are legendary types.' % (len(poke[poke['Legendary'] == True]) * 100 / len(poke)))
code
129005548/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
df.info()
code
129005548/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns
code
129005548/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
129005548/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
gender = pd.pivot_table(df[df['diabetes'] == 1], index='gender', values='age', aggfunc=len).reset_index()
gender.rename(columns={'age': 'amount'}, inplace=True)
plt.title('Diabetes patients per gender', size=30)
sns.barplot(data=gender, x='gender', y='amount')
code
129005548/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
gender = pd.pivot_table(df[df['diabetes'] == 1], index='gender', values='age', aggfunc=len).reset_index()
gender.rename(columns={'age': 'amount'}, inplace=True)
diabetes = df[df['diabetes'] == 1]
plt.figure(figsize=(15, 10))
sns.stripplot(diabetes, x='smoking_history', y='bmi', hue='gender')
code
129005548/cell_15
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
gender = pd.pivot_table(df[df['diabetes'] == 1], index='gender', values='age', aggfunc=len).reset_index()
gender.rename(columns={'age': 'amount'}, inplace=True)
diabetes = df[df['diabetes'] == 1]
plt.figure(figsize=(15, 10))
sns.stripplot(diabetes, y='age', x='smoking_history', hue='heart_disease')
code
129005548/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
df.head()
code
129005548/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
gender = pd.pivot_table(df[df['diabetes'] == 1], index='gender', values='age', aggfunc=len).reset_index()
gender.rename(columns={'age': 'amount'}, inplace=True)
diabetes = df[df['diabetes'] == 1]
sns.stripplot(diabetes, y='age', x='hypertension', hue='gender')
code
129005548/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
gender = pd.pivot_table(df[df['diabetes'] == 1], index='gender', values='age', aggfunc=len).reset_index()
gender.rename(columns={'age': 'amount'}, inplace=True)
diabetes = df[df['diabetes'] == 1]
sns.scatterplot(diabetes, x='HbA1c_level', y='blood_glucose_level', hue='gender')
code
129005548/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('/kaggle/input/diabetes-prediction-dataset/diabetes_prediction_dataset.csv')
gender = pd.pivot_table(df[df['diabetes'] == 1], index='gender', values='age', aggfunc=len).reset_index()
gender.rename(columns={'age': 'amount'}, inplace=True)
diabetes = df[df['diabetes'] == 1]
sns.stripplot(diabetes, y='age', x='heart_disease', hue='gender')
code