Columns in this dump:
path: string (length 13 to 17)
screenshot_names: sequence (length 1 to 873)
code: string (length 0 to 40.4k)
cell_type: string (1 distinct value)
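Before the rows themselves, a minimal sketch of how records with this schema could be loaded and filtered. The file name notebook_cells.jsonl and the JSON Lines layout are assumptions made for illustration only; nothing above states how the dump is actually stored.

import pandas as pd

# Hypothetical export of the rows below; substitute the real file for this dataset.
rows = pd.read_json('notebook_cells.jsonl', lines=True)
# Expected columns: path, screenshot_names, code, cell_type.
code_cells = rows[rows['cell_type'] == 'code']
# Example query: cells whose recorded outputs include at least one rendered image.
with_images = code_cells[code_cells['screenshot_names'].apply(lambda names: any('image_output' in n for n in names))]
print(len(code_cells), len(with_images))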
72089413/cell_30
[ "text_plain_output_1.png" ]
from pandas.io.json import json_normalize from tqdm import tqdm from tqdm.notebook import tqdm import json_lines import pandas as pd import random data0 = [] with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f: for i, item in enumerate(json_lines.reader(f)): if i < 10000: data0 += [item] users0 = json_normalize(data0[0][0]) users0 for i, item in tqdm(enumerate(data0[0])): if 0 < i and i < 10000: usersi = json_normalize(item) users0 = pd.concat([users0, usersi]) N = list(range(10000)) data1 = users0.copy() data1['index0'] = N data1 = data1.set_index('index0', drop=True) data1 data2 = data1.drop(['channelId', 'videoId', 'videoPublished'], axis=1) data2 data2 = data2.astype(float) target = ['subscriberCount'] dataY = data2[target[0]] dataX = data2.drop(target, axis=1) n = len(dataX) random.seed(2021) random.shuffle(N) trainX = dataX.loc[N[0:n // 4 * 3]] trainY = dataY.loc[N[0:n // 4 * 3]] testX = dataX.loc[N[n // 4 * 3:]] testY = dataY.loc[N[n // 4 * 3:]] df_columns = list(dataX.columns) def create_numeric_feature(input_df): use_columns = df_columns return input_df[use_columns].copy() from tqdm import tqdm def to_feature(input_df): processors = [create_numeric_feature] out_df = pd.DataFrame() for func in tqdm(processors, total=len(processors)): with Timer(prefix='create' + func.__name__ + ' '): _df = func(input_df) assert len(_df) == len(input_df), func.__name__ out_df = pd.concat([out_df, _df], axis=1) return out_df train_feat_df = to_feature(trainX) test_feat_df = to_feature(testX)
code
72089413/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import json_lines data0 = [] with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f: for i, item in enumerate(json_lines.reader(f)): if i < 10000: data0 += [item] data0[0][0]
code
72089413/cell_26
[ "text_plain_output_1.png" ]
from pandas.io.json import json_normalize from tqdm.notebook import tqdm import json_lines import pandas as pd import random data0 = [] with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f: for i, item in enumerate(json_lines.reader(f)): if i < 10000: data0 += [item] users0 = json_normalize(data0[0][0]) users0 for i, item in tqdm(enumerate(data0[0])): if 0 < i and i < 10000: usersi = json_normalize(item) users0 = pd.concat([users0, usersi]) N = list(range(10000)) data1 = users0.copy() data1['index0'] = N data1 = data1.set_index('index0', drop=True) data1 data2 = data1.drop(['channelId', 'videoId', 'videoPublished'], axis=1) data2 data2 = data2.astype(float) target = ['subscriberCount'] dataY = data2[target[0]] dataX = data2.drop(target, axis=1) n = len(dataX) random.seed(2021) random.shuffle(N) trainX = dataX.loc[N[0:n // 4 * 3]] trainY = dataY.loc[N[0:n // 4 * 3]] testX = dataX.loc[N[n // 4 * 3:]] testY = dataY.loc[N[n // 4 * 3:]] df_columns = list(dataX.columns) print(df_columns)
code
72089413/cell_11
[ "text_plain_output_1.png" ]
from pandas.io.json import json_normalize from tqdm.notebook import tqdm import json_lines import pandas as pd data0 = [] with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f: for i, item in enumerate(json_lines.reader(f)): if i < 10000: data0 += [item] users0 = json_normalize(data0[0][0]) users0 for i, item in tqdm(enumerate(data0[0])): if 0 < i and i < 10000: usersi = json_normalize(item) users0 = pd.concat([users0, usersi])
code
72089413/cell_19
[ "text_html_output_1.png" ]
from pandas.io.json import json_normalize from tqdm.notebook import tqdm import json_lines import pandas as pd data0 = [] with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f: for i, item in enumerate(json_lines.reader(f)): if i < 10000: data0 += [item] users0 = json_normalize(data0[0][0]) users0 for i, item in tqdm(enumerate(data0[0])): if 0 < i and i < 10000: usersi = json_normalize(item) users0 = pd.concat([users0, usersi]) N = list(range(10000)) data1 = users0.copy() data1['index0'] = N data1 = data1.set_index('index0', drop=True) data1 data2 = data1.drop(['channelId', 'videoId', 'videoPublished'], axis=1) data2 data2 = data2.astype(float) data2
code
72089413/cell_1
[ "text_plain_output_1.png" ]
!pip install json_lines
code
72089413/cell_7
[ "text_plain_output_1.png" ]
import json_lines data0 = [] with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f: for i, item in enumerate(json_lines.reader(f)): if i < 10000: data0 += [item] data0[0][0].keys()
code
72089413/cell_18
[ "text_plain_output_1.png" ]
from pandas.io.json import json_normalize from tqdm.notebook import tqdm import json_lines import pandas as pd data0 = [] with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f: for i, item in enumerate(json_lines.reader(f)): if i < 10000: data0 += [item] users0 = json_normalize(data0[0][0]) users0 for i, item in tqdm(enumerate(data0[0])): if 0 < i and i < 10000: usersi = json_normalize(item) users0 = pd.concat([users0, usersi]) N = list(range(10000)) data1 = users0.copy() data1['index0'] = N data1 = data1.set_index('index0', drop=True) data1 data2 = data1.drop(['channelId', 'videoId', 'videoPublished'], axis=1) data2 data2 = data2.astype(float) data2.info()
code
72089413/cell_32
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error import lightgbm as lgbm import numpy as np import lightgbm as lgbm from sklearn.metrics import mean_squared_error def fit_lgbm(X, y, cv, params: dict=None, verbose: int=50): if params is None: params = {} models = [] oof_pred = np.zeros_like(y, dtype=np.float64) for i, (idx_train, idx_valid) in enumerate(cv): x_train, y_train = (X[idx_train], y[idx_train]) x_valid, y_valid = (X[idx_valid], y[idx_valid]) clf = lgbm.LGBMRegressor(**params) with Timer(prefix='fit fold={} '.format(i)): clf.fit(x_train, y_train, eval_set=[(x_valid, y_valid)], early_stopping_rounds=100, verbose=verbose) pred_i = clf.predict(x_valid) oof_pred[idx_valid] = pred_i models.append(clf) print(f'Fold {i} RMSLE: {mean_squared_error(y_valid, pred_i) ** 0.5:.4f}') print() score = mean_squared_error(y, oof_pred) ** 0.5 print('-' * 50) print('FINISHED | Whole RMSLE: {:.4f}'.format(score)) return (oof_pred, models)
code
72089413/cell_15
[ "text_html_output_1.png" ]
from pandas.io.json import json_normalize from tqdm.notebook import tqdm import json_lines import pandas as pd data0 = [] with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f: for i, item in enumerate(json_lines.reader(f)): if i < 10000: data0 += [item] users0 = json_normalize(data0[0][0]) users0 for i, item in tqdm(enumerate(data0[0])): if 0 < i and i < 10000: usersi = json_normalize(item) users0 = pd.concat([users0, usersi]) N = list(range(10000)) data1 = users0.copy() data1['index0'] = N data1 = data1.set_index('index0', drop=True) data1 data2 = data1.drop(['channelId', 'videoId', 'videoPublished'], axis=1) data2
code
72089413/cell_3
[ "text_html_output_1.png" ]
import tensorflow as tf import tensorflow as tf try: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() print('Device:', tpu.master()) tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.experimental.TPUStrategy(tpu) except: strategy = tf.distribute.get_strategy() print('Number of replicas:', strategy.num_replicas_in_sync)
code
72089413/cell_35
[ "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from pandas.io.json import json_normalize from tqdm import tqdm from tqdm.notebook import tqdm import json_lines import pandas as pd import random data0 = [] with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f: for i, item in enumerate(json_lines.reader(f)): if i < 10000: data0 += [item] users0 = json_normalize(data0[0][0]) users0 for i, item in tqdm(enumerate(data0[0])): if 0 < i and i < 10000: usersi = json_normalize(item) users0 = pd.concat([users0, usersi]) N = list(range(10000)) data1 = users0.copy() data1['index0'] = N data1 = data1.set_index('index0', drop=True) data1 data2 = data1.drop(['channelId', 'videoId', 'videoPublished'], axis=1) data2 data2 = data2.astype(float) target = ['subscriberCount'] dataY = data2[target[0]] dataX = data2.drop(target, axis=1) n = len(dataX) random.seed(2021) random.shuffle(N) trainX = dataX.loc[N[0:n // 4 * 3]] trainY = dataY.loc[N[0:n // 4 * 3]] testX = dataX.loc[N[n // 4 * 3:]] testY = dataY.loc[N[n // 4 * 3:]] df_columns = list(dataX.columns) def create_numeric_feature(input_df): use_columns = df_columns return input_df[use_columns].copy() from tqdm import tqdm def to_feature(input_df): processors = [create_numeric_feature] out_df = pd.DataFrame() for func in tqdm(processors, total=len(processors)): with Timer(prefix='create' + func.__name__ + ' '): _df = func(input_df) assert len(_df) == len(input_df), func.__name__ out_df = pd.concat([out_df, _df], axis=1) return out_df y = trainY ydf = pd.DataFrame(y) ydf
code
72089413/cell_14
[ "text_plain_output_1.png" ]
from pandas.io.json import json_normalize from tqdm.notebook import tqdm import json_lines import pandas as pd data0 = [] with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f: for i, item in enumerate(json_lines.reader(f)): if i < 10000: data0 += [item] users0 = json_normalize(data0[0][0]) users0 for i, item in tqdm(enumerate(data0[0])): if 0 < i and i < 10000: usersi = json_normalize(item) users0 = pd.concat([users0, usersi]) N = list(range(10000)) data1 = users0.copy() data1['index0'] = N data1 = data1.set_index('index0', drop=True) data1
code
72089413/cell_22
[ "text_html_output_1.png" ]
from pandas.io.json import json_normalize from tqdm.notebook import tqdm import json_lines import pandas as pd data0 = [] with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f: for i, item in enumerate(json_lines.reader(f)): if i < 10000: data0 += [item] users0 = json_normalize(data0[0][0]) users0 for i, item in tqdm(enumerate(data0[0])): if 0 < i and i < 10000: usersi = json_normalize(item) users0 = pd.concat([users0, usersi]) N = list(range(10000)) data1 = users0.copy() data1['index0'] = N data1 = data1.set_index('index0', drop=True) data1 data2 = data1.drop(['channelId', 'videoId', 'videoPublished'], axis=1) data2 data2 = data2.astype(float) target = ['subscriberCount'] dataY = data2[target[0]] dataX = data2.drop(target, axis=1) print(dataY[0:5].T) print() print(dataX[0:5].T)
code
72089413/cell_10
[ "text_plain_output_1.png" ]
from pandas.io.json import json_normalize import json_lines data0 = [] with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f: for i, item in enumerate(json_lines.reader(f)): if i < 10000: data0 += [item] users0 = json_normalize(data0[0][0]) users0
code
72089413/cell_37
[ "text_plain_output_1.png" ]
from pandas.io.json import json_normalize from tqdm import tqdm from tqdm.notebook import tqdm import json_lines import pandas as pd data0 = [] with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f: for i, item in enumerate(json_lines.reader(f)): if i < 10000: data0 += [item] users0 = json_normalize(data0[0][0]) users0 for i, item in tqdm(enumerate(data0[0])): if 0 < i and i < 10000: usersi = json_normalize(item) users0 = pd.concat([users0, usersi]) N = list(range(10000)) data1 = users0.copy() data1['index0'] = N data1 = data1.set_index('index0', drop=True) data1 data2 = data1.drop(['channelId', 'videoId', 'videoPublished'], axis=1) data2 data2 = data2.astype(float) target = ['subscriberCount'] dataY = data2[target[0]] dataX = data2.drop(target, axis=1) print(target)
code
72089413/cell_12
[ "text_plain_output_1.png" ]
from pandas.io.json import json_normalize from tqdm.notebook import tqdm import json_lines import pandas as pd data0 = [] with open('../input/youtubes-channels-dataset/YouTubeDataset_withChannelElapsed.json', 'rb') as f: for i, item in enumerate(json_lines.reader(f)): if i < 10000: data0 += [item] users0 = json_normalize(data0[0][0]) users0 for i, item in tqdm(enumerate(data0[0])): if 0 < i and i < 10000: usersi = json_normalize(item) users0 = pd.concat([users0, usersi]) print(len(users0))
code
2015167/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LogisticRegression from sklearn.metrics import hamming_loss import pandas as pd import numpy as np import pandas as pd df = pd.read_csv('../input/seattleWeather_1948-2017.csv') df = df.dropna() X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values y = df.iloc[:-1, -1:].values.astype('int') from sklearn.linear_model import LogisticRegression clf = LogisticRegression() clf.fit(X, y) y_hat = clf.predict(X) from sklearn.metrics import hamming_loss hamming_loss(y, y_hat)
code
2015167/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd import numpy as np import pandas as pd df = pd.read_csv('../input/seattleWeather_1948-2017.csv') df.head()
code
2015167/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression from sklearn.metrics import fbeta_score import pandas as pd import numpy as np import pandas as pd df = pd.read_csv('../input/seattleWeather_1948-2017.csv') df = df.dropna() X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values y = df.iloc[:-1, -1:].values.astype('int') from sklearn.linear_model import LogisticRegression clf = LogisticRegression() clf.fit(X, y) y_hat = clf.predict(X) from sklearn.metrics import fbeta_score fbeta_score(y, y_hat, beta=1)
code
2015167/cell_8
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression from sklearn.metrics import confusion_matrix import pandas as pd import numpy as np import pandas as pd df = pd.read_csv('../input/seattleWeather_1948-2017.csv') df = df.dropna() X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values y = df.iloc[:-1, -1:].values.astype('int') from sklearn.linear_model import LogisticRegression clf = LogisticRegression() clf.fit(X, y) y_hat = clf.predict(X) from sklearn.metrics import confusion_matrix confusion_matrix(y, y_hat)
code
2015167/cell_15
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression from sklearn.metrics import precision_score, recall_score, precision_recall_curve import pandas as pd import numpy as np import pandas as pd df = pd.read_csv('../input/seattleWeather_1948-2017.csv') df = df.dropna() X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values y = df.iloc[:-1, -1:].values.astype('int') from sklearn.linear_model import LogisticRegression clf = LogisticRegression() clf.fit(X, y) y_hat = clf.predict(X) from sklearn.metrics import precision_score, recall_score, precision_recall_curve print(precision_score(y, y_hat)) print(recall_score(y, y_hat))
code
2015167/cell_3
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression import pandas as pd import numpy as np import pandas as pd df = pd.read_csv('../input/seattleWeather_1948-2017.csv') df = df.dropna() X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values y = df.iloc[:-1, -1:].values.astype('int') from sklearn.linear_model import LogisticRegression clf = LogisticRegression() clf.fit(X, y) y_hat = clf.predict(X)
code
2015167/cell_17
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression from sklearn.metrics import precision_score, recall_score, precision_recall_curve import matplotlib.pyplot as plt import pandas as pd import numpy as np import pandas as pd df = pd.read_csv('../input/seattleWeather_1948-2017.csv') df = df.dropna() X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values y = df.iloc[:-1, -1:].values.astype('int') from sklearn.linear_model import LogisticRegression clf = LogisticRegression() clf.fit(X, y) y_hat = clf.predict(X) import matplotlib.pyplot as plt plt.style.use('fivethirtyeight') precision, recall, _ = precision_recall_curve(y, y_hat) fig, ax = plt.subplots(1, figsize=(12, 6)) ax.step(recall, precision, color='steelblue', where='post') ax.fill_between(recall, precision, step='post', color='lightgray') plt.suptitle('Precision-Recall Tradeoff for Seattle Rain Prediction') plt.xlabel('Recall') plt.ylabel('Precision')
code
2015167/cell_10
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegression from sklearn.metrics import confusion_matrix import pandas as pd import seaborn as sns import numpy as np import pandas as pd df = pd.read_csv('../input/seattleWeather_1948-2017.csv') df = df.dropna() X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values y = df.iloc[:-1, -1:].values.astype('int') from sklearn.linear_model import LogisticRegression clf = LogisticRegression() clf.fit(X, y) y_hat = clf.predict(X) import seaborn as sns sns.heatmap(confusion_matrix(y, y_hat) / len(y), cmap='Blues', annot=True)
code
2015167/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression from sklearn.metrics import accuracy_score import pandas as pd import numpy as np import pandas as pd df = pd.read_csv('../input/seattleWeather_1948-2017.csv') df = df.dropna() X = df.loc[:, ['PRCP', 'TMAX', 'TMIN']].shift(-1).iloc[:-1].values y = df.iloc[:-1, -1:].values.astype('int') from sklearn.linear_model import LogisticRegression clf = LogisticRegression() clf.fit(X, y) y_hat = clf.predict(X) from sklearn.metrics import accuracy_score accuracy_score(y, y_hat)
code
17144473/cell_9
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from bokeh.io import output_file,show,output_notebook,push_notebook from bokeh.models import ColumnDataSource,HoverTool,CategoricalColorMapper import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/scmp2k19.csv') df.loc[:, ['district', 'mandal', 'location']].sample(7, random_state=1) factors = list(df.mandal.unique()) colors = ['red', 'green', 'blue', 'black', 'orange', 'brown', 'grey', 'purple', 'yellow', 'white', 'pink', 'peru'] mapper = CategoricalColorMapper(factors=factors, palette=colors) plot = figure() plot.circle(x='odate', y='humidity_min', source=source, color={'field': 'mandal', 'transform': mapper}) show(plot)
code
17144473/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/scmp2k19.csv') df.info()
code
17144473/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
from bokeh.io import output_file,show,output_notebook,push_notebook from bokeh.layouts import row,column,gridplot,widgetbox p1 = figure() p1.circle(x='district', y='Rangareddy', source=source, color='red') p2 = figure() p2.circle(x='district', y='Warangal', source=source, color='black') p3 = figure() p3.circle(x='district', y='Khammam', source=source, color='blue') p4 = figure() p4.circle(x='district', y='Nalgonda', source=source, color='orange') layout1 = row(p1, p2) layout2 = row(p3, p4) layout3 = column(layout1, layout2) show(layout3)
code
17144473/cell_1
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from bokeh.io import output_file,show,output_notebook,push_notebook import os import numpy as np import pandas as pd import seaborn as sns from ipywidgets import interact from bokeh.io import output_file, show, output_notebook, push_notebook from bokeh.plotting import * from bokeh.models import ColumnDataSource, HoverTool, CategoricalColorMapper from bokeh.layouts import row, column, gridplot, widgetbox from bokeh.layouts import layout from bokeh.embed import file_html from bokeh.models import Text from bokeh.models import Plot from bokeh.models import Slider from bokeh.models import Circle from bokeh.models import Range1d from bokeh.models import CustomJS from bokeh.models import LinearAxis from bokeh.models import SingleIntervalTicker from bokeh.palettes import Spectral6 output_notebook() import os print(os.listdir('../input'))
code
17144473/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
from bokeh.io import output_file,show,output_notebook,push_notebook from bokeh.layouts import row,column,gridplot,widgetbox # Row and column p1 = figure() p1.circle(x = "district",y= "Rangareddy",source = source,color="red") p2 = figure() p2.circle(x = "district",y= "Warangal",source = source,color="black") p3 = figure() p3.circle(x = "district",y= "Khammam",source = source,color="blue") p4 = figure() p4.circle(x = "district",y= "Nalgonda",source = source,color="orange") layout1 = row(p1,p2) layout2 = row(p3,p4) layout3= column(layout1,layout2) show(layout3) show(layout3)
code
17144473/cell_5
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/scmp2k19.csv') df.loc[:, ['district', 'mandal', 'location']].sample(7, random_state=1)
code
18124991/cell_4
[ "text_plain_output_1.png" ]
from torch import nn import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import torchvision from torch import nn from fastai.vision import * import torchvision df = pd.read_csv('../input/train.csv') path = '../input' device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') model = torchvision.models.resnext101_32x8d(pretrained=True) iB = ImageDataBunch.from_df(path=path, df=df, folder='train_images', seed=42, suffix='.png', test='test_images', size=224, bs=32, ds_tfms=get_transforms(do_flip=True, max_warp=0, max_rotate=0, max_lighting=0, p_affine=0, xtra_tfms=[crop_pad()])) model1 = torchvision.models.resnext101_32x8d(pretrained=True) model1.fc = nn.Sequential(nn.BatchNorm1d(2048), nn.Dropout(p=0.25), nn.Linear(2048, 512), nn.ReLU(), nn.BatchNorm1d(512), nn.Dropout(p=0.5), nn.Linear(512, 5)) model1.to(device) learn1 = Learner(data=iB, model=model1, model_dir='/tmp/models', metrics=[accuracy]) learn2 = cnn_learner(data=iB, base_arch=models.resnet152, model_dir='/tmp/models', metrics=[accuracy]) learn3 = cnn_learner(data=iB, base_arch=models.densenet201, model_dir='/tmp/models', metrics=[accuracy]) learn4 = cnn_learner(data=iB, base_arch=models.vgg16_bn, model_dir='/tmp/models', metrics=[accuracy]) learn1.fit_one_cycle(7, slice(0.0008)) model1 = learn1.model learn2.unfreeze() learn2.fit_one_cycle(7, slice(0.003)) model2 = learn2.model learn3.unfreeze() learn3.fit_one_cycle(7, slice(0.003)) model3 = learn3.model learn4.unfreeze() learn4.fit_one_cycle(7, slice(0.003)) model4 = learn4.model torch.save(model1, './model1.pth') torch.save(model2, './model2.pth') torch.save(model3, './model3.pth') torch.save(model4, './model4.pth') dff = pd.read_csv('../input/test.csv') src = ImageList.from_df(dff, path='../input', folder='test_images', suffix='.png').split_none().label_empty() model1.eval() model2.eval() model3.eval() model4.eval() iB = ImageDataBunch.create_from_ll(src, size=224, bs=32, ds_tfms=get_transforms(do_flip=True, max_warp=0, max_rotate=0, max_lighting=0, p_affine=0.2, xtra_tfms=[crop_pad()])) predictor1 = Learner(data=iB, model=model1, model_dir='/') preds1 = predictor1.get_preds(ds_type=DatasetType.Fix) predictor2 = Learner(data=iB, model=model2, model_dir='/') preds2 = predictor2.get_preds(ds_type=DatasetType.Fix) predictor3 = Learner(data=iB, model=model3, model_dir='/') preds3 = predictor3.get_preds(ds_type=DatasetType.Fix) predictor4 = Learner(data=iB, model=model4, model_dir='/') preds4 = predictor4.get_preds(ds_type=DatasetType.Fix) labels1, labels2, labels3, labels4 = ([], [], [], []) print('Predicting from model1....') for pr in preds1[0]: p = pr.tolist() labels1.append(np.argmax(p)) print('Predicting from model2....') for pr in preds2[0]: p = pr.tolist() labels2.append(np.argmax(p)) print('Predicting from model3....') for pr in preds3[0]: p = pr.tolist() labels3.append(np.argmax(p)) print('Predicting from model4....') for pr in preds4[0]: p = pr.tolist() labels4.append(np.argmax(p))
code
18124991/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
from torch import nn import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import torchvision from torch import nn from fastai.vision import * import torchvision df = pd.read_csv('../input/train.csv') path = '../input' device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') model = torchvision.models.resnext101_32x8d(pretrained=True) iB = ImageDataBunch.from_df(path=path, df=df, folder='train_images', seed=42, suffix='.png', test='test_images', size=224, bs=32, ds_tfms=get_transforms(do_flip=True, max_warp=0, max_rotate=0, max_lighting=0, p_affine=0, xtra_tfms=[crop_pad()])) model1 = torchvision.models.resnext101_32x8d(pretrained=True) model1.fc = nn.Sequential(nn.BatchNorm1d(2048), nn.Dropout(p=0.25), nn.Linear(2048, 512), nn.ReLU(), nn.BatchNorm1d(512), nn.Dropout(p=0.5), nn.Linear(512, 5)) model1.to(device) learn1 = Learner(data=iB, model=model1, model_dir='/tmp/models', metrics=[accuracy]) learn2 = cnn_learner(data=iB, base_arch=models.resnet152, model_dir='/tmp/models', metrics=[accuracy]) learn3 = cnn_learner(data=iB, base_arch=models.densenet201, model_dir='/tmp/models', metrics=[accuracy]) learn4 = cnn_learner(data=iB, base_arch=models.vgg16_bn, model_dir='/tmp/models', metrics=[accuracy])
code
18124991/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os print(os.listdir('../input'))
code
18124991/cell_3
[ "text_html_output_4.png", "text_plain_output_4.png", "text_html_output_2.png", "text_plain_output_3.png", "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png", "text_html_output_3.png" ]
from torch import nn import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import torchvision from torch import nn from fastai.vision import * import torchvision df = pd.read_csv('../input/train.csv') path = '../input' device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') model = torchvision.models.resnext101_32x8d(pretrained=True) iB = ImageDataBunch.from_df(path=path, df=df, folder='train_images', seed=42, suffix='.png', test='test_images', size=224, bs=32, ds_tfms=get_transforms(do_flip=True, max_warp=0, max_rotate=0, max_lighting=0, p_affine=0, xtra_tfms=[crop_pad()])) model1 = torchvision.models.resnext101_32x8d(pretrained=True) model1.fc = nn.Sequential(nn.BatchNorm1d(2048), nn.Dropout(p=0.25), nn.Linear(2048, 512), nn.ReLU(), nn.BatchNorm1d(512), nn.Dropout(p=0.5), nn.Linear(512, 5)) model1.to(device) learn1 = Learner(data=iB, model=model1, model_dir='/tmp/models', metrics=[accuracy]) learn2 = cnn_learner(data=iB, base_arch=models.resnet152, model_dir='/tmp/models', metrics=[accuracy]) learn3 = cnn_learner(data=iB, base_arch=models.densenet201, model_dir='/tmp/models', metrics=[accuracy]) learn4 = cnn_learner(data=iB, base_arch=models.vgg16_bn, model_dir='/tmp/models', metrics=[accuracy]) print('Training ResNeXt101_32x8d....') learn1.fit_one_cycle(7, slice(0.0008)) model1 = learn1.model print('Training Resnet152....') learn2.unfreeze() learn2.fit_one_cycle(7, slice(0.003)) model2 = learn2.model print('Training Densenet201....') learn3.unfreeze() learn3.fit_one_cycle(7, slice(0.003)) model3 = learn3.model print('Training VGG16......') learn4.unfreeze() learn4.fit_one_cycle(7, slice(0.003)) model4 = learn4.model torch.save(model1, './model1.pth') torch.save(model2, './model2.pth') torch.save(model3, './model3.pth') torch.save(model4, './model4.pth')
code
333270/cell_9
[ "text_plain_output_1.png" ]
from sklearn.cross_validation import train_test_split import numpy as np import xgboost as xgb ids = test['id'] test = test.drop(['id'], axis=1) y = train['Demanda_uni_equil'] X = train[test.columns.values] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1729) test_preds = np.zeros(test.shape[0]) xg_train = xgb.DMatrix(X_train, label=y_train) xg_test = xgb.DMatrix(X_test) watchlist = [(xg_train, 'train')]
code
333270/cell_6
[ "text_plain_output_84.png", "text_plain_output_56.png", "text_plain_output_35.png", "text_plain_output_43.png", "text_plain_output_78.png", "text_plain_output_37.png", "text_plain_output_90.png", "text_plain_output_79.png", "text_plain_output_5.png", "text_plain_output_75.png", "text_plain_output_48.png", "text_plain_output_30.png", "text_plain_output_73.png", "text_plain_output_15.png", "text_plain_output_70.png", "text_plain_output_9.png", "text_plain_output_44.png", "text_plain_output_86.png", "text_plain_output_40.png", "text_plain_output_74.png", "text_plain_output_31.png", "text_plain_output_20.png", "text_plain_output_60.png", "text_plain_output_68.png", "text_plain_output_4.png", "text_plain_output_65.png", "text_plain_output_64.png", "text_plain_output_13.png", "text_plain_output_52.png", "text_plain_output_66.png", "text_plain_output_45.png", "text_plain_output_14.png", "text_plain_output_32.png", "text_plain_output_88.png", "text_plain_output_29.png", "text_plain_output_58.png", "text_plain_output_49.png", "text_plain_output_63.png", "text_plain_output_27.png", "text_plain_output_76.png", "text_plain_output_54.png", "text_plain_output_10.png", "text_plain_output_6.png", "text_plain_output_57.png", "text_plain_output_24.png", "text_plain_output_21.png", "text_plain_output_47.png", "text_plain_output_25.png", "text_plain_output_77.png", "text_plain_output_18.png", "text_plain_output_50.png", "text_plain_output_36.png", "text_plain_output_87.png", "text_plain_output_3.png", "application_vnd.jupyter.stderr_output_19.png", "text_plain_output_22.png", "text_plain_output_81.png", "text_plain_output_69.png", "text_plain_output_38.png", "text_plain_output_7.png", "text_plain_output_91.png", "text_plain_output_16.png", "text_plain_output_59.png", "text_plain_output_71.png", "text_plain_output_8.png", "text_plain_output_26.png", "text_plain_output_41.png", "text_plain_output_34.png", "text_plain_output_85.png", "text_plain_output_42.png", "text_plain_output_67.png", "text_plain_output_53.png", "text_plain_output_23.png", "text_plain_output_89.png", "text_plain_output_51.png", "text_plain_output_28.png", "text_plain_output_72.png", "text_plain_output_2.png", "text_plain_output_1.png", "text_plain_output_33.png", "text_plain_output_39.png", "text_plain_output_55.png", "text_plain_output_82.png", "text_plain_output_80.png", "text_plain_output_17.png", "text_plain_output_11.png", "text_plain_output_12.png", "text_plain_output_62.png", "text_plain_output_61.png", "text_plain_output_83.png", "application_vnd.jupyter.stderr_output_92.png", "text_plain_output_46.png" ]
nrows = 5000000 dtype = {'Semana': np.uint8, 'Agencia_ID': np.uint16, 'Canal_ID': np.uint8, 'Ruta_SAK': np.uint16, 'Cliente_ID': np.uint32, 'Producto_ID': np.uint16, 'Demanda_uni_equil': np.uint16} train_filename = '../input/train.csv' print('Loading Train... nrows : {0}'.format(nrows)) train.head()
code
333270/cell_11
[ "text_html_output_1.png" ]
from sklearn.cross_validation import train_test_split import math import numpy as np import pandas as pd import xgboost as xgb def evalerror(preds, dtrain): labels = dtrain.get_label() assert len(preds) == len(labels) labels = labels.tolist() preds = preds.tolist() terms_to_sum = [(math.log(labels[i] + 1) - math.log(max(0, preds[i]) + 1)) ** 2.0 for i, pred in enumerate(labels)] return ('error', (sum(terms_to_sum) * (1.0 / len(preds))) ** 0.5) ids = test['id'] test = test.drop(['id'], axis=1) y = train['Demanda_uni_equil'] X = train[test.columns.values] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1729) params = {} params['objective'] = 'reg:linear' params['eta'] = 0.02 params['max_depth'] = 5 params['subsample'] = 0.8 params['colsample_bytree'] = 0.6 params['silent'] = True params['booster'] = 'gbtree' test_preds = np.zeros(test.shape[0]) xg_train = xgb.DMatrix(X_train, label=y_train) xg_test = xgb.DMatrix(X_test) watchlist = [(xg_train, 'train')] chunksize = 2500000 num_rounds = 70 for train in pd.read_csv(train_filename, chunksize=chunksize, iterator=True, dtype=dtype, warn_bad_lines=True, engine='c'): y = train['Demanda_uni_equil'] X = train[test.columns.values] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=1729) test_preds = np.zeros(test.shape[0]) xg_train = xgb.DMatrix(X_train, label=y_train) xg_test = xgb.DMatrix(X_test) watchlist = [(xg_train, 'train')] xgclassifier = xgb.train(params, xg_train, num_rounds, watchlist, feval=evalerror, early_stopping_rounds=30, verbose_eval=5, xgb_model=xgclassifier)
code
333270/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.cross_validation import train_test_split print('Training_Shape:', train.shape) ids = test['id'] test = test.drop(['id'], axis=1) y = train['Demanda_uni_equil'] X = train[test.columns.values] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1729) print('Division_Set_Shapes:', X.shape, y.shape) print('Validation_Set_Shapes:', X_train.shape, X_test.shape)
code
333270/cell_16
[ "text_plain_output_5.png", "text_plain_output_4.png", "text_plain_output_6.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from ml_metrics import rmsle from sklearn.cross_validation import train_test_split import math import numpy as np import pandas as pd import xgboost as xgb def evalerror(preds, dtrain): labels = dtrain.get_label() assert len(preds) == len(labels) labels = labels.tolist() preds = preds.tolist() terms_to_sum = [(math.log(labels[i] + 1) - math.log(max(0, preds[i]) + 1)) ** 2.0 for i, pred in enumerate(labels)] return ('error', (sum(terms_to_sum) * (1.0 / len(preds))) ** 0.5) ids = test['id'] test = test.drop(['id'], axis=1) y = train['Demanda_uni_equil'] X = train[test.columns.values] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1729) params = {} params['objective'] = 'reg:linear' params['eta'] = 0.02 params['max_depth'] = 5 params['subsample'] = 0.8 params['colsample_bytree'] = 0.6 params['silent'] = True params['booster'] = 'gbtree' test_preds = np.zeros(test.shape[0]) xg_train = xgb.DMatrix(X_train, label=y_train) xg_test = xgb.DMatrix(X_test) watchlist = [(xg_train, 'train')] chunksize = 2500000 num_rounds = 70 for train in pd.read_csv(train_filename, chunksize=chunksize, iterator=True, dtype=dtype, warn_bad_lines=True, engine='c'): y = train['Demanda_uni_equil'] X = train[test.columns.values] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=1729) test_preds = np.zeros(test.shape[0]) xg_train = xgb.DMatrix(X_train, label=y_train) xg_test = xgb.DMatrix(X_test) watchlist = [(xg_train, 'train')] xgclassifier = xgb.train(params, xg_train, num_rounds, watchlist, feval=evalerror, early_stopping_rounds=30, verbose_eval=5, xgb_model=xgclassifier) preds = xgclassifier.predict(xg_test, ntree_limit=xgclassifier.best_iteration) print('RMSLE Score:', rmsle(y_test, preds)) del preds del y_test
code
333270/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
num_rounds = 100
code
333270/cell_12
[ "text_html_output_1.png" ]
from sklearn.cross_validation import train_test_split import math import numpy as np import pandas as pd import xgboost as xgb def evalerror(preds, dtrain): labels = dtrain.get_label() assert len(preds) == len(labels) labels = labels.tolist() preds = preds.tolist() terms_to_sum = [(math.log(labels[i] + 1) - math.log(max(0, preds[i]) + 1)) ** 2.0 for i, pred in enumerate(labels)] return ('error', (sum(terms_to_sum) * (1.0 / len(preds))) ** 0.5) ids = test['id'] test = test.drop(['id'], axis=1) y = train['Demanda_uni_equil'] X = train[test.columns.values] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1729) params = {} params['objective'] = 'reg:linear' params['eta'] = 0.02 params['max_depth'] = 5 params['subsample'] = 0.8 params['colsample_bytree'] = 0.6 params['silent'] = True params['booster'] = 'gbtree' test_preds = np.zeros(test.shape[0]) xg_train = xgb.DMatrix(X_train, label=y_train) xg_test = xgb.DMatrix(X_test) watchlist = [(xg_train, 'train')] chunksize = 2500000 num_rounds = 70 for train in pd.read_csv(train_filename, chunksize=chunksize, iterator=True, dtype=dtype, warn_bad_lines=True, engine='c'): y = train['Demanda_uni_equil'] X = train[test.columns.values] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=1729) test_preds = np.zeros(test.shape[0]) xg_train = xgb.DMatrix(X_train, label=y_train) xg_test = xgb.DMatrix(X_test) watchlist = [(xg_train, 'train')] xgclassifier = xgb.train(params, xg_train, num_rounds, watchlist, feval=evalerror, early_stopping_rounds=30, verbose_eval=5, xgb_model=xgclassifier) xgb.plot_importance(xgclassifier)
code
333270/cell_5
[ "text_plain_output_5.png", "text_plain_output_15.png", "text_plain_output_9.png", "text_plain_output_20.png", "text_plain_output_4.png", "text_plain_output_13.png", "text_plain_output_14.png", "text_plain_output_27.png", "text_plain_output_10.png", "text_plain_output_6.png", "text_plain_output_24.png", "text_plain_output_21.png", "text_plain_output_25.png", "text_plain_output_18.png", "text_plain_output_3.png", "text_plain_output_22.png", "text_plain_output_7.png", "text_plain_output_16.png", "text_plain_output_8.png", "text_plain_output_26.png", "text_plain_output_23.png", "text_plain_output_2.png", "text_plain_output_1.png", "text_plain_output_19.png", "text_plain_output_17.png", "text_plain_output_11.png", "text_plain_output_12.png" ]
print('Loading Test...') dtype_test = {'id': np.uint16, 'Semana': np.uint8, 'Agencia_ID': np.uint16, 'Canal_ID': np.uint8, 'Ruta_SAK': np.uint16, 'Cliente_ID': np.uint32, 'Producto_ID': np.uint16} test.head()
code
73069993/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd from sklearn.model_selection import train_test_split X_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv') X_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv') X_test.isnull().sum()
code
73069993/cell_2
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd from sklearn.model_selection import train_test_split X_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv') X_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv') X_train.head()
code
73069993/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
73069993/cell_8
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd from sklearn.model_selection import train_test_split X_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv') X_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv') X_train.isnull().sum() y = X_train['target'] features = X_train.drop(['target'], axis=1, inplace=True) X_num = X_train.select_dtypes(include=['float64']) X_categorical = X_train.select_dtypes(include=['object']) X_num.corr()
code
73069993/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import pandas as pd from sklearn.model_selection import train_test_split X_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv') X_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv') X_train.isnull().sum()
code
1008301/cell_4
[ "text_plain_output_1.png" ]
import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) labels = pd.read_csv(base + 'Labels.csv', usecols=['Label', 'FileName']) labels['IsBlue'] = labels.Label.str.contains('blue') labels['Num'] = labels.Label.str.split(' ').str[1].astype(int) files = [i for i in sorted(os.listdir(base)) if 'Labels' not in i] fl = pd.read_csv(base + files[0]) fl.info()
code
1008301/cell_2
[ "text_plain_output_1.png" ]
from subprocess import check_output import os import numpy as np import pandas as pd import seaborn as sns from subprocess import check_output print(check_output(['ls', '../input']).decode('utf8')) base = '../input/MultiSpectralImages/'
code
1008301/cell_3
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) labels = pd.read_csv(base + 'Labels.csv', usecols=['Label', 'FileName']) labels['IsBlue'] = labels.Label.str.contains('blue') labels['Num'] = labels.Label.str.split(' ').str[1].astype(int) labels.head()
code
1008301/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) labels = pd.read_csv(base + 'Labels.csv', usecols=['Label', 'FileName']) labels['IsBlue'] = labels.Label.str.contains('blue') labels['Num'] = labels.Label.str.split(' ').str[1].astype(int) files = [i for i in sorted(os.listdir(base)) if 'Labels' not in i] fl = pd.read_csv(base + files[0]) temp = fl[['X', 'Y', 'Channel0']] temp.head()
code
73093411/cell_21
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tensorflow as tf data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0) data.shape data = data[~data['Review Text'].isnull()] data.shape tf.__version__ labels = tf.keras.utils.to_categorical(data['Recommended IND']) output_shape = labels.shape[1] (labels, output_shape) v_size = 4000 max_len = 100 e_dim = 64 batch_size = 256 pre_processing_layer = TextVectorization(max_tokens=v_size, output_sequence_length=max_len, name='Notes_preprocessing_layer') pre_processing_layer.adapt(X_train) vocab = pre_processing_layer.get_vocabulary() model = tf.keras.models.Sequential([tf.keras.layers.Input(shape=(1,), dtype=tf.string, name='text_input'), pre_processing_layer, tf.keras.layers.Embedding(input_dim=len(vocab), output_dim=e_dim), tf.keras.layers.Masking(mask_value=0), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)), tf.keras.layers.Dense(16, activation='relu'), tf.keras.layers.Dense(output_shape, activation='softmax')]) metrics = [tf.keras.metrics.CategoricalAccuracy()] model.summary() model.compile(optimizer='adam', loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False), metrics=metrics) train_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train)) valid_dataset = tf.data.Dataset.from_tensor_slices((X_val, y_val)) options = tf.data.Options() options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA train_dataset = train_dataset.shuffle(X_train.shape[0]).batch(batch_size).with_options(options) valid_dataset = valid_dataset.batch(batch_size).with_options(options) model.fit(train_dataset, validation_data=valid_dataset, epochs=5, verbose=1) val_probs = model.predict(valid_dataset) val_preds = tf.argmax(val_probs, axis=1) val_labels = tf.argmax(y_val, axis=1) cm = tf.math.confusion_matrix(val_labels, val_preds)
code
73093411/cell_13
[ "text_plain_output_1.png" ]
print(X_train.shape, y_train.shape) print(X_val.shape, y_val.shape)
code
73093411/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0) data.shape data = data[~data['Review Text'].isnull()] data.shape data['Recommended IND']
code
73093411/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0) data.shape data = data[~data['Review Text'].isnull()] data.shape
code
73093411/cell_20
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tensorflow as tf data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0) data.shape data = data[~data['Review Text'].isnull()] data.shape tf.__version__ labels = tf.keras.utils.to_categorical(data['Recommended IND']) output_shape = labels.shape[1] (labels, output_shape) v_size = 4000 max_len = 100 e_dim = 64 batch_size = 256 pre_processing_layer = TextVectorization(max_tokens=v_size, output_sequence_length=max_len, name='Notes_preprocessing_layer') pre_processing_layer.adapt(X_train) vocab = pre_processing_layer.get_vocabulary() model = tf.keras.models.Sequential([tf.keras.layers.Input(shape=(1,), dtype=tf.string, name='text_input'), pre_processing_layer, tf.keras.layers.Embedding(input_dim=len(vocab), output_dim=e_dim), tf.keras.layers.Masking(mask_value=0), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)), tf.keras.layers.Dense(16, activation='relu'), tf.keras.layers.Dense(output_shape, activation='softmax')]) metrics = [tf.keras.metrics.CategoricalAccuracy()] model.summary() model.compile(optimizer='adam', loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False), metrics=metrics) train_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train)) valid_dataset = tf.data.Dataset.from_tensor_slices((X_val, y_val)) options = tf.data.Options() options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA train_dataset = train_dataset.shuffle(X_train.shape[0]).batch(batch_size).with_options(options) valid_dataset = valid_dataset.batch(batch_size).with_options(options) model.fit(train_dataset, validation_data=valid_dataset, epochs=5, verbose=1)
code
73093411/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0) data.shape data = data[~data['Review Text'].isnull()] data.shape data['Review Text'].str.split().apply(lambda x: len(x)).describe()
code
73093411/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0) data.head()
code
73093411/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0) data.shape data = data[~data['Review Text'].isnull()] data.shape X = data['Review Text'].values X
code
73093411/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
73093411/cell_18
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tensorflow as tf data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0) data.shape data = data[~data['Review Text'].isnull()] data.shape tf.__version__ labels = tf.keras.utils.to_categorical(data['Recommended IND']) output_shape = labels.shape[1] (labels, output_shape) v_size = 4000 max_len = 100 e_dim = 64 batch_size = 256 pre_processing_layer = TextVectorization(max_tokens=v_size, output_sequence_length=max_len, name='Notes_preprocessing_layer') pre_processing_layer.adapt(X_train) vocab = pre_processing_layer.get_vocabulary() model = tf.keras.models.Sequential([tf.keras.layers.Input(shape=(1,), dtype=tf.string, name='text_input'), pre_processing_layer, tf.keras.layers.Embedding(input_dim=len(vocab), output_dim=e_dim), tf.keras.layers.Masking(mask_value=0), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)), tf.keras.layers.Dense(16, activation='relu'), tf.keras.layers.Dense(output_shape, activation='softmax')]) metrics = [tf.keras.metrics.CategoricalAccuracy()] model.summary() model.compile(optimizer='adam', loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False), metrics=metrics) print('Ready to Train')
code
73093411/cell_8
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import tensorflow as tf tf.__version__
code
73093411/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0) data.shape
code
73093411/cell_17
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tensorflow as tf data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0) data.shape data = data[~data['Review Text'].isnull()] data.shape tf.__version__ labels = tf.keras.utils.to_categorical(data['Recommended IND']) output_shape = labels.shape[1] (labels, output_shape) v_size = 4000 max_len = 100 e_dim = 64 batch_size = 256 pre_processing_layer = TextVectorization(max_tokens=v_size, output_sequence_length=max_len, name='Notes_preprocessing_layer') pre_processing_layer.adapt(X_train) vocab = pre_processing_layer.get_vocabulary() model = tf.keras.models.Sequential([tf.keras.layers.Input(shape=(1,), dtype=tf.string, name='text_input'), pre_processing_layer, tf.keras.layers.Embedding(input_dim=len(vocab), output_dim=e_dim), tf.keras.layers.Masking(mask_value=0), tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)), tf.keras.layers.Dense(16, activation='relu'), tf.keras.layers.Dense(output_shape, activation='softmax')]) metrics = [tf.keras.metrics.CategoricalAccuracy()] model.summary()
code
73093411/cell_10
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tensorflow as tf data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0) data.shape data = data[~data['Review Text'].isnull()] data.shape tf.__version__ labels = tf.keras.utils.to_categorical(data['Recommended IND']) output_shape = labels.shape[1] (labels, output_shape)
code
73093411/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/womens-ecommerce-clothing-reviews/Womens Clothing E-Commerce Reviews.csv', index_col=0) data.shape data = data[~data['Review Text'].isnull()] data.shape data['Recommended IND'].isnull().sum()
code
130022433/cell_9
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from datetime import datetime from scipy import stats from scipy.stats import skew, boxcox_normmax, norm import matplotlib.gridspec as gridspec from matplotlib.ticker import MaxNLocator import warnings pd.options.display.max_columns = 250 pd.options.display.max_rows = 250 warnings.filterwarnings('ignore') plt.style.use('fivethirtyeight') import os train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') test.describe().T
code
130022433/cell_6
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from datetime import datetime from scipy import stats from scipy.stats import skew, boxcox_normmax, norm import matplotlib.gridspec as gridspec from matplotlib.ticker import MaxNLocator import warnings pd.options.display.max_columns = 250 pd.options.display.max_rows = 250 warnings.filterwarnings('ignore') plt.style.use('fivethirtyeight') import os train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') train.head(3)
code
130022433/cell_2
[ "text_plain_output_1.png" ]
!pip install --upgrade scikit-learn # Did this to use latest regressors from sklearn...
code
130022433/cell_7
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from datetime import datetime from scipy import stats from scipy.stats import skew, boxcox_normmax, norm import matplotlib.gridspec as gridspec from matplotlib.ticker import MaxNLocator import warnings pd.options.display.max_columns = 250 pd.options.display.max_rows = 250 warnings.filterwarnings('ignore') plt.style.use('fivethirtyeight') import os train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') test.head(3)
code
130022433/cell_8
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from datetime import datetime from scipy import stats from scipy.stats import skew, boxcox_normmax, norm import matplotlib.gridspec as gridspec from matplotlib.ticker import MaxNLocator import warnings pd.options.display.max_columns = 250 pd.options.display.max_rows = 250 warnings.filterwarnings('ignore') plt.style.use('fivethirtyeight') import os train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') train.describe().T
code
130022433/cell_5
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from datetime import datetime from scipy import stats from scipy.stats import skew, boxcox_normmax, norm import matplotlib.gridspec as gridspec from matplotlib.ticker import MaxNLocator import warnings pd.options.display.max_columns = 250 pd.options.display.max_rows = 250 warnings.filterwarnings('ignore') plt.style.use('fivethirtyeight') import os train = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv') test = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv') print(train.shape) print(test.shape)
code
72071082/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv') data.isnull().sum() data['price'] = data['price'].replace('?', np.NaN) data['normalized-losses'] = data['normalized-losses'].replace('?', np.NaN) data['num-of-doors'] = data['num-of-doors'].replace('?', np.NaN) data['stroke'] = data['stroke'].replace('?', np.NaN) data['horsepower'] = data['horsepower'].replace('?', np.NaN) data['peak-rpm'] = data['peak-rpm'].replace('?', np.NaN) data['bore'] = data['bore'].replace('?', np.NaN) data['num-of-doors'] = data['num-of-doors'].replace('?', np.NaN) data = data[data['price'].notna()] data.select_dtypes(['object']).columns numerical_cols = data.select_dtypes(['int32', 'int64', 'float']).columns numerical_cols corr_matrix = data[numerical_cols].corr() plt.figure(figsize=(10, 10)) sns.heatmap(corr_matrix['price'].sort_values(ascending=False).to_frame()[1:], square=True, annot=True) plt.show()
code
72071082/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv') data.isnull().sum() data['price'] = data['price'].replace('?', np.NaN) data['normalized-losses'] = data['normalized-losses'].replace('?', np.NaN) data['num-of-doors'] = data['num-of-doors'].replace('?', np.NaN) data['stroke'] = data['stroke'].replace('?', np.NaN) data['horsepower'] = data['horsepower'].replace('?', np.NaN) data['peak-rpm'] = data['peak-rpm'].replace('?', np.NaN) data['bore'] = data['bore'].replace('?', np.NaN) data['num-of-doors'] = data['num-of-doors'].replace('?', np.NaN) data = data[data['price'].notna()] data.select_dtypes(['object']).columns numerical_cols = data.select_dtypes(['int32', 'int64', 'float']).columns numerical_cols corr_matrix = data[numerical_cols].corr() plt.figure(figsize=(8, 5)) sns.scatterplot(data=data, x=data['engine-size'], y=data['price']) plt.xlabel('Engine Size', fontsize=12) plt.ylabel('Price', fontsize=12) plt.title('Engine size vs Price', weight='bold', fontsize=12) plt.show() plt.figure(figsize=(8, 5)) sns.scatterplot(data=data, x='curb-weight', y='price') plt.xlabel('Curb weight', fontsize=12) plt.ylabel('Price', fontsize=12) plt.title('Curb weight vs Price', weight='bold', fontsize=12) plt.show() plt.figure(figsize=(8, 5)) sns.scatterplot(data=data, x='horsepower', y='price') plt.xlabel('Horsepower', fontsize=12) plt.ylabel('Price', fontsize=12) plt.title('HorsePower vs Price', weight='bold', fontsize=12) plt.show()
code
72071082/cell_20
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.isnull().sum()
data['price'] = data['price'].replace('?', np.NaN)
data['normalized-losses'] = data['normalized-losses'].replace('?', np.NaN)
data['num-of-doors'] = data['num-of-doors'].replace('?', np.NaN)
data['stroke'] = data['stroke'].replace('?', np.NaN)
data['horsepower'] = data['horsepower'].replace('?', np.NaN)
data['peak-rpm'] = data['peak-rpm'].replace('?', np.NaN)
data['bore'] = data['bore'].replace('?', np.NaN)
data['num-of-doors'] = data['num-of-doors'].replace('?', np.NaN)
data = data[data['price'].notna()]
data.select_dtypes(['object']).columns
numerical_cols = data.select_dtypes(['int32', 'int64', 'float']).columns
numerical_cols
code
72071082/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.head()
code
72071082/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72071082/cell_28
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.isnull().sum()
data['price'] = data['price'].replace('?', np.NaN)
data['normalized-losses'] = data['normalized-losses'].replace('?', np.NaN)
data['num-of-doors'] = data['num-of-doors'].replace('?', np.NaN)
data['stroke'] = data['stroke'].replace('?', np.NaN)
data['horsepower'] = data['horsepower'].replace('?', np.NaN)
data['peak-rpm'] = data['peak-rpm'].replace('?', np.NaN)
data['bore'] = data['bore'].replace('?', np.NaN)
data['num-of-doors'] = data['num-of-doors'].replace('?', np.NaN)
data = data[data['price'].notna()]
data.select_dtypes(['object']).columns
numerical_cols = data.select_dtypes(['int32', 'int64', 'float']).columns
numerical_cols
categorical_cols = data.select_dtypes(['object']).columns
categorical_cols
print(f'Unique value counts of fuel-type are: ', '\n', data['fuel-type'].value_counts())
print()
print(f'Unique value counts of aspiration are', '\n', data['aspiration'].value_counts())
print()
print(f'Unique value counts of num-of-doors are', '\n', data['num-of-doors'].value_counts())
print()
print(f'Unique value counts of engine-location are', '\n', data['engine-location'].value_counts())
code
72071082/cell_8
[ "image_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.isnull().sum()
data['price'] = data['price'].replace('?', np.NaN)
data['normalized-losses'] = data['normalized-losses'].replace('?', np.NaN)
data['num-of-doors'] = data['num-of-doors'].replace('?', np.NaN)
data['stroke'] = data['stroke'].replace('?', np.NaN)
data['horsepower'] = data['horsepower'].replace('?', np.NaN)
data['peak-rpm'] = data['peak-rpm'].replace('?', np.NaN)
data['bore'] = data['bore'].replace('?', np.NaN)
data['num-of-doors'] = data['num-of-doors'].replace('?', np.NaN)
data = data[data['price'].notna()]
data.info()
code
72071082/cell_3
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.isnull().sum()
code
72071082/cell_24
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.isnull().sum()
data['price'] = data['price'].replace('?', np.NaN)
data['normalized-losses'] = data['normalized-losses'].replace('?', np.NaN)
data['num-of-doors'] = data['num-of-doors'].replace('?', np.NaN)
data['stroke'] = data['stroke'].replace('?', np.NaN)
data['horsepower'] = data['horsepower'].replace('?', np.NaN)
data['peak-rpm'] = data['peak-rpm'].replace('?', np.NaN)
data['bore'] = data['bore'].replace('?', np.NaN)
data['num-of-doors'] = data['num-of-doors'].replace('?', np.NaN)
data = data[data['price'].notna()]
data.select_dtypes(['object']).columns
numerical_cols = data.select_dtypes(['int32', 'int64', 'float']).columns
numerical_cols
corr_matrix = data[numerical_cols].corr()
plt.scatter(data['symboling'], data['price'])
plt.xlabel('Symboling')
plt.ylabel('Price')
plt.title('Symboling vs Price', weight='bold', size=12)
plt.show()
code
72071082/cell_14
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.isnull().sum()
data['price'] = data['price'].replace('?', np.NaN)
data['normalized-losses'] = data['normalized-losses'].replace('?', np.NaN)
data['num-of-doors'] = data['num-of-doors'].replace('?', np.NaN)
data['stroke'] = data['stroke'].replace('?', np.NaN)
data['horsepower'] = data['horsepower'].replace('?', np.NaN)
data['peak-rpm'] = data['peak-rpm'].replace('?', np.NaN)
data['bore'] = data['bore'].replace('?', np.NaN)
data['num-of-doors'] = data['num-of-doors'].replace('?', np.NaN)
data = data[data['price'].notna()]
data.select_dtypes(['object']).columns
data['num-of-doors'].value_counts()
code
72071082/cell_10
[ "text_html_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.isnull().sum()
data['price'] = data['price'].replace('?', np.NaN)
data['normalized-losses'] = data['normalized-losses'].replace('?', np.NaN)
data['num-of-doors'] = data['num-of-doors'].replace('?', np.NaN)
data['stroke'] = data['stroke'].replace('?', np.NaN)
data['horsepower'] = data['horsepower'].replace('?', np.NaN)
data['peak-rpm'] = data['peak-rpm'].replace('?', np.NaN)
data['bore'] = data['bore'].replace('?', np.NaN)
data['num-of-doors'] = data['num-of-doors'].replace('?', np.NaN)
data = data[data['price'].notna()]
data.select_dtypes(['object']).columns
code
72071082/cell_27
[ "image_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.isnull().sum()
data['price'] = data['price'].replace('?', np.NaN)
data['normalized-losses'] = data['normalized-losses'].replace('?', np.NaN)
data['num-of-doors'] = data['num-of-doors'].replace('?', np.NaN)
data['stroke'] = data['stroke'].replace('?', np.NaN)
data['horsepower'] = data['horsepower'].replace('?', np.NaN)
data['peak-rpm'] = data['peak-rpm'].replace('?', np.NaN)
data['bore'] = data['bore'].replace('?', np.NaN)
data['num-of-doors'] = data['num-of-doors'].replace('?', np.NaN)
data = data[data['price'].notna()]
data.select_dtypes(['object']).columns
numerical_cols = data.select_dtypes(['int32', 'int64', 'float']).columns
numerical_cols
categorical_cols = data.select_dtypes(['object']).columns
categorical_cols
code
72071082/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/automobile-dataset/Automobile_data.csv')
data.isnull().sum()
print('Number of ? in columns are')
for col in data.columns:
    if len(data[data[col] == '?']) > 0:
        print(col, 'has ->', len(data[data[col] == '?']))
code
32068084/cell_42
[ "text_plain_output_1.png" ]
from lightgbm import LGBMClassifier
from sklearn import decomposition
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import RidgeClassifierCV
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
import pandas as pd
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
X_test_final.shape
sc = ss()
X_train = sc.fit_transform(X_train)
X_valid = sc.transform(X_valid)
X_test = sc.transform(X_test_final)
pca = decomposition.PCA(0.95)
pca.fit(X_train)
pca.n_components_
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
X_valid_pca = pca.transform(X_valid)
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, y_train)
y_pred = decision_tree.predict(X_valid)
acc_decision_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_decision_tree
extra_tree = DecisionTreeClassifier()
extra_tree.fit(X_train, y_train)
y_pred = extra_tree.predict(X_valid)
acc_extra_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_extra_tree
rfc = RandomForestClassifier(criterion='entropy', n_estimators=1000, min_samples_split=8, random_state=42, verbose=5)
rfc.fit(X_train, y_train)
y_pred = rfc.predict(X_valid)
acc_rfc = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_rfc
GB = GradientBoostingClassifier(n_estimators=100, learning_rate=0.075, max_depth=13, max_features=0.5, min_samples_leaf=14, verbose=5)
GB.fit(X_train, y_train)
y_pred = GB.predict(X_valid)
acc_GB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_GB
HGB = HistGradientBoostingClassifier(learning_rate=0.075, loss='categorical_crossentropy', max_depth=8, min_samples_leaf=15)
HGB = HGB.fit(X_train_pca, y_train)
y_pred = HGB.predict(X_valid_pca)
acc_HGB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_HGB
LGB = LGBMClassifier(objective='multiclass', learning_rate=0.75, num_iterations=100, num_leaves=50, random_state=123, max_depth=8)
LGB.fit(X_train, y_train)
y_pred = LGB.predict(X_valid)
acc_LGB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_LGB
AB = AdaBoostClassifier(n_estimators=100, learning_rate=0.075)
AB.fit(X_train, y_train)
y_pred = AB.predict(X_valid)
acc_AB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_AB
BC = BaggingClassifier(n_estimators=100)
BC.fit(X_train_pca, y_train)
y_pred = BC.predict(X_valid_pca)
acc_BC = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_BC
xgb = XGBClassifier(n_estimators=1000, learning_rate=0.05, n_jobs=5)
xgb.fit(X_train, y_train, early_stopping_rounds=5, eval_set=[(X_valid, y_valid)], verbose=False)
y_pred = xgb.predict(X_valid)
acc_xgb = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_xgb
ETC = ExtraTreesClassifier(n_estimators=100)
ETC.fit(X_train, y_train)
y_pred = ETC.predict(X_valid)
acc_ETC = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_ETC
LG = LogisticRegression(solver='lbfgs', multi_class='multinomial')
LG.fit(X_train, y_train)
y_pred = LG.predict(X_valid)
acc_LG = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_LG
PAC = PassiveAggressiveClassifier()
PAC.fit(X_train, y_train)
y_pred = PAC.predict(X_valid)
acc_PAC = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_PAC
RC = RidgeClassifierCV()
RC.fit(X_train, y_train)
y_pred = RC.predict(X_valid)
acc_RC = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_RC
P = Perceptron()
P.fit(X_train, y_train)
y_pred = P.predict(X_valid)
acc_P = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_P
code
32068084/cell_21
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
X_test_final.shape
sc = ss()
X_train = sc.fit_transform(X_train)
X_valid = sc.transform(X_valid)
X_test = sc.transform(X_test_final)
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, y_train)
y_pred = decision_tree.predict(X_valid)
acc_decision_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_decision_tree
extra_tree = DecisionTreeClassifier()
extra_tree.fit(X_train, y_train)
y_pred = extra_tree.predict(X_valid)
acc_extra_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_extra_tree
code
32068084/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
train_df_final.shape
X = train_df_final.drop('label', axis=1)
y = train_df_final['label']
X.isnull().values.any()
code
32068084/cell_25
[ "text_plain_output_1.png" ]
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
X_test_final.shape
sc = ss()
X_train = sc.fit_transform(X_train)
X_valid = sc.transform(X_valid)
X_test = sc.transform(X_test_final)
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, y_train)
y_pred = decision_tree.predict(X_valid)
acc_decision_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_decision_tree
extra_tree = DecisionTreeClassifier()
extra_tree.fit(X_train, y_train)
y_pred = extra_tree.predict(X_valid)
acc_extra_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_extra_tree
rfc = RandomForestClassifier(criterion='entropy', n_estimators=1000, min_samples_split=8, random_state=42, verbose=5)
rfc.fit(X_train, y_train)
y_pred = rfc.predict(X_valid)
acc_rfc = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_rfc
GB = GradientBoostingClassifier(n_estimators=100, learning_rate=0.075, max_depth=13, max_features=0.5, min_samples_leaf=14, verbose=5)
GB.fit(X_train, y_train)
y_pred = GB.predict(X_valid)
acc_GB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_GB
code
32068084/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
X_test_final.shape
code
32068084/cell_23
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
X_test_final.shape
sc = ss()
X_train = sc.fit_transform(X_train)
X_valid = sc.transform(X_valid)
X_test = sc.transform(X_test_final)
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, y_train)
y_pred = decision_tree.predict(X_valid)
acc_decision_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_decision_tree
extra_tree = DecisionTreeClassifier()
extra_tree.fit(X_train, y_train)
y_pred = extra_tree.predict(X_valid)
acc_extra_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_extra_tree
rfc = RandomForestClassifier(criterion='entropy', n_estimators=1000, min_samples_split=8, random_state=42, verbose=5)
rfc.fit(X_train, y_train)
y_pred = rfc.predict(X_valid)
acc_rfc = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_rfc
code
32068084/cell_30
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from lightgbm import LGBMClassifier
from sklearn import decomposition
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
X_test_final.shape
sc = ss()
X_train = sc.fit_transform(X_train)
X_valid = sc.transform(X_valid)
X_test = sc.transform(X_test_final)
pca = decomposition.PCA(0.95)
pca.fit(X_train)
pca.n_components_
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
X_valid_pca = pca.transform(X_valid)
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, y_train)
y_pred = decision_tree.predict(X_valid)
acc_decision_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_decision_tree
extra_tree = DecisionTreeClassifier()
extra_tree.fit(X_train, y_train)
y_pred = extra_tree.predict(X_valid)
acc_extra_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_extra_tree
rfc = RandomForestClassifier(criterion='entropy', n_estimators=1000, min_samples_split=8, random_state=42, verbose=5)
rfc.fit(X_train, y_train)
y_pred = rfc.predict(X_valid)
acc_rfc = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_rfc
GB = GradientBoostingClassifier(n_estimators=100, learning_rate=0.075, max_depth=13, max_features=0.5, min_samples_leaf=14, verbose=5)
GB.fit(X_train, y_train)
y_pred = GB.predict(X_valid)
acc_GB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_GB
HGB = HistGradientBoostingClassifier(learning_rate=0.075, loss='categorical_crossentropy', max_depth=8, min_samples_leaf=15)
HGB = HGB.fit(X_train_pca, y_train)
y_pred = HGB.predict(X_valid_pca)
acc_HGB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_HGB
LGB = LGBMClassifier(objective='multiclass', learning_rate=0.75, num_iterations=100, num_leaves=50, random_state=123, max_depth=8)
LGB.fit(X_train, y_train)
y_pred = LGB.predict(X_valid)
acc_LGB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_LGB
AB = AdaBoostClassifier(n_estimators=100, learning_rate=0.075)
AB.fit(X_train, y_train)
y_pred = AB.predict(X_valid)
acc_AB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_AB
code
32068084/cell_33
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from lightgbm import LGBMClassifier
from sklearn import decomposition
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
import pandas as pd
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
X_test_final.shape
sc = ss()
X_train = sc.fit_transform(X_train)
X_valid = sc.transform(X_valid)
X_test = sc.transform(X_test_final)
pca = decomposition.PCA(0.95)
pca.fit(X_train)
pca.n_components_
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
X_valid_pca = pca.transform(X_valid)
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, y_train)
y_pred = decision_tree.predict(X_valid)
acc_decision_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_decision_tree
extra_tree = DecisionTreeClassifier()
extra_tree.fit(X_train, y_train)
y_pred = extra_tree.predict(X_valid)
acc_extra_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_extra_tree
rfc = RandomForestClassifier(criterion='entropy', n_estimators=1000, min_samples_split=8, random_state=42, verbose=5)
rfc.fit(X_train, y_train)
y_pred = rfc.predict(X_valid)
acc_rfc = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_rfc
GB = GradientBoostingClassifier(n_estimators=100, learning_rate=0.075, max_depth=13, max_features=0.5, min_samples_leaf=14, verbose=5)
GB.fit(X_train, y_train)
y_pred = GB.predict(X_valid)
acc_GB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_GB
HGB = HistGradientBoostingClassifier(learning_rate=0.075, loss='categorical_crossentropy', max_depth=8, min_samples_leaf=15)
HGB = HGB.fit(X_train_pca, y_train)
y_pred = HGB.predict(X_valid_pca)
acc_HGB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_HGB
LGB = LGBMClassifier(objective='multiclass', learning_rate=0.75, num_iterations=100, num_leaves=50, random_state=123, max_depth=8)
LGB.fit(X_train, y_train)
y_pred = LGB.predict(X_valid)
acc_LGB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_LGB
AB = AdaBoostClassifier(n_estimators=100, learning_rate=0.075)
AB.fit(X_train, y_train)
y_pred = AB.predict(X_valid)
acc_AB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_AB
BC = BaggingClassifier(n_estimators=100)
BC.fit(X_train_pca, y_train)
y_pred = BC.predict(X_valid_pca)
acc_BC = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_BC
xgb = XGBClassifier(n_estimators=1000, learning_rate=0.05, n_jobs=5)
xgb.fit(X_train, y_train, early_stopping_rounds=5, eval_set=[(X_valid, y_valid)], verbose=False)
y_pred = xgb.predict(X_valid)
acc_xgb = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_xgb
code
32068084/cell_20
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
X_test_final.shape
sc = ss()
X_train = sc.fit_transform(X_train)
X_valid = sc.transform(X_valid)
X_test = sc.transform(X_test_final)
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, y_train)
y_pred = decision_tree.predict(X_valid)
acc_decision_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_decision_tree
code
32068084/cell_40
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from lightgbm import LGBMClassifier
from sklearn import decomposition
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
import pandas as pd
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
X_test_final.shape
sc = ss()
X_train = sc.fit_transform(X_train)
X_valid = sc.transform(X_valid)
X_test = sc.transform(X_test_final)
pca = decomposition.PCA(0.95)
pca.fit(X_train)
pca.n_components_
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
X_valid_pca = pca.transform(X_valid)
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, y_train)
y_pred = decision_tree.predict(X_valid)
acc_decision_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_decision_tree
extra_tree = DecisionTreeClassifier()
extra_tree.fit(X_train, y_train)
y_pred = extra_tree.predict(X_valid)
acc_extra_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_extra_tree
rfc = RandomForestClassifier(criterion='entropy', n_estimators=1000, min_samples_split=8, random_state=42, verbose=5)
rfc.fit(X_train, y_train)
y_pred = rfc.predict(X_valid)
acc_rfc = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_rfc
GB = GradientBoostingClassifier(n_estimators=100, learning_rate=0.075, max_depth=13, max_features=0.5, min_samples_leaf=14, verbose=5)
GB.fit(X_train, y_train)
y_pred = GB.predict(X_valid)
acc_GB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_GB
HGB = HistGradientBoostingClassifier(learning_rate=0.075, loss='categorical_crossentropy', max_depth=8, min_samples_leaf=15)
HGB = HGB.fit(X_train_pca, y_train)
y_pred = HGB.predict(X_valid_pca)
acc_HGB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_HGB
LGB = LGBMClassifier(objective='multiclass', learning_rate=0.75, num_iterations=100, num_leaves=50, random_state=123, max_depth=8)
LGB.fit(X_train, y_train)
y_pred = LGB.predict(X_valid)
acc_LGB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_LGB
AB = AdaBoostClassifier(n_estimators=100, learning_rate=0.075)
AB.fit(X_train, y_train)
y_pred = AB.predict(X_valid)
acc_AB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_AB
BC = BaggingClassifier(n_estimators=100)
BC.fit(X_train_pca, y_train)
y_pred = BC.predict(X_valid_pca)
acc_BC = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_BC
xgb = XGBClassifier(n_estimators=1000, learning_rate=0.05, n_jobs=5)
xgb.fit(X_train, y_train, early_stopping_rounds=5, eval_set=[(X_valid, y_valid)], verbose=False)
y_pred = xgb.predict(X_valid)
acc_xgb = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_xgb
ETC = ExtraTreesClassifier(n_estimators=100)
ETC.fit(X_train, y_train)
y_pred = ETC.predict(X_valid)
acc_ETC = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_ETC
LG = LogisticRegression(solver='lbfgs', multi_class='multinomial')
LG.fit(X_train, y_train)
y_pred = LG.predict(X_valid)
acc_LG = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_LG
PAC = PassiveAggressiveClassifier()
PAC.fit(X_train, y_train)
y_pred = PAC.predict(X_valid)
acc_PAC = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_PAC
code
32068084/cell_39
[ "text_plain_output_1.png" ]
from lightgbm import LGBMClassifier
from sklearn import decomposition
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
import pandas as pd
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
X_test_final.shape
train_df_final.shape
X = train_df_final.drop('label', axis=1)
y = train_df_final['label']
sc = ss()
X_train = sc.fit_transform(X_train)
X_valid = sc.transform(X_valid)
X_test = sc.transform(X_test_final)
pca = decomposition.PCA(0.95)
pca.fit(X_train)
pca.n_components_
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
X_valid_pca = pca.transform(X_valid)
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, y_train)
y_pred = decision_tree.predict(X_valid)
acc_decision_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_decision_tree
extra_tree = DecisionTreeClassifier()
extra_tree.fit(X_train, y_train)
y_pred = extra_tree.predict(X_valid)
acc_extra_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_extra_tree
rfc = RandomForestClassifier(criterion='entropy', n_estimators=1000, min_samples_split=8, random_state=42, verbose=5)
rfc.fit(X_train, y_train)
y_pred = rfc.predict(X_valid)
acc_rfc = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_rfc
GB = GradientBoostingClassifier(n_estimators=100, learning_rate=0.075, max_depth=13, max_features=0.5, min_samples_leaf=14, verbose=5)
GB.fit(X_train, y_train)
y_pred = GB.predict(X_valid)
acc_GB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_GB
HGB = HistGradientBoostingClassifier(learning_rate=0.075, loss='categorical_crossentropy', max_depth=8, min_samples_leaf=15)
HGB = HGB.fit(X_train_pca, y_train)
y_pred = HGB.predict(X_valid_pca)
acc_HGB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_HGB
LGB = LGBMClassifier(objective='multiclass', learning_rate=0.75, num_iterations=100, num_leaves=50, random_state=123, max_depth=8)
LGB.fit(X_train, y_train)
y_pred = LGB.predict(X_valid)
acc_LGB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_LGB
AB = AdaBoostClassifier(n_estimators=100, learning_rate=0.075)
AB.fit(X_train, y_train)
y_pred = AB.predict(X_valid)
acc_AB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_AB
BC = BaggingClassifier(n_estimators=100)
BC.fit(X_train_pca, y_train)
y_pred = BC.predict(X_valid_pca)
acc_BC = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_BC
xgb = XGBClassifier(n_estimators=1000, learning_rate=0.05, n_jobs=5)
xgb.fit(X_train, y_train, early_stopping_rounds=5, eval_set=[(X_valid, y_valid)], verbose=False)
y_pred = xgb.predict(X_valid)
acc_xgb = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_xgb
ETC = ExtraTreesClassifier(n_estimators=100)
ETC.fit(X_train, y_train)
y_pred = ETC.predict(X_valid)
acc_ETC = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_ETC
LG = LogisticRegression(solver='lbfgs', multi_class='multinomial')
LG.fit(X_train, y_train)
y_pred = LG.predict(X_valid)
acc_LG = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_LG
coeff_df = pd.DataFrame(train_df_final.columns.delete(0))
coeff_df.columns = ['Feature']
coeff_df['Correlation'] = pd.Series(LG.coef_[0])
coeff_df.sort_values(by='Correlation', ascending=False)
code
32068084/cell_41
[ "text_html_output_1.png" ]
from lightgbm import LGBMClassifier
from sklearn import decomposition
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import RidgeClassifierCV
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
import pandas as pd
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
X_test_final.shape
sc = ss()
X_train = sc.fit_transform(X_train)
X_valid = sc.transform(X_valid)
X_test = sc.transform(X_test_final)
pca = decomposition.PCA(0.95)
pca.fit(X_train)
pca.n_components_
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
X_valid_pca = pca.transform(X_valid)
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, y_train)
y_pred = decision_tree.predict(X_valid)
acc_decision_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_decision_tree
extra_tree = DecisionTreeClassifier()
extra_tree.fit(X_train, y_train)
y_pred = extra_tree.predict(X_valid)
acc_extra_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_extra_tree
rfc = RandomForestClassifier(criterion='entropy', n_estimators=1000, min_samples_split=8, random_state=42, verbose=5)
rfc.fit(X_train, y_train)
y_pred = rfc.predict(X_valid)
acc_rfc = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_rfc
GB = GradientBoostingClassifier(n_estimators=100, learning_rate=0.075, max_depth=13, max_features=0.5, min_samples_leaf=14, verbose=5)
GB.fit(X_train, y_train)
y_pred = GB.predict(X_valid)
acc_GB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_GB
HGB = HistGradientBoostingClassifier(learning_rate=0.075, loss='categorical_crossentropy', max_depth=8, min_samples_leaf=15)
HGB = HGB.fit(X_train_pca, y_train)
y_pred = HGB.predict(X_valid_pca)
acc_HGB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_HGB
LGB = LGBMClassifier(objective='multiclass', learning_rate=0.75, num_iterations=100, num_leaves=50, random_state=123, max_depth=8)
LGB.fit(X_train, y_train)
y_pred = LGB.predict(X_valid)
acc_LGB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_LGB
AB = AdaBoostClassifier(n_estimators=100, learning_rate=0.075)
AB.fit(X_train, y_train)
y_pred = AB.predict(X_valid)
acc_AB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_AB
BC = BaggingClassifier(n_estimators=100)
BC.fit(X_train_pca, y_train)
y_pred = BC.predict(X_valid_pca)
acc_BC = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_BC
xgb = XGBClassifier(n_estimators=1000, learning_rate=0.05, n_jobs=5)
xgb.fit(X_train, y_train, early_stopping_rounds=5, eval_set=[(X_valid, y_valid)], verbose=False)
y_pred = xgb.predict(X_valid)
acc_xgb = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_xgb
ETC = ExtraTreesClassifier(n_estimators=100)
ETC.fit(X_train, y_train)
y_pred = ETC.predict(X_valid)
acc_ETC = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_ETC
LG = LogisticRegression(solver='lbfgs', multi_class='multinomial')
LG.fit(X_train, y_train)
y_pred = LG.predict(X_valid)
acc_LG = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_LG
PAC = PassiveAggressiveClassifier()
PAC.fit(X_train, y_train)
y_pred = PAC.predict(X_valid)
acc_PAC = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_PAC
RC = RidgeClassifierCV()
RC.fit(X_train, y_train)
y_pred = RC.predict(X_valid)
acc_RC = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_RC
code
32068084/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler as ss
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
pd.set_option('display.max_columns', None)
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import ExtraTreeClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from lightgbm import LGBMClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
import xgboost
from xgboost import XGBClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import RidgeClassifierCV
from sklearn.linear_model import Perceptron
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.svm import NuSVC
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn import decomposition
print('Setup Complete')
code