path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value) |
---|---|---|---|
50222118/cell_24 | [
"image_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from tqdm import tqdm
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def create_mean_std_df(df):
means = df.mean()
stds = df.std()
ret_df = pd.concat([means, stds], axis=1).reset_index()
ret_df.columns = ['index', 'mean', 'std']
ret_df['pref'] = ret_df['index'].apply(lambda x: x.split('-')[0])
ret_df['post'] = ret_df['index'].apply(lambda x: x.split('-')[1])
return ret_df
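# Helper scatter plot: colours points by the 'color' Series; when x is omitted, each
# colour group is spread over a synthetic consecutive index range (assumes two groups).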
def scatter(y, x=None, color= None):
fig, ax = plt.subplots()
un_c = color.unique()
for i, col in enumerate(un_c):
y_i = y[color == col]
        if not isinstance(x, pd.Series):
x_i = range(0 + i * (y.shape[0] - y_i.shape[0]),
(1 - i)*y_i.shape[0] + i * y.shape[0])
else:
x_i = x[color == col]
ax.scatter(x_i, y_i, c=col, label=col,
alpha=0.3)
ax.set_ylabel(y.name)
ax.legend()
ax.grid(True)
plt.show()
tr = create_mean_std_df(train_features.iloc[:, 4:])
corrMatrix = train_features.corr()
def mul_lab_logreg(test, train_X, train_y):
sub = pd.DataFrame(test['sig_id'])
col = train_y.columns.drop('sig_id')
train_X.set_index('sig_id', inplace=True)
df = pd.concat([train_X.iloc[:, 0], train_y.set_index('sig_id')], axis=1)
for c in tqdm(col):
y = df.loc[:, c]
        # class_weight must be None, 'balanced', or a dict; 'balanced' up-weights the rare positive class
        clf = LogisticRegression(random_state=0, class_weight='balanced', n_jobs=6)
        clf.fit(train_X, y)
sub[c] = clf.predict_proba(test.drop('sig_id', axis=1)).T[1]
return sub
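# Baseline: fit one multi-output random forest on all scored target columns at once
# (scikit-learn tree ensembles accept a 2-D y for multi-label classification).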
clf = RandomForestClassifier(n_estimators=15, criterion='entropy', max_depth=15, max_samples=150, max_features=0.3, verbose=1, n_jobs=-1, random_state=1998, ccp_alpha=0.0)
clf.fit(train_features.set_index('sig_id'), train_targets_scored.set_index('sig_id')) | code |
50222118/cell_14 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def create_mean_std_df(df):
means = df.mean()
stds = df.std()
ret_df = pd.concat([means, stds], axis=1).reset_index()
ret_df.columns = ['index', 'mean', 'std']
ret_df['pref'] = ret_df['index'].apply(lambda x: x.split('-')[0])
ret_df['post'] = ret_df['index'].apply(lambda x: x.split('-')[1])
return ret_df
def scatter(y, x=None, color= None):
fig, ax = plt.subplots()
un_c = color.unique()
for i, col in enumerate(un_c):
y_i = y[color == col]
        if not isinstance(x, pd.Series):
x_i = range(0 + i * (y.shape[0] - y_i.shape[0]),
(1 - i)*y_i.shape[0] + i * y.shape[0])
else:
x_i = x[color == col]
ax.scatter(x_i, y_i, c=col, label=col,
alpha=0.3)
ax.set_ylabel(y.name)
ax.legend()
ax.grid(True)
plt.show()
tr = create_mean_std_df(train_features.iloc[:, 4:])
corrMatrix = train_features.corr()
plt.imshow(corrMatrix)
plt.show() | code |
50222118/cell_10 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
def create_mean_std_df(df):
means = df.mean()
stds = df.std()
ret_df = pd.concat([means, stds], axis=1).reset_index()
ret_df.columns = ['index', 'mean', 'std']
ret_df['pref'] = ret_df['index'].apply(lambda x: x.split('-')[0])
ret_df['post'] = ret_df['index'].apply(lambda x: x.split('-')[1])
return ret_df
def scatter(y, x=None, color= None):
fig, ax = plt.subplots()
un_c = color.unique()
for i, col in enumerate(un_c):
y_i = y[color == col]
        if not isinstance(x, pd.Series):
x_i = range(0 + i * (y.shape[0] - y_i.shape[0]),
(1 - i)*y_i.shape[0] + i * y.shape[0])
else:
x_i = x[color == col]
ax.scatter(x_i, y_i, c=col, label=col,
alpha=0.3)
ax.set_ylabel(y.name)
ax.legend()
ax.grid(True)
plt.show()
tr = create_mean_std_df(train_features.iloc[:, 4:])
scatter(y=tr['std'], color=tr['pref']) | code |
50222118/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | train_features[['cp_type']].value_counts() | code |
73090447/cell_13 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb
train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv', index_col='Id')
test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv', index_col='Id')
train.shape
trainNum = train.select_dtypes(include=['int64', 'float64'])
corrMat = trainNum.corr()
corrY = corrMat[['SalePrice']].sort_values(by = 'SalePrice')
cmap = sb.diverging_palette(20, 220, n=10)
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 8))
sb.heatmap(corrMat, cmap = cmap, ax = axes[0])
sb.heatmap(corrY, cmap = cmap, ax = axes[1])
sb.boxplot(x='OverallQual', y='SalePrice', data=train) | code |
73090447/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv', index_col='Id')
test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv', index_col='Id')
train.shape
train.hist(column='SalePrice', bins=50) | code |
73090447/cell_11 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb
train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv', index_col='Id')
test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv', index_col='Id')
train.shape
trainNum = train.select_dtypes(include=['int64', 'float64'])
corrMat = trainNum.corr()
corrY = corrMat[['SalePrice']].sort_values(by='SalePrice')
cmap = sb.diverging_palette(20, 220, n=10)
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 8))
sb.heatmap(corrMat, cmap=cmap, ax=axes[0])
sb.heatmap(corrY, cmap=cmap, ax=axes[1]) | code |
73090447/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv', index_col='Id')
test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv', index_col='Id')
train.shape | code |
73090447/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv', index_col='Id')
test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv', index_col='Id')
train.shape
train.head() | code |
73090447/cell_15 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb
train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv', index_col='Id')
test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv', index_col='Id')
train.shape
trainNum = train.select_dtypes(include=['int64', 'float64'])
corrMat = trainNum.corr()
corrY = corrMat[['SalePrice']].sort_values(by = 'SalePrice')
cmap = sb.diverging_palette(20, 220, n=10)
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 8))
sb.heatmap(corrMat, cmap = cmap, ax = axes[0])
sb.heatmap(corrY, cmap = cmap, ax = axes[1])
train = train[~((train.OverallQual == 4) & (train.SalePrice >= 200000)) & ~((train.OverallQual == 8) & (train.SalePrice >= 500000))]
train.shape | code |
73090447/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv', index_col='Id')
test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv', index_col='Id')
train.shape
train[['SalePrice']].describe() | code |
128033200/cell_9 | [
"text_html_output_1.png"
] | from itertools import chain
import os
import pandas as pd
import pandas as pd
DATAFRAME_PATH = '/kaggle/input/productcategorization2/image_taxonomy_and_description.csv'
IMAGE_DIRECTORY_PATH = '/kaggle/input/productcategorization2/new_images/new_images'
images_list = [int(iter.split('.')[0]) for iter in os.listdir(IMAGE_DIRECTORY_PATH)]
df = pd.read_csv(DATAFRAME_PATH)
df1 = df.copy()
df1 = df1[df1['product_id'].isin(images_list)]
df1.reset_index(drop=True, inplace=True)
df1.drop({'long_description', 'short_description'}, axis=1, inplace=True)
unique_labels_having_one_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 0]).to_list()))))
unique_labels_having_two_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 1]).to_list()))))
unique_labels_having_three_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 2]).to_list()))))
df1.shape
df1['three_label_taxonomy'] = df.apply(lambda row: [iter for iter in row['taxonomy'].split('|') if iter.count('>') == 2], axis=1)
for iter in unique_labels_having_three_cat:
df1[iter] = 0
for idx, row in df1.iterrows():
cats = row['three_label_taxonomy']
for it in cats:
df1.loc[idx, it] += 1 | code |
128033200/cell_4 | [
"text_html_output_1.png"
] | from itertools import chain
import os
import pandas as pd
import pandas as pd
DATAFRAME_PATH = '/kaggle/input/productcategorization2/image_taxonomy_and_description.csv'
IMAGE_DIRECTORY_PATH = '/kaggle/input/productcategorization2/new_images/new_images'
images_list = [int(iter.split('.')[0]) for iter in os.listdir(IMAGE_DIRECTORY_PATH)]
df = pd.read_csv(DATAFRAME_PATH)
df1 = df.copy()
df1 = df1[df1['product_id'].isin(images_list)]
df1.reset_index(drop=True, inplace=True)
df1.drop({'long_description', 'short_description'}, axis=1, inplace=True)
unique_labels_having_one_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 0]).to_list()))))
unique_labels_having_two_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 1]).to_list()))))
unique_labels_having_three_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 2]).to_list()))))
df1.shape | code |
128033200/cell_20 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from itertools import chain
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import numpy as np
import numpy as np
import os
import pandas as pd
import pandas as pd
DATAFRAME_PATH = '/kaggle/input/productcategorization2/image_taxonomy_and_description.csv'
IMAGE_DIRECTORY_PATH = '/kaggle/input/productcategorization2/new_images/new_images'
images_list = [int(iter.split('.')[0]) for iter in os.listdir(IMAGE_DIRECTORY_PATH)]
df = pd.read_csv(DATAFRAME_PATH)
df1 = df.copy()
df1 = df1[df1['product_id'].isin(images_list)]
df1.reset_index(drop=True, inplace=True)
df1.drop({'long_description', 'short_description'}, axis=1, inplace=True)
unique_labels_having_one_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 0]).to_list()))))
unique_labels_having_two_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 1]).to_list()))))
unique_labels_having_three_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 2]).to_list()))))
df1.shape
df1['three_label_taxonomy'] = df.apply(lambda row: [iter for iter in row['taxonomy'].split('|') if iter.count('>') == 2], axis=1)
for iter in unique_labels_having_three_cat:
df1[iter] = 0
for idx, row in df1.iterrows():
cats = row['three_label_taxonomy']
for it in cats:
df1.loc[idx, it] += 1
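# Collapse the one-hot label columns into a single 'powerlabel' score per row (each label
# weighted by a rounded power of 1.01), apparently as a compact stand-in for the label combination.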
df1['powerlabel'] = 0
for idx, row in tqdm(df1.iterrows()):
count = 0
for i, iter in enumerate(unique_labels_having_three_cat):
count += np.ceil(1.01 ** (203 - i - 1)) * row[iter]
df1.loc[idx, 'powerlabel'] = count
max = int(df1['powerlabel'].max())
powercount = {}
powerlabels = np.unique(df1['powerlabel'])
for p in powerlabels:
powercount[p] = np.count_nonzero(df1['powerlabel'] == p)
train_inds, val_inds = train_test_split(np.array(list(range(df1.shape[0]))), test_size=0.2, random_state=0)
train_df = df1.loc[train_inds, :].reset_index(drop=True)
train_df.drop({'taxonomy', 'three_label_taxonomy'}, inplace=True, axis=1)
val_df = df1.loc[val_inds, :].reset_index(drop=True)
val_df.drop({'taxonomy', 'three_label_taxonomy'}, inplace=True, axis=1)
train_df[train_df['Footwear>Kids>Sandals'] == 1] | code |
128033200/cell_6 | [
"text_html_output_1.png"
] | from itertools import chain
import os
import pandas as pd
import pandas as pd
DATAFRAME_PATH = '/kaggle/input/productcategorization2/image_taxonomy_and_description.csv'
IMAGE_DIRECTORY_PATH = '/kaggle/input/productcategorization2/new_images/new_images'
images_list = [int(iter.split('.')[0]) for iter in os.listdir(IMAGE_DIRECTORY_PATH)]
df = pd.read_csv(DATAFRAME_PATH)
df1 = df.copy()
df1 = df1[df1['product_id'].isin(images_list)]
df1.reset_index(drop=True, inplace=True)
df1.drop({'long_description', 'short_description'}, axis=1, inplace=True)
unique_labels_having_one_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 0]).to_list()))))
unique_labels_having_two_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 1]).to_list()))))
unique_labels_having_three_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 2]).to_list()))))
df1.shape
df1.head() | code |
128033200/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import numpy as np
import os
from tqdm import tqdm
from itertools import chain
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.keras.backend as K
import shutil, os, time, random, copy
import imageio
import h5py
from scipy.io import loadmat
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, roc_curve, f1_score, precision_recall_curve, confusion_matrix, average_precision_score
import seaborn as sns
from skimage.transform import rotate, AffineTransform, warp, resize
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Conv2D, Dense, MaxPool2D, GlobalAveragePooling2D, Input
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from tensorflow.keras.utils import Sequence
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input | code |
128033200/cell_18 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from itertools import chain
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import numpy as np
import numpy as np
import os
import pandas as pd
import pandas as pd
DATAFRAME_PATH = '/kaggle/input/productcategorization2/image_taxonomy_and_description.csv'
IMAGE_DIRECTORY_PATH = '/kaggle/input/productcategorization2/new_images/new_images'
images_list = [int(iter.split('.')[0]) for iter in os.listdir(IMAGE_DIRECTORY_PATH)]
df = pd.read_csv(DATAFRAME_PATH)
df1 = df.copy()
df1 = df1[df1['product_id'].isin(images_list)]
df1.reset_index(drop=True, inplace=True)
df1.drop({'long_description', 'short_description'}, axis=1, inplace=True)
unique_labels_having_one_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 0]).to_list()))))
unique_labels_having_two_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 1]).to_list()))))
unique_labels_having_three_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 2]).to_list()))))
df1.shape
df1['three_label_taxonomy'] = df.apply(lambda row: [iter for iter in row['taxonomy'].split('|') if iter.count('>') == 2], axis=1)
for iter in unique_labels_having_three_cat:
df1[iter] = 0
for idx, row in df1.iterrows():
cats = row['three_label_taxonomy']
for it in cats:
df1.loc[idx, it] += 1
df1['powerlabel'] = 0
for idx, row in tqdm(df1.iterrows()):
count = 0
for i, iter in enumerate(unique_labels_having_three_cat):
count += np.ceil(1.01 ** (203 - i - 1)) * row[iter]
df1.loc[idx, 'powerlabel'] = count
max = int(df1['powerlabel'].max())
powercount = {}
powerlabels = np.unique(df1['powerlabel'])
for p in powerlabels:
powercount[p] = np.count_nonzero(df1['powerlabel'] == p)
train_inds, val_inds = train_test_split(np.array(list(range(df1.shape[0]))), test_size=0.2, random_state=0)
df1 | code |
128033200/cell_8 | [
"text_plain_output_1.png"
] | from itertools import chain
import os
import pandas as pd
import pandas as pd
DATAFRAME_PATH = '/kaggle/input/productcategorization2/image_taxonomy_and_description.csv'
IMAGE_DIRECTORY_PATH = '/kaggle/input/productcategorization2/new_images/new_images'
images_list = [int(iter.split('.')[0]) for iter in os.listdir(IMAGE_DIRECTORY_PATH)]
df = pd.read_csv(DATAFRAME_PATH)
df1 = df.copy()
df1 = df1[df1['product_id'].isin(images_list)]
df1.reset_index(drop=True, inplace=True)
df1.drop({'long_description', 'short_description'}, axis=1, inplace=True)
unique_labels_having_one_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 0]).to_list()))))
unique_labels_having_two_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 1]).to_list()))))
unique_labels_having_three_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 2]).to_list()))))
df1.shape
df1.head(1) | code |
128033200/cell_15 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from itertools import chain
from tqdm import tqdm
import numpy as np
import numpy as np
import os
import pandas as pd
import pandas as pd
DATAFRAME_PATH = '/kaggle/input/productcategorization2/image_taxonomy_and_description.csv'
IMAGE_DIRECTORY_PATH = '/kaggle/input/productcategorization2/new_images/new_images'
images_list = [int(iter.split('.')[0]) for iter in os.listdir(IMAGE_DIRECTORY_PATH)]
df = pd.read_csv(DATAFRAME_PATH)
df1 = df.copy()
df1 = df1[df1['product_id'].isin(images_list)]
df1.reset_index(drop=True, inplace=True)
df1.drop({'long_description', 'short_description'}, axis=1, inplace=True)
unique_labels_having_one_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 0]).to_list()))))
unique_labels_having_two_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 1]).to_list()))))
unique_labels_having_three_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 2]).to_list()))))
df1.shape
df1['three_label_taxonomy'] = df.apply(lambda row: [iter for iter in row['taxonomy'].split('|') if iter.count('>') == 2], axis=1)
for iter in unique_labels_having_three_cat:
df1[iter] = 0
for idx, row in df1.iterrows():
cats = row['three_label_taxonomy']
for it in cats:
df1.loc[idx, it] += 1
df1['powerlabel'] = 0
for idx, row in tqdm(df1.iterrows()):
count = 0
for i, iter in enumerate(unique_labels_having_three_cat):
count += np.ceil(1.01 ** (203 - i - 1)) * row[iter]
df1.loc[idx, 'powerlabel'] = count
max = int(df1['powerlabel'].max())
powercount = {}
powerlabels = np.unique(df1['powerlabel'])
for p in powerlabels:
powercount[p] = np.count_nonzero(df1['powerlabel'] == p)
powercount | code |
128033200/cell_14 | [
"text_html_output_1.png"
] | from itertools import chain
from tqdm import tqdm
import numpy as np
import numpy as np
import os
import pandas as pd
import pandas as pd
DATAFRAME_PATH = '/kaggle/input/productcategorization2/image_taxonomy_and_description.csv'
IMAGE_DIRECTORY_PATH = '/kaggle/input/productcategorization2/new_images/new_images'
images_list = [int(iter.split('.')[0]) for iter in os.listdir(IMAGE_DIRECTORY_PATH)]
df = pd.read_csv(DATAFRAME_PATH)
df1 = df.copy()
df1 = df1[df1['product_id'].isin(images_list)]
df1.reset_index(drop=True, inplace=True)
df1.drop({'long_description', 'short_description'}, axis=1, inplace=True)
unique_labels_having_one_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 0]).to_list()))))
unique_labels_having_two_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 1]).to_list()))))
unique_labels_having_three_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 2]).to_list()))))
df1.shape
df1['three_label_taxonomy'] = df.apply(lambda row: [iter for iter in row['taxonomy'].split('|') if iter.count('>') == 2], axis=1)
for iter in unique_labels_having_three_cat:
df1[iter] = 0
for idx, row in df1.iterrows():
cats = row['three_label_taxonomy']
for it in cats:
df1.loc[idx, it] += 1
df1['powerlabel'] = 0
for idx, row in tqdm(df1.iterrows()):
count = 0
for i, iter in enumerate(unique_labels_having_three_cat):
count += np.ceil(1.01 ** (203 - i - 1)) * row[iter]
df1.loc[idx, 'powerlabel'] = count
max = int(df1['powerlabel'].max())
powercount = {}
powerlabels = np.unique(df1['powerlabel'])
for p in powerlabels:
powercount[p] = np.count_nonzero(df1['powerlabel'] == p)
len(powercount) | code |
128033200/cell_22 | [
"text_plain_output_1.png"
] | from itertools import chain
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import numpy as np
import numpy as np
import os
import pandas as pd
import pandas as pd
DATAFRAME_PATH = '/kaggle/input/productcategorization2/image_taxonomy_and_description.csv'
IMAGE_DIRECTORY_PATH = '/kaggle/input/productcategorization2/new_images/new_images'
images_list = [int(iter.split('.')[0]) for iter in os.listdir(IMAGE_DIRECTORY_PATH)]
df = pd.read_csv(DATAFRAME_PATH)
df1 = df.copy()
df1 = df1[df1['product_id'].isin(images_list)]
df1.reset_index(drop=True, inplace=True)
df1.drop({'long_description', 'short_description'}, axis=1, inplace=True)
unique_labels_having_one_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 0]).to_list()))))
unique_labels_having_two_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 1]).to_list()))))
unique_labels_having_three_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 2]).to_list()))))
df1.shape
df1['three_label_taxonomy'] = df.apply(lambda row: [iter for iter in row['taxonomy'].split('|') if iter.count('>') == 2], axis=1)
for iter in unique_labels_having_three_cat:
df1[iter] = 0
for idx, row in df1.iterrows():
cats = row['three_label_taxonomy']
for it in cats:
df1.loc[idx, it] += 1
df1['powerlabel'] = 0
for idx, row in tqdm(df1.iterrows()):
count = 0
for i, iter in enumerate(unique_labels_having_three_cat):
count += np.ceil(1.01 ** (203 - i - 1)) * row[iter]
df1.loc[idx, 'powerlabel'] = count
max = int(df1['powerlabel'].max())
powercount = {}
powerlabels = np.unique(df1['powerlabel'])
for p in powerlabels:
powercount[p] = np.count_nonzero(df1['powerlabel'] == p)
train_inds, val_inds = train_test_split(np.array(list(range(df1.shape[0]))), test_size=0.2, random_state=0)
train_df = df1.loc[train_inds, :].reset_index(drop=True)
train_df.drop({'taxonomy', 'three_label_taxonomy'}, inplace=True, axis=1)
val_df = df1.loc[val_inds, :].reset_index(drop=True)
val_df.drop({'taxonomy', 'three_label_taxonomy'}, inplace=True, axis=1)
df1.iloc[7808, 3:] | code |
128033200/cell_10 | [
"text_plain_output_1.png"
] | from itertools import chain
from tqdm import tqdm
import numpy as np
import numpy as np
import os
import pandas as pd
import pandas as pd
DATAFRAME_PATH = '/kaggle/input/productcategorization2/image_taxonomy_and_description.csv'
IMAGE_DIRECTORY_PATH = '/kaggle/input/productcategorization2/new_images/new_images'
images_list = [int(iter.split('.')[0]) for iter in os.listdir(IMAGE_DIRECTORY_PATH)]
df = pd.read_csv(DATAFRAME_PATH)
df1 = df.copy()
df1 = df1[df1['product_id'].isin(images_list)]
df1.reset_index(drop=True, inplace=True)
df1.drop({'long_description', 'short_description'}, axis=1, inplace=True)
unique_labels_having_one_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 0]).to_list()))))
unique_labels_having_two_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 1]).to_list()))))
unique_labels_having_three_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 2]).to_list()))))
df1.shape
df1['three_label_taxonomy'] = df.apply(lambda row: [iter for iter in row['taxonomy'].split('|') if iter.count('>') == 2], axis=1)
for iter in unique_labels_having_three_cat:
df1[iter] = 0
for idx, row in df1.iterrows():
cats = row['three_label_taxonomy']
for it in cats:
df1.loc[idx, it] += 1
df1['powerlabel'] = 0
for idx, row in tqdm(df1.iterrows()):
count = 0
for i, iter in enumerate(unique_labels_having_three_cat):
count += np.ceil(1.01 ** (203 - i - 1)) * row[iter]
df1.loc[idx, 'powerlabel'] = count | code |
128033200/cell_12 | [
"text_html_output_1.png"
] | from itertools import chain
from tqdm import tqdm
import numpy as np
import numpy as np
import os
import pandas as pd
import pandas as pd
DATAFRAME_PATH = '/kaggle/input/productcategorization2/image_taxonomy_and_description.csv'
IMAGE_DIRECTORY_PATH = '/kaggle/input/productcategorization2/new_images/new_images'
images_list = [int(iter.split('.')[0]) for iter in os.listdir(IMAGE_DIRECTORY_PATH)]
df = pd.read_csv(DATAFRAME_PATH)
df1 = df.copy()
df1 = df1[df1['product_id'].isin(images_list)]
df1.reset_index(drop=True, inplace=True)
df1.drop({'long_description', 'short_description'}, axis=1, inplace=True)
unique_labels_having_one_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 0]).to_list()))))
unique_labels_having_two_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 1]).to_list()))))
unique_labels_having_three_cat = sorted(list(set(chain.from_iterable(df['taxonomy'].apply(lambda x: [iter for iter in x.split('|') if iter.count('>') == 2]).to_list()))))
df1.shape
df1['three_label_taxonomy'] = df.apply(lambda row: [iter for iter in row['taxonomy'].split('|') if iter.count('>') == 2], axis=1)
for iter in unique_labels_having_three_cat:
df1[iter] = 0
for idx, row in df1.iterrows():
cats = row['three_label_taxonomy']
for it in cats:
df1.loc[idx, it] += 1
df1['powerlabel'] = 0
for idx, row in tqdm(df1.iterrows()):
count = 0
for i, iter in enumerate(unique_labels_having_three_cat):
count += np.ceil(1.01 ** (203 - i - 1)) * row[iter]
df1.loc[idx, 'powerlabel'] = count
max = int(df1['powerlabel'].max())
df1['powerlabel'].hist(bins=np.array(range(1, max + 2)) - 0.5) | code |
1001261/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv') | code |
128035984/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames[:3]:
print(os.path.join(dirname, filename))
if len(filenames) > 3:
print('...') | code |
128035984/cell_8 | [
"text_plain_output_1.png"
] | from torch.utils.data import Dataset, DataLoader
import csv
import cv2
import numpy as np
import numpy as np # linear algebra
import random
import torch
import torch.nn as nn
TRAIN_PATH = '/kaggle/input/captcha-hacker-2023-spring/dataset/train'
TEST_PATH = '/kaggle/input/captcha-hacker-2023-spring/dataset/test'
device = 'cpu'
alphabets = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
alphabets2index = {alphabet: i for i, alphabet in enumerate(alphabets)}
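# Task 1: each image contains a single alphanumeric character, so this is a
# 62-way classification problem over `alphabets`.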
class Task1Dataset(Dataset):
def __init__(self, data, root, return_filename=False):
self.data = [sample for sample in data if sample[0].startswith('task1')]
self.return_filename = return_filename
self.root = root
def __getitem__(self, index):
filename, label = self.data[index]
img = cv2.imread(f'{self.root}/{filename}')
img = cv2.resize(img, (32, 32))
img = np.mean(img, axis=2)
if self.return_filename:
return (torch.FloatTensor((img - 128) / 128), filename)
else:
return (torch.FloatTensor((img - 128) / 128), alphabets2index[label])
def __len__(self):
return len(self.data)
class Model(nn.Module):
def __init__(self):
super().__init__()
self.layers = nn.Sequential(nn.Linear(1024, 512), nn.LeakyReLU(), nn.Linear(512, len(alphabets)))
def forward(self, x):
batch_size, h, w = x.shape
x = x.view(batch_size, h * w)
return self.layers(x)
train_data = []
val_data = []
with open(f'{TRAIN_PATH}/annotations.csv', newline='') as csvfile:
for row in csv.reader(csvfile, delimiter=','):
if random.random() < 0.8:
train_data.append(row)
else:
val_data.append(row)
train_ds = Task1Dataset(train_data, root=TRAIN_PATH)
train_dl = DataLoader(train_ds, batch_size=100, num_workers=4, drop_last=True, shuffle=True)
val_ds = Task1Dataset(val_data, root=TRAIN_PATH)
val_dl = DataLoader(val_ds, batch_size=100, num_workers=4, drop_last=False, shuffle=False)
model = Model().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
loss_fn = nn.CrossEntropyLoss()
for epoch in range(50):
print(f'Epoch [{epoch}]')
model.train()
for image, label in train_dl:
image = image.to(device)
label = label.to(device)
pred = model(image)
loss = loss_fn(pred, label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
sample_count = 0
correct_count = 0
model.eval()
for image, label in val_dl:
image = image.to(device)
label = label.to(device)
pred = model(image)
loss = loss_fn(pred, label)
pred = torch.argmax(pred, dim=1)
sample_count += len(image)
correct_count += (label == pred).sum()
print('accuracy (validation):', correct_count / sample_count) | code |
34149992/cell_4 | [
"text_plain_output_1.png"
] | X_train | code |
34149992/cell_8 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
import os
import pandas as pd
pd.set_option('display.max_colwidth', None)
folder = '../input/nlp-getting-started'
test = pd.read_csv(os.path.join(folder, 'test.csv'), index_col='id')
train = pd.read_csv(os.path.join(folder, 'train.csv'), index_col='id')
X = train.drop(columns='target')
y = train['target']
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.33, random_state=42)
X['text'].iloc[10] | code |
34149992/cell_10 | [
"text_html_output_1.png"
] | from sklearn.model_selection import train_test_split
import os
import pandas as pd
import spacy
pd.set_option('display.max_colwidth', None)
folder = '../input/nlp-getting-started'
test = pd.read_csv(os.path.join(folder, 'test.csv'), index_col='id')
train = pd.read_csv(os.path.join(folder, 'train.csv'), index_col='id')
X = train.drop(columns='target')
y = train['target']
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.33, random_state=42)
nlp = spacy.load('en_core_web_sm')
doc = nlp(X['text'].iloc[0])
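# Print each token's surface text, lemma, dependency relation and coarse part-of-speech tag.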
for token in doc:
print(token.text, token.lemma_, token.dep_, token.pos_) | code |
72080311/cell_21 | [
"text_html_output_1.png"
] | import matplotlib.mlab as mlab # some MATLAB commands
import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random # generating (pseudo)-random numbers
import numpy as np
import pandas as pd
pd.set_option('display.max_colwidth', None)
import matplotlib.pyplot as plt
from glob import glob
import random
import matplotlib.mlab as mlab
from scipy.interpolate import interp1d
training_labels_path = '../input/g2net-gravitational-wave-detection/training_labels.csv'
training_labels = pd.read_csv(training_labels_path)
training_paths = glob('../input/g2net-gravitational-wave-detection/train/*/*/*/*')
ids = [path.split('/')[-1].split('.')[0] for path in training_paths]
paths_df = pd.DataFrame({'path': training_paths, 'id': ids})
train_data = pd.merge(left=training_labels, right=paths_df, on='id')
def load_random_file(signal=None):
"""Selecting a random file from the training dataset.
Args:
signal: bool
optional flag defining whether to select pure detector
noise (False) or detector noise plus simulated signal (True).
If skipped, the flag is chosen randomly.
Returns:
file_id: str
unique id of the selected file
target: int
0 or 1, target value
data: numpy.ndarray
numpy array in the shape (3, 4096), where 3 is the number
of detectors, 4096 is number of data points (each time series
instance spans over 2 seconds and is sampled at 2048 Hz)
"""
if signal is None:
signal = random.choice([True, False])
filtered = train_data['target'] == signal
index = random.choice(train_data[filtered].index)
file_id = train_data['id'].at[index]
target = train_data['target'].at[index]
path = train_data['path'].at[index]
data = np.load(path)
return (file_id, target, data)
file_id, target, data = load_random_file()
ylim = 1.1*np.max(data)
plt.style.use('ggplot')
fig, axs = plt.subplots(ncols=1, nrows=3, figsize=(10, 5))
for i in range(3):
ax = axs.ravel()[i]
ax.plot(data[i])
ax.margins(0)
axs[i].set_title(f"Detector {i+1}", loc='center')
ax.set_ylabel(f"Amplitude")
ax.set_ylim([-ylim, ylim])
axs[0].xaxis.set_visible(False)
axs[1].xaxis.set_visible(False)
axs[2].set_xlabel("Time stamp")
fig.suptitle(f"Raw data visualization. ID: {file_id}. Target: {target}.")
plt.show()
fs = 2048
NFFT = 4 * fs
f_min = 20.0
f_max = fs / 2
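# Estimate each detector's power spectral density with Welch's method (mlab.psd),
# then wrap it in an interpolator so the PSD can be evaluated at any frequency.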
_, target, data = load_random_file(True)
strain1, strain2, strain3 = (data[0], data[1], data[2])
Pxx_1, freqs = mlab.psd(strain1, Fs=fs, NFFT=NFFT)
Pxx_2, freqs = mlab.psd(strain2, Fs=fs, NFFT=NFFT)
Pxx_3, freqs = mlab.psd(strain3, Fs=fs, NFFT=NFFT)
psd_1 = interp1d(freqs, Pxx_1)
psd_2 = interp1d(freqs, Pxx_2)
psd_3 = interp1d(freqs, Pxx_3)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(10, 5))
ax.loglog(freqs, np.sqrt(Pxx_1), 'g', label='Detector 1')
ax.loglog(freqs, np.sqrt(Pxx_2), 'r', label='Detector 2')
ax.loglog(freqs, np.sqrt(Pxx_3), 'b', label='Detector 3')
ax.set_xlim([f_min, f_max])
ax.set_ylabel('ASD (strain/$\\sqrt{Hz}$)')
ax.set_xlabel('Frequency (Hz)')
ax.legend()
plt.show() | code |
72080311/cell_9 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
pd.set_option('display.max_colwidth', None)
import matplotlib.pyplot as plt
from glob import glob
import random
import matplotlib.mlab as mlab
from scipy.interpolate import interp1d
training_labels_path = '../input/g2net-gravitational-wave-detection/training_labels.csv'
training_labels = pd.read_csv(training_labels_path)
training_labels['target'].value_counts() | code |
72080311/cell_23 | [
"image_output_1.png"
] | !pip -q install pycbc
import pycbc | code |
72080311/cell_30 | [
"text_plain_output_1.png"
] | import matplotlib.mlab as mlab # some MATLAB commands
import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pycbc
import random # generating (pseudo)-random numbers
import numpy as np
import pandas as pd
pd.set_option('display.max_colwidth', None)
import matplotlib.pyplot as plt
from glob import glob
import random
import matplotlib.mlab as mlab
from scipy.interpolate import interp1d
training_labels_path = '../input/g2net-gravitational-wave-detection/training_labels.csv'
training_labels = pd.read_csv(training_labels_path)
training_paths = glob('../input/g2net-gravitational-wave-detection/train/*/*/*/*')
ids = [path.split('/')[-1].split('.')[0] for path in training_paths]
paths_df = pd.DataFrame({'path': training_paths, 'id': ids})
train_data = pd.merge(left=training_labels, right=paths_df, on='id')
def load_random_file(signal=None):
"""Selecting a random file from the training dataset.
Args:
signal: bool
optional flag defining whether to select pure detector
noise (False) or detector noise plus simulated signal (True).
If skipped, the flag is chosen randomly.
Returns:
file_id: str
unique id of the selected file
target: int
0 or 1, target value
data: numpy.ndarray
numpy array in the shape (3, 4096), where 3 is the number
of detectors, 4096 is number of data points (each time series
instance spans over 2 seconds and is sampled at 2048 Hz)
"""
if signal is None:
signal = random.choice([True, False])
filtered = train_data['target'] == signal
index = random.choice(train_data[filtered].index)
file_id = train_data['id'].at[index]
target = train_data['target'].at[index]
path = train_data['path'].at[index]
data = np.load(path)
return (file_id, target, data)
file_id, target, data = load_random_file()
ylim = 1.1*np.max(data)
plt.style.use('ggplot')
fig, axs = plt.subplots(ncols=1, nrows=3, figsize=(10, 5))
for i in range(3):
ax = axs.ravel()[i]
ax.plot(data[i])
ax.margins(0)
axs[i].set_title(f"Detector {i+1}", loc='center')
ax.set_ylabel(f"Amplitude")
ax.set_ylim([-ylim, ylim])
axs[0].xaxis.set_visible(False)
axs[1].xaxis.set_visible(False)
axs[2].set_xlabel("Time stamp")
fig.suptitle(f"Raw data visualization. ID: {file_id}. Target: {target}.")
plt.show()
fs = 2048
NFFT = 4 * fs
f_min = 20.0
f_max = fs / 2
_, target, data = load_random_file(True)
strain1, strain2, strain3 = data[0], data[1], data[2]
Pxx_1, freqs = mlab.psd(strain1, Fs = fs, NFFT = NFFT)
Pxx_2, freqs = mlab.psd(strain2, Fs = fs, NFFT = NFFT)
Pxx_3, freqs = mlab.psd(strain3, Fs = fs, NFFT = NFFT)
psd_1 = interp1d(freqs, Pxx_1)
psd_2 = interp1d(freqs, Pxx_2)
psd_3 = interp1d(freqs, Pxx_3)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(10, 5))
ax.loglog(freqs, np.sqrt(Pxx_1),"g",label="Detector 1")
ax.loglog(freqs, np.sqrt(Pxx_2),"r",label="Detector 2")
ax.loglog(freqs, np.sqrt(Pxx_3),"b",label="Detector 3")
ax.set_xlim([f_min, f_max])
ax.set_ylabel("ASD (strain/$\sqrt{Hz}$)")
ax.set_xlabel("Frequency (Hz)")
ax.legend()
plt.show()
def generate_qtransform(data, fs):
"""Function for generating constant Q-transform.
Args:
data: numpy.ndarray
numpy array in the shape (3, 4096), where 3 is the number
of detectors, 4096 is number of data points (each time series
instance spans over 2 seconds and is sampled at 2048 Hz)
fs: int
sampling frequency
Returns:
times: numpy.ndarray
array of time bins
freqs: numpy.ndarray
array of frequency bins
qplanes: list
list with 3 elements corresponding to each detector in the raw
data file. Each element is a 2-d vector of the power in each
time-frequency bin
"""
qplanes = []
for i in range(len(data)):
ts = pycbc.types.TimeSeries(data[i, :], epoch=0, delta_t=1.0 / fs)
ts = ts.whiten(0.125, 0.125)
times, freqs, qplane = ts.qtransform(0.002, logfsteps=100, qrange=(10, 10), frange=(20, 512))
qplanes.append(qplane)
return (times, freqs, qplanes)
def plot_qtransform(file_id, target, data):
"""Plotting constant Q-transform data.
Args:
file_id: str
unique id of the selected file
target: int
0 or 1, target value
data: numpy.ndarray
numpy array in the shape (3, 4096), where 3 is the number
of detectors, 4096 is number of data points (each time series
instance spans over 2 seconds and is sampled at 2048 Hz)
"""
times, freqs, qplanes = generate_qtransform(data, fs=fs)
fig, axs = plt.subplots(ncols=1, nrows=3, figsize=(12, 8))
for i in range(3):
axs[i].pcolormesh(times, freqs, qplanes[i], shading = 'auto')
axs[i].set_yscale('log')
axs[i].set_ylabel('Frequency (Hz)')
axs[i].set_xlabel('Time (s)')
axs[i].set_title(f"Detector {i+1}", loc='left')
axs[i].grid(False)
axs[0].xaxis.set_visible(False)
axs[1].xaxis.set_visible(False)
fig.suptitle(f"Q transform visualization. ID: {file_id}. Target: {target}.", fontsize=16)
plt.show()
file_id = '7945e449f3'
target = 1
data = np.load(train_data[train_data['id'] == file_id]['path'].values[0])
plot_qtransform(file_id, target, data) | code |
72080311/cell_11 | [
"text_html_output_1.png"
] | training_paths = glob('../input/g2net-gravitational-wave-detection/train/*/*/*/*')
print('The total number of files in the training set:', len(training_paths)) | code |
72080311/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random # generating (pseudo)-random numbers
import numpy as np
import pandas as pd
pd.set_option('display.max_colwidth', None)
import matplotlib.pyplot as plt
from glob import glob
import random
import matplotlib.mlab as mlab
from scipy.interpolate import interp1d
training_labels_path = '../input/g2net-gravitational-wave-detection/training_labels.csv'
training_labels = pd.read_csv(training_labels_path)
training_paths = glob('../input/g2net-gravitational-wave-detection/train/*/*/*/*')
ids = [path.split('/')[-1].split('.')[0] for path in training_paths]
paths_df = pd.DataFrame({'path': training_paths, 'id': ids})
train_data = pd.merge(left=training_labels, right=paths_df, on='id')
def load_random_file(signal=None):
"""Selecting a random file from the training dataset.
Args:
signal: bool
optional flag defining whether to select pure detector
noise (False) or detector noise plus simulated signal (True).
If skipped, the flag is chosen randomly.
Returns:
file_id: str
unique id of the selected file
target: int
0 or 1, target value
data: numpy.ndarray
numpy array in the shape (3, 4096), where 3 is the number
of detectors, 4096 is number of data points (each time series
instance spans over 2 seconds and is sampled at 2048 Hz)
"""
if signal is None:
signal = random.choice([True, False])
filtered = train_data['target'] == signal
index = random.choice(train_data[filtered].index)
file_id = train_data['id'].at[index]
target = train_data['target'].at[index]
path = train_data['path'].at[index]
data = np.load(path)
return (file_id, target, data)
file_id, target, data = load_random_file()
ylim = 1.1 * np.max(data)
plt.style.use('ggplot')
fig, axs = plt.subplots(ncols=1, nrows=3, figsize=(10, 5))
for i in range(3):
ax = axs.ravel()[i]
ax.plot(data[i])
ax.margins(0)
axs[i].set_title(f'Detector {i + 1}', loc='center')
ax.set_ylabel(f'Amplitude')
ax.set_ylim([-ylim, ylim])
axs[0].xaxis.set_visible(False)
axs[1].xaxis.set_visible(False)
axs[2].set_xlabel('Time stamp')
fig.suptitle(f'Raw data visualization. ID: {file_id}. Target: {target}.')
plt.show() | code |
72080311/cell_28 | [
"image_output_1.png"
] | import matplotlib.mlab as mlab # some MATLAB commands
import matplotlib.pyplot as plt # plotting
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pycbc
import random # generating (pseudo)-random numbers
import numpy as np
import pandas as pd
pd.set_option('display.max_colwidth', None)
import matplotlib.pyplot as plt
from glob import glob
import random
import matplotlib.mlab as mlab
from scipy.interpolate import interp1d
training_labels_path = '../input/g2net-gravitational-wave-detection/training_labels.csv'
training_labels = pd.read_csv(training_labels_path)
training_paths = glob('../input/g2net-gravitational-wave-detection/train/*/*/*/*')
ids = [path.split('/')[-1].split('.')[0] for path in training_paths]
paths_df = pd.DataFrame({'path': training_paths, 'id': ids})
train_data = pd.merge(left=training_labels, right=paths_df, on='id')
def load_random_file(signal=None):
"""Selecting a random file from the training dataset.
Args:
signal: bool
optional flag defining whether to select pure detector
noise (False) or detector noise plus simulated signal (True).
If skipped, the flag is chosen randomly.
Returns:
file_id: str
unique id of the selected file
target: int
0 or 1, target value
data: numpy.ndarray
numpy array in the shape (3, 4096), where 3 is the number
of detectors, 4096 is number of data points (each time series
instance spans over 2 seconds and is sampled at 2048 Hz)
"""
if signal is None:
signal = random.choice([True, False])
filtered = train_data['target'] == signal
index = random.choice(train_data[filtered].index)
file_id = train_data['id'].at[index]
target = train_data['target'].at[index]
path = train_data['path'].at[index]
data = np.load(path)
return (file_id, target, data)
file_id, target, data = load_random_file()
ylim = 1.1*np.max(data)
plt.style.use('ggplot')
fig, axs = plt.subplots(ncols=1, nrows=3, figsize=(10, 5))
for i in range(3):
ax = axs.ravel()[i]
ax.plot(data[i])
ax.margins(0)
axs[i].set_title(f"Detector {i+1}", loc='center')
ax.set_ylabel(f"Amplitude")
ax.set_ylim([-ylim, ylim])
axs[0].xaxis.set_visible(False)
axs[1].xaxis.set_visible(False)
axs[2].set_xlabel("Time stamp")
fig.suptitle(f"Raw data visualization. ID: {file_id}. Target: {target}.")
plt.show()
fs = 2048
NFFT = 4 * fs
f_min = 20.0
f_max = fs / 2
_, target, data = load_random_file(True)
strain1, strain2, strain3 = data[0], data[1], data[2]
Pxx_1, freqs = mlab.psd(strain1, Fs = fs, NFFT = NFFT)
Pxx_2, freqs = mlab.psd(strain2, Fs = fs, NFFT = NFFT)
Pxx_3, freqs = mlab.psd(strain3, Fs = fs, NFFT = NFFT)
psd_1 = interp1d(freqs, Pxx_1)
psd_2 = interp1d(freqs, Pxx_2)
psd_3 = interp1d(freqs, Pxx_3)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(10, 5))
ax.loglog(freqs, np.sqrt(Pxx_1),"g",label="Detector 1")
ax.loglog(freqs, np.sqrt(Pxx_2),"r",label="Detector 2")
ax.loglog(freqs, np.sqrt(Pxx_3),"b",label="Detector 3")
ax.set_xlim([f_min, f_max])
ax.set_ylabel("ASD (strain/$\sqrt{Hz}$)")
ax.set_xlabel("Frequency (Hz)")
ax.legend()
plt.show()
def generate_qtransform(data, fs):
"""Function for generating constant Q-transform.
Args:
data: numpy.ndarray
numpy array in the shape (3, 4096), where 3 is the number
of detectors, 4096 is number of data points (each time series
instance spans over 2 seconds and is sampled at 2048 Hz)
fs: int
sampling frequency
Returns:
times: numpy.ndarray
array of time bins
freqs: numpy.ndarray
array of frequency bins
qplanes: list
list with 3 elements corresponding to each detector in the raw
data file. Each element is a 2-d vector of the power in each
time-frequency bin
"""
qplanes = []
for i in range(len(data)):
ts = pycbc.types.TimeSeries(data[i, :], epoch=0, delta_t=1.0 / fs)
ts = ts.whiten(0.125, 0.125)
times, freqs, qplane = ts.qtransform(0.002, logfsteps=100, qrange=(10, 10), frange=(20, 512))
qplanes.append(qplane)
return (times, freqs, qplanes)
def plot_qtransform(file_id, target, data):
"""Plotting constant Q-transform data.
Args:
file_id: str
unique id of the selected file
target: int
0 or 1, target value
data: numpy.ndarray
numpy array in the shape (3, 4096), where 3 is the number
of detectors, 4096 is number of data points (each time series
instance spans over 2 seconds and is sampled at 2048 Hz)
"""
times, freqs, qplanes = generate_qtransform(data, fs=fs)
fig, axs = plt.subplots(ncols=1, nrows=3, figsize=(12, 8))
for i in range(3):
axs[i].pcolormesh(times, freqs, qplanes[i], shading = 'auto')
axs[i].set_yscale('log')
axs[i].set_ylabel('Frequency (Hz)')
axs[i].set_xlabel('Time (s)')
axs[i].set_title(f"Detector {i+1}", loc='left')
axs[i].grid(False)
axs[0].xaxis.set_visible(False)
axs[1].xaxis.set_visible(False)
fig.suptitle(f"Q transform visualization. ID: {file_id}. Target: {target}.", fontsize=16)
plt.show()
file_id, target, data = load_random_file()
plot_qtransform(file_id, target, data) | code |
72080311/cell_8 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
pd.set_option('display.max_colwidth', None)
import matplotlib.pyplot as plt
from glob import glob
import random
import matplotlib.mlab as mlab
from scipy.interpolate import interp1d
training_labels_path = '../input/g2net-gravitational-wave-detection/training_labels.csv'
training_labels = pd.read_csv(training_labels_path)
training_labels.head(3) | code |
72080311/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
pd.set_option('display.max_colwidth', None)
import matplotlib.pyplot as plt
from glob import glob
import random
import matplotlib.mlab as mlab
from scipy.interpolate import interp1d
training_labels_path = '../input/g2net-gravitational-wave-detection/training_labels.csv'
training_labels = pd.read_csv(training_labels_path)
training_paths = glob('../input/g2net-gravitational-wave-detection/train/*/*/*/*')
ids = [path.split('/')[-1].split('.')[0] for path in training_paths]
paths_df = pd.DataFrame({'path': training_paths, 'id': ids})
train_data = pd.merge(left=training_labels, right=paths_df, on='id')
train_data.head(3) | code |
34134310/cell_21 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_fifa = pd.read_csv('/kaggle/input/fifa19/data.csv')
columns_to_drop = ['Unnamed: 0', 'ID', 'Name', 'Photo', 'Nationality', 'Flag', 'Club', 'Club Logo', 'Value', 'Wage', 'Special', 'Preferred Foot', 'International Reputation', 'Weak Foot', 'Skill Moves', 'Work Rate', 'Body Type', 'Real Face', 'Position', 'Jersey Number', 'Joined', 'Loaned From', 'Contract Valid Until', 'Height', 'Weight', 'LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW', 'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM', 'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB', 'Release Clause']
df_fifa.drop(columns_to_drop, axis=1, inplace=True)
df_fifa = df_fifa.dropna()
x = df_fifa.drop('Overall', axis=1).values
y = df_fifa.Overall.values
x = StandardScaler().fit_transform(x)
mx_cov = np.cov(x, rowvar=False)
mx_exemplo = np.array([[1.76, 75], [1.8, 97.3]])
mx_exemplo.T | code |
34134310/cell_23 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df_fifa = pd.read_csv('/kaggle/input/fifa19/data.csv')
columns_to_drop = ['Unnamed: 0', 'ID', 'Name', 'Photo', 'Nationality', 'Flag', 'Club', 'Club Logo', 'Value', 'Wage', 'Special', 'Preferred Foot', 'International Reputation', 'Weak Foot', 'Skill Moves', 'Work Rate', 'Body Type', 'Real Face', 'Position', 'Jersey Number', 'Joined', 'Loaned From', 'Contract Valid Until', 'Height', 'Weight', 'LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW', 'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM', 'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB', 'Release Clause']
df_fifa.drop(columns_to_drop, axis=1, inplace=True)
df_fifa = df_fifa.dropna()
x = df_fifa.drop('Overall', axis=1).values
y = df_fifa.Overall.values
x = StandardScaler().fit_transform(x)
mx_cov = np.cov(x, rowvar=False)
plt.figure(figsize=(30, 30))
sns.heatmap(mx_cov, annot=True) | code |
34134310/cell_33 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_fifa = pd.read_csv('/kaggle/input/fifa19/data.csv')
columns_to_drop = ['Unnamed: 0', 'ID', 'Name', 'Photo', 'Nationality', 'Flag', 'Club', 'Club Logo', 'Value', 'Wage', 'Special', 'Preferred Foot', 'International Reputation', 'Weak Foot', 'Skill Moves', 'Work Rate', 'Body Type', 'Real Face', 'Position', 'Jersey Number', 'Joined', 'Loaned From', 'Contract Valid Until', 'Height', 'Weight', 'LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW', 'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM', 'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB', 'Release Clause']
df_fifa.drop(columns_to_drop, axis=1, inplace=True)
df_fifa = df_fifa.dropna()
x = df_fifa.drop('Overall', axis=1).values
y = df_fifa.Overall.values
x = StandardScaler().fit_transform(x)
mx_cov = np.cov(x, rowvar=False)
mx_exemplo = np.array([[1.76, 75], [1.8, 97.3]])
mx_exemplo.T
np.shape(mx_cov)
autovalores, autovetores = np.linalg.eig(mx_cov)
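# autovalores / autovetores: eigenvalues and eigenvectors of the covariance matrix (the principal directions).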
tp_componentes = tuple(zip(autovalores, autovetores))
tp_componentes
total = sum(autovalores)
var_acum = [autovalor / total for autovalor in sorted(autovalores, reverse=True)]
var_acum
componentes = np.argmax(np.cumsum(var_acum) >= 0.95)
ft_ds = tp_componentes[:componentes]
ft_vetor = list()
for val, vet in ft_ds:
ft_vetor.append(vet)
df = pd.DataFrame(np.array(ft_vetor).T[:15])
np.shape(ft_vetor)
featured_ds = np.dot(x[:, :15], np.array(ft_vetor).T[:15]) + x.mean()
df = pd.DataFrame(featured_ds)
df | code |
34134310/cell_29 | [
"text_plain_output_1.png"
] | tp_componentes = tuple(zip(autovalores, autovetores))
tp_componentes
sorted(tp_componentes, reverse=True) | code |
34134310/cell_26 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_fifa = pd.read_csv('/kaggle/input/fifa19/data.csv')
columns_to_drop = ['Unnamed: 0', 'ID', 'Name', 'Photo', 'Nationality', 'Flag', 'Club', 'Club Logo', 'Value', 'Wage', 'Special', 'Preferred Foot', 'International Reputation', 'Weak Foot', 'Skill Moves', 'Work Rate', 'Body Type', 'Real Face', 'Position', 'Jersey Number', 'Joined', 'Loaned From', 'Contract Valid Until', 'Height', 'Weight', 'LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW', 'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM', 'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB', 'Release Clause']
df_fifa.drop(columns_to_drop, axis=1, inplace=True)
df_fifa = df_fifa.dropna()
x = df_fifa.drop('Overall', axis=1).values
y = df_fifa.Overall.values
x = StandardScaler().fit_transform(x)
mx_cov = np.cov(x, rowvar=False)
mx_exemplo = np.array([[1.76, 75], [1.8, 97.3]])
mx_exemplo.T
np.shape(mx_cov)
autovalores, autovetores = np.linalg.eig(mx_cov)
print('Autovalores', autovalores)
print('Autovetores', autovetores) | code |
34134310/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
34134310/cell_32 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_fifa = pd.read_csv('/kaggle/input/fifa19/data.csv')
columns_to_drop = ['Unnamed: 0', 'ID', 'Name', 'Photo', 'Nationality', 'Flag', 'Club', 'Club Logo', 'Value', 'Wage', 'Special', 'Preferred Foot', 'International Reputation', 'Weak Foot', 'Skill Moves', 'Work Rate', 'Body Type', 'Real Face', 'Position', 'Jersey Number', 'Joined', 'Loaned From', 'Contract Valid Until', 'Height', 'Weight', 'LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW', 'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM', 'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB', 'Release Clause']
df_fifa.drop(columns_to_drop, axis=1, inplace=True)
df_fifa = df_fifa.dropna()
x = df_fifa.drop('Overall', axis=1).values
y = df_fifa.Overall.values
x = StandardScaler().fit_transform(x)
mx_cov = np.cov(x, rowvar=False)
mx_exemplo = np.array([[1.76, 75], [1.8, 97.3]])
mx_exemplo.T
np.shape(mx_cov)
autovalores, autovetores = np.linalg.eig(mx_cov)
tp_componentes = tuple(zip(autovalores, autovetores))
tp_componentes
total = sum(autovalores)
var_acum = [autovalor / total for autovalor in sorted(autovalores, reverse=True)]
var_acum
componentes = np.argmax(np.cumsum(var_acum) >= 0.95)
ft_ds = tp_componentes[:componentes]
ft_vetor = list()
for val, vet in ft_ds:
ft_vetor.append(vet)
df = pd.DataFrame(np.array(ft_vetor).T[:15])
np.shape(ft_vetor) | code |
34134310/cell_28 | [
"text_plain_output_1.png"
] | tp_componentes = tuple(zip(autovalores, autovetores))
tp_componentes | code |
34134310/cell_31 | [
"text_plain_output_1.png"
] | total = sum(autovalores)
var_acum = [autovalor / total for autovalor in sorted(autovalores, reverse=True)]
var_acum | code |
34134310/cell_24 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_fifa = pd.read_csv('/kaggle/input/fifa19/data.csv')
columns_to_drop = ['Unnamed: 0', 'ID', 'Name', 'Photo', 'Nationality', 'Flag', 'Club', 'Club Logo', 'Value', 'Wage', 'Special', 'Preferred Foot', 'International Reputation', 'Weak Foot', 'Skill Moves', 'Work Rate', 'Body Type', 'Real Face', 'Position', 'Jersey Number', 'Joined', 'Loaned From', 'Contract Valid Until', 'Height', 'Weight', 'LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW', 'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM', 'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB', 'Release Clause']
df_fifa.drop(columns_to_drop, axis=1, inplace=True)
df_fifa = df_fifa.dropna()
x = df_fifa.drop('Overall', axis=1).values
y = df_fifa.Overall.values
x = StandardScaler().fit_transform(x)
mx_cov = np.cov(x, rowvar=False)
mx_exemplo = np.array([[1.76, 75], [1.8, 97.3]])
mx_exemplo.T
np.shape(mx_cov) | code |
2007135/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum()
dataset = dataset.drop(['Id', 'LotFrontage', 'Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
len(dataset.columns)
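# dropna(thresh=70) keeps only the rows that contain at least 70 non-null values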
dataset = dataset.dropna(thresh=70)
X = dataset.iloc[:, 0:-1]
y = dataset.iloc[:, -1]
X.head() | code |
2007135/cell_25 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
import pandas as pd # data processing
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum()
dataset = dataset.drop(['Id', 'LotFrontage', 'Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
len(dataset.columns)
dataset = dataset.dropna(thresh=70)
X = dataset.iloc[:, 0:-1]
y = dataset.iloc[:, -1]
X = pd.get_dummies(data=X, columns=['MSZoning', 'Street', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'SaleType', 'SaleCondition', 'KitchenQual', 'Functional', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive'], drop_first=True)
X = X.values
y = y.values
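# Each model below is evaluated on out-of-fold predictions from 6-fold cross-validation, compared via the R^2 score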
lin_reg = LinearRegression()
y_pred_lr = cross_val_predict(lin_reg, X, y, cv=6)
accuracy_lf = metrics.r2_score(y, y_pred_lr)
poly_reg = PolynomialFeatures(degree=2)
X_poly = poly_reg.fit_transform(X)
lin_reg_pl = LinearRegression()
lin_reg_pl = LinearRegression()
y_pred_pl = cross_val_predict(lin_reg_pl, X_poly, y, cv=6)
accuracy_pl = metrics.r2_score(y, y_pred_pl)
dt_regressor = DecisionTreeRegressor(random_state=0)
y_pred_dt = cross_val_predict(dt_regressor, X, y, cv=6)
accuracy_dt = metrics.r2_score(y, y_pred_dt)
rf_regressor = RandomForestRegressor(n_estimators=300, random_state=0)
y_pred_rf = cross_val_predict(rf_regressor, X, y, cv=6)
accuracy_rf = metrics.r2_score(y, y_pred_rf)
print('Cross-Predicted Random Forest Regression Accuracy: ', accuracy_rf) | code |
2007135/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum() | code |
2007135/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum()
dataset = dataset.drop(['Id', 'LotFrontage', 'Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
len(dataset.columns) | code |
2007135/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.head() | code |
2007135/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum()
dataset = dataset.drop(['Id', 'LotFrontage', 'Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
len(dataset.columns)
dataset = dataset.dropna(thresh=70)
X = dataset.iloc[:, 0:-1]
y = dataset.iloc[:, -1]
X = pd.get_dummies(data=X, columns=['MSZoning', 'Street', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'SaleType', 'SaleCondition', 'KitchenQual', 'Functional', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive'], drop_first=True)
X.head() | code |
2007135/cell_19 | [
"text_html_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.preprocessing import PolynomialFeatures
import pandas as pd # data processing
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum()
dataset = dataset.drop(['Id', 'LotFrontage', 'Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
len(dataset.columns)
dataset = dataset.dropna(thresh=70)
X = dataset.iloc[:, 0:-1]
y = dataset.iloc[:, -1]
X = pd.get_dummies(data=X, columns=['MSZoning', 'Street', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'SaleType', 'SaleCondition', 'KitchenQual', 'Functional', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive'], drop_first=True)
X = X.values
y = y.values
lin_reg = LinearRegression()
y_pred_lr = cross_val_predict(lin_reg, X, y, cv=6)
accuracy_lf = metrics.r2_score(y, y_pred_lr)
poly_reg = PolynomialFeatures(degree=2)
X_poly = poly_reg.fit_transform(X)
lin_reg_pl = LinearRegression()
lin_reg_pl = LinearRegression()
y_pred_pl = cross_val_predict(lin_reg_pl, X_poly, y, cv=6)
accuracy_pl = metrics.r2_score(y, y_pred_pl)
print('Cross-Predicted Polynominal Regression Accuracy: ', accuracy_pl) | code |
2007135/cell_16 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score, cross_val_predict
import pandas as pd # data processing
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum()
dataset = dataset.drop(['Id', 'LotFrontage', 'Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
len(dataset.columns)
dataset = dataset.dropna(thresh=70)
X = dataset.iloc[:, 0:-1]
y = dataset.iloc[:, -1]
X = pd.get_dummies(data=X, columns=['MSZoning', 'Street', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'SaleType', 'SaleCondition', 'KitchenQual', 'Functional', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive'], drop_first=True)
X = X.values
y = y.values
lin_reg = LinearRegression()
y_pred_lr = cross_val_predict(lin_reg, X, y, cv=6)
accuracy_lf = metrics.r2_score(y, y_pred_lr)
print('Cross-Predicted Mutiple Linear Regression Accuracy: ', accuracy_lf) | code |
2007135/cell_22 | [
"text_html_output_1.png"
] | from sklearn import metrics
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
import pandas as pd # data processing
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum()
dataset = dataset.drop(['Id', 'LotFrontage', 'Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
len(dataset.columns)
dataset = dataset.dropna(thresh=70)
X = dataset.iloc[:, 0:-1]
y = dataset.iloc[:, -1]
X = pd.get_dummies(data=X, columns=['MSZoning', 'Street', 'LotShape', 'LandContour', 'Utilities', 'LotConfig', 'LandSlope', 'Neighborhood', 'Condition1', 'Condition2', 'BldgType', 'HouseStyle', 'RoofStyle', 'RoofMatl', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'ExterCond', 'Foundation', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'Heating', 'HeatingQC', 'CentralAir', 'Electrical', 'SaleType', 'SaleCondition', 'KitchenQual', 'Functional', 'GarageType', 'GarageFinish', 'GarageQual', 'GarageCond', 'PavedDrive'], drop_first=True)
X = X.values
y = y.values
lin_reg = LinearRegression()
y_pred_lr = cross_val_predict(lin_reg, X, y, cv=6)
accuracy_lf = metrics.r2_score(y, y_pred_lr)
poly_reg = PolynomialFeatures(degree=2)
X_poly = poly_reg.fit_transform(X)
lin_reg_pl = LinearRegression()
lin_reg_pl = LinearRegression()
y_pred_pl = cross_val_predict(lin_reg_pl, X_poly, y, cv=6)
accuracy_pl = metrics.r2_score(y, y_pred_pl)
dt_regressor = DecisionTreeRegressor(random_state=0)
y_pred_dt = cross_val_predict(dt_regressor, X, y, cv=6)
accuracy_dt = metrics.r2_score(y, y_pred_dt)
print('Cross-Predicted Decision Tree Regression Accuracy: ', accuracy_dt) | code |
2007135/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd # data processing
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn import metrics
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
dataset = pd.read_csv('../input/train.csv')
dataset.isnull().sum()
dataset = dataset.drop(['Id', 'LotFrontage', 'Alley', 'FireplaceQu', 'PoolQC', 'Fence', 'MiscFeature'], axis=1)
len(dataset.columns)
dataset = dataset.dropna(thresh=70)
X = dataset.iloc[:, 0:-1]
y = dataset.iloc[:, -1]
y[0:5] | code |
129033726/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt #to perform different visualizations
import pandas as pd #to handle the dataframe
import seaborn as sns
data = pd.read_csv('/kaggle/input/churn-predictions-personal/Churn_Predictions.csv')
data.isnull().sum()
' It\'s always necessary to check for missing values. "NaN" is not the only form a missing value can take; missing values can also appear in other formats.\nA feature can also have 0\'s which can be treated as missing values for some particular dataset. In this case we don\'t have any missing values present\nin the dataset, which saves a lot of time'
gender_counts = data['Gender'].value_counts()
"""Using Density plot we can visualize the distribution of different age group"""
import seaborn as sns
sns.kdeplot(data['Age'], shade=True)
plt.xlabel('Age')
plt.ylabel('Density')
plt.title('Age Distribution')
plt.show() | code |
129033726/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd #to handle the dataframe
data = pd.read_csv('/kaggle/input/churn-predictions-personal/Churn_Predictions.csv')
data.head() | code |
129033726/cell_25 | [
"image_output_1.png"
] | from scipy.stats import f_oneway
import pandas as pd #to handle the dataframe
data = pd.read_csv('/kaggle/input/churn-predictions-personal/Churn_Predictions.csv')
data.isnull().sum()
' It\'s always necessary to check for missing values. "NaN" is not the only form a missing value can take; missing values can also appear in other formats.\nA feature can also have 0\'s which can be treated as missing values for some particular dataset. In this case we don\'t have any missing values present\nin the dataset, which saves a lot of time'
"""We can also check the top N customers based on their current account balance. Here 1 means left 0 means still with the bank"""
top_N = data.nlargest(20, 'Balance')
bottom_N = data.nsmallest(20, 'Balance')
from scipy.stats import f_oneway
grouped_data = data.groupby('Exited')['Age'].apply(list)
f_statistic, p_value = f_oneway(*grouped_data)
data.corr() | code |
129033726/cell_33 | [
"text_plain_output_1.png"
] | from scipy.stats import f_oneway
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
import numpy as np #to perform arithmetic operation
import pandas as pd #to handle the dataframe
data = pd.read_csv('/kaggle/input/churn-predictions-personal/Churn_Predictions.csv')
data.isnull().sum()
' It\'s always necessary to check for missing values. "NaN" is not the only form a missing value can take; missing values can also appear in other formats.\nA feature can also have 0\'s which can be treated as missing values for some particular dataset. In this case we don\'t have any missing values present\nin the dataset, which saves a lot of time'
"""We can also check the top N customers based on their current account balance. Here 1 means left 0 means still with the bank"""
top_N = data.nlargest(20, 'Balance')
bottom_N = data.nsmallest(20, 'Balance')
from scipy.stats import f_oneway
grouped_data = data.groupby('Exited')['Age'].apply(list)
f_statistic, p_value = f_oneway(*grouped_data)
data.corr()
X = data.iloc[:, 3:-1].values
y = data.iloc[:, -1].values
"""Here 001 represents Spain,
100 represents France,
and 010 represents Germany"""
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1])], remainder='passthrough')
X = np.array(ct.fit_transform(X))
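# Drop the first of the three one-hot country columns, a common way to avoid the dummy variable trap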
X = np.delete(X, 0, axis=1)
print(X) | code |
129033726/cell_20 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt #to perform different visualizations
import pandas as pd #to handle the dataframe
import seaborn as sns
data = pd.read_csv('/kaggle/input/churn-predictions-personal/Churn_Predictions.csv')
data.isnull().sum()
' It\'s always necessary to check for missing values. "NaN" is not the only form a missing value can take; missing values can also appear in other formats.\nA feature can also have 0\'s which can be treated as missing values for some particular dataset. In this case we don\'t have any missing values present\nin the dataset, which saves a lot of time'
gender_counts = data['Gender'].value_counts()
"""Using Density plot we can visualize the distribution of different age group"""
import seaborn as sns
counts = data['Geography'].value_counts()
counts = data['HasCrCard'].value_counts()
counts = data['IsActiveMember'].value_counts()
"""We can also check the top N customers based on their current account balance. Here 1 means left 0 means still with the bank"""
top_N = data.nlargest(20, 'Balance')
bottom_N = data.nsmallest(20, 'Balance')
contingency_table = pd.crosstab(data['Geography'], data['Exited'])
contingency_table = pd.crosstab(data['Gender'], data['Exited'])
contingency_table.plot(kind='bar')
plt.xlabel('Gender')
plt.ylabel('Count')
plt.title('Distribution of Gender by Churn')
plt.show() | code |
129033726/cell_55 | [
"text_plain_output_1.png"
] | from keras.layers import Dense, LeakyReLU
from keras.models import Sequential
from scipy.stats import f_oneway
from sklearn.compose import ColumnTransformer
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import numpy as np #to perform arithmetic operation
import pandas as pd #to handle the dataframe
data = pd.read_csv('/kaggle/input/churn-predictions-personal/Churn_Predictions.csv')
data.isnull().sum()
' It\'s always necessary to check for missing values. "NaN" is not the only form a missing value can take; missing values can also appear in other formats.\nA feature can also have 0\'s which can be treated as missing values for some particular dataset. In this case we don\'t have any missing values present\nin the dataset, which saves a lot of time'
"""We can also check the top N customers based on their current account balance. Here 1 means left 0 means still with the bank"""
top_N = data.nlargest(20, 'Balance')
bottom_N = data.nsmallest(20, 'Balance')
from scipy.stats import f_oneway
grouped_data = data.groupby('Exited')['Age'].apply(list)
f_statistic, p_value = f_oneway(*grouped_data)
data.corr()
X = data.iloc[:, 3:-1].values
y = data.iloc[:, -1].values
"""Here 001 represents Spain,
100 represents France,
and 010 represents Germany"""
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1])], remainder='passthrough')
X = np.array(ct.fit_transform(X))
X = np.delete(X, 0, axis=1)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
classifier = Sequential()
classifier.add(Dense(units=6, kernel_initializer='uniform'))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dense(units=6, kernel_initializer='uniform'))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
'as it is a binary classification problem, we use a single neuron in the output layer\nwith a sigmoid activation since we have only 2 categories. If we had more than 2 categories\nwe would have used the softmax activation function'
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
' Here optimizer is used as Adam a special type of Stochastic gradient descent(SGD), \nbinary_crossentropy loss function is used and accuracy metrics is considered'
classifier.fit(X_train, y_train, batch_size=10, epochs=100)
y_pred = classifier.predict(X_test)
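# Convert the predicted churn probabilities into class labels using a 0.5 threshold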
y_pred = y_pred > 0.5
from sklearn.metrics import confusion_matrix, accuracy_score
con_matrix = confusion_matrix(y_pred, y_test)
accuracy = accuracy_score(y_pred, y_test)
print(X[0])
type(X)
X_new = np.array([[1, 0, 0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]])
print(X_new)
X_new = X_new[:, 1:]
print(X_new)
X_new = sc.transform(X_new)
print(len(X_new[0]))
y_pred_one = classifier.predict(X_new)
print(y_pred_one > 0.5) | code |
129033726/cell_29 | [
"text_plain_output_1.png"
] | from scipy.stats import f_oneway
import pandas as pd #to handle the dataframe
data = pd.read_csv('/kaggle/input/churn-predictions-personal/Churn_Predictions.csv')
data.isnull().sum()
' It\'s always necessary to check for missing values. "NaN" is not the only form a missing value can take; missing values can also appear in other formats.\nA feature can also have 0\'s which can be treated as missing values for some particular dataset. In this case we don\'t have any missing values present\nin the dataset, which saves a lot of time'
"""We can also check the top N customers based on their current account balance. Here 1 means left 0 means still with the bank"""
top_N = data.nlargest(20, 'Balance')
bottom_N = data.nsmallest(20, 'Balance')
from scipy.stats import f_oneway
grouped_data = data.groupby('Exited')['Age'].apply(list)
f_statistic, p_value = f_oneway(*grouped_data)
data.corr()
X = data.iloc[:, 3:-1].values
y = data.iloc[:, -1].values
print(X) | code |
129033726/cell_65 | [
"text_plain_output_1.png"
] | """
#developing the model using Dropout
#here we will initialize dropout in each layer except the output layer as it might result into a unstable network
classifier=Sequential()
#1st hidden layer
classifier.add(Dense(units=6, kernel_initializer="uniform"))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dropout(p=0.1)) #adding dropout rate 10%. Its better to start with a small number
#2nd hidden layer
classifier.add(Dense(units=6, kernel_initializer="uniform"))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dropout(p=0.1))
#output layer
classifier.add(Dense(units=1, kernel_initializer="uniform",activation="sigmoid"))
#compiling the network
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
""" | code |
129033726/cell_61 | [
"text_plain_output_3.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.layers import Dense, LeakyReLU
from keras.models import Sequential
from scipy.stats import f_oneway
from sklearn.compose import ColumnTransformer
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import numpy as np #to perform arithmetic operation
import pandas as pd #to handle the dataframe
data = pd.read_csv('/kaggle/input/churn-predictions-personal/Churn_Predictions.csv')
data.isnull().sum()
' It\'s always necessary to check for missing values. "NaN" is not the only form a missing value can take; missing values can also appear in other formats.\nA feature can also have 0\'s which can be treated as missing values for some particular dataset. In this case we don\'t have any missing values present\nin the dataset, which saves a lot of time'
"""We can also check the top N customers based on their current account balance. Here 1 means left 0 means still with the bank"""
top_N = data.nlargest(20, 'Balance')
bottom_N = data.nsmallest(20, 'Balance')
from scipy.stats import f_oneway
grouped_data = data.groupby('Exited')['Age'].apply(list)
f_statistic, p_value = f_oneway(*grouped_data)
data.corr()
X = data.iloc[:, 3:-1].values
y = data.iloc[:, -1].values
"""Here 001 represents Spain,
100 represents France,
and 010 represents Germany"""
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1])], remainder='passthrough')
X = np.array(ct.fit_transform(X))
X = np.delete(X, 0, axis=1)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
classifier = Sequential()
classifier.add(Dense(units=6, kernel_initializer='uniform'))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dense(units=6, kernel_initializer='uniform'))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
'as it is a binary classification problem, we use a single neuron in the output layer\nwith a sigmoid activation since we have only 2 categories. If we had more than 2 categories\nwe would have used the softmax activation function'
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
' Here optimizer is used as Adam a special type of Stochastic gradient descent(SGD), \nbinary_crossentropy loss function is used and accuracy metrics is considered'
classifier.fit(X_train, y_train, batch_size=10, epochs=100)
y_pred = classifier.predict(X_test)
y_pred = y_pred > 0.5
from sklearn.metrics import confusion_matrix, accuracy_score
con_matrix = confusion_matrix(y_pred, y_test)
accuracy = accuracy_score(y_pred, y_test)
type(X)
X_new = np.array([[1, 0, 0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]])
X_new = X_new[:, 1:]
X_new = sc.transform(X_new)
y_pred_one = classifier.predict(X_new)
def ann_classifier():
classifier = Sequential()
classifier.add(Dense(units=6, kernel_initializer='uniform'))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dense(units=6, kernel_initializer='uniform'))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
return classifier
global_classifier = KerasClassifier(build_fn=ann_classifier, batch_size=32, epochs=50)
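# 5-fold cross-validation of the wrapped Keras model; n_jobs=-1 runs the folds in parallel on all available CPU cores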
accuracies = cross_val_score(estimator=global_classifier, X=X_train, y=y_train, cv=5, n_jobs=-1)
" n_jobs is to decide number of cpus to use. If it is set to -1 then it will enable parallel computation and use\nall the cpu's"
print('Mean Accuracy:', accuracies.mean())
print('Standard Deviation:', accuracies.std())
print(accuracies) | code |
129033726/cell_67 | [
"text_plain_output_1.png"
] | """
from keras.wrappers.scikit_learn import KerasClassifier #this wraps a Keras model so it can be used with scikit-learn utilities
from sklearn.model_selection import GridSearchCV
def ann_classifier(optimizer):
classifier=Sequential() #this is the local classifier
classifier.add(Dense(units=6, kernel_initializer="uniform"))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dense(units=6, kernel_initializer="uniform"))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dense(units=1, kernel_initializer="uniform",activation="sigmoid"))
classifier.compile(optimizer =optimizer , loss = 'binary_crossentropy', metrics = ['accuracy'])
return classifier
#global Classifier
global_classifier=KerasClassifier(build_fn=ann_classifier)
#declaring the parameters
parameter={'batch_size':[25,32],'epochs':[100,200],'optimizer':['adam','rmsprop']}
grid_search=GridSearchCV(estimator=global_classifier,param_grid=parameter,scoring='accuracy',cv=5, n_jobs=-1)
grid_search=grid_search.fit(X_train,y_train)
best_parameters=grid_search.best_params_
best_accuracies=grid_search.best_score_
""" | code |
129033726/cell_60 | [
"text_plain_output_1.png"
] | from keras.layers import Dense, LeakyReLU
from keras.models import Sequential
from scipy.stats import f_oneway
from sklearn.compose import ColumnTransformer
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
import numpy as np #to perform arithmetic operation
import pandas as pd #to handle the dataframe
data = pd.read_csv('/kaggle/input/churn-predictions-personal/Churn_Predictions.csv')
data.isnull().sum()
' It\'s always necessary to check for missing values. "NaN" is not the only form a missing value can take; missing values can also appear in other formats.\nA feature can also have 0\'s which can be treated as missing values for some particular dataset. In this case we don\'t have any missing values present\nin the dataset, which saves a lot of time'
"""We can also check the top N customers based on their current account balance. Here 1 means left 0 means still with the bank"""
top_N = data.nlargest(20, 'Balance')
bottom_N = data.nsmallest(20, 'Balance')
from scipy.stats import f_oneway
grouped_data = data.groupby('Exited')['Age'].apply(list)
f_statistic, p_value = f_oneway(*grouped_data)
data.corr()
X = data.iloc[:, 3:-1].values
y = data.iloc[:, -1].values
"""Here 001 represents Spain,
100 represents France,
and 010 represents Germany"""
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
ct = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1])], remainder='passthrough')
X = np.array(ct.fit_transform(X))
X = np.delete(X, 0, axis=1)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
classifier = Sequential()
classifier.add(Dense(units=6, kernel_initializer='uniform'))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dense(units=6, kernel_initializer='uniform'))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
'as it is a binary classification problem, we use a single neuron in the output layer\nwith a sigmoid activation since we have only 2 categories. If we had more than 2 categories\nwe would have used the softmax activation function'
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
' Here optimizer is used as Adam a special type of Stochastic gradient descent(SGD), \nbinary_crossentropy loss function is used and accuracy metrics is considered'
classifier.fit(X_train, y_train, batch_size=10, epochs=100)
y_pred = classifier.predict(X_test)
y_pred = y_pred > 0.5
from sklearn.metrics import confusion_matrix, accuracy_score
con_matrix = confusion_matrix(y_pred, y_test)
accuracy = accuracy_score(y_pred, y_test)
type(X)
X_new = np.array([[1, 0, 0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]])
X_new = X_new[:, 1:]
X_new = sc.transform(X_new)
y_pred_one = classifier.predict(X_new)
def ann_classifier():
classifier = Sequential()
classifier.add(Dense(units=6, kernel_initializer='uniform'))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dense(units=6, kernel_initializer='uniform'))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
return classifier
global_classifier = KerasClassifier(build_fn=ann_classifier, batch_size=32, epochs=50)
accuracies = cross_val_score(estimator=global_classifier, X=X_train, y=y_train, cv=5, n_jobs=-1)
" n_jobs is to decide number of cpus to use. If it is set to -1 then it will enable parallel computation and use\nall the cpu's" | code |
129033726/cell_19 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt #to perform different visualizations
import pandas as pd #to handle the dataframe
import seaborn as sns
data = pd.read_csv('/kaggle/input/churn-predictions-personal/Churn_Predictions.csv')
data.isnull().sum()
' It\'s always necessary to check for missing values. "NaN" is not the only form a missing value can take; missing values can also appear in other formats.\nA feature can also have 0\'s which can be treated as missing values for some particular dataset. In this case we don\'t have any missing values present\nin the dataset, which saves a lot of time'
gender_counts = data['Gender'].value_counts()
"""Using Density plot we can visualize the distribution of different age group"""
import seaborn as sns
counts = data['Geography'].value_counts()
counts = data['HasCrCard'].value_counts()
counts = data['IsActiveMember'].value_counts()
"""We can also check the top N customers based on their current account balance. Here 1 means left 0 means still with the bank"""
top_N = data.nlargest(20, 'Balance')
bottom_N = data.nsmallest(20, 'Balance')
contingency_table = pd.crosstab(data['Geography'], data['Exited'])
contingency_table.plot(kind='bar', stacked=True)
plt.xlabel('Country')
plt.ylabel('Count')
plt.title('Distribution of Geography by Churn')
plt.show() | code |
129033726/cell_49 | [
"text_plain_output_1.png"
] | from keras.layers import Dense, LeakyReLU
from keras.models import Sequential
classifier = Sequential()
classifier.add(Dense(units=6, kernel_initializer='uniform'))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dense(units=6, kernel_initializer='uniform'))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
'as it is a binary classification problem, we use a single neuron in the output layer\nwith a sigmoid activation since we have only 2 categories. If we had more than 2 categories\nwe would have used the softmax activation function'
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
' Here optimizer is used as Adam a special type of Stochastic gradient descent(SGD), \nbinary_crossentropy loss function is used and accuracy metrics is considered' | code |
129033726/cell_32 | [
"text_plain_output_1.png"
] | from scipy.stats import f_oneway
import pandas as pd #to handle the dataframe
data = pd.read_csv('/kaggle/input/churn-predictions-personal/Churn_Predictions.csv')
data.isnull().sum()
' It\'s always necessary to check for missing values. "NaN" is not the only form a missing value can take; missing values can also appear in other formats.\nA feature can also have 0\'s which can be treated as missing values for some particular dataset. In this case we don\'t have any missing values present\nin the dataset, which saves a lot of time'
"""We can also check the top N customers based on their current account balance. Here 1 means left 0 means still with the bank"""
top_N = data.nlargest(20, 'Balance')
bottom_N = data.nsmallest(20, 'Balance')
from scipy.stats import f_oneway
grouped_data = data.groupby('Exited')['Age'].apply(list)
f_statistic, p_value = f_oneway(*grouped_data)
data.corr()
X = data.iloc[:, 3:-1].values
y = data.iloc[:, -1].values
print(X) | code |
129033726/cell_51 | [
"text_plain_output_1.png"
] | from keras.layers import Dense, LeakyReLU
from keras.models import Sequential
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
classifier = Sequential()
classifier.add(Dense(units=6, kernel_initializer='uniform'))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dense(units=6, kernel_initializer='uniform'))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
'as it is a binary classification problem, we use a single neuron in the output layer\nwith a sigmoid activation since we have only 2 categories. If we had more than 2 categories\nwe would have used the softmax activation function'
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
' Here optimizer is used as Adam a special type of Stochastic gradient descent(SGD), \nbinary_crossentropy loss function is used and accuracy metrics is considered'
classifier.fit(X_train, y_train, batch_size=10, epochs=100) | code |
129033726/cell_15 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt #to perform different visualizations
import pandas as pd #to handle the dataframe
import seaborn as sns
data = pd.read_csv('/kaggle/input/churn-predictions-personal/Churn_Predictions.csv')
data.isnull().sum()
' It\'s always necessary to check for missing values. "NaN" is not the only form a missing value can take; missing values can also appear in other formats.\nA feature can also have 0\'s which can be treated as missing values for some particular dataset. In this case we don\'t have any missing values present\nin the dataset, which saves a lot of time'
gender_counts = data['Gender'].value_counts()
"""Using Density plot we can visualize the distribution of different age group"""
import seaborn as sns
counts = data['Geography'].value_counts()
counts = data['HasCrCard'].value_counts()
plt.bar(['Yes', 'No'], counts)
plt.xlabel('Have Hash Card or Not')
plt.ylabel('Count')
plt.title('Distribution')
plt.show() | code |
129033726/cell_16 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt #to perform different visualizations
import pandas as pd #to handle the dataframe
import seaborn as sns
data = pd.read_csv('/kaggle/input/churn-predictions-personal/Churn_Predictions.csv')
data.isnull().sum()
' It\'s always necessary to check for missing values. "NaN" is not the only form a missing value can take; missing values can also appear in other formats.\nA feature can also have 0\'s which can be treated as missing values for some particular dataset. In this case we don\'t have any missing values present\nin the dataset, which saves a lot of time'
gender_counts = data['Gender'].value_counts()
"""Using Density plot we can visualize the distribution of different age group"""
import seaborn as sns
counts = data['Geography'].value_counts()
counts = data['HasCrCard'].value_counts()
counts = data['IsActiveMember'].value_counts()
plt.bar(['Yes', 'No'], counts)
plt.xlabel('Active or Not')
plt.ylabel('Count')
plt.title('Distribution')
plt.show() | code |
129033726/cell_47 | [
"text_plain_output_1.png"
] | from keras.layers import Dense, LeakyReLU
from keras.models import Sequential
classifier = Sequential()
classifier.add(Dense(units=6, kernel_initializer='uniform'))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dense(units=6, kernel_initializer='uniform'))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
'as it is a binary classification problem, we use a single neuron in the output layer\nwith a sigmoid activation since we have only 2 categories. If we had more than 2 categories\nwe would have used the softmax activation function' | code |
129033726/cell_3 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | !pip install keras #integration of tensorflow and theano. Used to build DNN in an efficient way
!pip install tensorflow
!pip install theano #powerfull library to perform mathematical operation | code |
129033726/cell_17 | [
"image_output_1.png"
] | import pandas as pd #to handle the dataframe
data = pd.read_csv('/kaggle/input/churn-predictions-personal/Churn_Predictions.csv')
data.isnull().sum()
' It\'s always necessary to check for missing values. "NaN" is not the only form a missing value can take; missing values can also appear in other formats.\nA feature can also have 0\'s which can be treated as missing values for some particular dataset. In this case we don\'t have any missing values present\nin the dataset, which saves a lot of time'
"""We can also check the top N customers based on their current account balance. Here 1 means left 0 means still with the bank"""
top_N = data.nlargest(20, 'Balance')
bottom_N = data.nsmallest(20, 'Balance')
print('TOP 20 \n')
print(top_N)
print('*' * 100)
print('BOTTOM 20 \n')
print(bottom_N) | code |
129033726/cell_14 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt #to perform different visualizations
import pandas as pd #to handle the dataframe
import seaborn as sns
data = pd.read_csv('/kaggle/input/churn-predictions-personal/Churn_Predictions.csv')
data.isnull().sum()
' It\'s always necessary to check for missing values. "NaN" is not the only form a missing value can take; missing values can also appear in other formats.\nA feature can also have 0\'s which can be treated as missing values for some particular dataset. In this case we don\'t have any missing values present\nin the dataset, which saves a lot of time'
gender_counts = data['Gender'].value_counts()
"""Using Density plot we can visualize the distribution of different age group"""
import seaborn as sns
counts = data['Geography'].value_counts()
plt.pie(counts, labels=counts.index, autopct='%1.1f%%')
plt.title('Geographical Distribution')
plt.show() | code |
129033726/cell_22 | [
"image_output_1.png"
] | from scipy.stats import f_oneway
import pandas as pd #to handle the dataframe
data = pd.read_csv('/kaggle/input/churn-predictions-personal/Churn_Predictions.csv')
data.isnull().sum()
' It\'s always necessary to check for missing values. "NaN" is not the only form a missing value can take; missing values can also appear in other formats.\nA feature can also have 0\'s which can be treated as missing values for some particular dataset. In this case we don\'t have any missing values present\nin the dataset, which saves a lot of time'
"""We can also check the top N customers based on their current account balance. Here 1 means left 0 means still with the bank"""
top_N = data.nlargest(20, 'Balance')
bottom_N = data.nsmallest(20, 'Balance')
from scipy.stats import f_oneway
grouped_data = data.groupby('Exited')['Age'].apply(list)
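# One-way ANOVA: tests whether the mean Age differs between customers who exited and those who stayed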
f_statistic, p_value = f_oneway(*grouped_data)
print('F-statistic:', f_statistic)
print('P-value:', p_value) | code |
129033726/cell_53 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from keras.layers import Dense, LeakyReLU
from keras.models import Sequential
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
classifier = Sequential()
classifier.add(Dense(units=6, kernel_initializer='uniform'))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dense(units=6, kernel_initializer='uniform'))
classifier.add(LeakyReLU(alpha=0.1))
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
'as it is a binary classification problem, we use a single neuron in the output layer\nwith a sigmoid activation since we have only 2 categories. If we had more than 2 categories\nwe would have used the softmax activation function'
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
' Here optimizer is used as Adam a special type of Stochastic gradient descent(SGD), \nbinary_crossentropy loss function is used and accuracy metrics is considered'
classifier.fit(X_train, y_train, batch_size=10, epochs=100)
y_pred = classifier.predict(X_test)
y_pred = y_pred > 0.5
from sklearn.metrics import confusion_matrix, accuracy_score
con_matrix = confusion_matrix(y_pred, y_test)
accuracy = accuracy_score(y_pred, y_test)
print(con_matrix)
print(accuracy) | code |
129033726/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd #to handle the dataframe
data = pd.read_csv('/kaggle/input/churn-predictions-personal/Churn_Predictions.csv')
data.isnull().sum()
' It\'s always necessary to check for missing values. "NaN" is not the only form a missing value can take; missing values can also appear in other formats.\nA feature can also have 0\'s which can be treated as missing values for some particular dataset. In this case we don\'t have any missing values present\nin the dataset, which saves a lot of time' | code |
129033726/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt #to perform different visualizations
import pandas as pd #to handle the dataframe
data = pd.read_csv('/kaggle/input/churn-predictions-personal/Churn_Predictions.csv')
data.isnull().sum()
' It\'s always necessary to check for missing values. "NaN" is not the only form a missing value can take; missing values can also appear in other formats.\nA feature can also have 0\'s which can be treated as missing values for some particular dataset. In this case we don\'t have any missing values present\nin the dataset, which saves a lot of time'
gender_counts = data['Gender'].value_counts()
plt.pie(gender_counts, labels=gender_counts.index, autopct='%1.1f%%')
plt.title('Gender Distribution')
plt.show() | code |
326306/cell_9 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_3.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plot
import pandas
import seaborn
data = pandas.read_csv('../input/wowah_data.csv')
data.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp']
data['time'] = data['timestamp'].apply(pandas.to_datetime)
last70 = data[data['level'] == 70].groupby('char', as_index=False).last()
ding80 = data[data['level'] == 80].groupby('char', as_index=False).first()
ding80.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp', 'ding80time']
last70.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp', 'last70time']
characters = pandas.merge(ding80[['char', 'race', 'charclass', 'guild', 'ding80time']], last70[['char', 'last70time']], on='char')
mean_leveling_time = characters['leveling_time'].mean()
std_leveling_time = characters['leveling_time'].std()
characters_no_slowpokes = characters[characters['leveling_time'] - mean_leveling_time <= 3 * std_leveling_time]
plot.figure(figsize=(45, 10))
seaborn.boxplot(x='guild', y='leveling_time', data=characters_no_slowpokes) | code |
326306/cell_4 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_3.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas
data = pandas.read_csv('../input/wowah_data.csv')
data.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp']
data['time'] = data['timestamp'].apply(pandas.to_datetime)
last70 = data[data['level'] == 70].groupby('char', as_index=False).last()
ding80 = data[data['level'] == 80].groupby('char', as_index=False).first()
ding80.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp', 'ding80time']
last70.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp', 'last70time']
characters = pandas.merge(ding80[['char', 'race', 'charclass', 'guild', 'ding80time']], last70[['char', 'last70time']], on='char') | code |
326306/cell_6 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_3.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas
data = pandas.read_csv('../input/wowah_data.csv')
data.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp']
data['time'] = data['timestamp'].apply(pandas.to_datetime)
last70 = data[data['level'] == 70].groupby('char', as_index=False).last()
ding80 = data[data['level'] == 80].groupby('char', as_index=False).first()
ding80.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp', 'ding80time']
last70.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp', 'last70time']
characters = pandas.merge(ding80[['char', 'race', 'charclass', 'guild', 'ding80time']], last70[['char', 'last70time']], on='char')
mean_leveling_time = characters['leveling_time'].mean()
std_leveling_time = characters['leveling_time'].std()
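# Keep only characters whose leveling time is within 3 standard deviations above the mean (drops extreme outliers)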
characters_no_slowpokes = characters[characters['leveling_time'] - mean_leveling_time <= 3 * std_leveling_time] | code |
326306/cell_2 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_3.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas
data = pandas.read_csv('../input/wowah_data.csv')
data.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp'] | code |
326306/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas
import seaborn
import matplotlib.pyplot as plot
seaborn.set(style='darkgrid', palette='husl') | code |
326306/cell_7 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_3.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas
data = pandas.read_csv('../input/wowah_data.csv')
data.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp']
data['time'] = data['timestamp'].apply(pandas.to_datetime)
last70 = data[data['level'] == 70].groupby('char', as_index=False).last()
ding80 = data[data['level'] == 80].groupby('char', as_index=False).first()
ding80.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp', 'ding80time']
last70.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp', 'last70time']
characters = pandas.merge(ding80[['char', 'race', 'charclass', 'guild', 'ding80time']], last70[['char', 'last70time']], on='char')
characters[characters['leveling_time'].isin(characters['leveling_time'].nsmallest(10))].sort_values('leveling_time') | code |
326306/cell_8 | [
"text_plain_output_5.png",
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_4.png",
"text_plain_output_3.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas
import seaborn
data = pandas.read_csv('../input/wowah_data.csv')
data.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp']
data['time'] = data['timestamp'].apply(pandas.to_datetime)
last70 = data[data['level'] == 70].groupby('char', as_index=False).last()
ding80 = data[data['level'] == 80].groupby('char', as_index=False).first()
ding80.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp', 'ding80time']
last70.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp', 'last70time']
characters = pandas.merge(ding80[['char', 'race', 'charclass', 'guild', 'ding80time']], last70[['char', 'last70time']], on='char')
mean_leveling_time = characters['leveling_time'].mean()
std_leveling_time = characters['leveling_time'].std()
characters_no_slowpokes = characters[characters['leveling_time'] - mean_leveling_time <= 3 * std_leveling_time]
seaborn.boxplot(x='charclass', y='leveling_time', data=characters_no_slowpokes) | code |
326306/cell_3 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_3.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas
data = pandas.read_csv('../input/wowah_data.csv')
data.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp']
data['time'] = data['timestamp'].apply(pandas.to_datetime) | code |
326306/cell_10 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plot
import pandas
import seaborn
data = pandas.read_csv('../input/wowah_data.csv')
data.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp']
data['time'] = data['timestamp'].apply(pandas.to_datetime)
last70 = data[data['level'] == 70].groupby('char', as_index=False).last()
ding80 = data[data['level'] == 80].groupby('char', as_index=False).first()
ding80.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp', 'ding80time']
last70.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp', 'last70time']
characters = pandas.merge(ding80[['char', 'race', 'charclass', 'guild', 'ding80time']], last70[['char', 'last70time']], on='char')
mean_leveling_time = characters['leveling_time'].mean()
std_leveling_time = characters['leveling_time'].std()
characters_no_slowpokes = characters[characters['leveling_time'] - mean_leveling_time <= 3 * std_leveling_time]
seaborn.boxplot(x='race', y='leveling_time', data=characters_no_slowpokes) | code |
326306/cell_5 | [
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_3.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas
data = pandas.read_csv('../input/wowah_data.csv')
data.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp']
data['time'] = data['timestamp'].apply(pandas.to_datetime)
last70 = data[data['level'] == 70].groupby('char', as_index=False).last()
ding80 = data[data['level'] == 80].groupby('char', as_index=False).first()
ding80.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp', 'ding80time']
last70.columns = ['char', 'level', 'race', 'charclass', 'zone', 'guild', 'timestamp', 'last70time']
characters = pandas.merge(ding80[['char', 'race', 'charclass', 'guild', 'ding80time']], last70[['char', 'last70time']], on='char')
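# Leveling time from 70 to 80 = timestamp of the first level-80 record minus timestamp of the last level-70 record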
characters['leveling_time'] = characters['ding80time'] - characters['last70time'] | code |
17139134/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import itertools
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import os
import seaborn as sns
import itertools
sns.set(style='darkgrid')
data = pd.read_csv('../input/IMDB-Movie-Data.csv')
data.rename({'Runtime (Minutes)': 'Duration', 'Revenue (Millions)': 'Revenue'}, axis='columns', inplace=True)
for col in data.columns:
nans = pd.value_counts(data[col].isnull())
Nans = data[pd.isnull(data).any(axis=1)]
data.dropna(inplace=True)
actors = list((actor.split(',') for actor in data.Actors))
actors = list(itertools.chain.from_iterable(actors))
actors = [actor.strip(' ') for actor in actors]
actors_count = pd.value_counts(actors)
fig, ax = plt.subplots(figsize=(15, 10))
sns.heatmap(data.corr(), annot=True, fmt='.2f', linewidths=0.5, ax=ax)
plt.show() | code |
17139134/cell_23 | [
"image_output_1.png"
] | import itertools
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import os
import seaborn as sns
import itertools
sns.set(style='darkgrid')
data = pd.read_csv('../input/IMDB-Movie-Data.csv')
data.rename({'Runtime (Minutes)': 'Duration', 'Revenue (Millions)': 'Revenue'}, axis='columns', inplace=True)
for col in data.columns:
nans = pd.value_counts(data[col].isnull())
Nans = data[pd.isnull(data).any(axis=1)]
data.dropna(inplace=True)
actors = list((actor.split(',') for actor in data.Actors))
actors = list(itertools.chain.from_iterable(actors))
actors = [actor.strip(' ') for actor in actors]
actors_count = pd.value_counts(actors)
fig, ax = plt.subplots(figsize=(15, 10))
sns.heatmap(data.corr(), annot=True, fmt=".2f", linewidths=.5, ax=ax)
plt.show()
fig, axs = plt.subplots(2, 2, figsize=(25, 15))
plt.suptitle('Boxplots of Duration,Rating,Votes and Revenue', fontsize=20)
sns.boxplot(data.Duration, ax=axs[0][0], color=sns.xkcd_rgb['cerulean'])
axs[0][0].set_xlabel('Duration (Minutes)', fontsize=14)
sns.boxplot(data.Rating, ax=axs[0][1], color='r')
axs[0][1].set_xlabel('Rating', fontsize=14)
sns.boxplot(data.Votes, ax=axs[1][0], color=sns.xkcd_rgb['teal green'])
axs[1][0].set_xlabel('Votes', fontsize=14)
sns.boxplot(data.Revenue, ax=axs[1][1], color=sns.xkcd_rgb['dusty purple'])
axs[1][1].set_xlabel('Revenue in millions', fontsize=14)
plt.show() | code |
17139134/cell_20 | [
"text_plain_output_1.png"
] | import itertools
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import os
import seaborn as sns
import itertools
sns.set(style='darkgrid')
data = pd.read_csv('../input/IMDB-Movie-Data.csv')
data.rename({'Runtime (Minutes)': 'Duration', 'Revenue (Millions)': 'Revenue'}, axis='columns', inplace=True)
for col in data.columns:
nans = pd.value_counts(data[col].isnull())
Nans = data[pd.isnull(data).any(axis=1)]
data.dropna(inplace=True)
actors = list((actor.split(',') for actor in data.Actors))
actors = list(itertools.chain.from_iterable(actors))
actors = [actor.strip(' ') for actor in actors]
actors_count = pd.value_counts(actors)
sns.pairplot(data, kind='reg') | code |
17139134/cell_17 | [
"text_html_output_1.png"
] | import itertools
import pandas as pd
data = pd.read_csv('../input/IMDB-Movie-Data.csv')
data.rename({'Runtime (Minutes)': 'Duration', 'Revenue (Millions)': 'Revenue'}, axis='columns', inplace=True)
for col in data.columns:
nans = pd.value_counts(data[col].isnull())
Nans = data[pd.isnull(data).any(axis=1)]
data.dropna(inplace=True)
actors = list((actor.split(',') for actor in data.Actors))
actors = list(itertools.chain.from_iterable(actors))
actors = [actor.strip(' ') for actor in actors]
actors_count = pd.value_counts(actors)
print('There are ', len(actors), 'different actors in the dataset,after removing NaN rows') | code |
17139134/cell_24 | [
"image_output_1.png"
] | import itertools
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import os
import seaborn as sns
import itertools
sns.set(style='darkgrid')
data = pd.read_csv('../input/IMDB-Movie-Data.csv')
data.rename({'Runtime (Minutes)': 'Duration', 'Revenue (Millions)': 'Revenue'}, axis='columns', inplace=True)
for col in data.columns:
nans = pd.value_counts(data[col].isnull())
Nans = data[pd.isnull(data).any(axis=1)]
data.dropna(inplace=True)
actors = list((actor.split(',') for actor in data.Actors))
actors = list(itertools.chain.from_iterable(actors))
actors = [actor.strip(' ') for actor in actors]
actors_count = pd.value_counts(actors)
fig, ax = plt.subplots(figsize=(15, 10))
sns.heatmap(data.corr(), annot=True, fmt=".2f", linewidths=.5, ax=ax)
plt.show()
fig, axs = plt.subplots(2, 2, figsize=(25,15))
plt.suptitle('Boxplots of Duration,Rating,Votes and Revenue',fontsize=20)
sns.boxplot(data.Duration,ax=axs[0][0],color=sns.xkcd_rgb["cerulean"])
axs[0][0].set_xlabel('Duration (Minutes)',fontsize=14)
sns.boxplot(data.Rating,ax=axs[0][1],color='r')
axs[0][1].set_xlabel('Rating',fontsize=14)
sns.boxplot(data.Votes,ax=axs[1][0],color=sns.xkcd_rgb["teal green"])
axs[1][0].set_xlabel('Votes',fontsize=14)
sns.boxplot(data.Revenue,ax=axs[1][1],color=sns.xkcd_rgb["dusty purple"])
axs[1][1].set_xlabel('Revenue in millions',fontsize=14)
plt.show()
fig, axs = plt.subplots(2, 2, figsize=(25, 15))
plt.suptitle('Histograms of Duration,Rating,Votes and Revenue', fontsize=20)
sns.distplot(data.Duration, ax=axs[0][0], color=sns.xkcd_rgb['cerulean'])
axs[0][0].set_xlabel('Duration (Minutes)', fontsize=14)
sns.distplot(data.Rating, ax=axs[0][1], color='r')
axs[0][1].set_xlabel('Rating', fontsize=14)
sns.distplot(data.Votes, ax=axs[1][0], color=sns.xkcd_rgb['teal green'])
axs[1][0].set_xlabel('Votes', fontsize=14)
sns.distplot(data.Revenue, ax=axs[1][1], color=sns.xkcd_rgb['dusty purple'])
axs[1][1].set_xlabel('Revenue in millions', fontsize=14)
plt.show() | code |
17139134/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/IMDB-Movie-Data.csv')
data.rename({'Runtime (Minutes)': 'Duration', 'Revenue (Millions)': 'Revenue'}, axis='columns', inplace=True)
for col in data.columns:
nans = pd.value_counts(data[col].isnull())
data.describe(include='all') | code |
17139134/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
data = pd.read_csv('../input/IMDB-Movie-Data.csv')
data.rename({'Runtime (Minutes)': 'Duration', 'Revenue (Millions)': 'Revenue'}, axis='columns', inplace=True)
print('The dataset contains NaN values: ', data.isnull().values.any())
print('Missing values in the dataset : ', data.isnull().values.sum())
for col in data.columns:
nans = pd.value_counts(data[col].isnull())
if len(nans) > 1:
print('Column: ', col, ' , Missing values: ', nans[1]) | code |