path (string, 13-17 chars) | screenshot_names (sequence, 1-873 items) | code (string, 0-40.4k chars) | cell_type (1 class)
---|---|---|---|
106192280/cell_26 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_cust = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df_cust.columns
df_cust.isna().sum()
df_cust.isnull().sum()
df_cust_male = df_cust[df_cust['Genre'] == 'Male']
df_cust_female = df_cust[df_cust['Genre'] == 'Female']
def boxplot(frame, x, y, *args):
    '''Plot a boxplot of y grouped by x and annotate each group with its median.
    frame : dataframe to be used
    x     : dataframe column for x axis
    y     : dataframe column for y axis
    *args : positional extras, in order: palette, notch, title'''
    plt.figure(figsize=(8, 8))
    bp = sns.boxplot(data=frame, x=x, y=y, palette=args[0], notch=args[1])
    medians = frame.groupby(x)[y].median()
    vertical_offset = frame[y].median() * 0.01  # small offset so the label sits just above the median line
    for xtick, label in zip(bp.get_xticks(), bp.get_xticklabels()):
        median = medians[label.get_text()]
        bp.text(xtick, median + vertical_offset, median,
                horizontalalignment='center', size='medium', color='blue', weight='semibold')
    plt.title(args[2])
    plt.grid()
    plt.show()
sns.heatmap(df_cust.corr(), cmap='YlGnBu', annot=True)
plt.title('Correlation Coefficient Heatmap')
plt.show() | code |
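The heatmap in the cell above only covers the numeric columns, since the string column Genre cannot enter a correlation matrix directly. A minimal sketch, not part of the original notebook, of including gender by encoding it first (the Male=1/Female=0 coding mirrors the Genre_code mapping used later in the notebook):
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df['Genre_code'] = (df['Genre'] == 'Male').astype(int)  # 1 = Male, 0 = Female
corr = df.drop(columns=['CustomerID', 'Genre']).corr()
sns.heatmap(corr, cmap='YlGnBu', annot=True)
plt.title('Correlation Coefficient Heatmap (with encoded gender)')
plt.show()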
106192280/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
df_cust = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df_cust.columns | code |
106192280/cell_18 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df_cust = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df_cust.columns
df_cust.isna().sum()
df_cust.isnull().sum()
plt.figure(figsize=(6, 6))
df_cust['Genre'].value_counts().plot(kind='pie', autopct='%1.0f%%', shadow=True, explode=[0, 0.1])
plt.title('Population Distribution')
plt.show() | code |
106192280/cell_32 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_cust = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df_cust.columns
df_cust.isna().sum()
df_cust.isnull().sum()
df_cust_male = df_cust[df_cust['Genre'] == 'Male']
df_cust_female = df_cust[df_cust['Genre'] == 'Female']
def boxplot(frame, x, y, *args):
    '''Plot a boxplot of y grouped by x and annotate each group with its median.
    frame : dataframe to be used
    x     : dataframe column for x axis
    y     : dataframe column for y axis
    *args : positional extras, in order: palette, notch, title'''
    plt.figure(figsize=(8, 8))
    bp = sns.boxplot(data=frame, x=x, y=y, palette=args[0], notch=args[1])
    medians = frame.groupby(x)[y].median()
    vertical_offset = frame[y].median() * 0.01  # small offset so the label sits just above the median line
    for xtick, label in zip(bp.get_xticks(), bp.get_xticklabels()):
        median = medians[label.get_text()]
        bp.text(xtick, median + vertical_offset, median,
                horizontalalignment='center', size='medium', color='blue', weight='semibold')
    plt.title(args[2])
    plt.grid()
    plt.show()
df_genre = pd.DataFrame({'Genre': ['Female', 'Male'], 'Genre_code': [0, 1]})
df_cust = df_cust.merge(df_genre, on='Genre')
df_cust['Genre_code'].value_counts() | code |
106192280/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
df_cust = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df_cust.columns
df_cust.isna().sum()
df_cust.isnull().sum()
df_cust.head() | code |
106192280/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
df_cust = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df_cust.columns
df_cust.isna().sum()
df_cust.isnull().sum()
df_cust[['CustomerID', 'Genre']].groupby('Genre').count() | code |
106192280/cell_17 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
df_cust = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df_cust.columns
df_cust.isna().sum()
df_cust.isnull().sum()
df_cust['Genre'].value_counts() | code |
106192280/cell_35 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_cust = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df_cust.columns
df_cust.isna().sum()
df_cust.isnull().sum()
df_cust_male = df_cust[df_cust['Genre'] == 'Male']
df_cust_female = df_cust[df_cust['Genre'] == 'Female']
def boxplot(frame, x, y, *args):
    '''Plot a boxplot of y grouped by x and annotate each group with its median.
    frame : dataframe to be used
    x     : dataframe column for x axis
    y     : dataframe column for y axis
    *args : positional extras, in order: palette, notch, title'''
    plt.figure(figsize=(8, 8))
    bp = sns.boxplot(data=frame, x=x, y=y, palette=args[0], notch=args[1])
    medians = frame.groupby(x)[y].median()
    vertical_offset = frame[y].median() * 0.01  # small offset so the label sits just above the median line
    for xtick, label in zip(bp.get_xticks(), bp.get_xticklabels()):
        median = medians[label.get_text()]
        bp.text(xtick, median + vertical_offset, median,
                horizontalalignment='center', size='medium', color='blue', weight='semibold')
    plt.title(args[2])
    plt.grid()
    plt.show()
df_genre = pd.DataFrame({'Genre': ['Female', 'Male'], 'Genre_code': [0, 1]})
df_cust = df_cust.merge(df_genre, on='Genre')
df_cust.drop('Genre', axis=1, inplace=True)
df_cust.columns
df_cust.head() | code |
106192280/cell_24 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_cust = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df_cust.columns
df_cust.isna().sum()
df_cust.isnull().sum()
df_cust_male = df_cust[df_cust['Genre'] == 'Male']
df_cust_female = df_cust[df_cust['Genre'] == 'Female']
def boxplot(frame, x, y, *args):
    '''Plot a boxplot of y grouped by x and annotate each group with its median.
    frame : dataframe to be used
    x     : dataframe column for x axis
    y     : dataframe column for y axis
    *args : positional extras, in order: palette, notch, title'''
    plt.figure(figsize=(8, 8))
    bp = sns.boxplot(data=frame, x=x, y=y, palette=args[0], notch=args[1])
    medians = frame.groupby(x)[y].median()
    vertical_offset = frame[y].median() * 0.01  # small offset so the label sits just above the median line
    for xtick, label in zip(bp.get_xticks(), bp.get_xticklabels()):
        median = medians[label.get_text()]
        bp.text(xtick, median + vertical_offset, median,
                horizontalalignment='center', size='medium', color='blue', weight='semibold')
    plt.title(args[2])
    plt.grid()
    plt.show()
boxplot(df_cust, 'Genre', 'Annual Income (k$)', 'autumn', False, 'Annual Income distribution of Male and Female') | code |
106192280/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
df_cust = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df_cust.columns
df_cust.isna().sum()
df_cust.isnull().sum()
df_cust['Genre'].value_counts().index[0] | code |
106192280/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
df_cust = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df_cust.columns
df_cust.isna().sum()
df_cust.isnull().sum() | code |
106192280/cell_37 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_cust = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df_cust.columns
df_cust.isna().sum()
df_cust.isnull().sum()
df_cust_male = df_cust[df_cust['Genre'] == 'Male']
df_cust_female = df_cust[df_cust['Genre'] == 'Female']
def boxplot(frame, x, y, *args):
    '''Plot a boxplot of y grouped by x and annotate each group with its median.
    frame : dataframe to be used
    x     : dataframe column for x axis
    y     : dataframe column for y axis
    *args : positional extras, in order: palette, notch, title'''
    plt.figure(figsize=(8, 8))
    bp = sns.boxplot(data=frame, x=x, y=y, palette=args[0], notch=args[1])
    medians = frame.groupby(x)[y].median()
    vertical_offset = frame[y].median() * 0.01  # small offset so the label sits just above the median line
    for xtick, label in zip(bp.get_xticks(), bp.get_xticklabels()):
        median = medians[label.get_text()]
        bp.text(xtick, median + vertical_offset, median,
                horizontalalignment='center', size='medium', color='blue', weight='semibold')
    plt.title(args[2])
    plt.grid()
    plt.show()
df_genre = pd.DataFrame({'Genre': ['Female', 'Male'], 'Genre_code': [0, 1]})
df_cust = df_cust.merge(df_genre, on='Genre')
df_cust.drop('Genre', axis=1, inplace=True)
df_cust.columns
df_cust.drop('CustomerID', axis=1, inplace=True)
df_cust.head() | code |
106192280/cell_12 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
df_cust = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df_cust.columns
df_cust.isna().sum()
df_cust.isnull().sum()
np.mean(df_cust)  # per-column means of the numeric columns; on recent pandas prefer df_cust.mean(numeric_only=True) | code |
106192280/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
df_cust = pd.read_csv('../input/customer-segmentation-dataset/Mall_Customers.csv')
df_cust.info() | code |
18102745/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
col = ['LotArea', 'SalePrice']
two = data[col]
features = ['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath', 'FullBath', 'HalfBath', 'BedroomAbvGr', 'KitchenAbvGr', 'TotRmsAbvGrd', 'GarageYrBlt', 'GarageCars', 'GarageArea', 'OpenPorchSF', 'MoSold', 'YrSold', 'SalePrice']
data = data[features]
data = data.dropna(axis=0)
X = data.drop(['SalePrice'], axis=1)
y = data.SalePrice
y.head() | code |
18102745/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
print(data.columns)
col = ['LotArea', 'SalePrice']
two = data[col] | code |
18102745/cell_11 | [
"text_html_output_1.png"
] | from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
col = ['LotArea', 'SalePrice']
two = data[col]
features = ['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath', 'FullBath', 'HalfBath', 'BedroomAbvGr', 'KitchenAbvGr', 'TotRmsAbvGrd', 'GarageYrBlt', 'GarageCars', 'GarageArea', 'OpenPorchSF', 'MoSold', 'YrSold', 'SalePrice']
data = data[features]
data = data.dropna(axis=0)
X = data.drop(['SalePrice'], axis=1)
y = data.SalePrice
y.count()
from sklearn.tree import DecisionTreeRegressor
model = DecisionTreeRegressor(random_state=1)
model.fit(X, y) | code |
18102745/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
col = ['LotArea', 'SalePrice']
two = data[col]
features = ['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath', 'FullBath', 'HalfBath', 'BedroomAbvGr', 'KitchenAbvGr', 'TotRmsAbvGrd', 'GarageYrBlt', 'GarageCars', 'GarageArea', 'OpenPorchSF', 'MoSold', 'YrSold', 'SalePrice']
data = data[features]
data = data.dropna(axis=0)
X = data.drop(['SalePrice'], axis=1)
y = data.SalePrice
X.head() | code |
18102745/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
col = ['LotArea', 'SalePrice']
two = data[col]
data.head() | code |
18102745/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
col = ['LotArea', 'SalePrice']
two = data[col]
features = ['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath', 'FullBath', 'HalfBath', 'BedroomAbvGr', 'KitchenAbvGr', 'TotRmsAbvGrd', 'GarageYrBlt', 'GarageCars', 'GarageArea', 'OpenPorchSF', 'MoSold', 'YrSold', 'SalePrice']
data = data[features]
data = data.dropna(axis=0)
X = data.drop(['SalePrice'], axis=1)
y = data.SalePrice
y.count() | code |
18102745/cell_12 | [
"text_html_output_1.png"
] | from sklearn.tree import DecisionTreeRegressor
import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
col = ['LotArea', 'SalePrice']
two = data[col]
features = ['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath', 'FullBath', 'HalfBath', 'BedroomAbvGr', 'KitchenAbvGr', 'TotRmsAbvGrd', 'GarageYrBlt', 'GarageCars', 'GarageArea', 'OpenPorchSF', 'MoSold', 'YrSold', 'SalePrice']
data = data[features]
data = data.dropna(axis=0)
X = data.drop(['SalePrice'], axis=1)
y = data.SalePrice
y.count()
from sklearn.tree import DecisionTreeRegressor
model = DecisionTreeRegressor(random_state=1)
model.fit(X, y)
print('Making predictions for the following 5 houses:')
print(X.head())
print('The predictions are')
print(model.predict(X.head())) | code |
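The cell above fits the tree on every row and then predicts on those same rows, so the printed predictions say nothing about generalisation. A minimal sketch, reusing X and y from the cell above, of a hold-out evaluation; the split itself is not part of the original notebook:
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
val_model = DecisionTreeRegressor(random_state=1)
val_model.fit(train_X, train_y)
print('Validation MAE:', mean_absolute_error(val_y, val_model.predict(val_X)))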
18102745/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd
main_file_path = '../input/train.csv'
data = pd.read_csv(main_file_path)
col = ['LotArea', 'SalePrice']
two = data[col]
features = ['MSSubClass', 'LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'YearBuilt', 'YearRemodAdd', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath', 'FullBath', 'HalfBath', 'BedroomAbvGr', 'KitchenAbvGr', 'TotRmsAbvGrd', 'GarageYrBlt', 'GarageCars', 'GarageArea', 'OpenPorchSF', 'MoSold', 'YrSold', 'SalePrice']
data = data[features]
data.describe() | code |
73066361/cell_4 | [
"text_html_output_1.png"
] | from sklearn import model_selection
import pandas as pd
import numpy as np
import pandas as pd
from sklearn import model_selection
train = pd.read_csv('../input/30daysml/train.csv/train.csv')
train['kfold'] = -1
kf = model_selection.KFold(n_splits=10, shuffle=True, random_state=0)
for fold, (train_indicies, valid_indicies) in enumerate(kf.split(X=train)):
train.loc[valid_indicies, 'kfold'] = fold
train.head() | code |
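The kfold column written above is typically consumed by filtering the frame once per fold. A short sketch, illustrative only and reusing train from the cell above, of extracting the training and validation frames for a single fold:
fold = 0
df_train = train[train['kfold'] != fold].reset_index(drop=True)
df_valid = train[train['kfold'] == fold].reset_index(drop=True)
print(df_train.shape, df_valid.shape)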
73066361/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd
import numpy as np
import pandas as pd
from sklearn import model_selection
train = pd.read_csv('../input/30daysml/train.csv/train.csv')
train['kfold'] = -1
train.head() | code |
73066361/cell_1 | [
"text_html_output_1.png"
] | import pandas as pd
import numpy as np
import pandas as pd
from sklearn import model_selection
train = pd.read_csv('../input/30daysml/train.csv/train.csv')
train.head() | code |
121154736/cell_11 | [
"text_plain_output_1.png"
] | from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
import cv2
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
df = pd.read_csv('/kaggle/input/sports-classification/sports.csv')
test = df[df['data set'] == 'test']
df = df[df['data set'] == 'train']
df = df.sample(frac=1)
data_augmentation = tf.keras.Sequential([layers.RandomFlip('horizontal_and_vertical'), layers.RandomRotation(0.2)])
model = Sequential([keras.layers.InputLayer(input_shape=(224, 224, 3)), data_augmentation, keras.layers.Conv2D(64, 3, activation='relu'), keras.layers.MaxPooling2D(), keras.layers.Conv2D(32, 3, activation='relu'), keras.layers.MaxPooling2D(), keras.layers.Conv2D(16, 3, activation='relu'), keras.layers.MaxPooling2D(), keras.layers.Flatten(), keras.layers.Dense(200, activation='relu'), keras.layers.Dense(100, activation='sigmoid')])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
def get_pic(filepath):
img = cv2.imread('/kaggle/input/sports-classification/' + filepath)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
d = {}
u = list(df['labels'].unique())
for i, j in enumerate(u):
d[j] = i
x_test = list(test['filepaths'].apply(get_pic))
y_test = list(test['labels'].apply(lambda x: d[x]))
y_test = np.array(y_test)
x_test = np.array(x_test) / 255
img_loc = []
y_train = []
for i in range(100):
start = 0
temp_df = df.sample(1000)
img_loc = list(temp_df['filepaths'].apply(get_pic))
y_train = list(temp_df['labels'].apply(lambda x: d[x]))
y_train = np.array(y_train)
img_loc = np.array(img_loc) / 255
model.fit(img_loc, y_train, epochs=1, batch_size=64) | code |
121154736/cell_12 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
import cv2
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
df = pd.read_csv('/kaggle/input/sports-classification/sports.csv')
test = df[df['data set'] == 'test']
df = df[df['data set'] == 'train']
df = df.sample(frac=1)
data_augmentation = tf.keras.Sequential([layers.RandomFlip('horizontal_and_vertical'), layers.RandomRotation(0.2)])
model = Sequential([keras.layers.InputLayer(input_shape=(224, 224, 3)), data_augmentation, keras.layers.Conv2D(64, 3, activation='relu'), keras.layers.MaxPooling2D(), keras.layers.Conv2D(32, 3, activation='relu'), keras.layers.MaxPooling2D(), keras.layers.Conv2D(16, 3, activation='relu'), keras.layers.MaxPooling2D(), keras.layers.Flatten(), keras.layers.Dense(200, activation='relu'), keras.layers.Dense(100, activation='sigmoid')])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
def get_pic(filepath):
img = cv2.imread('/kaggle/input/sports-classification/' + filepath)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
d = {}
u = list(df['labels'].unique())
for i, j in enumerate(u):
d[j] = i
x_test = list(test['filepaths'].apply(get_pic))
y_test = list(test['labels'].apply(lambda x: d[x]))
y_test = np.array(y_test)
x_test = np.array(x_test) / 255
img_loc = []
y_train = []
for i in range(100):
start = 0
temp_df = df.sample(1000)
img_loc = list(temp_df['filepaths'].apply(get_pic))
y_train = list(temp_df['labels'].apply(lambda x: d[x]))
y_train = np.array(y_train)
img_loc = np.array(img_loc) / 255
model.fit(img_loc, y_train, epochs=1, batch_size=64)
model.evaluate(x_test, y_test) | code |
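The network above ends in Dense(100, activation='sigmoid') while being compiled with sparse_categorical_crossentropy; for a 100-class problem the conventional pairing is a softmax output (or a linear output with from_logits=True). A minimal sketch of that alternative head, offered as an assumption rather than the author's design:
from tensorflow import keras
head = keras.Sequential([
    keras.layers.Dense(200, activation='relu'),
    keras.layers.Dense(100, activation='softmax'),  # class probabilities summing to 1
])
head.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])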
34134672/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.drop(['Name'], axis=1, inplace=True)
train.drop(['Cabin'], axis=1, inplace=True)
train.drop(['Ticket'], axis=1, inplace=True)
test.drop(['Name'], axis=1, inplace=True)
test.drop(['Cabin'], axis=1, inplace=True)
test.drop(['Ticket'], axis=1, inplace=True)
train = train[train['Embarked'].notna()]
train = train[train['Fare'] < 300]
train_clean = train.dropna(thresh=train.shape[1] - 1)
print(str(train.shape[0] - train_clean.shape[0]) + ' rows deleted in train')
train = train_clean | code |
34134672/cell_19 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.drop(['Name'], axis=1, inplace=True)
train.drop(['Cabin'], axis=1, inplace=True)
train.drop(['Ticket'], axis=1, inplace=True)
test.drop(['Name'], axis=1, inplace=True)
test.drop(['Cabin'], axis=1, inplace=True)
test.drop(['Ticket'], axis=1, inplace=True)
train = train[train['Embarked'].notna()]
train = train[train['Fare'] < 300]
train_clean = train.dropna(thresh=train.shape[1] - 1)
train = train_clean
ids = train['PassengerId']
train.drop(['PassengerId'], axis=1, inplace=True)
from sklearn.ensemble import AdaBoostClassifier
ids_test = test['PassengerId']
test.drop(['PassengerId'], axis=1, inplace=True)
labels = train['Survived']
train.drop(['Survived'], axis=1, inplace=True)
train['Sex'] = pd.factorize(train['Sex'])[0]
train['Embarked'] = pd.factorize(train['Embarked'])[0]
test['Sex'] = pd.factorize(test['Sex'])[0]
test['Embarked'] = pd.factorize(test['Embarked'])[0]
dummy_columns = ['Sex', 'Pclass', 'Embarked']
for column in dummy_columns:
just_dummies = pd.get_dummies(train[column])
train = pd.concat([train, just_dummies], axis=1)
train = train.drop([column], axis=1)
for column in dummy_columns:
just_dummies = pd.get_dummies(test[column])
test = pd.concat([test, just_dummies], axis=1)
test = test.drop([column], axis=1)
clf = RandomForestClassifier(max_depth=25, random_state=42, min_samples_leaf=5, n_estimators=25)
scores = cross_val_score(clf, train, labels, cv=5)
scores.mean()
clf.fit(train, labels)  # cross_val_score above fits clones of the estimator, so clf itself is still unfitted
scores = clf.predict(test)
len(scores) | code |
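The predictions computed above are never written out in the captured cells. A minimal sketch of turning them into the standard Titanic submission file, reusing ids_test and scores from the cell above; the two-column PassengerId/Survived layout is the competition's standard format, while the file name is arbitrary:
submission = pd.DataFrame({'PassengerId': ids_test, 'Survived': scores.astype(int)})
submission.to_csv('submission.csv', index=False)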
34134672/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
34134672/cell_18 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.drop(['Name'], axis=1, inplace=True)
train.drop(['Cabin'], axis=1, inplace=True)
train.drop(['Ticket'], axis=1, inplace=True)
test.drop(['Name'], axis=1, inplace=True)
test.drop(['Cabin'], axis=1, inplace=True)
test.drop(['Ticket'], axis=1, inplace=True)
train = train[train['Embarked'].notna()]
train = train[train['Fare'] < 300]
train_clean = train.dropna(thresh=train.shape[1] - 1)
train = train_clean
ids = train['PassengerId']
train.drop(['PassengerId'], axis=1, inplace=True)
from sklearn.ensemble import AdaBoostClassifier
ids_test = test['PassengerId']
test.drop(['PassengerId'], axis=1, inplace=True)
labels = train['Survived']
train.drop(['Survived'], axis=1, inplace=True)
train['Sex'] = pd.factorize(train['Sex'])[0]
train['Embarked'] = pd.factorize(train['Embarked'])[0]
test['Sex'] = pd.factorize(test['Sex'])[0]
test['Embarked'] = pd.factorize(test['Embarked'])[0]
dummy_columns = ['Sex', 'Pclass', 'Embarked']
for column in dummy_columns:
just_dummies = pd.get_dummies(train[column])
train = pd.concat([train, just_dummies], axis=1)
train = train.drop([column], axis=1)
for column in dummy_columns:
just_dummies = pd.get_dummies(test[column])
test = pd.concat([test, just_dummies], axis=1)
test = test.drop([column], axis=1)
clf = RandomForestClassifier(max_depth=25, random_state=42, min_samples_leaf=5, n_estimators=25)
clf.fit(train, labels)  # the classifier is only constructed above; it must be fitted before predicting
scores = clf.predict(test) | code |
34134672/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.drop(['Name'], axis=1, inplace=True)
train.drop(['Cabin'], axis=1, inplace=True)
train.drop(['Ticket'], axis=1, inplace=True)
test.drop(['Name'], axis=1, inplace=True)
test.drop(['Cabin'], axis=1, inplace=True)
test.drop(['Ticket'], axis=1, inplace=True)
train = train[train['Embarked'].notna()]
train = train[train['Fare'] < 300]
train_clean = train.dropna(thresh=train.shape[1] - 1)
train = train_clean
ids = train['PassengerId']
train.drop(['PassengerId'], axis=1, inplace=True)
from sklearn.ensemble import AdaBoostClassifier
ids_test = test['PassengerId']
test.drop(['PassengerId'], axis=1, inplace=True)
labels = train['Survived']
train.drop(['Survived'], axis=1, inplace=True)
train['Sex'] = pd.factorize(train['Sex'])[0]
train['Embarked'] = pd.factorize(train['Embarked'])[0]
test['Sex'] = pd.factorize(test['Sex'])[0]
test['Embarked'] = pd.factorize(test['Embarked'])[0]
dummy_columns = ['Sex', 'Pclass', 'Embarked']
for column in dummy_columns:
just_dummies = pd.get_dummies(train[column])
train = pd.concat([train, just_dummies], axis=1)
train = train.drop([column], axis=1)
for column in dummy_columns:
just_dummies = pd.get_dummies(test[column])
test = pd.concat([test, just_dummies], axis=1)
test = test.drop([column], axis=1)
clf = RandomForestClassifier(max_depth=25, random_state=42, min_samples_leaf=5, n_estimators=25)
scores = cross_val_score(clf, train, labels, cv=5)
scores.mean() | code |
34134672/cell_17 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestClassifier
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.drop(['Name'], axis=1, inplace=True)
train.drop(['Cabin'], axis=1, inplace=True)
train.drop(['Ticket'], axis=1, inplace=True)
test.drop(['Name'], axis=1, inplace=True)
test.drop(['Cabin'], axis=1, inplace=True)
test.drop(['Ticket'], axis=1, inplace=True)
train = train[train['Embarked'].notna()]
train = train[train['Fare'] < 300]
train_clean = train.dropna(thresh=train.shape[1] - 1)
train = train_clean
ids = train['PassengerId']
train.drop(['PassengerId'], axis=1, inplace=True)
from sklearn.ensemble import AdaBoostClassifier
ids_test = test['PassengerId']
test.drop(['PassengerId'], axis=1, inplace=True)
labels = train['Survived']
train.drop(['Survived'], axis=1, inplace=True)
train['Sex'] = pd.factorize(train['Sex'])[0]
train['Embarked'] = pd.factorize(train['Embarked'])[0]
test['Sex'] = pd.factorize(test['Sex'])[0]
test['Embarked'] = pd.factorize(test['Embarked'])[0]
dummy_columns = ['Sex', 'Pclass', 'Embarked']
for column in dummy_columns:
just_dummies = pd.get_dummies(train[column])
train = pd.concat([train, just_dummies], axis=1)
train = train.drop([column], axis=1)
for column in dummy_columns:
just_dummies = pd.get_dummies(test[column])
test = pd.concat([test, just_dummies], axis=1)
test = test.drop([column], axis=1)
clf = RandomForestClassifier(max_depth=25, random_state=42, min_samples_leaf=5, n_estimators=25)
clf.fit(train, labels)  # fit before scoring
print(clf.score(train, labels))
print(clf.score(X_test, y_test))  # assumes X_test, y_test from a hold-out split made in a cell not shown here | code |
34134672/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.drop(['Name'], axis=1, inplace=True)
train.drop(['Cabin'], axis=1, inplace=True)
train.drop(['Ticket'], axis=1, inplace=True)
test.drop(['Name'], axis=1, inplace=True)
test.drop(['Cabin'], axis=1, inplace=True)
test.drop(['Ticket'], axis=1, inplace=True)
train = train[train['Embarked'].notna()]
train = train[train['Fare'] < 300]
train_clean = train.dropna(thresh=train.shape[1] - 1)
train = train_clean
ids = train['PassengerId']
train.drop(['PassengerId'], axis=1, inplace=True)
from sklearn.ensemble import AdaBoostClassifier
ids_test = test['PassengerId']
test.drop(['PassengerId'], axis=1, inplace=True)
labels = train['Survived']
train.drop(['Survived'], axis=1, inplace=True)
train['Sex'] = pd.factorize(train['Sex'])[0]
train['Embarked'] = pd.factorize(train['Embarked'])[0]
test['Sex'] = pd.factorize(test['Sex'])[0]
test['Embarked'] = pd.factorize(test['Embarked'])[0]
dummy_columns = ['Sex', 'Pclass', 'Embarked']
for column in dummy_columns:
just_dummies = pd.get_dummies(train[column])
train = pd.concat([train, just_dummies], axis=1)
train = train.drop([column], axis=1)
for column in dummy_columns:
just_dummies = pd.get_dummies(test[column])
test = pd.concat([test, just_dummies], axis=1)
test = test.drop([column], axis=1)
train.info() | code |
34134672/cell_12 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.drop(['Name'], axis=1, inplace=True)
train.drop(['Cabin'], axis=1, inplace=True)
train.drop(['Ticket'], axis=1, inplace=True)
test.drop(['Name'], axis=1, inplace=True)
test.drop(['Cabin'], axis=1, inplace=True)
test.drop(['Ticket'], axis=1, inplace=True)
train = train[train['Embarked'].notna()]
train = train[train['Fare'] < 300]
train_clean = train.dropna(thresh=train.shape[1] - 1)
train = train_clean
ids = train['PassengerId']
train.drop(['PassengerId'], axis=1, inplace=True)
from sklearn.ensemble import AdaBoostClassifier
ids_test = test['PassengerId']
test.drop(['PassengerId'], axis=1, inplace=True)
labels = train['Survived']
train.drop(['Survived'], axis=1, inplace=True)
train['Sex'] = pd.factorize(train['Sex'])[0]
train['Embarked'] = pd.factorize(train['Embarked'])[0]
test['Sex'] = pd.factorize(test['Sex'])[0]
test['Embarked'] = pd.factorize(test['Embarked'])[0]
dummy_columns = ['Sex', 'Pclass', 'Embarked']
for column in dummy_columns:
just_dummies = pd.get_dummies(train[column])
train = pd.concat([train, just_dummies], axis=1)
train = train.drop([column], axis=1)
for column in dummy_columns:
just_dummies = pd.get_dummies(test[column])
test = pd.concat([test, just_dummies], axis=1)
test = test.drop([column], axis=1)
scalerStd = StandardScaler()
scalerStd.fit(train)
scalerStd.transform(train) | code |
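scalerStd.transform(train) returns a new NumPy array rather than modifying train in place, and the cell above discards that result. A short sketch of keeping the scaled data and applying the same fitted scaler to the test frame; the assignment names are illustrative:
train_scaled = scalerStd.transform(train)  # standardised features, same row order as train
test_scaled = scalerStd.transform(test)    # reuse the scaler fitted on train; do not refit on test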
16167842/cell_4 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import os
data_dir = '../input/champs-scalar-coupling' if 'champs-scalar-coupling' in os.listdir('../input/') else '../input'
train = pd.read_csv(f'{data_dir}/train.csv')
test = pd.read_csv(f'{data_dir}/test.csv')
sub = pd.read_csv(f'{data_dir}/sample_submission.csv')
structures = pd.read_csv(f'{data_dir}/structures.csv')
are_the_same_types = np.all(sorted(train['type'].unique()) == sorted(test['type'].unique()))
train.head() | code |
16167842/cell_6 | [
"image_output_5.png",
"image_output_7.png",
"image_output_4.png",
"image_output_8.png",
"image_output_6.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import os
data_dir = '../input/champs-scalar-coupling' if 'champs-scalar-coupling' in os.listdir('../input/') else '../input'
train = pd.read_csv(f'{data_dir}/train.csv')
test = pd.read_csv(f'{data_dir}/test.csv')
sub = pd.read_csv(f'{data_dir}/sample_submission.csv')
structures = pd.read_csv(f'{data_dir}/structures.csv')
are_the_same_types = np.all(sorted(train['type'].unique()) == sorted(test['type'].unique()))
for type_ in np.unique(train['type']):
    ix_train = train['type'] == type_
    ix_test = test['type'] == type_  # build the mask on test itself, not on train
    fig, axes = plt.subplots(2, 2, figsize=(12, 4))
    axes = axes.flatten()
    _ = axes[0].hist(x=train['atom_index_0'][ix_train], bins=50)
    _ = axes[1].hist(x=test['atom_index_0'][ix_test], bins=50)
    _ = axes[2].hist(x=train['atom_index_1'][ix_train], bins=50)
    _ = axes[3].hist(x=test['atom_index_1'][ix_test], bins=50)
    axes[0].set(xlabel='atom_index_0', ylabel='count', title=f'{type_}, train')
    axes[1].set(xlabel='atom_index_0', ylabel='count', title=f'{type_}, test')
    axes[2].set(xlabel='atom_index_1', ylabel='count', title=f'{type_}, train')
    axes[3].set(xlabel='atom_index_1', ylabel='count', title=f'{type_}, test')
    fig.tight_layout() | code |
16167842/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import os
print(os.listdir('../input'))
data_dir = '../input/champs-scalar-coupling' if 'champs-scalar-coupling' in os.listdir('../input/') else '../input' | code |
16167842/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import os
data_dir = '../input/champs-scalar-coupling' if 'champs-scalar-coupling' in os.listdir('../input/') else '../input'
train = pd.read_csv(f'{data_dir}/train.csv')
test = pd.read_csv(f'{data_dir}/test.csv')
sub = pd.read_csv(f'{data_dir}/sample_submission.csv')
structures = pd.read_csv(f'{data_dir}/structures.csv')
are_the_same_types = np.all(sorted(train['type'].unique()) == sorted(test['type'].unique()))
train['atom_index_1'].hist(bins=50) | code |
16167842/cell_3 | [
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import os
data_dir = '../input/champs-scalar-coupling' if 'champs-scalar-coupling' in os.listdir('../input/') else '../input'
train = pd.read_csv(f'{data_dir}/train.csv')
test = pd.read_csv(f'{data_dir}/test.csv')
sub = pd.read_csv(f'{data_dir}/sample_submission.csv')
structures = pd.read_csv(f'{data_dir}/structures.csv')
print('train shape', train.shape)
print('test shape', test.shape)
print('structures shape', structures.shape)
print('sub', sub.shape)
print('train cols', list(train.columns))
print('test cols', list(test.columns))
print('structures cols', list(structures.columns))
print('structures atoms', list(np.unique(structures['atom'])))
print('')
print(f"There are {train['molecule_name'].nunique()} distinct molecules in train data.")
print(f"There are {test['molecule_name'].nunique()} distinct molecules in test data.")
print(f"There are {structures['atom'].nunique()} unique atoms in structures")
print(f"There are {train['type'].nunique()} unique types in train")
print(f"There are {test['type'].nunique()} unique types in test")
are_the_same_types = np.all(sorted(train['type'].unique()) == sorted(test['type'].unique()))
print(f'Are all types in train and test the same? {are_the_same_types}') | code |
16167842/cell_5 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import os
data_dir = '../input/champs-scalar-coupling' if 'champs-scalar-coupling' in os.listdir('../input/') else '../input'
train = pd.read_csv(f'{data_dir}/train.csv')
test = pd.read_csv(f'{data_dir}/test.csv')
sub = pd.read_csv(f'{data_dir}/sample_submission.csv')
structures = pd.read_csv(f'{data_dir}/structures.csv')
are_the_same_types = np.all(sorted(train['type'].unique()) == sorted(test['type'].unique()))
structures.head() | code |
74058130/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/segment-road/Train (1).csv')
test = pd.read_csv('../input/segment-road/Test (1).csv')
sub = pd.read_csv('../input/segment-road/SampleSubmission.csv')
train.head() | code |
74058130/cell_18 | [
"text_plain_output_1.png"
] | from kaggle_datasets import KaggleDatasets
from keras.applications import VGG19,ResNet50,Xception,InceptionResNetV2,InceptionV3,ResNet152V2
import efficientnet.tfkeras as efn
import numpy as np
import os
import random  # needed by seed_everything below
import pandas as pd
import tensorflow as tf
def seed_everything(seed=0):
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
seed = 42
train = pd.read_csv('../input/segment-road/Train (1).csv')
test = pd.read_csv('../input/segment-road/Test (1).csv')
sub = pd.read_csv('../input/segment-road/SampleSubmission.csv')
def auto_select_accelerator():
"""
Reference:
* https://www.kaggle.com/mgornergoogle/getting-started-with-100-flowers-on-tpu
* https://www.kaggle.com/xhlulu/ranzcr-efficientnet-tpu-training
"""
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
except ValueError:
strategy = tf.distribute.get_strategy()
return strategy
def build_decoder(with_labels=True, target_size=(256, 256), ext='png'):
def decode(path):
file_bytes = tf.io.read_file(path)
if ext == 'png':
img = tf.image.decode_png(file_bytes, channels=3)
elif ext in ['jpg', 'jpeg']:
img = tf.image.decode_jpeg(file_bytes, channels=3)
else:
raise ValueError('Image extension not supported')
img = tf.cast(img, tf.float32) / 255.0
img = tf.image.resize(img, target_size)
return img
def decode_with_labels(path, label):
return (decode(path), label)
return decode_with_labels if with_labels else decode
def build_augmenter(with_labels=True):
def augment(image):
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
image = tf.image.random_brightness(image, max_delta=0.1)
image = tf.image.random_saturation(image, lower=0.75, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.15)
image = tf.image.random_contrast(image, lower=0.75, upper=1.5)
image = tf.image.rot90(image)
image = tf.image.transpose(image)
return image
def augment_with_labels(img, label):
return (augment(img), label)
return augment_with_labels if with_labels else augment
def build_dataset(paths, labels=None, bsize=32, cache=True, decode_fn=None, augment_fn=None, augment=True, repeat=True, shuffle=1024, cache_dir=''):
if cache_dir != '' and cache is True:
os.makedirs(cache_dir, exist_ok=True)
if decode_fn is None:
decode_fn = build_decoder(labels is not None)
if augment_fn is None:
augment_fn = build_augmenter(labels is not None)
AUTO = tf.data.experimental.AUTOTUNE
slices = paths if labels is None else (paths, labels)
dset = tf.data.Dataset.from_tensor_slices(slices)
dset = dset.map(decode_fn, num_parallel_calls=AUTO)
dset = dset.cache(cache_dir) if cache else dset
dset = dset.map(augment_fn, num_parallel_calls=AUTO) if augment else dset
dset = dset.repeat() if repeat else dset
dset = dset.shuffle(shuffle) if shuffle else dset
dset = dset.batch(bsize).prefetch(AUTO)
return dset
COMPETITION_NAME = 'road-segment-dataset'
strategy = auto_select_accelerator()
BATCH_SIZE = strategy.num_replicas_in_sync * 8
GCS_DS_PATH = KaggleDatasets().get_gcs_path(COMPETITION_NAME)
paths = []
labels = []
for i in range(len(train['file_path'].values)):
name = train['Image_ID'].values[i]
label = train['Target'].values[i]
paths.append(GCS_DS_PATH + '/Images/' + name + '.png')
labels.append(label)
paths = np.array(paths)
labels = np.array(labels)
def get_model(name):
if name == 'effnetb0':
with strategy.scope():
base_model = efn.EfficientNetB0(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'effnetb1':
with strategy.scope():
base_model = efn.EfficientNetB1(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'effnetb2':
with strategy.scope():
base_model = efn.EfficientNetB2(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'effnetb3':
with strategy.scope():
base_model = efn.EfficientNetB3(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'effnetb4':
with strategy.scope():
base_model = efn.EfficientNetB4(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'effnetb5':
with strategy.scope():
base_model = efn.EfficientNetB5(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'effnetb6':
with strategy.scope():
base_model = efn.EfficientNetB6(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'effnetb7':
with strategy.scope():
base_model = efn.EfficientNetB7(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'resnet':
with strategy.scope():
base_model = ResNet50(input_shape=(im_size, im_size, 3), weights='imagenet', include_top=False)
elif name == 'xception':
with strategy.scope():
base_model = Xception(input_shape=(im_size, im_size, 3), weights='imagenet', include_top=False)
elif name == 'inception':
with strategy.scope():
base_model = InceptionV3(input_shape=(im_size, im_size, 3), weights='imagenet', include_top=False)
elif name == 'inceptionresnet':
with strategy.scope():
base_model = InceptionResNetV2(input_shape=(im_size, im_size, 3), weights='imagenet', include_top=False)
with strategy.scope():
model = tf.keras.Sequential([base_model, tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(1, activation='sigmoid')])
model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001), loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=0.001), metrics=[tf.keras.metrics.AUC()])
return model
test_paths = []
for i in range(len(test['file_path'].values)):
name = test['Image_ID'].values[i]
test_paths.append(GCS_DS_PATH + '/Images/' + name + '.png')
IMSIZES = (224, 240, 260, 300, 380, 456, 512, 600)
im_size = 250
decoder = build_decoder(with_labels=True, target_size=(im_size, im_size))
test_decoder = build_decoder(with_labels=False, target_size=(im_size, im_size))
train_dataset = build_dataset(train_paths, train_labels, bsize=BATCH_SIZE, decode_fn=decoder)
valid_dataset = build_dataset(valid_paths, valid_labels, bsize=BATCH_SIZE, decode_fn=decoder, repeat=False, shuffle=False, augment=False)
test_decoder = build_decoder(with_labels=False, target_size=(im_size, im_size))
dtest = build_dataset(test_paths, bsize=BATCH_SIZE, repeat=False, shuffle=False, augment=False, cache=False, decode_fn=test_decoder)
model_name = 'effnetb1'
model = get_model(model_name)
model.summary()
train_paths = np.array(train_paths)
steps_per_epoch = train_paths.shape[0] // BATCH_SIZE
checkpoint = tf.keras.callbacks.ModelCheckpoint(f'{model_name}_best_auc.h5', save_best_only=True, monitor='val_auc', mode='max')
lr_reducer = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_auc', patience=3, min_lr=1e-07, mode='max')  # val_auc should be maximised, so the plateau check needs mode='max'
history = model.fit(train_dataset, epochs=30, verbose=1, callbacks=[checkpoint, lr_reducer], steps_per_epoch=steps_per_epoch, validation_data=valid_dataset)
model.load_weights(f'./{model_name}_best_auc.h5')
preds1 = model.predict(dtest, verbose=1) | code |
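train_paths, train_labels, valid_paths and valid_labels are consumed above but never defined in the captured cells. A minimal sketch of the split they presumably come from, stated as an assumption: it applies scikit-learn's train_test_split to the paths and labels arrays built earlier, and the 80/20 ratio is a guess.
from sklearn.model_selection import train_test_split
train_paths, valid_paths, train_labels, valid_labels = train_test_split(
    paths, labels, test_size=0.2, random_state=seed, stratify=labels)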
74058130/cell_8 | [
"text_plain_output_1.png"
] | from kaggle_datasets import KaggleDatasets
import numpy as np
import os
import random  # needed by seed_everything below
import tensorflow as tf
def seed_everything(seed=0):
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
seed = 42
def auto_select_accelerator():
"""
Reference:
* https://www.kaggle.com/mgornergoogle/getting-started-with-100-flowers-on-tpu
* https://www.kaggle.com/xhlulu/ranzcr-efficientnet-tpu-training
"""
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
except ValueError:
strategy = tf.distribute.get_strategy()
return strategy
def build_decoder(with_labels=True, target_size=(256, 256), ext='png'):
def decode(path):
file_bytes = tf.io.read_file(path)
if ext == 'png':
img = tf.image.decode_png(file_bytes, channels=3)
elif ext in ['jpg', 'jpeg']:
img = tf.image.decode_jpeg(file_bytes, channels=3)
else:
raise ValueError('Image extension not supported')
img = tf.cast(img, tf.float32) / 255.0
img = tf.image.resize(img, target_size)
return img
def decode_with_labels(path, label):
return (decode(path), label)
return decode_with_labels if with_labels else decode
def build_augmenter(with_labels=True):
def augment(image):
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
image = tf.image.random_brightness(image, max_delta=0.1)
image = tf.image.random_saturation(image, lower=0.75, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.15)
image = tf.image.random_contrast(image, lower=0.75, upper=1.5)
image = tf.image.rot90(image)
image = tf.image.transpose(image)
return image
def augment_with_labels(img, label):
return (augment(img), label)
return augment_with_labels if with_labels else augment
def build_dataset(paths, labels=None, bsize=32, cache=True, decode_fn=None, augment_fn=None, augment=True, repeat=True, shuffle=1024, cache_dir=''):
if cache_dir != '' and cache is True:
os.makedirs(cache_dir, exist_ok=True)
if decode_fn is None:
decode_fn = build_decoder(labels is not None)
if augment_fn is None:
augment_fn = build_augmenter(labels is not None)
AUTO = tf.data.experimental.AUTOTUNE
slices = paths if labels is None else (paths, labels)
dset = tf.data.Dataset.from_tensor_slices(slices)
dset = dset.map(decode_fn, num_parallel_calls=AUTO)
dset = dset.cache(cache_dir) if cache else dset
dset = dset.map(augment_fn, num_parallel_calls=AUTO) if augment else dset
dset = dset.repeat() if repeat else dset
dset = dset.shuffle(shuffle) if shuffle else dset
dset = dset.batch(bsize).prefetch(AUTO)
return dset
COMPETITION_NAME = 'road-segment-dataset'
strategy = auto_select_accelerator()
BATCH_SIZE = strategy.num_replicas_in_sync * 8
GCS_DS_PATH = KaggleDatasets().get_gcs_path(COMPETITION_NAME) | code |
74058130/cell_15 | [
"text_plain_output_1.png"
] | from kaggle_datasets import KaggleDatasets
from keras.applications import VGG19,ResNet50,Xception,InceptionResNetV2,InceptionV3,ResNet152V2
import efficientnet.tfkeras as efn
import numpy as np
import os
import random  # needed by seed_everything below
import pandas as pd
import tensorflow as tf
def seed_everything(seed=0):
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
seed = 42
train = pd.read_csv('../input/segment-road/Train (1).csv')
test = pd.read_csv('../input/segment-road/Test (1).csv')
sub = pd.read_csv('../input/segment-road/SampleSubmission.csv')
def auto_select_accelerator():
"""
Reference:
* https://www.kaggle.com/mgornergoogle/getting-started-with-100-flowers-on-tpu
* https://www.kaggle.com/xhlulu/ranzcr-efficientnet-tpu-training
"""
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
except ValueError:
strategy = tf.distribute.get_strategy()
return strategy
def build_decoder(with_labels=True, target_size=(256, 256), ext='png'):
def decode(path):
file_bytes = tf.io.read_file(path)
if ext == 'png':
img = tf.image.decode_png(file_bytes, channels=3)
elif ext in ['jpg', 'jpeg']:
img = tf.image.decode_jpeg(file_bytes, channels=3)
else:
raise ValueError('Image extension not supported')
img = tf.cast(img, tf.float32) / 255.0
img = tf.image.resize(img, target_size)
return img
def decode_with_labels(path, label):
return (decode(path), label)
return decode_with_labels if with_labels else decode
def build_augmenter(with_labels=True):
def augment(image):
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
image = tf.image.random_brightness(image, max_delta=0.1)
image = tf.image.random_saturation(image, lower=0.75, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.15)
image = tf.image.random_contrast(image, lower=0.75, upper=1.5)
image = tf.image.rot90(image)
image = tf.image.transpose(image)
return image
def augment_with_labels(img, label):
return (augment(img), label)
return augment_with_labels if with_labels else augment
def build_dataset(paths, labels=None, bsize=32, cache=True, decode_fn=None, augment_fn=None, augment=True, repeat=True, shuffle=1024, cache_dir=''):
if cache_dir != '' and cache is True:
os.makedirs(cache_dir, exist_ok=True)
if decode_fn is None:
decode_fn = build_decoder(labels is not None)
if augment_fn is None:
augment_fn = build_augmenter(labels is not None)
AUTO = tf.data.experimental.AUTOTUNE
slices = paths if labels is None else (paths, labels)
dset = tf.data.Dataset.from_tensor_slices(slices)
dset = dset.map(decode_fn, num_parallel_calls=AUTO)
dset = dset.cache(cache_dir) if cache else dset
dset = dset.map(augment_fn, num_parallel_calls=AUTO) if augment else dset
dset = dset.repeat() if repeat else dset
dset = dset.shuffle(shuffle) if shuffle else dset
dset = dset.batch(bsize).prefetch(AUTO)
return dset
COMPETITION_NAME = 'road-segment-dataset'
strategy = auto_select_accelerator()
BATCH_SIZE = strategy.num_replicas_in_sync * 8
GCS_DS_PATH = KaggleDatasets().get_gcs_path(COMPETITION_NAME)
paths = []
labels = []
for i in range(len(train['file_path'].values)):
name = train['Image_ID'].values[i]
label = train['Target'].values[i]
paths.append(GCS_DS_PATH + '/Images/' + name + '.png')
labels.append(label)
paths = np.array(paths)
labels = np.array(labels)
def get_model(name):
if name == 'effnetb0':
with strategy.scope():
base_model = efn.EfficientNetB0(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'effnetb1':
with strategy.scope():
base_model = efn.EfficientNetB1(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'effnetb2':
with strategy.scope():
base_model = efn.EfficientNetB2(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'effnetb3':
with strategy.scope():
base_model = efn.EfficientNetB3(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'effnetb4':
with strategy.scope():
base_model = efn.EfficientNetB4(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'effnetb5':
with strategy.scope():
base_model = efn.EfficientNetB5(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'effnetb6':
with strategy.scope():
base_model = efn.EfficientNetB6(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'effnetb7':
with strategy.scope():
base_model = efn.EfficientNetB7(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'resnet':
with strategy.scope():
base_model = ResNet50(input_shape=(im_size, im_size, 3), weights='imagenet', include_top=False)
elif name == 'xception':
with strategy.scope():
base_model = Xception(input_shape=(im_size, im_size, 3), weights='imagenet', include_top=False)
elif name == 'inception':
with strategy.scope():
base_model = InceptionV3(input_shape=(im_size, im_size, 3), weights='imagenet', include_top=False)
elif name == 'inceptionresnet':
with strategy.scope():
base_model = InceptionResNetV2(input_shape=(im_size, im_size, 3), weights='imagenet', include_top=False)
with strategy.scope():
model = tf.keras.Sequential([base_model, tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(1, activation='sigmoid')])
model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001), loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=0.001), metrics=[tf.keras.metrics.AUC()])
return model
model_name = 'effnetb1'
model = get_model(model_name)
model.summary() | code |
74058130/cell_17 | [
"text_plain_output_1.png"
] | from kaggle_datasets import KaggleDatasets
from keras.applications import VGG19,ResNet50,Xception,InceptionResNetV2,InceptionV3,ResNet152V2
import efficientnet.tfkeras as efn
import numpy as np
import os
import random  # needed by seed_everything below
import pandas as pd
import tensorflow as tf
def seed_everything(seed=0):
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
seed = 42
train = pd.read_csv('../input/segment-road/Train (1).csv')
test = pd.read_csv('../input/segment-road/Test (1).csv')
sub = pd.read_csv('../input/segment-road/SampleSubmission.csv')
def auto_select_accelerator():
"""
Reference:
* https://www.kaggle.com/mgornergoogle/getting-started-with-100-flowers-on-tpu
* https://www.kaggle.com/xhlulu/ranzcr-efficientnet-tpu-training
"""
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
except ValueError:
strategy = tf.distribute.get_strategy()
return strategy
def build_decoder(with_labels=True, target_size=(256, 256), ext='png'):
def decode(path):
file_bytes = tf.io.read_file(path)
if ext == 'png':
img = tf.image.decode_png(file_bytes, channels=3)
elif ext in ['jpg', 'jpeg']:
img = tf.image.decode_jpeg(file_bytes, channels=3)
else:
raise ValueError('Image extension not supported')
img = tf.cast(img, tf.float32) / 255.0
img = tf.image.resize(img, target_size)
return img
def decode_with_labels(path, label):
return (decode(path), label)
return decode_with_labels if with_labels else decode
def build_augmenter(with_labels=True):
def augment(image):
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
image = tf.image.random_brightness(image, max_delta=0.1)
image = tf.image.random_saturation(image, lower=0.75, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.15)
image = tf.image.random_contrast(image, lower=0.75, upper=1.5)
image = tf.image.rot90(image)
image = tf.image.transpose(image)
return image
def augment_with_labels(img, label):
return (augment(img), label)
return augment_with_labels if with_labels else augment
def build_dataset(paths, labels=None, bsize=32, cache=True, decode_fn=None, augment_fn=None, augment=True, repeat=True, shuffle=1024, cache_dir=''):
if cache_dir != '' and cache is True:
os.makedirs(cache_dir, exist_ok=True)
if decode_fn is None:
decode_fn = build_decoder(labels is not None)
if augment_fn is None:
augment_fn = build_augmenter(labels is not None)
AUTO = tf.data.experimental.AUTOTUNE
slices = paths if labels is None else (paths, labels)
dset = tf.data.Dataset.from_tensor_slices(slices)
dset = dset.map(decode_fn, num_parallel_calls=AUTO)
dset = dset.cache(cache_dir) if cache else dset
dset = dset.map(augment_fn, num_parallel_calls=AUTO) if augment else dset
dset = dset.repeat() if repeat else dset
dset = dset.shuffle(shuffle) if shuffle else dset
dset = dset.batch(bsize).prefetch(AUTO)
return dset
COMPETITION_NAME = 'road-segment-dataset'
strategy = auto_select_accelerator()
BATCH_SIZE = strategy.num_replicas_in_sync * 8
GCS_DS_PATH = KaggleDatasets().get_gcs_path(COMPETITION_NAME)
paths = []
labels = []
for i in range(len(train['file_path'].values)):
name = train['Image_ID'].values[i]
label = train['Target'].values[i]
paths.append(GCS_DS_PATH + '/Images/' + name + '.png')
labels.append(label)
paths = np.array(paths)
labels = np.array(labels)
def get_model(name):
if name == 'effnetb0':
with strategy.scope():
base_model = efn.EfficientNetB0(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'effnetb1':
with strategy.scope():
base_model = efn.EfficientNetB1(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'effnetb2':
with strategy.scope():
base_model = efn.EfficientNetB2(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'effnetb3':
with strategy.scope():
base_model = efn.EfficientNetB3(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'effnetb4':
with strategy.scope():
base_model = efn.EfficientNetB4(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'effnetb5':
with strategy.scope():
base_model = efn.EfficientNetB5(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'effnetb6':
with strategy.scope():
base_model = efn.EfficientNetB6(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'effnetb7':
with strategy.scope():
base_model = efn.EfficientNetB7(input_shape=(im_size, im_size, 3), weights='noisy-student', include_top=False)
elif name == 'resnet':
with strategy.scope():
base_model = ResNet50(input_shape=(im_size, im_size, 3), weights='imagenet', include_top=False)
elif name == 'xception':
with strategy.scope():
base_model = Xception(input_shape=(im_size, im_size, 3), weights='imagenet', include_top=False)
elif name == 'inception':
with strategy.scope():
base_model = InceptionV3(input_shape=(im_size, im_size, 3), weights='imagenet', include_top=False)
elif name == 'inceptionresnet':
with strategy.scope():
base_model = InceptionResNetV2(input_shape=(im_size, im_size, 3), weights='imagenet', include_top=False)
with strategy.scope():
model = tf.keras.Sequential([base_model, tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(1, activation='sigmoid')])
model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.001), loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=0.001), metrics=[tf.keras.metrics.AUC()])
return model
test_paths = []
for i in range(len(test['file_path'].values)):
name = test['Image_ID'].values[i]
test_paths.append(GCS_DS_PATH + '/Images/' + name + '.png')
IMSIZES = (224, 240, 260, 300, 380, 456, 512, 600)
im_size = 250
decoder = build_decoder(with_labels=True, target_size=(im_size, im_size))
test_decoder = build_decoder(with_labels=False, target_size=(im_size, im_size))
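# NOTE: train_paths/train_labels and valid_paths/valid_labels are used below but never defined in this cell;
# a simple 80/20 hold-out split of `paths`/`labels` is assumed here so the cell runs end to end.
from sklearn.model_selection import train_test_split
train_paths, valid_paths, train_labels, valid_labels = train_test_split(paths, labels, test_size=0.2, random_state=42)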
train_dataset = build_dataset(train_paths, train_labels, bsize=BATCH_SIZE, decode_fn=decoder)
valid_dataset = build_dataset(valid_paths, valid_labels, bsize=BATCH_SIZE, decode_fn=decoder, repeat=False, shuffle=False, augment=False)
test_decoder = build_decoder(with_labels=False, target_size=(im_size, im_size))
dtest = build_dataset(test_paths, bsize=BATCH_SIZE, repeat=False, shuffle=False, augment=False, cache=False, decode_fn=test_decoder)
model_name = 'effnetb1'
model = get_model(model_name)
model.summary()
train_paths = np.array(train_paths)
steps_per_epoch = train_paths.shape[0] // BATCH_SIZE
checkpoint = tf.keras.callbacks.ModelCheckpoint(f'{model_name}_best_auc.h5', save_best_only=True, monitor='val_auc', mode='max')
lr_reducer = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_auc', patience=3, min_lr=1e-07, mode='max')  # AUC should be maximized, so a plateau means val_auc has stopped increasing
history = model.fit(train_dataset, epochs=30, verbose=1, callbacks=[checkpoint, lr_reducer], steps_per_epoch=steps_per_epoch, validation_data=valid_dataset) | code |
74058130/cell_10 | [
"text_html_output_1.png"
] | from kaggle_datasets import KaggleDatasets
import numpy as np
import os
import random
import pandas as pd
import tensorflow as tf
def seed_everything(seed=0):
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
seed = 42
train = pd.read_csv('../input/segment-road/Train (1).csv')
test = pd.read_csv('../input/segment-road/Test (1).csv')
sub = pd.read_csv('../input/segment-road/SampleSubmission.csv')
def auto_select_accelerator():
"""
Reference:
* https://www.kaggle.com/mgornergoogle/getting-started-with-100-flowers-on-tpu
* https://www.kaggle.com/xhlulu/ranzcr-efficientnet-tpu-training
"""
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
except ValueError:
strategy = tf.distribute.get_strategy()
return strategy
def build_decoder(with_labels=True, target_size=(256, 256), ext='png'):
def decode(path):
file_bytes = tf.io.read_file(path)
if ext == 'png':
img = tf.image.decode_png(file_bytes, channels=3)
elif ext in ['jpg', 'jpeg']:
img = tf.image.decode_jpeg(file_bytes, channels=3)
else:
raise ValueError('Image extension not supported')
img = tf.cast(img, tf.float32) / 255.0
img = tf.image.resize(img, target_size)
return img
def decode_with_labels(path, label):
return (decode(path), label)
return decode_with_labels if with_labels else decode
def build_augmenter(with_labels=True):
def augment(image):
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
image = tf.image.random_brightness(image, max_delta=0.1)
image = tf.image.random_saturation(image, lower=0.75, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.15)
image = tf.image.random_contrast(image, lower=0.75, upper=1.5)
image = tf.image.rot90(image)
image = tf.image.transpose(image)
return image
def augment_with_labels(img, label):
return (augment(img), label)
return augment_with_labels if with_labels else augment
def build_dataset(paths, labels=None, bsize=32, cache=True, decode_fn=None, augment_fn=None, augment=True, repeat=True, shuffle=1024, cache_dir=''):
if cache_dir != '' and cache is True:
os.makedirs(cache_dir, exist_ok=True)
if decode_fn is None:
decode_fn = build_decoder(labels is not None)
if augment_fn is None:
augment_fn = build_augmenter(labels is not None)
AUTO = tf.data.experimental.AUTOTUNE
slices = paths if labels is None else (paths, labels)
dset = tf.data.Dataset.from_tensor_slices(slices)
dset = dset.map(decode_fn, num_parallel_calls=AUTO)
dset = dset.cache(cache_dir) if cache else dset
dset = dset.map(augment_fn, num_parallel_calls=AUTO) if augment else dset
dset = dset.repeat() if repeat else dset
dset = dset.shuffle(shuffle) if shuffle else dset
dset = dset.batch(bsize).prefetch(AUTO)
return dset
COMPETITION_NAME = 'road-segment-dataset'
strategy = auto_select_accelerator()
BATCH_SIZE = strategy.num_replicas_in_sync * 8
GCS_DS_PATH = KaggleDatasets().get_gcs_path(COMPETITION_NAME)
paths = []
labels = []
for i in range(len(train['file_path'].values)):
name = train['Image_ID'].values[i]
label = train['Target'].values[i]
paths.append(GCS_DS_PATH + '/Images/' + name + '.png')
labels.append(label)
paths = np.array(paths)
labels = np.array(labels)
(len(paths), len(labels)) | code |
72121714/cell_12 | [
"image_output_11.png",
"application_vnd.jupyter.stderr_output_27.png",
"application_vnd.jupyter.stderr_output_35.png",
"application_vnd.jupyter.stderr_output_24.png",
"image_output_24.png",
"application_vnd.jupyter.stderr_output_9.png",
"application_vnd.jupyter.stderr_output_52.png",
"application_vnd.jupyter.stderr_output_53.png",
"text_plain_output_43.png",
"image_output_25.png",
"application_vnd.jupyter.stderr_output_48.png",
"text_plain_output_30.png",
"application_vnd.jupyter.stderr_output_2.png",
"application_vnd.jupyter.stderr_output_7.png",
"image_output_17.png",
"application_vnd.jupyter.stderr_output_11.png",
"image_output_30.png",
"application_vnd.jupyter.stderr_output_38.png",
"image_output_14.png",
"image_output_39.png",
"image_output_28.png",
"application_vnd.jupyter.stderr_output_4.png",
"application_vnd.jupyter.stderr_output_26.png",
"image_output_23.png",
"application_vnd.jupyter.stderr_output_6.png",
"image_output_34.png",
"application_vnd.jupyter.stderr_output_31.png",
"application_vnd.jupyter.stderr_output_33.png",
"application_vnd.jupyter.stderr_output_25.png",
"image_output_13.png",
"text_plain_output_45.png",
"image_output_40.png",
"image_output_5.png",
"application_vnd.jupyter.stderr_output_12.png",
"text_plain_output_14.png",
"image_output_18.png",
"text_plain_output_32.png",
"text_plain_output_29.png",
"application_vnd.jupyter.stderr_output_8.png",
"text_plain_output_58.png",
"image_output_21.png",
"image_output_7.png",
"application_vnd.jupyter.stderr_output_10.png",
"application_vnd.jupyter.stderr_output_23.png",
"image_output_31.png",
"text_plain_output_47.png",
"application_vnd.jupyter.stderr_output_34.png",
"image_output_20.png",
"text_plain_output_18.png",
"image_output_32.png",
"text_plain_output_3.png",
"application_vnd.jupyter.stderr_output_19.png",
"image_output_4.png",
"application_vnd.jupyter.stderr_output_44.png",
"application_vnd.jupyter.stderr_output_13.png",
"application_vnd.jupyter.stderr_output_42.png",
"application_vnd.jupyter.stderr_output_5.png",
"image_output_35.png",
"image_output_36.png",
"image_output_8.png",
"image_output_37.png",
"text_plain_output_16.png",
"image_output_16.png",
"application_vnd.jupyter.stderr_output_15.png",
"text_plain_output_59.png",
"application_vnd.jupyter.stderr_output_17.png",
"image_output_27.png",
"application_vnd.jupyter.stderr_output_28.png",
"application_vnd.jupyter.stderr_output_46.png",
"image_output_6.png",
"application_vnd.jupyter.stderr_output_41.png",
"application_vnd.jupyter.stderr_output_20.png",
"application_vnd.jupyter.stderr_output_49.png",
"application_vnd.jupyter.stderr_output_36.png",
"application_vnd.jupyter.stderr_output_57.png",
"application_vnd.jupyter.stderr_output_22.png",
"image_output_12.png",
"image_output_22.png",
"application_vnd.jupyter.stderr_output_56.png",
"application_vnd.jupyter.stderr_output_50.png",
"text_plain_output_1.png",
"image_output_3.png",
"application_vnd.jupyter.stderr_output_51.png",
"image_output_29.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"application_vnd.jupyter.stderr_output_39.png",
"image_output_33.png",
"application_vnd.jupyter.stderr_output_21.png",
"application_vnd.jupyter.stderr_output_54.png",
"image_output_15.png",
"application_vnd.jupyter.stderr_output_55.png",
"image_output_9.png",
"image_output_19.png",
"application_vnd.jupyter.stderr_output_40.png",
"application_vnd.jupyter.stderr_output_37.png",
"image_output_38.png",
"image_output_26.png"
] | from torchvision.utils import make_grid
from tqdm import tqdm
import copy
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn as nn
import torch.nn.functional as F
import torchvision.datasets as datasets
import torchvision.transforms as transforms
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
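# each entry below defines one training run: latent size, epoch count, batch size, model capacity, learning rate, weight decay and variational beta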
runs = [{'latent': 20, 'nE': 100, 'bs': 64, 'cap': 64, 'lr': 0.001, 'wd': 1e-05, 'vb': 1}, {'latent': 20, 'nE': 200, 'bs': 64, 'cap': 64, 'lr': 0.001, 'wd': 1e-05, 'vb': 1}, {'latent': 20, 'nE': 100, 'bs': 64, 'cap': 64, 'lr': 0.001, 'wd': 1e-05, 'vb': 0.5}, {'latent': 20, 'nE': 200, 'bs': 64, 'cap': 64, 'lr': 0.001, 'wd': 1e-05, 'vb': 0.5}]
train_dir = '../input/covid19-chest-ct-image-augmentation-gan-dataset/COVID-19/COVID-19/train'
train_dataset = datasets.ImageFolder(train_dir, transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()]))
def show_image(image_tensor, num_images=25, size=(1, 28, 28)):
'''
Function for visualizing images: Given a tensor of images, number of images, and
    size per image, plots and prints the images in a uniform grid.
'''
image_tensor = (image_tensor + 1) / 2
image_unflat = image_tensor.detach().cpu()
image_grid = make_grid(image_unflat[:num_images], nrow=5)
plt.imshow(image_grid.permute(1, 2, 0).squeeze())
plt.show()
def plot_loss(training_loss, settings):
fig, ax = plt.subplots()
ax.plot(range(len(training_loss)), training_loss)
ax.set(xlabel='epochs', ylabel='BCE loss')
ax.set_title(f'Loss for {settings}', y=1.1)
ax.grid()
fig.savefig("test.png")
plt.show()
class Encoder(nn.Module):
"""encoder for VAE, goes from image conv net to linear latent layer"""
def __init__(self, capacity, latent_dims):
super(Encoder, self).__init__()
c = capacity
self.conv1 = nn.Conv2d(in_channels=3, out_channels=c, kernel_size=4, stride=2, padding=1)
self.conv2 = nn.Conv2d(in_channels=c, out_channels=c * 2, kernel_size=4, stride=2, padding=1)
self.conv3 = nn.Conv2d(in_channels=c * 2, out_channels=c * 2 * 2, kernel_size=4, stride=2, padding=1)
self.conv4 = nn.Conv2d(in_channels=c * 2 * 2, out_channels=c * 2 * 2 * 2, kernel_size=4, stride=2, padding=1)
self.conv5 = nn.Conv2d(in_channels=c * 2 * 2 * 2, out_channels=c * 2 * 2 * 2 * 2, kernel_size=4, stride=2, padding=1)
self.conv6 = nn.Conv2d(in_channels=c * 2 * 2 * 2 * 2, out_channels=c * 2 * 2 * 2 * 2 * 2, kernel_size=4, stride=2, padding=1)
self.fc_mu = nn.Linear(in_features=32768, out_features=latent_dims)
self.fc_logvar = nn.Linear(in_features=32768, out_features=latent_dims)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = F.relu(self.conv5(x))
x = F.relu(self.conv6(x))
x = x.view(x.size(0), -1)
x_mu = self.fc_mu(x)
x_logvar = self.fc_logvar(x)
return (x_mu, x_logvar)
class Decoder(nn.Module):
"""decoder for VAE, goes from linear latent layer to deconv layers to reconstruct image"""
def __init__(self, capacity, latent_dims):
super(Decoder, self).__init__()
c = capacity
self.fc = nn.Linear(in_features=latent_dims, out_features=32768)
self.conv1 = nn.ConvTranspose2d(in_channels=c, out_channels=3, kernel_size=4, stride=2, padding=1)
self.conv2 = nn.ConvTranspose2d(out_channels=c, in_channels=c * 2, kernel_size=4, stride=2, padding=1)
self.conv3 = nn.ConvTranspose2d(out_channels=c * 2, in_channels=c * 2 * 2, kernel_size=4, stride=2, padding=1)
self.conv4 = nn.ConvTranspose2d(out_channels=c * 2 * 2, in_channels=c * 2 * 2 * 2, kernel_size=4, stride=2, padding=1)
self.conv5 = nn.ConvTranspose2d(out_channels=c * 2 * 2 * 2, in_channels=c * 2 * 2 * 2 * 2, kernel_size=4, stride=2, padding=1)
self.conv6 = nn.ConvTranspose2d(out_channels=c * 2 * 2 * 2 * 2, in_channels=c * 2 * 2 * 2 * 2 * 2, kernel_size=4, stride=2, padding=1)
def forward(self, x):
x = self.fc(x)
x = x.view(x.size(0), 2048, 4, 4)
x = F.relu(self.conv6(x))
x = F.relu(self.conv5(x))
x = F.relu(self.conv4(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv2(x))
x = torch.sigmoid(self.conv1(x))
return x
class VAE(nn.Module):
"""VAE architecture for encoder -> sample from latent -> decode latent sample"""
def __init__(self, capacity, latent_dims):
super(VAE, self).__init__()
self.encoder = Encoder(capacity, latent_dims)
self.decoder = Decoder(capacity, latent_dims)
def forward(self, x):
latent_mu, latent_logvar = self.encoder(x)
latent = self.latent_sample(latent_mu, latent_logvar)
x_recon = self.decoder(latent)
return (x_recon, latent_mu, latent_logvar)
def latent_sample(self, mu, logvar):
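        # reparameterization trick: while training, sample eps ~ N(0, 1) and return mu + eps * std; at eval time just return the mean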
if self.training:
std = logvar.mul(0.5).exp_()
eps = torch.empty_like(std).normal_()
return eps.mul(std).add_(mu)
else:
return mu
def vae_loss(recon_x, x, mu, logvar, variational_beta):
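    # VAE objective: pixel-wise binary cross-entropy reconstruction term plus a beta-weighted KL divergence to a standard normal prior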
recon_loss = F.binary_cross_entropy(recon_x.view(-1, 65536), x.view(-1, 65536), reduction='sum')
kldivergence = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
return recon_loss + variational_beta * kldivergence
def setup_model(capacity, latent_dims):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
vae = VAE(capacity=capacity, latent_dims=latent_dims).to(device)
return vae
def train(vae, train_loader, n_epochs, learning_rate, weight_decay, variational_beta):
optimizer = torch.optim.Adam(params=vae.parameters(), lr=learning_rate, weight_decay=weight_decay)
vae.train()
train_loss = []
best_model_wts = None
    bmw_epoch = 0
    best_loss = float('inf')  # lowest average epoch loss seen so far, used to track the best weights
for epoch in tqdm(range(n_epochs)):
num_batches = 0
avg_loss = 0
image_batch_recon = None
for image_batch, _ in train_loader:
image_batch = image_batch.to(device)
image_batch_recon, latent_mu, latent_logvar = vae(image_batch)
loss = vae_loss(image_batch_recon, image_batch, latent_mu, latent_logvar, variational_beta)
optimizer.zero_grad()
loss.backward()
optimizer.step()
avg_loss += loss.item()
num_batches += 1
avg_loss /= num_batches
train_loss.append(avg_loss)
if avg_loss < best_loss:
best_model_wts = copy.deepcopy(vae.state_dict())
best_loss = avg_loss
bmw_epoch = epoch + 1
return (vae, best_model_wts, bmw_epoch, train_loss)
for k, settings in enumerate(runs):
print(f'starting run ... {k}/{len(runs)}')
print(settings)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=settings['bs'], shuffle=True, num_workers=0)
vae = setup_model(settings['cap'], settings['latent'])
vae, best_model_wts, bmw_epoch, train_loss = train(vae, train_loader, settings['nE'], settings['lr'], settings['wd'], settings['vb'])
    last_model_wts = vae.state_dict()
plot_loss(train_loss, settings)
print('-------------------------------------------------------------------')
print('saving weights for model')
torch.save(vae.state_dict(), f"n_{settings['nE']}.ld_{settings['latent']}.lr_{settings['lr']}.vb_{settings['vb']}.last_model.wts")
vae.load_state_dict(best_model_wts)
torch.save(vae.state_dict(), f"n_{bmw_epoch}.ld_{settings['latent']}.lr_{settings['lr']}.vb_{settings['vb']}.lowest_loss_model.wts")
print('-------------------------------------------------------------------')
print() | code |
34129243/cell_4 | [
"image_output_2.png",
"image_output_1.png"
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# data load mirrors the notebook's other cells so this cell runs on its own
TRAIN_DATA = '/kaggle/input/cais-exec-team-in-house/train.csv'
df = pd.read_csv(TRAIN_DATA, index_col='id')
df.hist(bins=20, figsize=(20, 15))
plt.show()
correlation_matrix = df.corr()
fig = plt.figure(figsize=(12, 9))
sns.heatmap(correlation_matrix, vmax=0.8, square=True)
plt.show() | code |
34129243/cell_6 | [
"text_plain_output_1.png"
] | from sklearn.compose import make_column_transformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler,OneHotEncoder
from sklearn.tree import DecisionTreeRegressor
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TRAIN_DATA = '/kaggle/input/cais-exec-team-in-house/train.csv'
SUBMISSIONS_DATA = '/kaggle/input/cais-exec-team-in-house/sampleSubmission.csv'
TEST_DATA = '/kaggle/input/cais-exec-team-in-house/test.csv'
df = pd.read_csv(TRAIN_DATA, index_col='id')
test_df = pd.read_csv(TEST_DATA, index_col='id')
sub_df = pd.read_csv(SUBMISSIONS_DATA, index_col='id')
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
num_attribs = list(df.select_dtypes(include=numerics))
num_attribs.remove('grade')
cat_attribs = list(df.select_dtypes(exclude=numerics))
num_pipline = make_pipeline(StandardScaler())
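# standard-scale the numeric columns and one-hot encode the categorical columns in a single column transformer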
full_pipeline = make_column_transformer((num_pipline, num_attribs), (OneHotEncoder(), cat_attribs))
X = df.drop(columns='grade')
full_pipeline = full_pipeline.fit(X)
X = full_pipeline.transform(X)
y = df.grade
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
models = [LinearRegression(), DecisionTreeRegressor(), RandomForestRegressor()]
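# compare baseline regressors via 5-fold cross-validated RMSE (sklearn returns negative MSE, so negate and take the square root)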
for model in models:
scores = cross_val_score(model, X, y, scoring='neg_mean_squared_error', cv=5)
real_scores = np.sqrt(-scores)
print(f'The scores for {model.__class__.__name__} were {real_scores} and the average was {np.average(real_scores)}')
print('-------------------------------------------------') | code |
34129243/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TRAIN_DATA = '/kaggle/input/cais-exec-team-in-house/train.csv'
SUBMISSIONS_DATA = '/kaggle/input/cais-exec-team-in-house/sampleSubmission.csv'
TEST_DATA = '/kaggle/input/cais-exec-team-in-house/test.csv'
df = pd.read_csv(TRAIN_DATA, index_col='id')
test_df = pd.read_csv(TEST_DATA, index_col='id')
sub_df = pd.read_csv(SUBMISSIONS_DATA, index_col='id')
df.info() | code |
34129243/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.compose import make_column_transformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler,OneHotEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TRAIN_DATA = '/kaggle/input/cais-exec-team-in-house/train.csv'
SUBMISSIONS_DATA = '/kaggle/input/cais-exec-team-in-house/sampleSubmission.csv'
TEST_DATA = '/kaggle/input/cais-exec-team-in-house/test.csv'
df = pd.read_csv(TRAIN_DATA, index_col='id')
test_df = pd.read_csv(TEST_DATA, index_col='id')
sub_df = pd.read_csv(SUBMISSIONS_DATA, index_col='id')
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
num_attribs = list(df.select_dtypes(include=numerics))
num_attribs.remove('grade')
cat_attribs = list(df.select_dtypes(exclude=numerics))
num_pipline = make_pipeline(StandardScaler())
full_pipeline = make_column_transformer((num_pipline, num_attribs), (OneHotEncoder(), cat_attribs))
X = df.drop(columns='grade')
full_pipeline = full_pipeline.fit(X)
X = full_pipeline.transform(X)
y = df.grade
bestModel = RandomForestRegressor()
bestModel.fit(X, y)
test_X = full_pipeline.transform(test_df)
predictions = bestModel.predict(test_X) | code |
34129243/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TRAIN_DATA = '/kaggle/input/cais-exec-team-in-house/train.csv'
SUBMISSIONS_DATA = '/kaggle/input/cais-exec-team-in-house/sampleSubmission.csv'
TEST_DATA = '/kaggle/input/cais-exec-team-in-house/test.csv'
df = pd.read_csv(TRAIN_DATA, index_col='id')
test_df = pd.read_csv(TEST_DATA, index_col='id')
sub_df = pd.read_csv(SUBMISSIONS_DATA, index_col='id')
df.describe() | code |
34129243/cell_5 | [
"text_plain_output_1.png"
] | from sklearn.compose import make_column_transformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler,OneHotEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
TRAIN_DATA = '/kaggle/input/cais-exec-team-in-house/train.csv'
SUBMISSIONS_DATA = '/kaggle/input/cais-exec-team-in-house/sampleSubmission.csv'
TEST_DATA = '/kaggle/input/cais-exec-team-in-house/test.csv'
df = pd.read_csv(TRAIN_DATA, index_col='id')
test_df = pd.read_csv(TEST_DATA, index_col='id')
sub_df = pd.read_csv(SUBMISSIONS_DATA, index_col='id')
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
num_attribs = list(df.select_dtypes(include=numerics))
print(num_attribs)
num_attribs.remove('grade')
cat_attribs = list(df.select_dtypes(exclude=numerics))
num_pipline = make_pipeline(StandardScaler())
full_pipeline = make_column_transformer((num_pipline, num_attribs), (OneHotEncoder(), cat_attribs))
X = df.drop(columns='grade')
full_pipeline = full_pipeline.fit(X)
X = full_pipeline.transform(X)
y = df.grade | code |
32062015/cell_4 | [
"text_html_output_1.png"
] | from multiprocessing.pool import ThreadPool
from pyearth import Earth
from sklearn.preprocessing import PolynomialFeatures
import gc
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
import warnings
import pandas as pd
import numpy as np
import gc
import warnings
warnings.filterwarnings('ignore')
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
train.rename(columns={'Country_Region': 'Country', 'Province_State': 'State', 'ConfirmedCases': 'Confirmed'}, inplace=True)
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
test.rename(columns={'Country_Region': 'Country', 'Province_State': 'State', 'ConfirmedCases': 'Confirmed', 'ForecastId': 'Id'}, inplace=True)
train['Type'] = 'train'
test['Type'] = 'test'
test['Confirmed'] = 0
test['Fatalities'] = 0
import pandas as pd
import numpy as np
import os, gc
train['id_x'] = train['Date'].astype(str).values + '_' + train['State'].astype(str).values + '_' + train['Country'].astype(str).values + '_' + train['Type'].astype(str).values
test['id_x'] = test['Date'].astype(str).values + '_' + test['State'].astype(str).values + '_' + test['Country'].astype(str).values + '_' + test['Type'].astype(str).values
raw = pd.concat([train, test], axis=0, sort=False)
raw['Date'] = pd.to_datetime(raw['Date'])
raw.sort_values('Date', inplace=True)
raw.fillna(0, inplace=True)
Country_State = raw.Country + '_' + raw.State.astype(str)
raw['Country_State_id'] = Country_State.astype('category').cat.codes
raw['Day'] = raw['Date'].astype('category').cat.codes + 1
raw.set_index('Country_State_id', inplace=True)
raw.Day = raw.Day.astype(np.int32)
raw.reset_index(inplace=True)
features = ['id_x', 'Day', 'Id', 'Country_State_id']
train = train.merge(raw[features], on=['id_x'], how='left')
test = test.merge(raw[features], on=['id_x'], how='left')
import os, gc
from multiprocessing.pool import ThreadPool
from sklearn.feature_selection import SelectFromModel
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
os.environ['OMP_NUM_THREADS'] = '1'
gc.enable()
features = ['id_x', 'Day']
X_train = [np.array(train[train.Country_State_id == x][features]) for x in list(train.Country_State_id.unique())]
X_test = [np.array(test[test.Country_State_id == x][features]) for x in list(train.Country_State_id.unique())]
y_target_c = [np.array(train[train.Country_State_id == x][['Confirmed']]) for x in list(train.Country_State_id.unique())]
y_target_f = [np.array(train[train.Country_State_id == x][['Fatalities']]) for x in list(train.Country_State_id.unique())]
poly = PolynomialFeatures(5)
out_ = pd.DataFrame({'id_x': [], 'Confirmed': [], 'Fatalities': []})
from pyearth import Earth
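# fit_model: for one country/state series, expand the day index with degree-5 polynomial features and fit a MARS (py-earth)
# model separately for confirmed cases and fatalities, then predict both on the test days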
def fit_model(xtrain, xtest, ytrain, ytrain1, idx) -> np.array:
X = xtrain[idx][:, 1]
x_test = xtest[idx][:, 1]
Y = ytrain[idx]
Y1 = ytrain1[idx]
X_transf = poly.fit_transform(X.reshape(-1, 1))
x_test_transf = poly.fit_transform(x_test.reshape(-1, 1))
model = Earth()
model.fit(np.array(X_transf), Y)
conf_p = model.predict(x_test_transf)
model.fit(X_transf, Y1)
conf_f = model.predict(x_test_transf)
res = pd.DataFrame({'id_x': xtest[idx][:, 0], 'Confirmed': conf_p, 'Fatalities': conf_f})
return res
with ThreadPool(processes=4) as pool:
args = [(X_train, X_test, y_target_c, y_target_f, idx) for idx in test.Country_State_id.unique()]
out_ = pd.concat(pool.starmap(fit_model, args))
out_ = test[['id_x']].merge(out_, on='id_x', how='left')
pool.close()
sub = pd.read_csv('../input/covid19-global-forecasting-week-4/submission.csv')
sub_new = sub[['ForecastId']]
result = pd.concat([out_.reset_index().Confirmed, out_.reset_index().Fatalities, sub_new], axis=1)
result.columns = ['ConfirmedCases', 'Fatalities', 'ForecastId']
result = result[['ForecastId', 'ConfirmedCases', 'Fatalities']]
result.to_csv('submission.csv', index=False)
result.head() | code |
32062015/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd
import warnings
import pandas as pd
import numpy as np
import gc
import warnings
warnings.filterwarnings('ignore')
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
train.rename(columns={'Country_Region': 'Country', 'Province_State': 'State', 'ConfirmedCases': 'Confirmed'}, inplace=True)
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
test.rename(columns={'Country_Region': 'Country', 'Province_State': 'State', 'ConfirmedCases': 'Confirmed', 'ForecastId': 'Id'}, inplace=True)
train['Type'] = 'train'
test['Type'] = 'test'
test['Confirmed'] = 0
test['Fatalities'] = 0
print(train['Date'].min(), train['Date'].max())
print(test['Date'].min(), test['Date'].max()) | code |
32062015/cell_3 | [
"text_plain_output_1.png"
] | import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
import warnings
import pandas as pd
import numpy as np
import gc
import warnings
warnings.filterwarnings('ignore')
train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
train.rename(columns={'Country_Region': 'Country', 'Province_State': 'State', 'ConfirmedCases': 'Confirmed'}, inplace=True)
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
test.rename(columns={'Country_Region': 'Country', 'Province_State': 'State', 'ConfirmedCases': 'Confirmed', 'ForecastId': 'Id'}, inplace=True)
train['Type'] = 'train'
test['Type'] = 'test'
test['Confirmed'] = 0
test['Fatalities'] = 0
import pandas as pd
import numpy as np
import os, gc
train['id_x'] = train['Date'].astype(str).values + '_' + train['State'].astype(str).values + '_' + train['Country'].astype(str).values + '_' + train['Type'].astype(str).values
test['id_x'] = test['Date'].astype(str).values + '_' + test['State'].astype(str).values + '_' + test['Country'].astype(str).values + '_' + test['Type'].astype(str).values
raw = pd.concat([train, test], axis=0, sort=False)
raw['Date'] = pd.to_datetime(raw['Date'])
raw.sort_values('Date', inplace=True)
raw.fillna(0, inplace=True)
Country_State = raw.Country + '_' + raw.State.astype(str)
raw['Country_State_id'] = Country_State.astype('category').cat.codes
raw['Day'] = raw['Date'].astype('category').cat.codes + 1
raw.set_index('Country_State_id', inplace=True)
raw.Day = raw.Day.astype(np.int32)
raw.reset_index(inplace=True)
features = ['id_x', 'Day', 'Id', 'Country_State_id']
train = train.merge(raw[features], on=['id_x'], how='left')
test = test.merge(raw[features], on=['id_x'], how='left')
print(train.shape, test.shape) | code |
105176374/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
trainData = pd.read_csv('../input/santander-dataset/Santander Customer Satisfaction - TRAIN.csv', index_col=0)
testData = pd.read_csv('../input/santander-dataset/Santander Customer Satisfaction - TEST-Without TARGET.csv', index_col=0)
trainData.isna().sum()
TrainCols = list(trainData.columns.values)
TestCols = list(testData.columns.values)
Xtrain = trainData.drop('TARGET', axis=1).copy()
Ytrain = trainData[['TARGET']].copy()
print(Xtrain.shape)
print(Ytrain.shape)
Xtest = testData.copy() | code |
105176374/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
trainData = pd.read_csv('../input/santander-dataset/Santander Customer Satisfaction - TRAIN.csv', index_col=0)
testData = pd.read_csv('../input/santander-dataset/Santander Customer Satisfaction - TEST-Without TARGET.csv', index_col=0)
print(trainData.shape)
print(testData.shape) | code |
105176374/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
trainData = pd.read_csv('../input/santander-dataset/Santander Customer Satisfaction - TRAIN.csv', index_col=0)
testData = pd.read_csv('../input/santander-dataset/Santander Customer Satisfaction - TEST-Without TARGET.csv', index_col=0)
trainData.isna().sum() | code |
105176374/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd
trainData = pd.read_csv('../input/santander-dataset/Santander Customer Satisfaction - TRAIN.csv', index_col=0)
testData = pd.read_csv('../input/santander-dataset/Santander Customer Satisfaction - TEST-Without TARGET.csv', index_col=0)
trainData.head() | code |
105176374/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
trainData = pd.read_csv('../input/santander-dataset/Santander Customer Satisfaction - TRAIN.csv', index_col=0)
testData = pd.read_csv('../input/santander-dataset/Santander Customer Satisfaction - TEST-Without TARGET.csv', index_col=0)
trainData.isna().sum()
trainData.describe() | code |
105176374/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
trainData = pd.read_csv('../input/santander-dataset/Santander Customer Satisfaction - TRAIN.csv', index_col=0)
testData = pd.read_csv('../input/santander-dataset/Santander Customer Satisfaction - TEST-Without TARGET.csv', index_col=0)
trainData.isna().sum()
TrainCols = list(trainData.columns.values)
TestCols = list(testData.columns.values)
print(TrainCols)
print(TestCols) | code |
105176374/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
trainData = pd.read_csv('../input/santander-dataset/Santander Customer Satisfaction - TRAIN.csv', index_col=0)
testData = pd.read_csv('../input/santander-dataset/Santander Customer Satisfaction - TEST-Without TARGET.csv', index_col=0)
testData.head() | code |
105176374/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
trainData = pd.read_csv('../input/santander-dataset/Santander Customer Satisfaction - TRAIN.csv', index_col=0)
testData = pd.read_csv('../input/santander-dataset/Santander Customer Satisfaction - TEST-Without TARGET.csv', index_col=0)
trainData.info()
print()
testData.info() | code |
17122208/cell_9 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/data.csv')
print(df.keys()) | code |
17122208/cell_23 | [
"image_output_1.png"
] | import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
df = pd.read_csv('../input/data.csv')
def make_histogram(column, bins=None, kde=False, norm_hist=False):
"""
This function returns a seaborn histogram based on an inputted dataset column.
:param column: column of dataset
:param bins: list of bin values of the histogram
:param kde: boolean of fitting kernel density estimate
:param norm_hist: boolean of normalizing histogram
:returns: histogram of the column
"""
return sns.distplot(df[column], bins=bins, kde=kde, norm_hist=norm_hist);
#sns.distplot(df["Age"], bins=[15, 20, 25, 30, 35, 40, 45], kde=False, norm_hist=False)
age_histogram = make_histogram("Age")
def make_barplot(x_column, y_column, data, x_inches, y_inches, hue=None):
"""
This function returns a seaborn barplot based on the data columns passed in.
:param x_column: x-axis column as a string
:param y_column: y-axis column as a string
:param hue: hue column as a string
:param data: dataframe containing above columns
:returns: barplot of the columns
"""
fig = plt.gcf()
fig.set_size_inches(x_inches, y_inches)
return sns.barplot(x=x_column, y=y_column, hue=hue, data=data)
position_longpassing = make_barplot('Position', 'LongPassing', df, 20, 10, 'Preferred Foot') | code |
17122208/cell_33 | [
"image_output_1.png"
] | import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
df = pd.read_csv('../input/data.csv')
def make_histogram(column, bins=None, kde=False, norm_hist=False):
"""
This function returns a seaborn histogram based on an inputted dataset column.
:param column: column of dataset
:param bins: list of bin values of the histogram
:param kde: boolean of fitting kernel density estimate
:param norm_hist: boolean of normalizing histogram
:returns: histogram of the column
"""
return sns.distplot(df[column], bins=bins, kde=kde, norm_hist=norm_hist);
#sns.distplot(df["Age"], bins=[15, 20, 25, 30, 35, 40, 45], kde=False, norm_hist=False)
age_histogram = make_histogram("Age")
def make_barplot(x_column, y_column, data, x_inches, y_inches, hue=None):
"""
This function returns a seaborn barplot based on the data columns passed in.
:param x_column: x-axis column as a string
:param y_column: y-axis column as a string
:param hue: hue column as a string
:param data: dataframe containing above columns
:returns: barplot of the columns
"""
#set size of plot bigger to fit the display
fig = plt.gcf() #create the graph figure
fig.set_size_inches(x_inches, y_inches) #set figure to x inches and y inches
return sns.barplot(x=x_column, y=y_column, hue=hue, data=data);
position_longpassing = make_barplot("Position", "LongPassing", df, 20, 10, "Preferred Foot")
def make_scatterplot(x_column, y_column, data, hue=None, regression=False):
"""
This function returns a seaborn barplot based on the data columns passed in.
:param x_column: x-axis column as a string
:param y_column: y-axis column as a string
:param data: dataframe containing above columns
:param hue: hue column as a string
:param regression: boolean of whether to plot regression
:returns: barplot of the columns
"""
if not regression:
return sns.relplot(x=x_column, y=y_column, hue=hue, data=data);
else:
assert hue is None, "Can't have Hue with Regression Plot"
return sns.regplot(x=x_column, y=y_column, data=data);
acc_stam_regression = make_scatterplot("Stamina", "Acceleration", df, "Preferred Foot")
sns.relplot(x='Stamina', y='Acceleration', hue='Preferred Foot', data=df)
plt.xlabel('Player Stamina Rating')
plt.ylabel('Player Acceleration Rating')
plt.title("FIFA Players' Stamina vs. Acceleration Ratings") | code |
17122208/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
import warnings
warnings.filterwarnings('ignore')
from matplotlib import pyplot as plt
import seaborn as sns
sns.set_style('darkgrid') | code |
17122208/cell_39 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
df = pd.read_csv('../input/data.csv')
def make_histogram(column, bins=None, kde=False, norm_hist=False):
"""
This function returns a seaborn histogram based on an inputted dataset column.
:param column: column of dataset
:param bins: list of bin values of the histogram
:param kde: boolean of fitting kernel density estimate
:param norm_hist: boolean of normalizing histogram
:returns: histogram of the column
"""
return sns.distplot(df[column], bins=bins, kde=kde, norm_hist=norm_hist);
#sns.distplot(df["Age"], bins=[15, 20, 25, 30, 35, 40, 45], kde=False, norm_hist=False)
age_histogram = make_histogram("Age")
def make_barplot(x_column, y_column, data, x_inches, y_inches, hue=None):
"""
This function returns a seaborn barplot based on the data columns passed in.
:param x_column: x-axis column as a string
:param y_column: y-axis column as a string
:param hue: hue column as a string
:param data: dataframe containing above columns
:returns: barplot of the columns
"""
#set size of plot bigger to fit the display
fig = plt.gcf() #create the graph figure
fig.set_size_inches(x_inches, y_inches) #set figure to x inches and y inches
return sns.barplot(x=x_column, y=y_column, hue=hue, data=data);
position_longpassing = make_barplot("Position", "LongPassing", df, 20, 10, "Preferred Foot")
def make_scatterplot(x_column, y_column, data, hue=None, regression=False):
"""
This function returns a seaborn barplot based on the data columns passed in.
:param x_column: x-axis column as a string
:param y_column: y-axis column as a string
:param data: dataframe containing above columns
:param hue: hue column as a string
:param regression: boolean of whether to plot regression
:returns: barplot of the columns
"""
if not regression:
return sns.relplot(x=x_column, y=y_column, hue=hue, data=data);
else:
assert hue is None, "Can't have Hue with Regression Plot"
return sns.regplot(x=x_column, y=y_column, data=data);
acc_stam_regression = make_scatterplot("Stamina", "Acceleration", df, "Preferred Foot")
plt.xlim(20, 60)
plt.xlim(50, 80)
plt.figure()
fig = plt.gcf()
fig.set_size_inches(10, 10)
plt.subplot(2, 2, 1)
sns.distplot(df['Age'], bins=[15, 20, 25, 30, 35, 40, 45], kde=False, norm_hist=False)
plt.subplot(2, 2, 2)
sns.distplot(df['Potential'], kde=False, norm_hist=False)
plt.subplots_adjust(left=0) | code |
17122208/cell_19 | [
"image_output_1.png"
] | import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/data.csv')
def make_histogram(column, bins=None, kde=False, norm_hist=False):
"""
This function returns a seaborn histogram based on an inputted dataset column.
:param column: column of dataset
:param bins: list of bin values of the histogram
:param kde: boolean of fitting kernel density estimate
:param norm_hist: boolean of normalizing histogram
:returns: histogram of the column
"""
return sns.distplot(df[column], bins=bins, kde=kde, norm_hist=norm_hist);
#sns.distplot(df["Age"], bins=[15, 20, 25, 30, 35, 40, 45], kde=False, norm_hist=False)
age_histogram = make_histogram("Age")
uneven_bins_normalized = make_histogram('Age', [15, 20, 30, 35, 45], norm_hist=True) | code |
17122208/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/data.csv')
df.head() | code |
17122208/cell_28 | [
"image_output_1.png"
] | import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
df = pd.read_csv('../input/data.csv')
def make_histogram(column, bins=None, kde=False, norm_hist=False):
"""
This function returns a seaborn histogram based on an inputted dataset column.
:param column: column of dataset
:param bins: list of bin values of the histogram
:param kde: boolean of fitting kernel density estimate
:param norm_hist: boolean of normalizing histogram
:returns: histogram of the column
"""
return sns.distplot(df[column], bins=bins, kde=kde, norm_hist=norm_hist);
#sns.distplot(df["Age"], bins=[15, 20, 25, 30, 35, 40, 45], kde=False, norm_hist=False)
age_histogram = make_histogram("Age")
def make_barplot(x_column, y_column, data, x_inches, y_inches, hue=None):
"""
This function returns a seaborn barplot based on the data columns passed in.
:param x_column: x-axis column as a string
:param y_column: y-axis column as a string
:param hue: hue column as a string
:param data: dataframe containing above columns
:returns: barplot of the columns
"""
#set size of plot bigger to fit the display
fig = plt.gcf() #create the graph figure
fig.set_size_inches(x_inches, y_inches) #set figure to x inches and y inches
return sns.barplot(x=x_column, y=y_column, hue=hue, data=data);
position_longpassing = make_barplot("Position", "LongPassing", df, 20, 10, "Preferred Foot")
def make_scatterplot(x_column, y_column, data, hue=None, regression=False):
"""
This function returns a seaborn barplot based on the data columns passed in.
:param x_column: x-axis column as a string
:param y_column: y-axis column as a string
:param data: dataframe containing above columns
:param hue: hue column as a string
:param regression: boolean of whether to plot regression
:returns: barplot of the columns
"""
if not regression:
return sns.relplot(x=x_column, y=y_column, hue=hue, data=data);
else:
assert hue is None, "Can't have Hue with Regression Plot"
return sns.regplot(x=x_column, y=y_column, data=data);
acc_stam_regression = make_scatterplot("Stamina", "Acceleration", df, "Preferred Foot")
acc_stam_regression = make_scatterplot('Stamina', 'Acceleration', df, regression=True) | code |
17122208/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/data.csv')
def make_histogram(column, bins=None, kde=False, norm_hist=False):
"""
This function returns a seaborn histogram based on an inputted dataset column.
:param column: column of dataset
:param bins: list of bin values of the histogram
:param kde: boolean of fitting kernel density estimate
:param norm_hist: boolean of normalizing histogram
:returns: histogram of the column
"""
return sns.distplot(df[column], bins=bins, kde=kde, norm_hist=norm_hist);
#sns.distplot(df["Age"], bins=[15, 20, 25, 30, 35, 40, 45], kde=False, norm_hist=False)
age_histogram = make_histogram("Age")
uneven_bins = make_histogram('Age', [15, 20, 30, 35, 45]) | code |
17122208/cell_35 | [
"image_output_1.png"
] | import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
df = pd.read_csv('../input/data.csv')
def make_histogram(column, bins=None, kde=False, norm_hist=False):
"""
This function returns a seaborn histogram based on an inputted dataset column.
:param column: column of dataset
:param bins: list of bin values of the histogram
:param kde: boolean of fitting kernel density estimate
:param norm_hist: boolean of normalizing histogram
:returns: histogram of the column
"""
return sns.distplot(df[column], bins=bins, kde=kde, norm_hist=norm_hist);
#sns.distplot(df["Age"], bins=[15, 20, 25, 30, 35, 40, 45], kde=False, norm_hist=False)
age_histogram = make_histogram("Age")
def make_barplot(x_column, y_column, data, x_inches, y_inches, hue=None):
"""
This function returns a seaborn barplot based on the data columns passed in.
:param x_column: x-axis column as a string
:param y_column: y-axis column as a string
:param hue: hue column as a string
:param data: dataframe containing above columns
:returns: barplot of the columns
"""
#set size of plot bigger to fit the display
fig = plt.gcf() #create the graph figure
fig.set_size_inches(x_inches, y_inches) #set figure to x inches and y inches
return sns.barplot(x=x_column, y=y_column, hue=hue, data=data);
position_longpassing = make_barplot("Position", "LongPassing", df, 20, 10, "Preferred Foot")
def make_scatterplot(x_column, y_column, data, hue=None, regression=False):
"""
This function returns a seaborn barplot based on the data columns passed in.
:param x_column: x-axis column as a string
:param y_column: y-axis column as a string
:param data: dataframe containing above columns
:param hue: hue column as a string
:param regression: boolean of whether to plot regression
:returns: barplot of the columns
"""
if not regression:
return sns.relplot(x=x_column, y=y_column, hue=hue, data=data);
else:
assert hue is None, "Can't have Hue with Regression Plot"
return sns.regplot(x=x_column, y=y_column, data=data);
acc_stam_regression = make_scatterplot("Stamina", "Acceleration", df, "Preferred Foot")
sns.relplot(x='Stamina', y='Acceleration', data=df)
plt.xlim(20, 60) | code |
17122208/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/data.csv')
def make_histogram(column, bins=None, kde=False, norm_hist=False):
"""
This function returns a seaborn histogram based on an inputted dataset column.
:param column: column of dataset
:param bins: list of bin values of the histogram
:param kde: boolean of fitting kernel density estimate
:param norm_hist: boolean of normalizing histogram
:returns: histogram of the column
"""
return sns.distplot(df[column], bins=bins, kde=kde, norm_hist=norm_hist);
#sns.distplot(df["Age"], bins=[15, 20, 25, 30, 35, 40, 45], kde=False, norm_hist=False)
age_histogram = make_histogram("Age")
age_histogram = make_histogram('Age', [15, 20, 25, 30, 35, 40, 45]) | code |
17122208/cell_27 | [
"image_output_1.png"
] | import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
df = pd.read_csv('../input/data.csv')
def make_histogram(column, bins=None, kde=False, norm_hist=False):
"""
This function returns a seaborn histogram based on an inputted dataset column.
:param column: column of dataset
:param bins: list of bin values of the histogram
:param kde: boolean of fitting kernel density estimate
:param norm_hist: boolean of normalizing histogram
:returns: histogram of the column
"""
return sns.distplot(df[column], bins=bins, kde=kde, norm_hist=norm_hist);
#sns.distplot(df["Age"], bins=[15, 20, 25, 30, 35, 40, 45], kde=False, norm_hist=False)
age_histogram = make_histogram("Age")
def make_barplot(x_column, y_column, data, x_inches, y_inches, hue=None):
"""
This function returns a seaborn barplot based on the data columns passed in.
:param x_column: x-axis column as a string
:param y_column: y-axis column as a string
:param hue: hue column as a string
:param data: dataframe containing above columns
:returns: barplot of the columns
"""
#set size of plot bigger to fit the display
fig = plt.gcf() #create the graph figure
fig.set_size_inches(x_inches, y_inches) #set figure to x inches and y inches
return sns.barplot(x=x_column, y=y_column, hue=hue, data=data);
position_longpassing = make_barplot("Position", "LongPassing", df, 20, 10, "Preferred Foot")
def make_scatterplot(x_column, y_column, data, hue=None, regression=False):
"""
This function returns a seaborn barplot based on the data columns passed in.
:param x_column: x-axis column as a string
:param y_column: y-axis column as a string
:param data: dataframe containing above columns
:param hue: hue column as a string
:param regression: boolean of whether to plot regression
:returns: barplot of the columns
"""
if not regression:
return sns.relplot(x=x_column, y=y_column, hue=hue, data=data)
else:
assert hue is None, "Can't have Hue with Regression Plot"
return sns.regplot(x=x_column, y=y_column, data=data)
acc_stam_regression = make_scatterplot('Stamina', 'Acceleration', df, 'Preferred Foot') | code |
17122208/cell_37 | [
"image_output_1.png"
] | import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
df = pd.read_csv('../input/data.csv')
def make_histogram(column, bins=None, kde=False, norm_hist=False):
"""
This function returns a seaborn histogram based on an inputted dataset column.
:param column: column of dataset
:param bins: list of bin values of the histogram
:param kde: boolean of fitting kernel density estimate
:param norm_hist: boolean of normalizing histogram
:returns: histogram of the column
"""
return sns.distplot(df[column], bins=bins, kde=kde, norm_hist=norm_hist);
#sns.distplot(df["Age"], bins=[15, 20, 25, 30, 35, 40, 45], kde=False, norm_hist=False)
age_histogram = make_histogram("Age")
def make_barplot(x_column, y_column, data, x_inches, y_inches, hue=None):
"""
This function returns a seaborn barplot based on the data columns passed in.
:param x_column: x-axis column as a string
:param y_column: y-axis column as a string
:param hue: hue column as a string
:param data: dataframe containing above columns
:returns: barplot of the columns
"""
#set size of plot bigger to fit the display
fig = plt.gcf() #create the graph figure
fig.set_size_inches(x_inches, y_inches) #set figure to x inches and y inches
return sns.barplot(x=x_column, y=y_column, hue=hue, data=data);
position_longpassing = make_barplot("Position", "LongPassing", df, 20, 10, "Preferred Foot")
def make_scatterplot(x_column, y_column, data, hue=None, regression=False):
"""
This function returns a seaborn barplot based on the data columns passed in.
:param x_column: x-axis column as a string
:param y_column: y-axis column as a string
:param data: dataframe containing above columns
:param hue: hue column as a string
:param regression: boolean of whether to plot regression
:returns: barplot of the columns
"""
if not regression:
return sns.relplot(x=x_column, y=y_column, hue=hue, data=data);
else:
assert hue is None, "Can't have Hue with Regression Plot"
return sns.regplot(x=x_column, y=y_column, data=data);
acc_stam_regression = make_scatterplot("Stamina", "Acceleration", df, "Preferred Foot")
plt.xlim(20, 60)
plt.plot(df['Overall'], df['Potential'])
plt.plot(df['Overall'], df['Age'])
plt.xlim(50, 80)
plt.legend(['Potential', 'Age'])
plt.xlabel('Overall') | code |
17122208/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/data.csv')
def make_histogram(column, bins=None, kde=False, norm_hist=False):
"""
This function returns a seaborn histogram based on an inputted dataset column.
:param column: column of dataset
:param bins: list of bin values of the histogram
:param kde: boolean of fitting kernel density estimate
:param norm_hist: boolean of normalizing histogram
:returns: histogram of the column
"""
return sns.distplot(df[column], bins=bins, kde=kde, norm_hist=norm_hist)
age_histogram = make_histogram('Age') | code |
105205632/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
import plotly_express as px
df = pd.read_excel('../input/volve-production-data/Volve production data.xlsx')
df.isna().sum()
well_prod = df.groupby('NPD_WELL_BORE_NAME')['BORE_OIL_VOL'].sum()
well_prod
fig_o = px.pie(names = well_prod.index , values = well_prod.values , labels ={"names":"Well ", "values":"Total oil production (bbls)"},
)
fig_o.update_traces(textposition='inside', textinfo='percent+label'
,hoverinfo ='percent+label',marker=dict(line=dict(color='#000000', width=2)))
fig_o.update_layout(
title_text = "Contribution of each well in oil production",legend_title_text="Wells",legend_title_font_size=15,
title_x=.5 , title_font_size=20, paper_bgcolor="#0C2D42",font_color="#fff"
)
fig_o.show()
well_prod_g = df.groupby('NPD_WELL_BORE_NAME')['BORE_GAS_VOL'].sum()
well_prod_g
fig_g = px.pie(names=well_prod_g.index, values=well_prod_g.values, labels={'names': 'Well ', 'values': 'Total oil production (bbls)'})
fig_g.update_traces(textposition='inside', textinfo='percent+label', hoverinfo='percent+label', marker=dict(line=dict(color='#000000', width=2)))
fig_g.update_layout(title_text='Contribution of each well in gas production', legend_title_text='Wells', legend_title_font_size=15, title_x=0.5, title_font_size=20, paper_bgcolor='#0C2D42', font_color='#fff')
fig_g.show() | code |
105205632/cell_9 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly_express as px
import missingno as msn
plt.style.use('bmh')
df = pd.read_excel('../input/volve-production-data/Volve production data.xlsx')
df.isna().sum()
wells = df['NPD_WELL_BORE_NAME'].unique()
plt.figure(figsize=(12, 20))
for i, well in enumerate(wells):
d = df[df['NPD_WELL_BORE_NAME'] == well]
plt.subplot(len(wells), 1, i + 1)
plt.plot(d['DATEPRD'], d['BORE_OIL_VOL'])
plt.plot(d['DATEPRD'], d['BORE_WAT_VOL'])
plt.title(well)
plt.xlabel('Time')
plt.ylabel('Oil & Water production')
plt.tight_layout()
plt.show | code |
105205632/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_excel('../input/volve-production-data/Volve production data.xlsx')
df.head() | code |
105205632/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_excel('../input/volve-production-data/Volve production data.xlsx')
df.isna().sum()
well_prod = df.groupby('NPD_WELL_BORE_NAME')['BORE_OIL_VOL'].sum()
well_prod
well_prod_g = df.groupby('NPD_WELL_BORE_NAME')['BORE_GAS_VOL'].sum()
well_prod_g
well_prod_w = df.groupby('NPD_WELL_BORE_NAME')['BORE_WAT_VOL'].sum()
well_prod_w
df_ml = df[df['WELL_TYPE'] == 'OP']
df_ml.shape
df_ml.columns
df_ml = df_ml[['DATEPRD', 'NPD_WELL_BORE_NAME', 'ON_STREAM_HRS', 'AVG_DOWNHOLE_PRESSURE', 'AVG_DOWNHOLE_TEMPERATURE', 'AVG_DP_TUBING', 'BORE_OIL_VOL', 'BORE_GAS_VOL', 'BORE_WAT_VOL', 'AVG_WHP_P', 'AVG_WHT_P', 'DP_CHOKE_SIZE']]
df_ml.rename(columns={'DATEPRD': 'date', 'NPD_WELL_BORE_NAME': 'well_name', 'ON_STREAM_HRS': 'prod_hrs', 'AVG_DOWNHOLE_PRESSURE': 'bhp', 'AVG_DOWNHOLE_TEMPERATURE': 'bht', 'AVG_DP_TUBING': 'dp_tubing', 'AVG_WHP_P': 'tht', 'AVG_WHT_P': 'thp', 'DP_CHOKE_SIZE': 'choke_size_percentage', 'BORE_OIL_VOL': 'oil_vol', 'BORE_GAS_VOL': 'gas_vol', 'BORE_WAT_VOL': 'water_vol'}, inplace=True)
df_ml.head() | code |
105205632/cell_30 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_excel('../input/volve-production-data/Volve production data.xlsx')
df.isna().sum()
well_prod = df.groupby('NPD_WELL_BORE_NAME')['BORE_OIL_VOL'].sum()
well_prod
well_prod_g = df.groupby('NPD_WELL_BORE_NAME')['BORE_GAS_VOL'].sum()
well_prod_g
well_prod_w = df.groupby('NPD_WELL_BORE_NAME')['BORE_WAT_VOL'].sum()
well_prod_w
df_ml = df[df['WELL_TYPE'] == 'OP']
df_ml.shape
df_ml.columns
df_ml = df_ml[['DATEPRD', 'NPD_WELL_BORE_NAME', 'ON_STREAM_HRS', 'AVG_DOWNHOLE_PRESSURE', 'AVG_DOWNHOLE_TEMPERATURE', 'AVG_DP_TUBING', 'BORE_OIL_VOL', 'BORE_GAS_VOL', 'BORE_WAT_VOL', 'AVG_WHP_P', 'AVG_WHT_P', 'DP_CHOKE_SIZE']]
df_ml.rename(columns={'DATEPRD': 'date', 'NPD_WELL_BORE_NAME': 'well_name', 'ON_STREAM_HRS': 'prod_hrs', 'AVG_DOWNHOLE_PRESSURE': 'bhp', 'AVG_DOWNHOLE_TEMPERATURE': 'bht', 'AVG_DP_TUBING': 'dp_tubing', 'AVG_WHP_P': 'tht', 'AVG_WHT_P': 'thp', 'DP_CHOKE_SIZE': 'choke_size_percentage', 'BORE_OIL_VOL': 'oil_vol', 'BORE_GAS_VOL': 'gas_vol', 'BORE_WAT_VOL': 'water_vol'}, inplace=True)
df_ml.isna().sum()
df_ml = df_ml.dropna()
df_ml.shape
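# convert daily volumes to average daily rates (normalized to a 24-hour producing day) and derive GOR and water cut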
df_ml['oil_rate'] = df_ml['oil_vol'] * 24 / df_ml['prod_hrs']
df_ml['gas_rate'] = df_ml['gas_vol'] * 24 / df_ml['prod_hrs']
df_ml['water_rate'] = df_ml['water_vol'] * 24 / df_ml['prod_hrs']
df_ml['gor'] = df_ml['gas_rate'] / df_ml['oil_rate']
df_ml['wc'] = df_ml['water_rate'] / (df_ml['water_rate'] + df_ml['oil_rate'])
df_ml.drop(['oil_vol', 'gas_vol', 'water_vol'], axis=1, inplace=True)
df_ml.head() | code |
105205632/cell_20 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_excel('../input/volve-production-data/Volve production data.xlsx')
df.isna().sum()
well_prod = df.groupby('NPD_WELL_BORE_NAME')['BORE_OIL_VOL'].sum()
well_prod
well_prod_g = df.groupby('NPD_WELL_BORE_NAME')['BORE_GAS_VOL'].sum()
well_prod_g
well_prod_w = df.groupby('NPD_WELL_BORE_NAME')['BORE_WAT_VOL'].sum()
well_prod_w
df_ml = df[df['WELL_TYPE'] == 'OP']
df_ml.shape
df_ml.columns | code |
105205632/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_excel('../input/volve-production-data/Volve production data.xlsx')
df.isna().sum() | code |
105205632/cell_26 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_excel('../input/volve-production-data/Volve production data.xlsx')
df.isna().sum()
well_prod = df.groupby('NPD_WELL_BORE_NAME')['BORE_OIL_VOL'].sum()
well_prod
well_prod_g = df.groupby('NPD_WELL_BORE_NAME')['BORE_GAS_VOL'].sum()
well_prod_g
well_prod_w = df.groupby('NPD_WELL_BORE_NAME')['BORE_WAT_VOL'].sum()
well_prod_w
df_ml = df[df['WELL_TYPE'] == 'OP']
df_ml.shape
df_ml.columns
df_ml = df_ml[['DATEPRD', 'NPD_WELL_BORE_NAME', 'ON_STREAM_HRS', 'AVG_DOWNHOLE_PRESSURE', 'AVG_DOWNHOLE_TEMPERATURE', 'AVG_DP_TUBING', 'BORE_OIL_VOL', 'BORE_GAS_VOL', 'BORE_WAT_VOL', 'AVG_WHP_P', 'AVG_WHT_P', 'DP_CHOKE_SIZE']]
df_ml.rename(columns={'DATEPRD': 'date', 'NPD_WELL_BORE_NAME': 'well_name', 'ON_STREAM_HRS': 'prod_hrs', 'AVG_DOWNHOLE_PRESSURE': 'bhp', 'AVG_DOWNHOLE_TEMPERATURE': 'bht', 'AVG_DP_TUBING': 'dp_tubing', 'AVG_WHP_P': 'tht', 'AVG_WHT_P': 'thp', 'DP_CHOKE_SIZE': 'choke_size_percentage', 'BORE_OIL_VOL': 'oil_vol', 'BORE_GAS_VOL': 'gas_vol', 'BORE_WAT_VOL': 'water_vol'}, inplace=True)
df_ml.isna().sum()
df_ml = df_ml.dropna()
df_ml.shape | code |
105205632/cell_2 | [
"text_plain_output_1.png"
] | !pip install openpyxl | code |
105205632/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
import plotly_express as px
df = pd.read_excel('../input/volve-production-data/Volve production data.xlsx')
df.isna().sum()
well_prod = df.groupby('NPD_WELL_BORE_NAME')['BORE_OIL_VOL'].sum()
well_prod
fig_o = px.pie(names=well_prod.index, values=well_prod.values, labels={'names': 'Well ', 'values': 'Total oil production (bbls)'})
fig_o.update_traces(textposition='inside', textinfo='percent+label', hoverinfo='percent+label', marker=dict(line=dict(color='#000000', width=2)))
fig_o.update_layout(title_text='Contribution of each well in oil production', legend_title_text='Wells', legend_title_font_size=15, title_x=0.5, title_font_size=20, paper_bgcolor='#0C2D42', font_color='#fff')
fig_o.show() | code |
105205632/cell_19 | [
"text_html_output_2.png"
] | import pandas as pd
df = pd.read_excel('../input/volve-production-data/Volve production data.xlsx')
df.isna().sum()
well_prod = df.groupby('NPD_WELL_BORE_NAME')['BORE_OIL_VOL'].sum()
well_prod
well_prod_g = df.groupby('NPD_WELL_BORE_NAME')['BORE_GAS_VOL'].sum()
well_prod_g
well_prod_w = df.groupby('NPD_WELL_BORE_NAME')['BORE_WAT_VOL'].sum()
well_prod_w
df_ml = df[df['WELL_TYPE'] == 'OP']
df_ml.shape | code |
105205632/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly_express as px
import missingno as msn
plt.style.use('bmh')
df = pd.read_excel('../input/volve-production-data/Volve production data.xlsx')
df.isna().sum()
plt.figure(figsize=(15, 6))
plt.title('Oil production for all wells')
sns.lineplot(data=df, x='DATEPRD', y='BORE_OIL_VOL', hue='NPD_WELL_BORE_NAME') | code |
105205632/cell_15 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import plotly_express as px
df = pd.read_excel('../input/volve-production-data/Volve production data.xlsx')
df.isna().sum()
well_prod = df.groupby('NPD_WELL_BORE_NAME')['BORE_OIL_VOL'].sum()
well_prod
fig_o = px.pie(names=well_prod.index, values=well_prod.values, labels={'names': 'Well ', 'values': 'Total oil production (bbls)'})
fig_o.update_traces(textposition='inside', textinfo='percent+label', hoverinfo='percent+label', marker=dict(line=dict(color='#000000', width=2)))
fig_o.update_layout(title_text='Contribution of each well in oil production', legend_title_text='Wells', legend_title_font_size=15, title_x=0.5, title_font_size=20, paper_bgcolor='#0C2D42', font_color='#fff')
fig_o.show()
well_prod_g = df.groupby('NPD_WELL_BORE_NAME')['BORE_GAS_VOL'].sum()
well_prod_g
fig_g = px.pie(names=well_prod_g.index, values=well_prod_g.values, labels={'names': 'Well ', 'values': 'Total gas production'})
fig_g.update_traces(textposition='inside', textinfo='percent+label', hoverinfo='percent+label', marker=dict(line=dict(color='#000000', width=2)))
fig_g.update_layout(title_text='Contribution of each well in gas production', legend_title_text='Wells', legend_title_font_size=15, title_x=0.5, title_font_size=20, paper_bgcolor='#0C2D42', font_color='#fff')
fig_g.show()
well_prod_w = df.groupby('NPD_WELL_BORE_NAME')['BORE_WAT_VOL'].sum()
well_prod_w
fig_w = px.pie(names=well_prod_w.index, values=well_prod_w.values, labels={'names': 'Well ', 'values': 'Total water production'})
fig_w.update_traces(textposition='inside', textinfo='percent+label', hoverinfo='percent+label', marker=dict(line=dict(color='#000000', width=2)))
fig_w.update_layout(title_text='Contribution of each well in water production', legend_title_text='Wells', legend_title_font_size=15, title_x=0.5, title_font_size=20, paper_bgcolor='#0C2D42', font_color='#fff')
fig_w.show() | code |
105205632/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_excel('../input/volve-production-data/Volve production data.xlsx')
df.isna().sum()
well_prod = df.groupby('NPD_WELL_BORE_NAME')['BORE_OIL_VOL'].sum()
well_prod
well_prod_g = df.groupby('NPD_WELL_BORE_NAME')['BORE_GAS_VOL'].sum()
well_prod_g
well_prod_w = df.groupby('NPD_WELL_BORE_NAME')['BORE_WAT_VOL'].sum()
well_prod_w
df.hist(figsize=(18, 18)) | code |
105205632/cell_24 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_excel('../input/volve-production-data/Volve production data.xlsx')
df.isna().sum()
well_prod = df.groupby('NPD_WELL_BORE_NAME')['BORE_OIL_VOL'].sum()
well_prod
well_prod_g = df.groupby('NPD_WELL_BORE_NAME')['BORE_GAS_VOL'].sum()
well_prod_g
well_prod_w = df.groupby('NPD_WELL_BORE_NAME')['BORE_WAT_VOL'].sum()
well_prod_w
df_ml = df[df['WELL_TYPE'] == 'OP']
df_ml.shape
df_ml.columns
df_ml = df_ml[['DATEPRD', 'NPD_WELL_BORE_NAME', 'ON_STREAM_HRS', 'AVG_DOWNHOLE_PRESSURE', 'AVG_DOWNHOLE_TEMPERATURE', 'AVG_DP_TUBING', 'BORE_OIL_VOL', 'BORE_GAS_VOL', 'BORE_WAT_VOL', 'AVG_WHP_P', 'AVG_WHT_P', 'DP_CHOKE_SIZE']]
df_ml.rename(columns={'DATEPRD': 'date', 'NPD_WELL_BORE_NAME': 'well_name', 'ON_STREAM_HRS': 'prod_hrs', 'AVG_DOWNHOLE_PRESSURE': 'bhp', 'AVG_DOWNHOLE_TEMPERATURE': 'bht', 'AVG_DP_TUBING': 'dp_tubing', 'AVG_WHP_P': 'thp', 'AVG_WHT_P': 'tht', 'DP_CHOKE_SIZE': 'choke_size_percentage', 'BORE_OIL_VOL': 'oil_vol', 'BORE_GAS_VOL': 'gas_vol', 'BORE_WAT_VOL': 'water_vol'}, inplace=True)
df_ml.isna().sum() | code |
105205632/cell_14 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_excel('../input/volve-production-data/Volve production data.xlsx')
df.isna().sum()
well_prod = df.groupby('NPD_WELL_BORE_NAME')['BORE_OIL_VOL'].sum()
well_prod
well_prod_g = df.groupby('NPD_WELL_BORE_NAME')['BORE_GAS_VOL'].sum()
well_prod_g
well_prod_w = df.groupby('NPD_WELL_BORE_NAME')['BORE_WAT_VOL'].sum()
well_prod_w | code |
105205632/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_excel('../input/volve-production-data/Volve production data.xlsx')
df.isna().sum()
well_prod = df.groupby('NPD_WELL_BORE_NAME')['BORE_OIL_VOL'].sum()
well_prod | code |
105205632/cell_12 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_excel('../input/volve-production-data/Volve production data.xlsx')
df.isna().sum()
well_prod = df.groupby('NPD_WELL_BORE_NAME')['BORE_OIL_VOL'].sum()
well_prod
well_prod_g = df.groupby('NPD_WELL_BORE_NAME')['BORE_GAS_VOL'].sum()
well_prod_g | code |
105205632/cell_5 | [
"text_plain_output_1.png"
] | import missingno as msn
import pandas as pd
df = pd.read_excel('../input/volve-production-data/Volve production data.xlsx')
msn.matrix(df) | code |
90127845/cell_4 | [
"image_output_5.png",
"image_output_7.png",
"image_output_4.png",
"image_output_8.png",
"image_output_6.png",
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import os
import pandas as pd
root = '/kaggle/input/tabular-playground-series-mar-2022'
train_df = pd.read_csv(os.path.join(root, 'train.csv'))
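# parse the timestamp column, then split it into separate date and time components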
train_df['datetime'] = pd.to_datetime(train_df.time)
train_df['date'] = train_df.datetime.dt.date
train_df['time'] = train_df.datetime.dt.time
test_df = pd.read_csv(os.path.join(root, 'test.csv'))
test_df['datetime'] = pd.to_datetime(test_df.time)
test_df['date'] = test_df.datetime.dt.date
test_df['time'] = test_df.datetime.dt.time
train_df | code |
90127845/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import datetime
import os
import matplotlib.pyplot as plt
import pandas as pd
root = '/kaggle/input/tabular-playground-series-mar-2022'
train_df = pd.read_csv(os.path.join(root, 'train.csv'))
train_df['datetime'] = pd.to_datetime(train_df.time)
train_df['date'] = train_df.datetime.dt.date
train_df['time'] = train_df.datetime.dt.time
test_df = pd.read_csv(os.path.join(root, 'test.csv'))
test_df['datetime'] = pd.to_datetime(test_df.time)
test_df['date'] = test_df.datetime.dt.date
test_df['time'] = test_df.datetime.dt.time
sep_30 = datetime.date(1991, 9, 30)
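# keep only Mondays, then plot mean daily congestion for dates before the cutoff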
mondays = train_df[train_df.datetime.dt.dayofweek == 0].copy()  # .copy() avoids SettingWithCopyWarning when adding the helper column
mondays['is_morning'] = mondays.datetime.dt.hour < 12
mondays[mondays.datetime.dt.date < sep_30].groupby('date').congestion.mean().plot()
plt.title('Congestion by date')
plt.ylabel('avg daily congestion')
plt.show() | code |