path: string (length 13 to 17)
screenshot_names: sequence (length 1 to 873)
code: string (length 0 to 40.4k)
cell_type: string (1 class)
2020968/cell_79
[ "text_plain_output_1.png" ]
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import pandas as pd

train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
columns = ['SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked']
chance_survive = len(train[train['Survived'] == 1]) / len(train['Survived'])

def plot_survival(df, index, color='blue', use_index=True, num_xticks=0, xticks='', position=0.5, legend=['General probability of survival']):
    # pivots the survival rate by `index`; the plotting call itself is missing from this extract
    df_pivot = df.pivot_table(index=index, values='Survived')
    if num_xticks > 0:
        plt.xticks(range(num_xticks), xticks)

cut_points = [-1, 0, 5, 12, 18, 35, 60, 100]
label_names = ['Missing', 'Infant', 'Child', 'Teenager', 'Young Adult', 'Adult', 'Senior']

def process_age(df, cut_points, label_names):
    df['Age'] = df['Age'].fillna(-0.5)
    df['Age_categories'] = pd.cut(df['Age'], cut_points, labels=label_names)
    return df

train = process_age(train, cut_points, label_names)
holdout = process_age(holdout, cut_points, label_names)

def process_fare(df, cut_points, label_names):
    df['Fare_categories'] = pd.cut(df['Fare'], cut_points, labels=label_names)
    return df

train = process_fare(train, [0, 12, 50, 100, 1000], ['0-12$', '12-50$', '50-100$', '100+$'])
holdout = process_fare(holdout, [0, 12, 50, 100, 1000], ['0-12$', '12-50$', '50-100$', '100+$'])

def create_dummies(df, column_name):
    dummies = pd.get_dummies(df[column_name], prefix=column_name)
    df = pd.concat([df, dummies], axis=1)
    return df

for col in ['Age_categories', 'Pclass', 'Sex', 'Embarked', 'Fare_categories']:
    train = create_dummies(train, col)
    holdout = create_dummies(holdout, col)

# The cells that build the scaled columns, the 'titles' column and the cabin letters
# are not part of this extract; the feature lists below assume they already exist.
columns = ['Age_categories_Missing', 'Age_categories_Infant', 'Age_categories_Child', 'Age_categories_Teenager',
           'Age_categories_Young Adult', 'Age_categories_Adult', 'Age_categories_Senior', 'Pclass_1', 'Pclass_2',
           'Pclass_3', 'Sex_female', 'Sex_male', 'Embarked_C', 'Embarked_Q', 'Embarked_S',
           'SibSp_scaled', 'Parch_scaled', 'Fare_scaled']
columns_not_scaled = ['Age_categories_Missing', 'Age_categories_Infant', 'Age_categories_Child',
                      'Age_categories_Teenager', 'Age_categories_Young Adult', 'Age_categories_Adult',
                      'Age_categories_Senior', 'Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex_female', 'Sex_male',
                      'Embarked_C', 'Embarked_Q', 'Embarked_S', 'SibSp', 'Parch', 'Fare']

lr = LogisticRegression()
lr.fit(train[columns], train['Survived'])
coefficients = lr.coef_
feature_importance = pd.Series(coefficients[0], index=train[columns].columns)
lr.fit(train[columns_not_scaled], train['Survived'])
coefficients2 = lr.coef_
feature_importance_2 = pd.Series(coefficients2[0], index=train[columns_not_scaled].columns)

train = create_dummies(train, 'titles')
holdout = create_dummies(holdout, 'titles')
train = create_dummies(train, 'Cabin')
holdout = create_dummies(holdout, 'Cabin')

columns_cabins_titles = ['Age_categories_Missing', 'Age_categories_Infant', 'Age_categories_Child',
                         'Age_categories_Teenager', 'Age_categories_Young Adult', 'Age_categories_Adult',
                         'Age_categories_Senior', 'Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex_female', 'SibSp_scaled',
                         'Parch_scaled', 'Fare_categories_0-12$', 'Fare_categories_12-50$', 'Fare_categories_50-100$',
                         'Fare_categories_100+$', 'titles_ Capt', 'titles_ Col', 'titles_ Don', 'titles_ Dr',
                         'titles_ Jonkheer', 'titles_ Lady', 'titles_ Major', 'titles_ Master', 'titles_ Miss',
                         'titles_ Mlle', 'titles_ Mme', 'titles_ Mr', 'titles_ Mrs', 'titles_ Ms', 'titles_ Rev',
                         'titles_ Sir', 'titles_ the Countess', 'Cabin_A', 'Cabin_B', 'Cabin_C', 'Cabin_D',
                         'Cabin_E', 'Cabin_F', 'Cabin_G', 'Cabin_T', 'Cabin_u']
other_cols = ['Pclass_1', 'Pclass_2', 'Pclass_3']
# redefined without 'Cabin_u' before running feature selection
columns_cabins_titles = ['Age_categories_Missing', 'Age_categories_Infant', 'Age_categories_Child',
                         'Age_categories_Teenager', 'Age_categories_Young Adult', 'Age_categories_Adult',
                         'Age_categories_Senior', 'Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex_female', 'SibSp_scaled',
                         'Parch_scaled', 'Fare_categories_0-12$', 'Fare_categories_12-50$', 'Fare_categories_50-100$',
                         'Fare_categories_100+$', 'titles_ Capt', 'titles_ Col', 'titles_ Don', 'titles_ Dr',
                         'titles_ Jonkheer', 'titles_ Lady', 'titles_ Major', 'titles_ Master', 'titles_ Miss',
                         'titles_ Mlle', 'titles_ Mme', 'titles_ Mr', 'titles_ Mrs', 'titles_ Ms', 'titles_ Rev',
                         'titles_ Sir', 'titles_ the Countess', 'Cabin_A', 'Cabin_B', 'Cabin_C', 'Cabin_D',
                         'Cabin_E', 'Cabin_F', 'Cabin_G', 'Cabin_T']

lr = LogisticRegression()
selector = RFECV(lr, cv=10)
selector.fit(train[columns_cabins_titles], train['Survived'])
code
2020968/cell_30
[ "image_output_1.png" ]
from sklearn.preprocessing import minmax_scale
import matplotlib.pyplot as plt
import pandas as pd

train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
chance_survive = len(train[train['Survived'] == 1]) / len(train['Survived'])

def plot_survival(df, index, color='blue', use_index=True, num_xticks=0, xticks='', position=0.5, legend=['General probability of survival']):
    # pivots the survival rate by `index`; the plotting call itself is missing from this extract
    df_pivot = df.pivot_table(index=index, values='Survived')
    if num_xticks > 0:
        plt.xticks(range(num_xticks), xticks)

cut_points = [-1, 0, 5, 12, 18, 35, 60, 100]
label_names = ['Missing', 'Infant', 'Child', 'Teenager', 'Young Adult', 'Adult', 'Senior']

def process_age(df, cut_points, label_names):
    df['Age'] = df['Age'].fillna(-0.5)
    df['Age_categories'] = pd.cut(df['Age'], cut_points, labels=label_names)
    return df

train = process_age(train, cut_points, label_names)
holdout = process_age(holdout, cut_points, label_names)

def process_fare(df, cut_points, label_names):
    df['Fare_categories'] = pd.cut(df['Fare'], cut_points, labels=label_names)
    return df

train = process_fare(train, [0, 12, 50, 100, 1000], ['0-12$', '12-50$', '50-100$', '100+$'])
holdout = process_fare(holdout, [0, 12, 50, 100, 1000], ['0-12$', '12-50$', '50-100$', '100+$'])

def create_dummies(df, column_name):
    dummies = pd.get_dummies(df[column_name], prefix=column_name)
    df = pd.concat([df, dummies], axis=1)
    return df

for col in ['Age_categories', 'Pclass', 'Sex', 'Embarked', 'Fare_categories']:
    train = create_dummies(train, col)
    holdout = create_dummies(holdout, col)

cols = ['SibSp', 'Parch', 'Fare']
new_cols = ['SibSp_scaled', 'Parch_scaled', 'Fare_scaled']
for col, new_col in zip(cols, new_cols):
    train[new_col] = minmax_scale(train[col])
    holdout[new_col] = minmax_scale(holdout[col])
code
2020968/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
chance_survive = len(train[train['Survived'] == 1]) / len(train['Survived'])

def plot_survival(df, index, color='blue', use_index=True, num_xticks=0, xticks='', position=0.5, legend=['General probability of survival']):
    # pivots the survival rate by `index`; the plotting call itself is missing from this extract
    df_pivot = df.pivot_table(index=index, values='Survived')
    if num_xticks > 0:
        plt.xticks(range(num_xticks), xticks)

plot_survival(train, 'Pclass', color='blue', use_index=True)
code
2020968/cell_60
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
chance_survive = len(train[train['Survived'] == 1]) / len(train['Survived'])

def plot_survival(df, index, color='blue', use_index=True, num_xticks=0, xticks='', position=0.5, legend=['General probability of survival']):
    # pivots the survival rate by `index`; the plotting call itself is missing from this extract
    df_pivot = df.pivot_table(index=index, values='Survived')
    if num_xticks > 0:
        plt.xticks(range(num_xticks), xticks)

cut_points = [-1, 0, 5, 12, 18, 35, 60, 100]
label_names = ['Missing', 'Infant', 'Child', 'Teenager', 'Young Adult', 'Adult', 'Senior']

def process_age(df, cut_points, label_names):
    df['Age'] = df['Age'].fillna(-0.5)
    df['Age_categories'] = pd.cut(df['Age'], cut_points, labels=label_names)
    return df

train = process_age(train, cut_points, label_names)
holdout = process_age(holdout, cut_points, label_names)

def process_fare(df, cut_points, label_names):
    df['Fare_categories'] = pd.cut(df['Fare'], cut_points, labels=label_names)
    return df

train = process_fare(train, [0, 12, 50, 100, 1000], ['0-12$', '12-50$', '50-100$', '100+$'])
holdout = process_fare(holdout, [0, 12, 50, 100, 1000], ['0-12$', '12-50$', '50-100$', '100+$'])

def create_dummies(df, column_name):
    dummies = pd.get_dummies(df[column_name], prefix=column_name)
    df = pd.concat([df, dummies], axis=1)
    return df

for col in ['Age_categories', 'Pclass', 'Sex', 'Embarked', 'Fare_categories']:
    train = create_dummies(train, col)
    holdout = create_dummies(holdout, col)

# 'titles' is created in an earlier notebook cell that is not part of this extract
train = create_dummies(train, 'titles')
holdout = create_dummies(holdout, 'titles')
train = create_dummies(train, 'Cabin')
holdout = create_dummies(holdout, 'Cabin')
print('We now have', len(train.columns), 'columns as predictors to fit')
code
2020968/cell_69
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import pandas as pd

train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
columns = ['SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked']
chance_survive = len(train[train['Survived'] == 1]) / len(train['Survived'])

def plot_survival(df, index, color='blue', use_index=True, num_xticks=0, xticks='', position=0.5, legend=['General probability of survival']):
    # pivots the survival rate by `index`; the plotting call itself is missing from this extract
    df_pivot = df.pivot_table(index=index, values='Survived')
    if num_xticks > 0:
        plt.xticks(range(num_xticks), xticks)

cut_points = [-1, 0, 5, 12, 18, 35, 60, 100]
label_names = ['Missing', 'Infant', 'Child', 'Teenager', 'Young Adult', 'Adult', 'Senior']

def process_age(df, cut_points, label_names):
    df['Age'] = df['Age'].fillna(-0.5)
    df['Age_categories'] = pd.cut(df['Age'], cut_points, labels=label_names)
    return df

train = process_age(train, cut_points, label_names)
holdout = process_age(holdout, cut_points, label_names)

def process_fare(df, cut_points, label_names):
    df['Fare_categories'] = pd.cut(df['Fare'], cut_points, labels=label_names)
    return df

train = process_fare(train, [0, 12, 50, 100, 1000], ['0-12$', '12-50$', '50-100$', '100+$'])
holdout = process_fare(holdout, [0, 12, 50, 100, 1000], ['0-12$', '12-50$', '50-100$', '100+$'])

def create_dummies(df, column_name):
    dummies = pd.get_dummies(df[column_name], prefix=column_name)
    df = pd.concat([df, dummies], axis=1)
    return df

for col in ['Age_categories', 'Pclass', 'Sex', 'Embarked', 'Fare_categories']:
    train = create_dummies(train, col)
    holdout = create_dummies(holdout, col)

# The cells that build the scaled columns and the 'titles' column are not part of
# this extract; the feature lists below assume they already exist.
columns = ['Age_categories_Missing', 'Age_categories_Infant', 'Age_categories_Child', 'Age_categories_Teenager',
           'Age_categories_Young Adult', 'Age_categories_Adult', 'Age_categories_Senior', 'Pclass_1', 'Pclass_2',
           'Pclass_3', 'Sex_female', 'Sex_male', 'Embarked_C', 'Embarked_Q', 'Embarked_S',
           'SibSp_scaled', 'Parch_scaled', 'Fare_scaled']
columns_not_scaled = ['Age_categories_Missing', 'Age_categories_Infant', 'Age_categories_Child',
                      'Age_categories_Teenager', 'Age_categories_Young Adult', 'Age_categories_Adult',
                      'Age_categories_Senior', 'Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex_female', 'Sex_male',
                      'Embarked_C', 'Embarked_Q', 'Embarked_S', 'SibSp', 'Parch', 'Fare']

lr = LogisticRegression()
lr.fit(train[columns], train['Survived'])
coefficients = lr.coef_
feature_importance = pd.Series(coefficients[0], index=train[columns].columns)
lr.fit(train[columns_not_scaled], train['Survived'])
coefficients2 = lr.coef_
feature_importance_2 = pd.Series(coefficients2[0], index=train[columns_not_scaled].columns)
ordered_feature_importance = feature_importance.abs().sort_values()
ordered_feature_importance.plot.barh(color='blue')

train = create_dummies(train, 'titles')
holdout = create_dummies(holdout, 'titles')
train = create_dummies(train, 'Cabin')
holdout = create_dummies(holdout, 'Cabin')

columns_cabins_titles = ['Age_categories_Missing', 'Age_categories_Infant', 'Age_categories_Child',
                         'Age_categories_Teenager', 'Age_categories_Young Adult', 'Age_categories_Adult',
                         'Age_categories_Senior', 'Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex_female', 'SibSp_scaled',
                         'Parch_scaled', 'Fare_categories_0-12$', 'Fare_categories_12-50$', 'Fare_categories_50-100$',
                         'Fare_categories_100+$', 'titles_ Capt', 'titles_ Col', 'titles_ Don', 'titles_ Dr',
                         'titles_ Jonkheer', 'titles_ Lady', 'titles_ Major', 'titles_ Master', 'titles_ Miss',
                         'titles_ Mlle', 'titles_ Mme', 'titles_ Mr', 'titles_ Mrs', 'titles_ Ms', 'titles_ Rev',
                         'titles_ Sir', 'titles_ the Countess', 'Cabin_A', 'Cabin_B', 'Cabin_C', 'Cabin_D',
                         'Cabin_E', 'Cabin_F', 'Cabin_G', 'Cabin_T', 'Cabin_u']
other_cols = ['Pclass_1', 'Pclass_2', 'Pclass_3']

logreg = LogisticRegression()
logreg.fit(train[columns_cabins_titles], train['Survived'])
feature_importance_2 = logreg.coef_
feature_importance_2 = pd.Series(feature_importance_2[0], index=train[columns_cabins_titles].columns)
ordered_feature_importance = feature_importance_2.abs().sort_values()
ordered_feature_importance.plot.barh(color='blue', figsize=(10, 10))
code
2020968/cell_45
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
chance_survive = len(train[train['Survived'] == 1]) / len(train['Survived'])

def plot_survival(df, index, color='blue', use_index=True, num_xticks=0, xticks='', position=0.5, legend=['General probability of survival']):
    # pivots the survival rate by `index`; the plotting call itself is missing from this extract
    df_pivot = df.pivot_table(index=index, values='Survived')
    if num_xticks > 0:
        plt.xticks(range(num_xticks), xticks)

cut_points = [-1, 0, 5, 12, 18, 35, 60, 100]
label_names = ['Missing', 'Infant', 'Child', 'Teenager', 'Young Adult', 'Adult', 'Senior']

def process_age(df, cut_points, label_names):
    df['Age'] = df['Age'].fillna(-0.5)
    df['Age_categories'] = pd.cut(df['Age'], cut_points, labels=label_names)
    return df

train = process_age(train, cut_points, label_names)
holdout = process_age(holdout, cut_points, label_names)

def process_fare(df, cut_points, label_names):
    df['Fare_categories'] = pd.cut(df['Fare'], cut_points, labels=label_names)
    return df

train = process_fare(train, [0, 12, 50, 100, 1000], ['0-12$', '12-50$', '50-100$', '100+$'])
holdout = process_fare(holdout, [0, 12, 50, 100, 1000], ['0-12$', '12-50$', '50-100$', '100+$'])

def create_dummies(df, column_name):
    dummies = pd.get_dummies(df[column_name], prefix=column_name)
    df = pd.concat([df, dummies], axis=1)
    return df

for col in ['Age_categories', 'Pclass', 'Sex', 'Embarked', 'Fare_categories']:
    train = create_dummies(train, col)
    holdout = create_dummies(holdout, col)

# 'titles' is created in an earlier notebook cell that is not part of this extract
plot_survival(train, 'titles', use_index=False, num_xticks=len(train['titles'].unique()) - 1, xticks=train['titles'].unique().sort_values())
code
2020968/cell_49
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
chance_survive = len(train[train['Survived'] == 1]) / len(train['Survived'])

def plot_survival(df, index, color='blue', use_index=True, num_xticks=0, xticks='', position=0.5, legend=['General probability of survival']):
    # pivots the survival rate by `index`; the plotting call itself is missing from this extract
    df_pivot = df.pivot_table(index=index, values='Survived')
    if num_xticks > 0:
        plt.xticks(range(num_xticks), xticks)

cut_points = [-1, 0, 5, 12, 18, 35, 60, 100]
label_names = ['Missing', 'Infant', 'Child', 'Teenager', 'Young Adult', 'Adult', 'Senior']

def process_age(df, cut_points, label_names):
    df['Age'] = df['Age'].fillna(-0.5)
    df['Age_categories'] = pd.cut(df['Age'], cut_points, labels=label_names)
    return df

train = process_age(train, cut_points, label_names)
holdout = process_age(holdout, cut_points, label_names)

def process_fare(df, cut_points, label_names):
    df['Fare_categories'] = pd.cut(df['Fare'], cut_points, labels=label_names)
    return df

train = process_fare(train, [0, 12, 50, 100, 1000], ['0-12$', '12-50$', '50-100$', '100+$'])
holdout = process_fare(holdout, [0, 12, 50, 100, 1000], ['0-12$', '12-50$', '50-100$', '100+$'])

def create_dummies(df, column_name):
    dummies = pd.get_dummies(df[column_name], prefix=column_name)
    df = pd.concat([df, dummies], axis=1)
    return df

for col in ['Age_categories', 'Pclass', 'Sex', 'Embarked', 'Fare_categories']:
    train = create_dummies(train, col)
    holdout = create_dummies(holdout, col)

# 'titles' is created in an earlier notebook cell that is not part of this extract
train = create_dummies(train, 'titles')
holdout = create_dummies(holdout, 'titles')
print('number of null values:', train['Cabin'].isnull().sum())
print('number of non-null values:', train['Cabin'].notnull().sum())
code
2020968/cell_58
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
chance_survive = len(train[train['Survived'] == 1]) / len(train['Survived'])

def plot_survival(df, index, color='blue', use_index=True, num_xticks=0, xticks='', position=0.5, legend=['General probability of survival']):
    # pivots the survival rate by `index`; the plotting call itself is missing from this extract
    df_pivot = df.pivot_table(index=index, values='Survived')
    if num_xticks > 0:
        plt.xticks(range(num_xticks), xticks)

cut_points = [-1, 0, 5, 12, 18, 35, 60, 100]
label_names = ['Missing', 'Infant', 'Child', 'Teenager', 'Young Adult', 'Adult', 'Senior']

def process_age(df, cut_points, label_names):
    df['Age'] = df['Age'].fillna(-0.5)
    df['Age_categories'] = pd.cut(df['Age'], cut_points, labels=label_names)
    return df

train = process_age(train, cut_points, label_names)
holdout = process_age(holdout, cut_points, label_names)

def process_fare(df, cut_points, label_names):
    df['Fare_categories'] = pd.cut(df['Fare'], cut_points, labels=label_names)
    return df

train = process_fare(train, [0, 12, 50, 100, 1000], ['0-12$', '12-50$', '50-100$', '100+$'])
holdout = process_fare(holdout, [0, 12, 50, 100, 1000], ['0-12$', '12-50$', '50-100$', '100+$'])

def create_dummies(df, column_name):
    dummies = pd.get_dummies(df[column_name], prefix=column_name)
    df = pd.concat([df, dummies], axis=1)
    return df

for col in ['Age_categories', 'Pclass', 'Sex', 'Embarked', 'Fare_categories']:
    train = create_dummies(train, col)
    holdout = create_dummies(holdout, col)

# 'titles' is created in an earlier notebook cell that is not part of this extract
train = create_dummies(train, 'titles')
holdout = create_dummies(holdout, 'titles')
train = create_dummies(train, 'Cabin')
holdout = create_dummies(holdout, 'Cabin')
plot_survival(train, 'Cabin', use_index=True)
code
2020968/cell_38
[ "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import pandas as pd

train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
columns = ['SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked']
chance_survive = len(train[train['Survived'] == 1]) / len(train['Survived'])

def plot_survival(df, index, color='blue', use_index=True, num_xticks=0, xticks='', position=0.5, legend=['General probability of survival']):
    # pivots the survival rate by `index`; the plotting call itself is missing from this extract
    df_pivot = df.pivot_table(index=index, values='Survived')
    if num_xticks > 0:
        plt.xticks(range(num_xticks), xticks)

cut_points = [-1, 0, 5, 12, 18, 35, 60, 100]
label_names = ['Missing', 'Infant', 'Child', 'Teenager', 'Young Adult', 'Adult', 'Senior']

def process_age(df, cut_points, label_names):
    df['Age'] = df['Age'].fillna(-0.5)
    df['Age_categories'] = pd.cut(df['Age'], cut_points, labels=label_names)
    return df

train = process_age(train, cut_points, label_names)
holdout = process_age(holdout, cut_points, label_names)

def process_fare(df, cut_points, label_names):
    df['Fare_categories'] = pd.cut(df['Fare'], cut_points, labels=label_names)
    return df

train = process_fare(train, [0, 12, 50, 100, 1000], ['0-12$', '12-50$', '50-100$', '100+$'])
holdout = process_fare(holdout, [0, 12, 50, 100, 1000], ['0-12$', '12-50$', '50-100$', '100+$'])

def create_dummies(df, column_name):
    dummies = pd.get_dummies(df[column_name], prefix=column_name)
    df = pd.concat([df, dummies], axis=1)
    return df

for col in ['Age_categories', 'Pclass', 'Sex', 'Embarked', 'Fare_categories']:
    train = create_dummies(train, col)
    holdout = create_dummies(holdout, col)

# The cell that builds the scaled columns is not part of this extract;
# the feature list below assumes they already exist.
columns = ['Age_categories_Missing', 'Age_categories_Infant', 'Age_categories_Child', 'Age_categories_Teenager',
           'Age_categories_Young Adult', 'Age_categories_Adult', 'Age_categories_Senior', 'Pclass_1', 'Pclass_2',
           'Pclass_3', 'Sex_female', 'Sex_male', 'Embarked_C', 'Embarked_Q', 'Embarked_S',
           'SibSp_scaled', 'Parch_scaled', 'Fare_scaled']
columns_not_scaled = ['Age_categories_Missing', 'Age_categories_Infant', 'Age_categories_Child',
                      'Age_categories_Teenager', 'Age_categories_Young Adult', 'Age_categories_Adult',
                      'Age_categories_Senior', 'Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex_female', 'Sex_male',
                      'Embarked_C', 'Embarked_Q', 'Embarked_S', 'SibSp', 'Parch', 'Fare']

lr = LogisticRegression()
lr.fit(train[columns], train['Survived'])
coefficients = lr.coef_
feature_importance = pd.Series(coefficients[0], index=train[columns].columns)
lr.fit(train[columns_not_scaled], train['Survived'])
coefficients2 = lr.coef_
feature_importance_2 = pd.Series(coefficients2[0], index=train[columns_not_scaled].columns)
ordered_feature_importance = feature_importance.abs().sort_values()
ordered_feature_importance.plot.barh(color='blue')
code
2020968/cell_17
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
chance_survive = len(train[train['Survived'] == 1]) / len(train['Survived'])

def plot_survival(df, index, color='blue', use_index=True, num_xticks=0, xticks='', position=0.5, legend=['General probability of survival']):
    # pivots the survival rate by `index`; the plotting call itself is missing from this extract
    df_pivot = df.pivot_table(index=index, values='Survived')
    if num_xticks > 0:
        plt.xticks(range(num_xticks), xticks)

cut_points = [-1, 0, 5, 12, 18, 35, 60, 100]
label_names = ['Missing', 'Infant', 'Child', 'Teenager', 'Young Adult', 'Adult', 'Senior']

def process_age(df, cut_points, label_names):
    df['Age'] = df['Age'].fillna(-0.5)
    df['Age_categories'] = pd.cut(df['Age'], cut_points, labels=label_names)
    return df

train = process_age(train, cut_points, label_names)
holdout = process_age(holdout, cut_points, label_names)

plot_survival(train, 'Age_categories', use_index=False, num_xticks=len(train['Age_categories'].unique()), xticks=train['Age_categories'].unique().sort_values())
code
2020968/cell_77
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
chance_survive = len(train[train['Survived'] == 1]) / len(train['Survived'])

def plot_survival(df, index, color='blue', use_index=True, num_xticks=0, xticks='', position=0.5, legend=['General probability of survival']):
    # pivots the survival rate by `index`; the plotting call itself is missing from this extract
    df_pivot = df.pivot_table(index=index, values='Survived')
    if num_xticks > 0:
        plt.xticks(range(num_xticks), xticks)

cut_points = [-1, 0, 5, 12, 18, 35, 60, 100]
label_names = ['Missing', 'Infant', 'Child', 'Teenager', 'Young Adult', 'Adult', 'Senior']

def process_age(df, cut_points, label_names):
    df['Age'] = df['Age'].fillna(-0.5)
    df['Age_categories'] = pd.cut(df['Age'], cut_points, labels=label_names)
    return df

train = process_age(train, cut_points, label_names)
holdout = process_age(holdout, cut_points, label_names)

def process_fare(df, cut_points, label_names):
    df['Fare_categories'] = pd.cut(df['Fare'], cut_points, labels=label_names)
    return df

train = process_fare(train, [0, 12, 50, 100, 1000], ['0-12$', '12-50$', '50-100$', '100+$'])
holdout = process_fare(holdout, [0, 12, 50, 100, 1000], ['0-12$', '12-50$', '50-100$', '100+$'])

def create_dummies(df, column_name):
    dummies = pd.get_dummies(df[column_name], prefix=column_name)
    df = pd.concat([df, dummies], axis=1)
    return df

for col in ['Age_categories', 'Pclass', 'Sex', 'Embarked', 'Fare_categories']:
    train = create_dummies(train, col)
    holdout = create_dummies(holdout, col)

# 'titles' and the scaled columns are created in earlier cells not part of this extract
train = create_dummies(train, 'titles')
holdout = create_dummies(holdout, 'titles')
train = create_dummies(train, 'Cabin')
holdout = create_dummies(holdout, 'Cabin')

columns_cabins_titles = ['Age_categories_Missing', 'Age_categories_Infant', 'Age_categories_Child',
                         'Age_categories_Teenager', 'Age_categories_Young Adult', 'Age_categories_Adult',
                         'Age_categories_Senior', 'Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex_female', 'SibSp_scaled',
                         'Parch_scaled', 'Fare_categories_0-12$', 'Fare_categories_12-50$', 'Fare_categories_50-100$',
                         'Fare_categories_100+$', 'titles_ Capt', 'titles_ Col', 'titles_ Don', 'titles_ Dr',
                         'titles_ Jonkheer', 'titles_ Lady', 'titles_ Major', 'titles_ Master', 'titles_ Miss',
                         'titles_ Mlle', 'titles_ Mme', 'titles_ Mr', 'titles_ Mrs', 'titles_ Ms', 'titles_ Rev',
                         'titles_ Sir', 'titles_ the Countess', 'Cabin_A', 'Cabin_B', 'Cabin_C', 'Cabin_D',
                         'Cabin_E', 'Cabin_F', 'Cabin_G', 'Cabin_T', 'Cabin_u']
other_cols = ['Pclass_1', 'Pclass_2', 'Pclass_3']
# redefined without 'Cabin_u' before plotting the correlations
columns_cabins_titles = ['Age_categories_Missing', 'Age_categories_Infant', 'Age_categories_Child',
                         'Age_categories_Teenager', 'Age_categories_Young Adult', 'Age_categories_Adult',
                         'Age_categories_Senior', 'Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex_female', 'SibSp_scaled',
                         'Parch_scaled', 'Fare_categories_0-12$', 'Fare_categories_12-50$', 'Fare_categories_50-100$',
                         'Fare_categories_100+$', 'titles_ Capt', 'titles_ Col', 'titles_ Don', 'titles_ Dr',
                         'titles_ Jonkheer', 'titles_ Lady', 'titles_ Major', 'titles_ Master', 'titles_ Miss',
                         'titles_ Mlle', 'titles_ Mme', 'titles_ Mr', 'titles_ Mrs', 'titles_ Ms', 'titles_ Rev',
                         'titles_ Sir', 'titles_ the Countess', 'Cabin_A', 'Cabin_B', 'Cabin_C', 'Cabin_D',
                         'Cabin_E', 'Cabin_F', 'Cabin_G', 'Cabin_T']

def plot_correlation_heatmap(df):
    corr = df.corr()
    sns.set(style='white')
    mask = np.zeros_like(corr, dtype=bool)  # np.bool was removed from recent NumPy releases
    mask[np.triu_indices_from(mask)] = True
    f, ax = plt.subplots(figsize=(11, 9))
    cmap = sns.diverging_palette(520, 10, as_cmap=True)
    sns.heatmap(corr, mask=mask, cmap=cmap, vmax=1, center=0, square=True, linewidths=.5, cbar_kws={'shrink': .5})
    plt.show()

# the original applied .corr() twice more to an already-computed correlation matrix;
# the function computes the correlations itself, so pass the raw feature frame once
plot_correlation_heatmap(train[columns_cabins_titles])
code
2020968/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
chance_survive = len(train[train['Survived'] == 1]) / len(train['Survived'])

def plot_survival(df, index, color='blue', use_index=True, num_xticks=0, xticks='', position=0.5, legend=['General probability of survival']):
    # pivots the survival rate by `index`; the plotting call itself is missing from this extract
    df_pivot = df.pivot_table(index=index, values='Survived')
    if num_xticks > 0:
        plt.xticks(range(num_xticks), xticks)

plot_survival(train, 'Sex', color=['pink', 'blue'], use_index=False, num_xticks=2, xticks=['Female', 'Male'])
code
2020968/cell_12
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
chance_survive = len(train[train['Survived'] == 1]) / len(train['Survived'])

def plot_survival(df, index, color='blue', use_index=True, num_xticks=0, xticks='', position=0.5, legend=['General probability of survival']):
    # pivots the survival rate by `index`; the plotting call itself is missing from this extract
    df_pivot = df.pivot_table(index=index, values='Survived')
    if num_xticks > 0:
        plt.xticks(range(num_xticks), xticks)

plot_survival(train, 'Parch', 'red', True, position=0.3)
plot_survival(train, 'SibSp', 'blue', True)
code
2020968/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
columns = ['SibSp', 'Parch', 'Fare', 'Cabin', 'Embarked']
holdout[columns].describe()
code
2020630/cell_4
[ "text_html_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/scrubbed.csv')
usa = data[data.country == 'us']
texas = data[data.state == 'tx']
illinois = data[data.state == 'il']
illinois.head(115)
code
2020630/cell_2
[ "text_html_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/scrubbed.csv')
usa = data[data.country == 'us']
usa.head()
code
2020630/cell_1
[ "application_vnd.jupyter.stderr_output_2.png", "text_html_output_1.png", "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

print(check_output(['ls', '../input']).decode('utf8'))
data = pd.read_csv('../input/scrubbed.csv')
data.head()
code
2020630/cell_3
[ "text_html_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/scrubbed.csv')
usa = data[data.country == 'us']
texas = data[data.state == 'tx']
texas.head()
code
2020630/cell_5
[ "text_html_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/scrubbed.csv')
usa = data[data.country == 'us']
texas = data[data.state == 'tx']
illinois = data[data.state == 'il']
elmwood = data[data.city == 'elmwood park']
elmwood.head(10)
code
50224851/cell_9
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df_train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
df_train['label'] = df_train['label'].astype(str)
sns.set_style('whitegrid')
plt.figure(figsize=(10, 8))
sns.countplot(df_train['label'], edgecolor='black', palette='mako')
code
50224851/cell_20
[ "image_output_1.png" ]
from PIL import Image
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df_train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
df_train['label'] = df_train['label'].astype(str)
sns.set_style('whitegrid')
path = '../input/cassava-leaf-disease-classification/train_images/'

# for the earlier label groups the subplot/imshow calls are missing from this extract;
# only the final group is actually drawn
df0 = df_train[df_train['label'] == '0']
files = df0['image_id'].sample(3).tolist()
index = 0
for file in files:
    image = Image.open(path + file)
    plt.axis('off')
    index += 1

df1 = df_train[df_train['label'] == '1']
files = df1['image_id'].sample(3).tolist()
index = 0
for file in files:
    image = Image.open(path + file)
    plt.axis('off')
    index += 1

df2 = df_train[df_train['label'] == '2']
files = df2['image_id'].sample(3).tolist()
index = 0
for file in files:
    image = Image.open(path + file)
    plt.axis('off')
    index += 1

df3 = df_train[df_train['label'] == '3']
files = df3['image_id'].sample(3).tolist()
index = 0
for file in files:
    image = Image.open(path + file)
    plt.axis('off')
    index += 1

df4 = df_train[df_train['label'] == '4']
files = df4['image_id'].sample(3).tolist()
plt.figure(figsize=(15, 5))
index = 0
for file in files:
    image = Image.open(path + file)
    plt.subplot(1, 3, index + 1)
    plt.imshow(image)
    plt.axis('off')
    index += 1
plt.show()
code
50224851/cell_6
[ "image_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
df_train['label'] = df_train['label'].astype(str)
df_train.info()
code
50224851/cell_18
[ "image_output_1.png" ]
from PIL import Image
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df_train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
df_train['label'] = df_train['label'].astype(str)
sns.set_style('whitegrid')
path = '../input/cassava-leaf-disease-classification/train_images/'

# for the earlier label groups the subplot/imshow calls are missing from this extract;
# only the final group is actually drawn
df0 = df_train[df_train['label'] == '0']
files = df0['image_id'].sample(3).tolist()
index = 0
for file in files:
    image = Image.open(path + file)
    plt.axis('off')
    index += 1

df1 = df_train[df_train['label'] == '1']
files = df1['image_id'].sample(3).tolist()
index = 0
for file in files:
    image = Image.open(path + file)
    plt.axis('off')
    index += 1

df2 = df_train[df_train['label'] == '2']
files = df2['image_id'].sample(3).tolist()
index = 0
for file in files:
    image = Image.open(path + file)
    plt.axis('off')
    index += 1

df3 = df_train[df_train['label'] == '3']
files = df3['image_id'].sample(3).tolist()
plt.figure(figsize=(15, 5))
index = 0
for file in files:
    image = Image.open(path + file)
    plt.subplot(1, 3, index + 1)
    plt.imshow(image)
    plt.axis('off')
    index += 1
plt.show()
code
50224851/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
from PIL import Image
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df_train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
df_train['label'] = df_train['label'].astype(str)
sns.set_style('whitegrid')
path = '../input/cassava-leaf-disease-classification/train_images/'

# for the earlier label groups the subplot/imshow calls are missing from this extract;
# only the final group is actually drawn
df0 = df_train[df_train['label'] == '0']
files = df0['image_id'].sample(3).tolist()
index = 0
for file in files:
    image = Image.open(path + file)
    plt.axis('off')
    index += 1

df1 = df_train[df_train['label'] == '1']
files = df1['image_id'].sample(3).tolist()
index = 0
for file in files:
    image = Image.open(path + file)
    plt.axis('off')
    index += 1

df2 = df_train[df_train['label'] == '2']
files = df2['image_id'].sample(3).tolist()
plt.figure(figsize=(15, 5))
index = 0
for file in files:
    image = Image.open(path + file)
    plt.subplot(1, 3, index + 1)
    plt.imshow(image)
    plt.axis('off')
    index += 1
plt.show()
code
50224851/cell_14
[ "text_plain_output_1.png" ]
from PIL import Image
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df_train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
df_train['label'] = df_train['label'].astype(str)
sns.set_style('whitegrid')
path = '../input/cassava-leaf-disease-classification/train_images/'

# for label '0' the subplot/imshow calls are missing from this extract;
# only the second group is actually drawn
df0 = df_train[df_train['label'] == '0']
files = df0['image_id'].sample(3).tolist()
index = 0
for file in files:
    image = Image.open(path + file)
    plt.axis('off')
    index += 1

df1 = df_train[df_train['label'] == '1']
files = df1['image_id'].sample(3).tolist()
plt.figure(figsize=(15, 5))
index = 0
for file in files:
    image = Image.open(path + file)
    plt.subplot(1, 3, index + 1)
    plt.imshow(image)
    plt.axis('off')
    index += 1
plt.show()
code
50224851/cell_12
[ "text_html_output_1.png" ]
from PIL import Image
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df_train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
df_train['label'] = df_train['label'].astype(str)
sns.set_style('whitegrid')
path = '../input/cassava-leaf-disease-classification/train_images/'

df0 = df_train[df_train['label'] == '0']
files = df0['image_id'].sample(3).tolist()
plt.figure(figsize=(15, 5))
index = 0
for file in files:
    image = Image.open(path + file)
    plt.subplot(1, 3, index + 1)
    plt.imshow(image)
    plt.axis('off')
    index += 1
plt.show()
code
50224851/cell_5
[ "image_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
df_train.head()
code
2010942/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

# `train` and `test` are loaded in cell_1 of this notebook
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
all_data.head()
code
2010942/cell_1
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from scipy.stats import skew
from scipy.stats import pearsonr  # scipy.stats.stats is the deprecated import path

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = train.drop(['Name'], axis=1)
test = test.drop(['Name'], axis=1)
print(train.head())
print('_' * 80)
test.head()
code
2010942/cell_7
[ "text_plain_output_1.png" ]
from scipy.stats import skew
import matplotlib
import numpy as np
import pandas as pd

# `train` and `test` are loaded in cell_1 of this notebook
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'Fare': train['Fare'], 'log(price + 1)': np.log1p(train['Fare'])})

# log-transform the skewed numeric features, then one-hot encode and impute
numeric_feats = all_data.dtypes[all_data.dtypes != 'object'].index
skewed_feats = train[numeric_feats].apply(lambda x: skew(x.dropna()))
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
all_data[skewed_feats] = np.log1p(all_data[skewed_feats])
all_data = pd.get_dummies(all_data)
all_data = all_data.fillna(all_data.mean())
print(all_data)
code
2010942/cell_8
[ "text_plain_output_1.png" ]
from scipy.stats import skew
import matplotlib
import numpy as np
import pandas as pd

# `train` and `test` are loaded in cell_1 of this notebook
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'Fare': train['Fare'], 'log(price + 1)': np.log1p(train['Fare'])})
numeric_feats = all_data.dtypes[all_data.dtypes != 'object'].index
skewed_feats = train[numeric_feats].apply(lambda x: skew(x.dropna()))
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
all_data[skewed_feats] = np.log1p(all_data[skewed_feats])
all_data = pd.get_dummies(all_data)
all_data = all_data.fillna(all_data.mean())
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.Survived
print(y)
code
2010942/cell_3
[ "text_plain_output_1.png" ]
import matplotlib
import numpy as np
import pandas as pd

# `train` and `test` are loaded in cell_1 of this notebook
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'Fare': train['Fare'], 'log(price + 1)': np.log1p(train['Fare'])})
prices.hist()
code
2010942/cell_10
[ "text_html_output_1.png" ]
from scipy.stats import skew
from sklearn.linear_model import LogisticRegression
import matplotlib
import numpy as np
import pandas as pd

# `train` and `test` are loaded in cell_1 of this notebook
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'Fare': train['Fare'], 'log(price + 1)': np.log1p(train['Fare'])})
numeric_feats = all_data.dtypes[all_data.dtypes != 'object'].index
skewed_feats = train[numeric_feats].apply(lambda x: skew(x.dropna()))
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
all_data[skewed_feats] = np.log1p(all_data[skewed_feats])
all_data = pd.get_dummies(all_data)
all_data = all_data.fillna(all_data.mean())
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.Survived

logreg = LogisticRegression()
logreg.fit(X_train, y)
accuracy = round(logreg.score(X_train, y) * 100, 2)
print(accuracy)
logreg_preds = logreg.predict(X_test)
code
121151188/cell_13
[ "text_html_output_1.png" ]
import pandas as pd
import plotly.express as px

train = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train.csv')
train_labels = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train_labels.csv')
test = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/test.csv')
train.isnull().sum()

event_count = train['fqid'].value_counts().reset_index()
event_count.loc[event_count['fqid'] < 10.0, 'index'] = 'Others'
event_count = event_count.head(10)
fig = px.pie(event_count, values='fqid', names='index', template='plotly_dark', title='Analysis and Comparison of different fqid across index')
fig.show()

event_count = train['level_group'].value_counts().reset_index()
event_count.columns = ['level_group', 'count']
fig = px.bar(event_count, x='level_group', y='count', color='count', title='Count the number of occurrences of each level group', hover_data=event_count, template='plotly_dark')
fig.show()

page_count = train['event_name'].value_counts().reset_index()
page_count.columns = ['event_name', 'count']
fig = px.bar(page_count, x='event_name', y='count', color='count', title='Count the number of occurrences of each Event', template='plotly_dark')
fig.show()
code
121151188/cell_9
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train.csv')
train_labels = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train_labels.csv')
test = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/test.csv')
train.isnull().sum()
code
121151188/cell_6
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train.csv')
train_labels = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train_labels.csv')
test = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/test.csv')
train.info()
code
121151188/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
import plotly.express as px

train = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train.csv')
train_labels = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train_labels.csv')
test = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/test.csv')
train.isnull().sum()

event_count = train['fqid'].value_counts().reset_index()
event_count.loc[event_count['fqid'] < 10.0, 'index'] = 'Others'
event_count = event_count.head(10)
fig = px.pie(event_count, values='fqid', names='index', template='plotly_dark', title='Analysis and Comparison of different fqid across index')
fig.show()
code
121151188/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train.csv')
train_labels = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train_labels.csv')
test = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/test.csv')
train.head()
code
121151188/cell_8
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train.csv')
train_labels = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train_labels.csv')
test = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/test.csv')
train_labels.head()
code
121151188/cell_15
[ "text_html_output_2.png" ]
import pandas as pd
import plotly.express as px

train = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train.csv')
train_labels = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train_labels.csv')
test = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/test.csv')
train.isnull().sum()

event_count = train['fqid'].value_counts().reset_index()
event_count.loc[event_count['fqid'] < 10.0, 'index'] = 'Others'
event_count = event_count.head(10)
fig = px.pie(event_count, values='fqid', names='index', template='plotly_dark', title='Analysis and Comparison of different fqid across index')
fig.show()

event_count = train['level_group'].value_counts().reset_index()
event_count.columns = ['level_group', 'count']
fig = px.bar(event_count, x='level_group', y='count', color='count', title='Count the number of occurrences of each level group', hover_data=event_count, template='plotly_dark')
fig.show()

page_count = train['event_name'].value_counts().reset_index()
page_count.columns = ['event_name', 'count']
fig = px.bar(page_count, x='event_name', y='count', color='count', title='Count the number of occurrences of each Event', template='plotly_dark')
fig.show()

events_in_session = pd.DataFrame(train.groupby('room_fqid').size())
events_in_session.columns = ['room_fqid']
fig = px.histogram(events_in_session, x='room_fqid', title='Types of room_fqid in dataset', template='plotly_dark')
fig.update_layout(bargap=0.2)
fig.show()

events_in_session = pd.DataFrame(train.groupby('session_id').size())
events_in_session.columns = ['n_of_events']
fig = px.histogram(events_in_session, x='n_of_events', title='Types of events in dataset', template='plotly_dark')
fig.show()
code
121151188/cell_16
[ "text_html_output_1.png" ]
import pandas as pd
import plotly.express as px

train = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train.csv')
train_labels = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train_labels.csv')
test = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/test.csv')
train.isnull().sum()

event_count = train['fqid'].value_counts().reset_index()
event_count.loc[event_count['fqid'] < 10.0, 'index'] = 'Others'
event_count = event_count.head(10)
fig = px.pie(event_count, values='fqid', names='index', template='plotly_dark', title='Analysis and Comparison of different fqid across index')
fig.show()

event_count = train['level_group'].value_counts().reset_index()
event_count.columns = ['level_group', 'count']
fig = px.bar(event_count, x='level_group', y='count', color='count', title='Count the number of occurrences of each level group', hover_data=event_count, template='plotly_dark')
fig.show()

page_count = train['event_name'].value_counts().reset_index()
page_count.columns = ['event_name', 'count']
fig = px.bar(page_count, x='event_name', y='count', color='count', title='Count the number of occurrences of each Event', template='plotly_dark')
fig.show()

events_in_session = pd.DataFrame(train.groupby('room_fqid').size())
events_in_session.columns = ['room_fqid']
fig = px.histogram(events_in_session, x='room_fqid', title='Types of room_fqid in dataset', template='plotly_dark')
fig.update_layout(bargap=0.2)
fig.show()

events_in_session = pd.DataFrame(train.groupby('session_id').size())
events_in_session.columns = ['n_of_events']
fig = px.histogram(events_in_session, x='n_of_events', title='Types of events in dataset', template='plotly_dark')
fig.show()

fig = px.bar(train['event_name'].value_counts(), text_auto=True, title='Types of events in dataset', template='plotly_dark')
fig.show()
code
121151188/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
import plotly.express as px

train = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train.csv')
train_labels = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train_labels.csv')
test = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/test.csv')
train.isnull().sum()

event_count = train['fqid'].value_counts().reset_index()
event_count.loc[event_count['fqid'] < 10.0, 'index'] = 'Others'
event_count = event_count.head(10)
fig = px.pie(event_count, values='fqid', names='index', template='plotly_dark', title='Analysis and Comparison of different fqid across index')
fig.show()

event_count = train['level_group'].value_counts().reset_index()
event_count.columns = ['level_group', 'count']
fig = px.bar(event_count, x='level_group', y='count', color='count', title='Count the number of occurrences of each level group', hover_data=event_count, template='plotly_dark')
fig.show()

page_count = train['event_name'].value_counts().reset_index()
page_count.columns = ['event_name', 'count']
fig = px.bar(page_count, x='event_name', y='count', color='count', title='Count the number of occurrences of each Event', template='plotly_dark')
fig.show()

events_in_session = pd.DataFrame(train.groupby('room_fqid').size())
events_in_session.columns = ['room_fqid']
fig = px.histogram(events_in_session, x='room_fqid', title='Types of room_fqid in dataset', template='plotly_dark')
fig.update_layout(bargap=0.2)
fig.show()
code
121151188/cell_12
[ "text_html_output_1.png" ]
import pandas as pd
import plotly.express as px

train = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train.csv')
train_labels = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train_labels.csv')
test = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/test.csv')
train.isnull().sum()

event_count = train['fqid'].value_counts().reset_index()
event_count.loc[event_count['fqid'] < 10.0, 'index'] = 'Others'
event_count = event_count.head(10)
fig = px.pie(event_count, values='fqid', names='index', template='plotly_dark', title='Analysis and Comparison of different fqid across index')
fig.show()

event_count = train['level_group'].value_counts().reset_index()
event_count.columns = ['level_group', 'count']
fig = px.bar(event_count, x='level_group', y='count', color='count', title='Count the number of occurrences of each level group', hover_data=event_count, template='plotly_dark')
fig.show()
code
128008497/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
from tqdm import tqdm

tqdm.pandas()
df = pd.read_csv('/kaggle/input/demand-forecasting-kernels-only/train.csv')
df
df['date'] = df.progress_apply(lambda x: f"{x['date']} {x['store']}:{x['item']}", axis=1)
df
df['date'] = pd.to_datetime(df['date'])
df = df.sort_values(by='date')
df
code
128008497/cell_1
[ "text_html_output_1.png" ]
import pandas as pd
from tqdm import tqdm

tqdm.pandas()
df = pd.read_csv('/kaggle/input/demand-forecasting-kernels-only/train.csv')
df
code
128008497/cell_7
[ "text_plain_output_1.png" ]
!git clone https://github.com/mnansary/Informer2020HDFC.git
code
128008497/cell_16
[ "text_html_output_1.png" ]
from exp.exp_informer import Exp_Informer
from utils.tools import dotdict
import torch

args = dotdict()
args.model = 'informer'
args.data = 'custom'
args.root_path = '/kaggle/working/'
args.data_path = 'converted.csv'
args.features = 'S'
args.target = 'sales'
args.freq = 't'
args.checkpoints = './informer_checkpoints'
args.seq_len = 96
args.label_len = 48
args.pred_len = 24
args.enc_in = 7
args.dec_in = 7
args.c_out = 7
args.factor = 5
args.d_model = 512
args.n_heads = 8
args.e_layers = 3
args.d_layers = 2
args.d_ff = 2048
args.dropout = 0.05
args.attn = 'prob'
args.embed = 'timeF'
args.activation = 'gelu'
args.distil = True
args.output_attention = False
args.mix = True
args.padding = 0
args.freq = 't'
args.batch_size = 32
args.learning_rate = 0.0001
args.loss = 'mse'
args.lradj = 'type1'
args.use_amp = False
args.num_workers = 0
args.itr = 1
args.train_epochs = 6
args.patience = 3
args.des = 'exp'
args.use_gpu = torch.cuda.is_available()
args.gpu = 0
args.use_multi_gpu = False
args.devices = '0,1,2,3'
args.use_gpu = torch.cuda.is_available() and args.use_gpu

if args.use_gpu and args.use_multi_gpu:
    args.devices = args.devices.replace(' ', '')
    device_ids = args.devices.split(',')
    args.device_ids = [int(id_) for id_ in device_ids]
    args.gpu = args.device_ids[0]

data_parser = {'custom': {'data': 'converted.csv', 'T': 'sales', 'M': [1, 1, 1], 'S': [1, 1, 1], 'MS': [1, 1, 1]}}
if args.data in data_parser.keys():
    data_info = data_parser[args.data]
    args.data_path = data_info['data']
    args.target = data_info['T']
    args.enc_in, args.dec_in, args.c_out = data_info[args.features]

args.detail_freq = args.freq
args.freq = args.freq[-1:]

Exp = Exp_Informer
for ii in range(args.itr):
    setting = '{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_at{}_fc{}_eb{}_dt{}_mx{}_{}_{}'.format(
        args.model, args.data, args.features, args.seq_len, args.label_len, args.pred_len,
        args.d_model, args.n_heads, args.e_layers, args.d_layers, args.d_ff, args.attn,
        args.factor, args.embed, args.distil, args.mix, args.des, ii)
    exp = Exp(args)
    print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))
    exp.train(setting)
    print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))
    exp.test(setting)
    torch.cuda.empty_cache()
code
128008497/cell_3
[ "text_html_output_1.png" ]
import pandas as pd
from tqdm import tqdm

tqdm.pandas()
df = pd.read_csv('/kaggle/input/demand-forecasting-kernels-only/train.csv')
df
df['date'] = df.progress_apply(lambda x: f"{x['date']} {x['store']}:{x['item']}", axis=1)
df
code
128008497/cell_14
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
from utils.tools import dotdict
import torch

args = dotdict()
args.model = 'informer'
args.data = 'custom'
args.root_path = '/kaggle/working/'
args.data_path = 'converted.csv'
args.features = 'S'
args.target = 'sales'
args.freq = 't'
args.checkpoints = './informer_checkpoints'
args.seq_len = 96
args.label_len = 48
args.pred_len = 24
args.enc_in = 7
args.dec_in = 7
args.c_out = 7
args.factor = 5
args.d_model = 512
args.n_heads = 8
args.e_layers = 3
args.d_layers = 2
args.d_ff = 2048
args.dropout = 0.05
args.attn = 'prob'
args.embed = 'timeF'
args.activation = 'gelu'
args.distil = True
args.output_attention = False
args.mix = True
args.padding = 0
args.freq = 't'
args.batch_size = 32
args.learning_rate = 0.0001
args.loss = 'mse'
args.lradj = 'type1'
args.use_amp = False
args.num_workers = 0
args.itr = 1
args.train_epochs = 6
args.patience = 3
args.des = 'exp'
args.use_gpu = torch.cuda.is_available()
args.gpu = 0
args.use_multi_gpu = False
args.devices = '0,1,2,3'
args.use_gpu = torch.cuda.is_available() and args.use_gpu

if args.use_gpu and args.use_multi_gpu:
    args.devices = args.devices.replace(' ', '')
    device_ids = args.devices.split(',')
    args.device_ids = [int(id_) for id_ in device_ids]
    args.gpu = args.device_ids[0]

data_parser = {'custom': {'data': 'converted.csv', 'T': 'sales', 'M': [1, 1, 1], 'S': [1, 1, 1], 'MS': [1, 1, 1]}}
if args.data in data_parser.keys():
    data_info = data_parser[args.data]
    args.data_path = data_info['data']
    args.target = data_info['T']
    args.enc_in, args.dec_in, args.c_out = data_info[args.features]

args.detail_freq = args.freq
args.freq = args.freq[-1:]

print('Args in experiment:')
print(args)
code
128008497/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
from tqdm import tqdm

tqdm.pandas()
df = pd.read_csv('/kaggle/input/demand-forecasting-kernels-only/train.csv')
df
df['date'] = df.progress_apply(lambda x: f"{x['date']} {x['store']}:{x['item']}", axis=1)
df
df['date'] = pd.to_datetime(df['date'])
df = df.sort_values(by='date')
df
df = df[['date', 'sales']]
df.to_csv('converted.csv', index=False)
df
code
104119002/cell_13
[ "image_output_1.png" ]
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import matplotlib.pyplot as plt
import numpy as np

def plot_examples(colormaps):
    """
    Helper function to plot data with associated colormap.
    """
    np.random.seed(19680801)
    data = np.random.randn(30, 30)
    n = len(colormaps)
    fig, axs = plt.subplots(1, n, figsize=(n * 2 + 2, 3), constrained_layout=True, squeeze=False)
    for ax, cmap in zip(axs.flat, colormaps):
        psm = ax.pcolormesh(data, cmap=cmap, rasterized=True, vmin=-4, vmax=4)
        fig.colorbar(psm, ax=ax)
    plt.show()

my_palette = ['#3c9ab2', '#56a6ba', '#71b3c2', '#88b774', '#d1c74c', '#e8c520', '#e4b80d', '#e29e00', '#ea5c00', '#f22300']
cmap = ListedColormap(my_palette)
cmap_members = ListedColormap(['#d1c74c', '#88b774', '#71b3c2', '#56a6ba', '#3c9ab2'])
cmap_casual = ListedColormap(['#e8c520', '#e4b80d', '#e29e00', '#ea5c00', '#f22300'])
colors_member_casual = {'Member': '#88b774', 'Casual': '#ea5c00'}
colors_day_of_week = {'Monday': '#3c9ab2', 'Tuesday': '#71b3c2', 'Wednesday': '#88b774', 'Thursday': '#ffde45', 'Friday': '#e4b80d', 'Saturday': '#ea5c00', 'Sunday': '#f22300'}
plot_examples([cmap_casual])
code
104119002/cell_34
[ "text_html_output_1.png" ]
import h3
import pandas as pd

# the Colab import is attempted inside try so the same cell also runs on Kaggle
kaggle = False
try:
    from google.colab import drive
    drive.mount('/content/drive')
except ModuleNotFoundError:
    path = '../input/google-bike-share/clean_df.csv'
    kaggle = True
else:
    path = '/content/drive/MyDrive/Colab Notebooks/Google Capstone Project/data/clean_data/240/clean_df.csv'
finally:
    df = pd.read_csv(path)

df = df.query('start_lat < 42.2')
df['distance'] = df.apply(lambda row: h3.point_dist((row['start_lat'], row['start_lng']), (row['end_lat'], row['end_lng']), unit='km'), axis=1)
df = df.drop(['start_lat', 'start_lng', 'end_lat', 'end_lng'], axis=1)
df.query('distance > 20')[['distance', 'ride_length', 'start_station', 'end_station']].sort_values(by=['distance'], ascending=False).head(5)
df = df.query('distance < 43')
df = df.drop(['ride_id', 'started_at', 'ended_at', 'day_of_week', 'month', 'ride_length'], axis=1)
months = [df.query(f'int_month == {i}') for i in range(1, 13)]
sample_df = df.sample(n=6758)
sample_df = sample_df.sort_values(by=['date', 'time_start'])
sample_df.describe().T
code
104119002/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import h3
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import numpy as np
import calendar
import geopandas as gpd
from shapely.geometry import Point, Polygon
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
code
104119002/cell_11
[ "text_plain_output_1.png" ]
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import matplotlib.pyplot as plt
import numpy as np

def plot_examples(colormaps):
    """
    Helper function to plot data with associated colormap.
    """
    np.random.seed(19680801)
    data = np.random.randn(30, 30)
    n = len(colormaps)
    fig, axs = plt.subplots(1, n, figsize=(n * 2 + 2, 3), constrained_layout=True, squeeze=False)
    for [ax, cmap] in zip(axs.flat, colormaps):
        psm = ax.pcolormesh(data, cmap=cmap, rasterized=True, vmin=-4, vmax=4)
        fig.colorbar(psm, ax=ax)
    plt.show()

my_palette = ['#3c9ab2', '#56a6ba', '#71b3c2', '#88b774', '#d1c74c', '#e8c520', '#e4b80d', '#e29e00', '#ea5c00', '#f22300']
cmap = ListedColormap(my_palette)
cmap_members = ListedColormap(['#d1c74c', '#88b774', '#71b3c2', '#56a6ba', '#3c9ab2'])
cmap_casual = ListedColormap(['#e8c520', '#e4b80d', '#e29e00', '#ea5c00', '#f22300'])
colors_member_casual = {'Member': '#88b774', 'Casual': '#ea5c00'}
colors_day_of_week = {'Monday': '#3c9ab2', 'Tuesday': '#71b3c2', 'Wednesday': '#88b774', 'Thursday': '#ffde45', 'Friday': '#e4b80d', 'Saturday': '#ea5c00', 'Sunday': '#f22300'}

plot_examples([cmap])
code
104119002/cell_28
[ "text_html_output_1.png" ]
import h3
import pandas as pd

kaggle = False
try:
    from google.colab import drive
    drive.mount('/content/drive')
except ModuleNotFoundError:
    path = '../input/google-bike-share/clean_df.csv'
    kaggle = True
else:
    path = '/content/drive/MyDrive/Colab Notebooks/Google Capstone Project/data/clean_data/240/clean_df.csv'
finally:
    df = pd.read_csv(path)

df = df.query('start_lat < 42.2')
df['distance'] = df.apply(lambda row: h3.point_dist((row['start_lat'], row['start_lng']), (row['end_lat'], row['end_lng']), unit='km'), axis=1)
df = df.drop(['start_lat', 'start_lng', 'end_lat', 'end_lng'], axis=1)
df.query('distance > 20')[['distance', 'ride_length', 'start_station', 'end_station']].sort_values(by=['distance'], ascending=False).head(5)
df = df.query('distance < 43')
df = df.drop(['ride_id', 'started_at', 'ended_at', 'day_of_week', 'month', 'ride_length'], axis=1)
df.head(5)
code
104119002/cell_15
[ "image_output_1.png" ]
import calendar

months_order = [7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6]
month_names = {i: name for i, name in enumerate(calendar.month_name) if i != 0}
print(month_names)
days = {name: i + 1 for i, name in enumerate(calendar.day_name)}
print(days)
days_order = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
code
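For reference, calendar.month_name has an empty string at index 0 and calendar.day_name starts at Monday, which explains the `if i != 0` and `i + 1` in the comprehensions above:

import calendar
print(list(calendar.month_name)[:3])  # ['', 'January', 'February']
print(list(calendar.day_name))        # ['Monday', ..., 'Sunday']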
104119002/cell_17
[ "image_output_1.png" ]
import pandas as pd

kaggle = False
try:
    from google.colab import drive
    drive.mount('/content/drive')
except ModuleNotFoundError:
    path = '../input/google-bike-share/clean_df.csv'
    kaggle = True
else:
    path = '/content/drive/MyDrive/Colab Notebooks/Google Capstone Project/data/clean_data/240/clean_df.csv'
finally:
    df = pd.read_csv(path)

df[['start_lat', 'start_lng', 'end_lat', 'end_lng']].describe().T
code
104119002/cell_35
[ "text_html_output_1.png" ]
import h3
import pandas as pd

kaggle = False
try:
    from google.colab import drive
    drive.mount('/content/drive')
except ModuleNotFoundError:
    path = '../input/google-bike-share/clean_df.csv'
    kaggle = True
else:
    path = '/content/drive/MyDrive/Colab Notebooks/Google Capstone Project/data/clean_data/240/clean_df.csv'
finally:
    df = pd.read_csv(path)

df = df.query('start_lat < 42.2')
df['distance'] = df.apply(lambda row: h3.point_dist((row['start_lat'], row['start_lng']), (row['end_lat'], row['end_lng']), unit='km'), axis=1)
df = df.drop(['start_lat', 'start_lng', 'end_lat', 'end_lng'], axis=1)
df.query('distance > 20')[['distance', 'ride_length', 'start_station', 'end_station']].sort_values(by=['distance'], ascending=False).head(5)
df = df.query('distance < 43')
df = df.drop(['ride_id', 'started_at', 'ended_at', 'day_of_week', 'month', 'ride_length'], axis=1)
months = [df.query(f'int_month == {i}') for i in range(1, 13)]
sample_df = df.sample(n=6758)
df.describe().T
code
104119002/cell_24
[ "text_html_output_1.png" ]
import h3
import pandas as pd

kaggle = False
try:
    from google.colab import drive
    drive.mount('/content/drive')
except ModuleNotFoundError:
    path = '../input/google-bike-share/clean_df.csv'
    kaggle = True
else:
    path = '/content/drive/MyDrive/Colab Notebooks/Google Capstone Project/data/clean_data/240/clean_df.csv'
finally:
    df = pd.read_csv(path)

df = df.query('start_lat < 42.2')
df['distance'] = df.apply(lambda row: h3.point_dist((row['start_lat'], row['start_lng']), (row['end_lat'], row['end_lng']), unit='km'), axis=1)
df = df.drop(['start_lat', 'start_lng', 'end_lat', 'end_lng'], axis=1)
df.query('distance > 20')[['distance', 'ride_length', 'start_station', 'end_station']].sort_values(by=['distance'], ascending=False).head(5)
code
104119002/cell_22
[ "text_plain_output_1.png" ]
import h3
import pandas as pd

kaggle = False
try:
    from google.colab import drive
    drive.mount('/content/drive')
except ModuleNotFoundError:
    path = '../input/google-bike-share/clean_df.csv'
    kaggle = True
else:
    path = '/content/drive/MyDrive/Colab Notebooks/Google Capstone Project/data/clean_data/240/clean_df.csv'
finally:
    df = pd.read_csv(path)

df = df.query('start_lat < 42.2')
df['distance'] = df.apply(lambda row: h3.point_dist((row['start_lat'], row['start_lng']), (row['end_lat'], row['end_lng']), unit='km'), axis=1)
df = df.drop(['start_lat', 'start_lng', 'end_lat', 'end_lng'], axis=1)
df[['distance', 'ride_length_minutes']].describe().T
code
104119002/cell_37
[ "text_html_output_1.png" ]
import calendar
import h3
import pandas as pd

kaggle = False
try:
    from google.colab import drive
    drive.mount('/content/drive')
except ModuleNotFoundError:
    path = '../input/google-bike-share/clean_df.csv'
    kaggle = True
else:
    path = '/content/drive/MyDrive/Colab Notebooks/Google Capstone Project/data/clean_data/240/clean_df.csv'
finally:
    df = pd.read_csv(path)

months_order = [7, 8, 9, 10, 11, 12, 1, 2, 3, 4, 5, 6]
month_names = {i: name for i, name in enumerate(calendar.month_name) if i != 0}
days = {name: i + 1 for i, name in enumerate(calendar.day_name)}
days_order = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']

df = df.query('start_lat < 42.2')
df['distance'] = df.apply(lambda row: h3.point_dist((row['start_lat'], row['start_lng']), (row['end_lat'], row['end_lng']), unit='km'), axis=1)
df = df.drop(['start_lat', 'start_lng', 'end_lat', 'end_lng'], axis=1)
df.query('distance > 20')[['distance', 'ride_length', 'start_station', 'end_station']].sort_values(by=['distance'], ascending=False).head(5)
df = df.query('distance < 43')
df = df.drop(['ride_id', 'started_at', 'ended_at', 'day_of_week', 'month', 'ride_length'], axis=1)
months = [df.query(f'int_month == {i}') for i in range(1, 13)]
sample_df = df.sample(n=6758)
df.describe().T

total = []
for i in range(1, 13):
    month = month_names[i]
    # ride_id was dropped above, so count rows directly
    num_rows = len(df.query(f'int_month == {i}'))
    total.append(num_rows)
    print(f'{month}: {num_rows}')
code
104119002/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import matplotlib.pyplot as plt
import numpy as np

def plot_examples(colormaps):
    """
    Helper function to plot data with associated colormap.
    """
    np.random.seed(19680801)
    data = np.random.randn(30, 30)
    n = len(colormaps)
    fig, axs = plt.subplots(1, n, figsize=(n * 2 + 2, 3), constrained_layout=True, squeeze=False)
    for [ax, cmap] in zip(axs.flat, colormaps):
        psm = ax.pcolormesh(data, cmap=cmap, rasterized=True, vmin=-4, vmax=4)
        fig.colorbar(psm, ax=ax)
    plt.show()

my_palette = ['#3c9ab2', '#56a6ba', '#71b3c2', '#88b774', '#d1c74c', '#e8c520', '#e4b80d', '#e29e00', '#ea5c00', '#f22300']
cmap = ListedColormap(my_palette)
cmap_members = ListedColormap(['#d1c74c', '#88b774', '#71b3c2', '#56a6ba', '#3c9ab2'])
cmap_casual = ListedColormap(['#e8c520', '#e4b80d', '#e29e00', '#ea5c00', '#f22300'])
colors_member_casual = {'Member': '#88b774', 'Casual': '#ea5c00'}
colors_day_of_week = {'Monday': '#3c9ab2', 'Tuesday': '#71b3c2', 'Wednesday': '#88b774', 'Thursday': '#ffde45', 'Friday': '#e4b80d', 'Saturday': '#ea5c00', 'Sunday': '#f22300'}

plot_examples([cmap_members])
code
104119002/cell_5
[ "text_html_output_1.png" ]
!pip install h3 !pip install geopandas
code
88081278/cell_4
[ "image_output_1.png" ]
import os
import random
from multiprocessing import Pool
import numpy as np
import cv2
import albumentations as A
import matplotlib.pyplot as plt
from tqdm import tqdm

INPUT_PATH = '../input/happy-whale-and-dolphin'
IMG_SIZE = 600

train_files = os.listdir(os.path.join(INPUT_PATH, 'train_images'))
test_files = os.listdir(os.path.join(INPUT_PATH, 'test_images'))
all_files = [os.path.join(INPUT_PATH, 'train_images', f) for f in train_files] + [os.path.join(INPUT_PATH, 'test_images', f) for f in test_files]
show_images = random.sample(all_files, 5)

def show_orig_norm_images(img_files, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    # one row per image: original on the left, ImageNet-normalized on the right
    nrows, ncols = len(img_files), 2
    fig, ax = plt.subplots(nrows, ncols, figsize=(20, 31))
    for i in range(len(img_files)):
        img = cv2.imread(img_files[i])
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        norm_img = A.Normalize(mean=mean, std=std)(image=img)['image']
        ax[i, 0].grid(False)
        ax[i, 0].axis('off')
        ax[i, 0].title.set_text(f'{os.path.basename(img_files[i])}: original')
        ax[i, 0].imshow(img)
        ax[i, 1].grid(False)
        ax[i, 1].axis('off')
        ax[i, 1].title.set_text(f'{os.path.basename(img_files[i])}: normalized')
        ax[i, 1].imshow(norm_img)
    plt.tight_layout()
    plt.show()

show_orig_norm_images(show_images)
code
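For reference, A.Normalize with these defaults scales by max_pixel_value=255 before standardizing; a NumPy equivalent (a sketch, not albumentations' exact implementation) is:

import numpy as np

def normalize(img, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    # scale uint8 pixels to [0, 1], then standardize per channel
    img = img.astype(np.float32) / 255.0
    return (img - np.array(mean, dtype=np.float32)) / np.array(std, dtype=np.float32)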
88081278/cell_3
[ "text_plain_output_1.png" ]
import os
import random
from multiprocessing import Pool
import numpy as np
import cv2
import albumentations as A
import matplotlib.pyplot as plt
from tqdm import tqdm

INPUT_PATH = '../input/happy-whale-and-dolphin'
IMG_SIZE = 600

train_files = os.listdir(os.path.join(INPUT_PATH, 'train_images'))
test_files = os.listdir(os.path.join(INPUT_PATH, 'test_images'))
all_files = [os.path.join(INPUT_PATH, 'train_images', f) for f in train_files] + [os.path.join(INPUT_PATH, 'test_images', f) for f in test_files]
print(f'Train files: {len(train_files)}, test files: {len(test_files)}, all_files: {len(all_files)}')
show_images = random.sample(all_files, 5)
print(show_images)
code
88081278/cell_5
[ "application_vnd.jupyter.stderr_output_116.png", "application_vnd.jupyter.stderr_output_74.png", "application_vnd.jupyter.stderr_output_268.png", "application_vnd.jupyter.stderr_output_145.png", "application_vnd.jupyter.stderr_output_362.png", "application_vnd.jupyter.stderr_output_493.png", "application_vnd.jupyter.stderr_output_667.png", "application_vnd.jupyter.stderr_output_289.png", "application_vnd.jupyter.stderr_output_313.png", "application_vnd.jupyter.stderr_output_566.png", "application_vnd.jupyter.stderr_output_373.png", "application_vnd.jupyter.stderr_output_578.png", "application_vnd.jupyter.stderr_output_516.png", "application_vnd.jupyter.stderr_output_672.png", "application_vnd.jupyter.stderr_output_529.png", "application_vnd.jupyter.stderr_output_27.png", "application_vnd.jupyter.stderr_output_732.png", "application_vnd.jupyter.stderr_output_222.png", "application_vnd.jupyter.stderr_output_626.png", "application_vnd.jupyter.stderr_output_96.png", "application_vnd.jupyter.stderr_output_115.png", "application_vnd.jupyter.stderr_output_642.png", "application_vnd.jupyter.stderr_output_207.png", "application_vnd.jupyter.stderr_output_640.png", "application_vnd.jupyter.stderr_output_341.png", "application_vnd.jupyter.stderr_output_723.png", "application_vnd.jupyter.stderr_output_296.png", "application_vnd.jupyter.stderr_output_110.png", "application_vnd.jupyter.stderr_output_35.png", "application_vnd.jupyter.stderr_output_637.png", "application_vnd.jupyter.stderr_output_112.png", "application_vnd.jupyter.stderr_output_400.png", "application_vnd.jupyter.stderr_output_212.png", "application_vnd.jupyter.stderr_output_499.png", "application_vnd.jupyter.stderr_output_700.png", "application_vnd.jupyter.stderr_output_458.png", "application_vnd.jupyter.stderr_output_634.png", "application_vnd.jupyter.stderr_output_420.png", "application_vnd.jupyter.stderr_output_77.png", "application_vnd.jupyter.stderr_output_24.png", "application_vnd.jupyter.stderr_output_354.png", "application_vnd.jupyter.stderr_output_417.png", "application_vnd.jupyter.stderr_output_16.png", "application_vnd.jupyter.stderr_output_274.png", "application_vnd.jupyter.stderr_output_610.png", "application_vnd.jupyter.stderr_output_461.png", "application_vnd.jupyter.stderr_output_205.png", "application_vnd.jupyter.stderr_output_632.png", "application_vnd.jupyter.stderr_output_203.png", "application_vnd.jupyter.stderr_output_368.png", "application_vnd.jupyter.stderr_output_575.png", "application_vnd.jupyter.stderr_output_185.png", "application_vnd.jupyter.stderr_output_474.png", "application_vnd.jupyter.stderr_output_227.png", "application_vnd.jupyter.stderr_output_258.png", "application_vnd.jupyter.stderr_output_668.png", "application_vnd.jupyter.stderr_output_622.png", "application_vnd.jupyter.stderr_output_287.png", "application_vnd.jupyter.stderr_output_9.png", "application_vnd.jupyter.stderr_output_286.png", "application_vnd.jupyter.stderr_output_426.png", "application_vnd.jupyter.stderr_output_152.png", "application_vnd.jupyter.stderr_output_156.png", "application_vnd.jupyter.stderr_output_553.png", "application_vnd.jupyter.stderr_output_335.png", "application_vnd.jupyter.stderr_output_215.png", "application_vnd.jupyter.stderr_output_691.png", "application_vnd.jupyter.stderr_output_522.png", "application_vnd.jupyter.stderr_output_283.png", "application_vnd.jupyter.stderr_output_710.png", "application_vnd.jupyter.stderr_output_684.png", "application_vnd.jupyter.stderr_output_643.png", 
"application_vnd.jupyter.stderr_output_70.png", "application_vnd.jupyter.stderr_output_310.png", "application_vnd.jupyter.stderr_output_631.png", "application_vnd.jupyter.stderr_output_449.png", "application_vnd.jupyter.stderr_output_554.png", "application_vnd.jupyter.stderr_output_204.png", "application_vnd.jupyter.stderr_output_284.png", "application_vnd.jupyter.stderr_output_124.png", "application_vnd.jupyter.stderr_output_223.png", "application_vnd.jupyter.stderr_output_498.png", "application_vnd.jupyter.stderr_output_219.png", "application_vnd.jupyter.stderr_output_279.png", "application_vnd.jupyter.stderr_output_81.png", "application_vnd.jupyter.stderr_output_111.png", "application_vnd.jupyter.stderr_output_52.png", "application_vnd.jupyter.stderr_output_53.png", "application_vnd.jupyter.stderr_output_605.png", "application_vnd.jupyter.stderr_output_131.png", "application_vnd.jupyter.stderr_output_695.png", "application_vnd.jupyter.stderr_output_437.png", "application_vnd.jupyter.stderr_output_172.png", "application_vnd.jupyter.stderr_output_545.png", "application_vnd.jupyter.stderr_output_512.png", "application_vnd.jupyter.stderr_output_99.png", "application_vnd.jupyter.stderr_output_348.png", "application_vnd.jupyter.stderr_output_32.png", "application_vnd.jupyter.stderr_output_246.png", "application_vnd.jupyter.stderr_output_704.png", "application_vnd.jupyter.stderr_output_385.png", "application_vnd.jupyter.stderr_output_183.png", "application_vnd.jupyter.stderr_output_502.png", "application_vnd.jupyter.stderr_output_181.png", "application_vnd.jupyter.stderr_output_722.png", "application_vnd.jupyter.stderr_output_299.png", "application_vnd.jupyter.stderr_output_141.png", "application_vnd.jupyter.stderr_output_176.png", "application_vnd.jupyter.stderr_output_356.png", "application_vnd.jupyter.stderr_output_297.png", "application_vnd.jupyter.stderr_output_506.png", "application_vnd.jupyter.stderr_output_93.png", "application_vnd.jupyter.stderr_output_563.png", "application_vnd.jupyter.stderr_output_346.png", "application_vnd.jupyter.stderr_output_651.png", "application_vnd.jupyter.stderr_output_641.png", "application_vnd.jupyter.stderr_output_382.png", "application_vnd.jupyter.stderr_output_170.png", "application_vnd.jupyter.stderr_output_132.png", "application_vnd.jupyter.stderr_output_713.png", "application_vnd.jupyter.stderr_output_471.png", "application_vnd.jupyter.stderr_output_655.png", "application_vnd.jupyter.stderr_output_123.png", "application_vnd.jupyter.stderr_output_692.png", "application_vnd.jupyter.stderr_output_465.png", "application_vnd.jupyter.stderr_output_540.png", "application_vnd.jupyter.stderr_output_48.png", "application_vnd.jupyter.stderr_output_236.png", "application_vnd.jupyter.stderr_output_418.png", "application_vnd.jupyter.stderr_output_391.png", "application_vnd.jupyter.stderr_output_636.png", "application_vnd.jupyter.stderr_output_550.png", "application_vnd.jupyter.stderr_output_731.png", "application_vnd.jupyter.stderr_output_355.png", "application_vnd.jupyter.stderr_output_421.png", "application_vnd.jupyter.stderr_output_378.png", "application_vnd.jupyter.stderr_output_432.png", "application_vnd.jupyter.stderr_output_431.png", "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_472.png", "application_vnd.jupyter.stderr_output_73.png", "application_vnd.jupyter.stderr_output_137.png", "application_vnd.jupyter.stderr_output_133.png", "application_vnd.jupyter.stderr_output_165.png", 
"application_vnd.jupyter.stderr_output_715.png", "application_vnd.jupyter.stderr_output_504.png", "application_vnd.jupyter.stderr_output_381.png", "application_vnd.jupyter.stderr_output_75.png", "application_vnd.jupyter.stderr_output_552.png", "application_vnd.jupyter.stderr_output_627.png", "application_vnd.jupyter.stderr_output_694.png", "application_vnd.jupyter.stderr_output_585.png", "application_vnd.jupyter.stderr_output_365.png", "application_vnd.jupyter.stderr_output_618.png", "application_vnd.jupyter.stderr_output_693.png", "application_vnd.jupyter.stderr_output_392.png", "application_vnd.jupyter.stderr_output_513.png", "application_vnd.jupyter.stderr_output_690.png", "application_vnd.jupyter.stderr_output_593.png", "application_vnd.jupyter.stderr_output_666.png", "application_vnd.jupyter.stderr_output_653.png", "application_vnd.jupyter.stderr_output_414.png", "application_vnd.jupyter.stderr_output_436.png", "application_vnd.jupyter.stderr_output_608.png", "application_vnd.jupyter.stderr_output_146.png", "application_vnd.jupyter.stderr_output_321.png", "application_vnd.jupyter.stderr_output_629.png", "application_vnd.jupyter.stderr_output_7.png", "application_vnd.jupyter.stderr_output_467.png", "application_vnd.jupyter.stderr_output_324.png", "application_vnd.jupyter.stderr_output_407.png", "application_vnd.jupyter.stderr_output_528.png", "application_vnd.jupyter.stderr_output_360.png", "application_vnd.jupyter.stderr_output_484.png", "application_vnd.jupyter.stderr_output_674.png", "application_vnd.jupyter.stderr_output_537.png", "application_vnd.jupyter.stderr_output_190.png", "application_vnd.jupyter.stderr_output_447.png", "application_vnd.jupyter.stderr_output_380.png", "application_vnd.jupyter.stderr_output_270.png", "application_vnd.jupyter.stderr_output_11.png", "application_vnd.jupyter.stderr_output_361.png", "application_vnd.jupyter.stderr_output_155.png", "application_vnd.jupyter.stderr_output_344.png", "application_vnd.jupyter.stderr_output_18.png", "application_vnd.jupyter.stderr_output_86.png", "application_vnd.jupyter.stderr_output_334.png", "application_vnd.jupyter.stderr_output_526.png", "application_vnd.jupyter.stderr_output_649.png", "application_vnd.jupyter.stderr_output_423.png", "application_vnd.jupyter.stderr_output_277.png", "application_vnd.jupyter.stderr_output_291.png", "application_vnd.jupyter.stderr_output_38.png", "application_vnd.jupyter.stderr_output_482.png", "application_vnd.jupyter.stderr_output_568.png", "application_vnd.jupyter.stderr_output_231.png", "application_vnd.jupyter.stderr_output_317.png", "application_vnd.jupyter.stderr_output_240.png", "application_vnd.jupyter.stderr_output_272.png", "application_vnd.jupyter.stderr_output_88.png", "application_vnd.jupyter.stderr_output_65.png", "application_vnd.jupyter.stderr_output_443.png", "application_vnd.jupyter.stderr_output_235.png", "application_vnd.jupyter.stderr_output_148.png", "application_vnd.jupyter.stderr_output_520.png", "application_vnd.jupyter.stderr_output_453.png", "application_vnd.jupyter.stderr_output_179.png", "application_vnd.jupyter.stderr_output_143.png", "application_vnd.jupyter.stderr_output_409.png", "application_vnd.jupyter.stderr_output_58.png", "application_vnd.jupyter.stderr_output_615.png", "application_vnd.jupyter.stderr_output_638.png", "application_vnd.jupyter.stderr_output_66.png", "application_vnd.jupyter.stderr_output_724.png", "application_vnd.jupyter.stderr_output_718.png", "application_vnd.jupyter.stderr_output_68.png", 
"application_vnd.jupyter.stderr_output_171.png", "application_vnd.jupyter.stderr_output_106.png", "application_vnd.jupyter.stderr_output_351.png", "application_vnd.jupyter.stderr_output_224.png", "application_vnd.jupyter.stderr_output_105.png", "application_vnd.jupyter.stderr_output_275.png", "application_vnd.jupyter.stderr_output_4.png", "application_vnd.jupyter.stderr_output_345.png", "application_vnd.jupyter.stderr_output_495.png", "application_vnd.jupyter.stderr_output_26.png", "application_vnd.jupyter.stderr_output_178.png", "application_vnd.jupyter.stderr_output_322.png", "application_vnd.jupyter.stderr_output_729.png", "application_vnd.jupyter.stderr_output_577.png", "application_vnd.jupyter.stderr_output_384.png", "application_vnd.jupyter.stderr_output_406.png", "application_vnd.jupyter.stderr_output_620.png", "application_vnd.jupyter.stderr_output_238.png", "application_vnd.jupyter.stderr_output_439.png", "application_vnd.jupyter.stderr_output_564.png", "application_vnd.jupyter.stderr_output_650.png", "application_vnd.jupyter.stderr_output_371.png", "application_vnd.jupyter.stderr_output_253.png", "application_vnd.jupyter.stderr_output_450.png", "application_vnd.jupyter.stderr_output_524.png", "application_vnd.jupyter.stderr_output_490.png", "application_vnd.jupyter.stderr_output_136.png", "application_vnd.jupyter.stderr_output_6.png", "application_vnd.jupyter.stderr_output_633.png", "application_vnd.jupyter.stderr_output_389.png", "application_vnd.jupyter.stderr_output_489.png", "application_vnd.jupyter.stderr_output_323.png", "application_vnd.jupyter.stderr_output_422.png", "application_vnd.jupyter.stderr_output_162.png", "application_vnd.jupyter.stderr_output_376.png", "application_vnd.jupyter.stderr_output_676.png", "application_vnd.jupyter.stderr_output_387.png", "application_vnd.jupyter.stderr_output_393.png", "application_vnd.jupyter.stderr_output_232.png", "application_vnd.jupyter.stderr_output_623.png", "application_vnd.jupyter.stderr_output_260.png", "application_vnd.jupyter.stderr_output_31.png", "application_vnd.jupyter.stderr_output_125.png", "application_vnd.jupyter.stderr_output_576.png", "application_vnd.jupyter.stderr_output_134.png", "application_vnd.jupyter.stderr_output_113.png", "application_vnd.jupyter.stderr_output_194.png", "application_vnd.jupyter.stderr_output_221.png", "application_vnd.jupyter.stderr_output_302.png", "application_vnd.jupyter.stderr_output_599.png", "application_vnd.jupyter.stderr_output_664.png", "application_vnd.jupyter.stderr_output_546.png", "application_vnd.jupyter.stderr_output_305.png", "application_vnd.jupyter.stderr_output_476.png", "application_vnd.jupyter.stderr_output_497.png", "application_vnd.jupyter.stderr_output_478.png", "application_vnd.jupyter.stderr_output_656.png", "application_vnd.jupyter.stderr_output_383.png", "application_vnd.jupyter.stderr_output_336.png", "application_vnd.jupyter.stderr_output_33.png", "application_vnd.jupyter.stderr_output_402.png", "application_vnd.jupyter.stderr_output_519.png", "application_vnd.jupyter.stderr_output_542.png", "application_vnd.jupyter.stderr_output_725.png", "application_vnd.jupyter.stderr_output_518.png", "application_vnd.jupyter.stderr_output_245.png", "application_vnd.jupyter.stderr_output_316.png", "application_vnd.jupyter.stderr_output_468.png", "application_vnd.jupyter.stderr_output_662.png", "application_vnd.jupyter.stderr_output_25.png", "application_vnd.jupyter.stderr_output_714.png", "application_vnd.jupyter.stderr_output_699.png", 
"application_vnd.jupyter.stderr_output_419.png", "application_vnd.jupyter.stderr_output_697.png", "application_vnd.jupyter.stderr_output_570.png", "application_vnd.jupyter.stderr_output_404.png", "application_vnd.jupyter.stderr_output_609.png", "application_vnd.jupyter.stderr_output_330.png", "application_vnd.jupyter.stderr_output_403.png", "application_vnd.jupyter.stderr_output_249.png", "application_vnd.jupyter.stderr_output_229.png", "application_vnd.jupyter.stderr_output_366.png", "application_vnd.jupyter.stderr_output_263.png", "application_vnd.jupyter.stderr_output_278.png", "application_vnd.jupyter.stderr_output_716.png", "application_vnd.jupyter.stderr_output_273.png", "application_vnd.jupyter.stderr_output_525.png", "application_vnd.jupyter.stderr_output_135.png", "application_vnd.jupyter.stderr_output_555.png", "application_vnd.jupyter.stderr_output_211.png", "application_vnd.jupyter.stderr_output_517.png", "application_vnd.jupyter.stderr_output_174.png", "application_vnd.jupyter.stderr_output_503.png", "application_vnd.jupyter.stderr_output_454.png", "application_vnd.jupyter.stderr_output_515.png", "application_vnd.jupyter.stderr_output_510.png", "application_vnd.jupyter.stderr_output_12.png", "application_vnd.jupyter.stderr_output_463.png", "application_vnd.jupyter.stderr_output_720.png", "application_vnd.jupyter.stderr_output_574.png", "application_vnd.jupyter.stderr_output_285.png", "application_vnd.jupyter.stderr_output_177.png", "application_vnd.jupyter.stderr_output_527.png", "application_vnd.jupyter.stderr_output_644.png", "application_vnd.jupyter.stderr_output_342.png", "application_vnd.jupyter.stderr_output_665.png", "application_vnd.jupyter.stderr_output_89.png", "application_vnd.jupyter.stderr_output_82.png", "application_vnd.jupyter.stderr_output_269.png", "application_vnd.jupyter.stderr_output_288.png", "application_vnd.jupyter.stderr_output_358.png", "application_vnd.jupyter.stderr_output_398.png", "application_vnd.jupyter.stderr_output_535.png", "application_vnd.jupyter.stderr_output_388.png", "application_vnd.jupyter.stderr_output_332.png", "application_vnd.jupyter.stderr_output_72.png", "application_vnd.jupyter.stderr_output_290.png", "application_vnd.jupyter.stderr_output_586.png", "application_vnd.jupyter.stderr_output_8.png", "application_vnd.jupyter.stderr_output_189.png", "application_vnd.jupyter.stderr_output_149.png", "application_vnd.jupyter.stderr_output_308.png", "application_vnd.jupyter.stderr_output_91.png", "application_vnd.jupyter.stderr_output_239.png", "application_vnd.jupyter.stderr_output_95.png", "application_vnd.jupyter.stderr_output_394.png", "application_vnd.jupyter.stderr_output_580.png", "application_vnd.jupyter.stderr_output_541.png", "application_vnd.jupyter.stderr_output_559.png", "application_vnd.jupyter.stderr_output_583.png", "application_vnd.jupyter.stderr_output_496.png", "application_vnd.jupyter.stderr_output_67.png", "application_vnd.jupyter.stderr_output_237.png", "application_vnd.jupyter.stderr_output_339.png", "application_vnd.jupyter.stderr_output_689.png", "application_vnd.jupyter.stderr_output_306.png", "application_vnd.jupyter.stderr_output_604.png", "application_vnd.jupyter.stderr_output_424.png", "application_vnd.jupyter.stderr_output_534.png", "application_vnd.jupyter.stderr_output_337.png", "application_vnd.jupyter.stderr_output_481.png", "application_vnd.jupyter.stderr_output_592.png", "application_vnd.jupyter.stderr_output_80.png", "application_vnd.jupyter.stderr_output_539.png", 
"application_vnd.jupyter.stderr_output_71.png", "application_vnd.jupyter.stderr_output_300.png", "application_vnd.jupyter.stderr_output_259.png", "application_vnd.jupyter.stderr_output_293.png", "application_vnd.jupyter.stderr_output_600.png", "application_vnd.jupyter.stderr_output_728.png", "application_vnd.jupyter.stderr_output_709.png", "application_vnd.jupyter.stderr_output_257.png", "application_vnd.jupyter.stderr_output_10.png", "application_vnd.jupyter.stderr_output_23.png", "application_vnd.jupyter.stderr_output_159.png", "application_vnd.jupyter.stderr_output_396.png", "application_vnd.jupyter.stderr_output_325.png", "application_vnd.jupyter.stderr_output_464.png", "application_vnd.jupyter.stderr_output_663.png", "application_vnd.jupyter.stderr_output_220.png", "application_vnd.jupyter.stderr_output_247.png", "application_vnd.jupyter.stderr_output_657.png", "application_vnd.jupyter.stderr_output_675.png", "application_vnd.jupyter.stderr_output_98.png", "application_vnd.jupyter.stderr_output_677.png", "application_vnd.jupyter.stderr_output_59.png", "application_vnd.jupyter.stderr_output_589.png", "application_vnd.jupyter.stderr_output_34.png", "application_vnd.jupyter.stderr_output_197.png", "application_vnd.jupyter.stderr_output_369.png", "application_vnd.jupyter.stderr_output_459.png", "application_vnd.jupyter.stderr_output_536.png", "application_vnd.jupyter.stderr_output_444.png", "application_vnd.jupyter.stderr_output_90.png", "application_vnd.jupyter.stderr_output_441.png", "application_vnd.jupyter.stderr_output_538.png", "application_vnd.jupyter.stderr_output_352.png", "application_vnd.jupyter.stderr_output_543.png", "application_vnd.jupyter.stderr_output_584.png", "application_vnd.jupyter.stderr_output_485.png", "application_vnd.jupyter.stderr_output_144.png", "application_vnd.jupyter.stderr_output_83.png", "application_vnd.jupyter.stderr_output_140.png", "application_vnd.jupyter.stderr_output_19.png", "application_vnd.jupyter.stderr_output_214.png", "application_vnd.jupyter.stderr_output_44.png", "application_vnd.jupyter.stderr_output_505.png", "application_vnd.jupyter.stderr_output_281.png", "application_vnd.jupyter.stderr_output_590.png", "application_vnd.jupyter.stderr_output_13.png", "application_vnd.jupyter.stderr_output_127.png", "application_vnd.jupyter.stderr_output_320.png", "application_vnd.jupyter.stderr_output_544.png", "application_vnd.jupyter.stderr_output_705.png", "application_vnd.jupyter.stderr_output_661.png", "application_vnd.jupyter.stderr_output_440.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_571.png", "application_vnd.jupyter.stderr_output_587.png", "application_vnd.jupyter.stderr_output_195.png", "application_vnd.jupyter.stderr_output_331.png", "application_vnd.jupyter.stderr_output_160.png", "application_vnd.jupyter.stderr_output_42.png", "application_vnd.jupyter.stderr_output_561.png", "application_vnd.jupyter.stderr_output_602.png", "application_vnd.jupyter.stderr_output_298.png", "application_vnd.jupyter.stderr_output_598.png", "application_vnd.jupyter.stderr_output_192.png", "application_vnd.jupyter.stderr_output_721.png", "application_vnd.jupyter.stderr_output_678.png", "application_vnd.jupyter.stderr_output_702.png", "application_vnd.jupyter.stderr_output_327.png", "application_vnd.jupyter.stderr_output_5.png", "application_vnd.jupyter.stderr_output_261.png", "application_vnd.jupyter.stderr_output_483.png", "application_vnd.jupyter.stderr_output_670.png", 
"application_vnd.jupyter.stderr_output_301.png", "application_vnd.jupyter.stderr_output_84.png", "application_vnd.jupyter.stderr_output_347.png", "application_vnd.jupyter.stderr_output_180.png", "application_vnd.jupyter.stderr_output_411.png", "application_vnd.jupyter.stderr_output_687.png", "application_vnd.jupyter.stderr_output_230.png", "application_vnd.jupyter.stderr_output_428.png", "application_vnd.jupyter.stderr_output_314.png", "application_vnd.jupyter.stderr_output_120.png", "application_vnd.jupyter.stderr_output_241.png", "application_vnd.jupyter.stderr_output_405.png", "application_vnd.jupyter.stderr_output_475.png", "application_vnd.jupyter.stderr_output_163.png", "application_vnd.jupyter.stderr_output_558.png", "application_vnd.jupyter.stderr_output_647.png", "application_vnd.jupyter.stderr_output_60.png", "application_vnd.jupyter.stderr_output_648.png", "application_vnd.jupyter.stderr_output_151.png", "application_vnd.jupyter.stderr_output_617.png", "application_vnd.jupyter.stderr_output_103.png", "application_vnd.jupyter.stderr_output_216.png", "application_vnd.jupyter.stderr_output_109.png", "application_vnd.jupyter.stderr_output_372.png", "application_vnd.jupyter.stderr_output_202.png", "application_vnd.jupyter.stderr_output_367.png", "application_vnd.jupyter.stderr_output_184.png", "application_vnd.jupyter.stderr_output_97.png", "application_vnd.jupyter.stderr_output_473.png", "application_vnd.jupyter.stderr_output_594.png", "application_vnd.jupyter.stderr_output_395.png", "application_vnd.jupyter.stderr_output_390.png", "application_vnd.jupyter.stderr_output_596.png", "application_vnd.jupyter.stderr_output_669.png", "application_vnd.jupyter.stderr_output_688.png", "application_vnd.jupyter.stderr_output_201.png", "application_vnd.jupyter.stderr_output_307.png", "application_vnd.jupyter.stderr_output_673.png", "application_vnd.jupyter.stderr_output_660.png", "application_vnd.jupyter.stderr_output_551.png", "application_vnd.jupyter.stderr_output_514.png", "application_vnd.jupyter.stderr_output_30.png", "application_vnd.jupyter.stderr_output_416.png", "application_vnd.jupyter.stderr_output_15.png", "application_vnd.jupyter.stderr_output_603.png", "application_vnd.jupyter.stderr_output_108.png", "application_vnd.jupyter.stderr_output_591.png", "application_vnd.jupyter.stderr_output_579.png", "application_vnd.jupyter.stderr_output_62.png", "application_vnd.jupyter.stderr_output_328.png", "application_vnd.jupyter.stderr_output_679.png", "application_vnd.jupyter.stderr_output_671.png", "application_vnd.jupyter.stderr_output_479.png", "application_vnd.jupyter.stderr_output_470.png", "application_vnd.jupyter.stderr_output_611.png", "application_vnd.jupyter.stderr_output_635.png", "application_vnd.jupyter.stderr_output_250.png", "application_vnd.jupyter.stderr_output_193.png", "application_vnd.jupyter.stderr_output_17.png", "application_vnd.jupyter.stderr_output_686.png", "application_vnd.jupyter.stderr_output_242.png", "application_vnd.jupyter.stderr_output_87.png", "application_vnd.jupyter.stderr_output_654.png", "application_vnd.jupyter.stderr_output_294.png", "application_vnd.jupyter.stderr_output_619.png", "application_vnd.jupyter.stderr_output_711.png", "application_vnd.jupyter.stderr_output_187.png", "application_vnd.jupyter.stderr_output_588.png", "application_vnd.jupyter.stderr_output_445.png", "application_vnd.jupyter.stderr_output_477.png", "application_vnd.jupyter.stderr_output_612.png", "application_vnd.jupyter.stderr_output_130.png", 
"application_vnd.jupyter.stderr_output_455.png", "application_vnd.jupyter.stderr_output_28.png", "application_vnd.jupyter.stderr_output_469.png", "application_vnd.jupyter.stderr_output_364.png", "application_vnd.jupyter.stderr_output_448.png", "application_vnd.jupyter.stderr_output_658.png", "application_vnd.jupyter.stderr_output_680.png", "application_vnd.jupyter.stderr_output_117.png", "application_vnd.jupyter.stderr_output_625.png", "application_vnd.jupyter.stderr_output_256.png", "application_vnd.jupyter.stderr_output_46.png", "application_vnd.jupyter.stderr_output_413.png", "application_vnd.jupyter.stderr_output_401.png", "application_vnd.jupyter.stderr_output_206.png", "application_vnd.jupyter.stderr_output_456.png", "application_vnd.jupyter.stderr_output_234.png", "application_vnd.jupyter.stderr_output_531.png", "application_vnd.jupyter.stderr_output_312.png", "application_vnd.jupyter.stderr_output_682.png", "application_vnd.jupyter.stderr_output_630.png", "application_vnd.jupyter.stderr_output_69.png", "application_vnd.jupyter.stderr_output_487.png", "application_vnd.jupyter.stderr_output_616.png", "application_vnd.jupyter.stderr_output_606.png", "application_vnd.jupyter.stderr_output_708.png", "application_vnd.jupyter.stderr_output_252.png", "application_vnd.jupyter.stderr_output_64.png", "application_vnd.jupyter.stderr_output_76.png", "application_vnd.jupyter.stderr_output_262.png", "application_vnd.jupyter.stderr_output_41.png", "application_vnd.jupyter.stderr_output_157.png", "application_vnd.jupyter.stderr_output_377.png", "application_vnd.jupyter.stderr_output_727.png", "application_vnd.jupyter.stderr_output_480.png", "application_vnd.jupyter.stderr_output_167.png", "application_vnd.jupyter.stderr_output_79.png", "application_vnd.jupyter.stderr_output_572.png", "application_vnd.jupyter.stderr_output_386.png", "application_vnd.jupyter.stderr_output_20.png", "application_vnd.jupyter.stderr_output_49.png", "application_vnd.jupyter.stderr_output_338.png", "application_vnd.jupyter.stderr_output_126.png", "application_vnd.jupyter.stderr_output_560.png", "application_vnd.jupyter.stderr_output_333.png", "application_vnd.jupyter.stderr_output_569.png", "application_vnd.jupyter.stderr_output_218.png", "application_vnd.jupyter.stderr_output_63.png", "application_vnd.jupyter.stderr_output_446.png", "application_vnd.jupyter.stderr_output_494.png", "application_vnd.jupyter.stderr_output_47.png", "application_vnd.jupyter.stderr_output_621.png", "application_vnd.jupyter.stderr_output_36.png", "application_vnd.jupyter.stderr_output_607.png", "application_vnd.jupyter.stderr_output_100.png", "application_vnd.jupyter.stderr_output_430.png", "application_vnd.jupyter.stderr_output_266.png", "application_vnd.jupyter.stderr_output_681.png", "application_vnd.jupyter.stderr_output_57.png", "application_vnd.jupyter.stderr_output_22.png", "application_vnd.jupyter.stderr_output_363.png", "application_vnd.jupyter.stderr_output_169.png", "application_vnd.jupyter.stderr_output_547.png", "application_vnd.jupyter.stderr_output_683.png", "application_vnd.jupyter.stderr_output_415.png", "application_vnd.jupyter.stderr_output_166.png", "application_vnd.jupyter.stderr_output_343.png", "application_vnd.jupyter.stderr_output_508.png", "application_vnd.jupyter.stderr_output_318.png", "application_vnd.jupyter.stderr_output_567.png", "application_vnd.jupyter.stderr_output_292.png", "application_vnd.jupyter.stderr_output_726.png", "application_vnd.jupyter.stderr_output_173.png", 
"application_vnd.jupyter.stderr_output_511.png", "application_vnd.jupyter.stderr_output_319.png", "application_vnd.jupyter.stderr_output_191.png", "application_vnd.jupyter.stderr_output_399.png", "application_vnd.jupyter.stderr_output_408.png", "application_vnd.jupyter.stderr_output_374.png", "application_vnd.jupyter.stderr_output_500.png", "application_vnd.jupyter.stderr_output_433.png", "application_vnd.jupyter.stderr_output_213.png", "application_vnd.jupyter.stderr_output_186.png", "application_vnd.jupyter.stderr_output_168.png", "application_vnd.jupyter.stderr_output_613.png", "application_vnd.jupyter.stderr_output_349.png", "application_vnd.jupyter.stderr_output_56.png", "application_vnd.jupyter.stderr_output_452.png", "application_vnd.jupyter.stderr_output_645.png", "application_vnd.jupyter.stderr_output_397.png", "application_vnd.jupyter.stderr_output_104.png", "application_vnd.jupyter.stderr_output_491.png", "application_vnd.jupyter.stderr_output_196.png", "application_vnd.jupyter.stderr_output_50.png", "application_vnd.jupyter.stderr_output_429.png", "application_vnd.jupyter.stderr_output_295.png", "application_vnd.jupyter.stderr_output_114.png", "application_vnd.jupyter.stderr_output_29.png", "application_vnd.jupyter.stderr_output_101.png", "application_vnd.jupyter.stderr_output_359.png", "application_vnd.jupyter.stderr_output_492.png", "application_vnd.jupyter.stderr_output_267.png", "application_vnd.jupyter.stderr_output_225.png", "application_vnd.jupyter.stderr_output_209.png", "application_vnd.jupyter.stderr_output_226.png", "application_vnd.jupyter.stderr_output_139.png", "application_vnd.jupyter.stderr_output_717.png", "application_vnd.jupyter.stderr_output_1.png", "application_vnd.jupyter.stderr_output_128.png", "application_vnd.jupyter.stderr_output_150.png", "application_vnd.jupyter.stderr_output_533.png", "application_vnd.jupyter.stderr_output_556.png", "application_vnd.jupyter.stderr_output_217.png", "application_vnd.jupyter.stderr_output_61.png", "application_vnd.jupyter.stderr_output_51.png", "application_vnd.jupyter.stderr_output_142.png", "application_vnd.jupyter.stderr_output_326.png", "application_vnd.jupyter.stderr_output_311.png", "application_vnd.jupyter.stderr_output_304.png", "application_vnd.jupyter.stderr_output_353.png", "application_vnd.jupyter.stderr_output_530.png", "application_vnd.jupyter.stderr_output_138.png", "application_vnd.jupyter.stderr_output_412.png", "application_vnd.jupyter.stderr_output_548.png", "application_vnd.jupyter.stderr_output_161.png", "application_vnd.jupyter.stderr_output_379.png", "application_vnd.jupyter.stderr_output_200.png", "application_vnd.jupyter.stderr_output_427.png", "application_vnd.jupyter.stderr_output_280.png", "application_vnd.jupyter.stderr_output_707.png", "application_vnd.jupyter.stderr_output_122.png", "application_vnd.jupyter.stderr_output_488.png", "application_vnd.jupyter.stderr_output_624.png", "application_vnd.jupyter.stderr_output_94.png", "application_vnd.jupyter.stderr_output_233.png", "application_vnd.jupyter.stderr_output_153.png", "application_vnd.jupyter.stderr_output_282.png", "application_vnd.jupyter.stderr_output_730.png", "application_vnd.jupyter.stderr_output_45.png", "application_vnd.jupyter.stderr_output_659.png", "application_vnd.jupyter.stderr_output_462.png", "application_vnd.jupyter.stderr_output_639.png", "application_vnd.jupyter.stderr_output_652.png", "application_vnd.jupyter.stderr_output_182.png", "application_vnd.jupyter.stderr_output_158.png", 
"application_vnd.jupyter.stderr_output_425.png", "application_vnd.jupyter.stderr_output_78.png", "application_vnd.jupyter.stderr_output_509.png", "application_vnd.jupyter.stderr_output_698.png", "application_vnd.jupyter.stderr_output_370.png", "application_vnd.jupyter.stderr_output_175.png", "application_vnd.jupyter.stderr_output_276.png", "application_vnd.jupyter.stderr_output_188.png", "application_vnd.jupyter.stderr_output_601.png", "application_vnd.jupyter.stderr_output_696.png", "application_vnd.jupyter.stderr_output_457.png", "application_vnd.jupyter.stderr_output_14.png", "application_vnd.jupyter.stderr_output_562.png", "application_vnd.jupyter.stderr_output_39.png", "application_vnd.jupyter.stderr_output_119.png", "application_vnd.jupyter.stderr_output_581.png", "application_vnd.jupyter.stderr_output_309.png", "application_vnd.jupyter.stderr_output_107.png", "application_vnd.jupyter.stderr_output_255.png", "application_vnd.jupyter.stderr_output_21.png", "application_vnd.jupyter.stderr_output_523.png", "application_vnd.jupyter.stderr_output_733.png", "application_vnd.jupyter.stderr_output_719.png", "application_vnd.jupyter.stderr_output_43.png", "application_vnd.jupyter.stderr_output_357.png", "application_vnd.jupyter.stderr_output_595.png", "application_vnd.jupyter.stderr_output_706.png", "application_vnd.jupyter.stderr_output_265.png", "application_vnd.jupyter.stderr_output_350.png", "application_vnd.jupyter.stderr_output_685.png", "application_vnd.jupyter.stderr_output_54.png", "application_vnd.jupyter.stderr_output_85.png", "application_vnd.jupyter.stderr_output_521.png", "application_vnd.jupyter.stderr_output_118.png", "application_vnd.jupyter.stderr_output_154.png", "application_vnd.jupyter.stderr_output_438.png", "application_vnd.jupyter.stderr_output_442.png", "application_vnd.jupyter.stderr_output_198.png", "application_vnd.jupyter.stderr_output_712.png", "application_vnd.jupyter.stderr_output_597.png", "application_vnd.jupyter.stderr_output_460.png", "application_vnd.jupyter.stderr_output_549.png", "application_vnd.jupyter.stderr_output_271.png", "application_vnd.jupyter.stderr_output_55.png", "application_vnd.jupyter.stderr_output_501.png", "application_vnd.jupyter.stderr_output_228.png", "application_vnd.jupyter.stderr_output_303.png", "application_vnd.jupyter.stderr_output_614.png", "application_vnd.jupyter.stderr_output_254.png", "application_vnd.jupyter.stderr_output_147.png", "application_vnd.jupyter.stderr_output_375.png", "application_vnd.jupyter.stderr_output_121.png", "application_vnd.jupyter.stderr_output_582.png", "application_vnd.jupyter.stderr_output_451.png", "application_vnd.jupyter.stderr_output_628.png", "application_vnd.jupyter.stderr_output_466.png", "application_vnd.jupyter.stderr_output_703.png", "application_vnd.jupyter.stderr_output_340.png", "application_vnd.jupyter.stderr_output_329.png", "application_vnd.jupyter.stderr_output_208.png", "application_vnd.jupyter.stderr_output_243.png", "application_vnd.jupyter.stderr_output_199.png", "application_vnd.jupyter.stderr_output_565.png", "application_vnd.jupyter.stderr_output_248.png", "application_vnd.jupyter.stderr_output_573.png", "application_vnd.jupyter.stderr_output_210.png", "application_vnd.jupyter.stderr_output_92.png", "application_vnd.jupyter.stderr_output_164.png", "application_vnd.jupyter.stderr_output_129.png", "application_vnd.jupyter.stderr_output_102.png", "application_vnd.jupyter.stderr_output_507.png", "application_vnd.jupyter.stderr_output_251.png", 
"application_vnd.jupyter.stderr_output_557.png", "application_vnd.jupyter.stderr_output_410.png", "application_vnd.jupyter.stderr_output_40.png", "application_vnd.jupyter.stderr_output_315.png", "application_vnd.jupyter.stderr_output_532.png", "application_vnd.jupyter.stderr_output_37.png", "application_vnd.jupyter.stderr_output_244.png", "application_vnd.jupyter.stderr_output_701.png", "application_vnd.jupyter.stderr_output_264.png", "application_vnd.jupyter.stderr_output_486.png", "application_vnd.jupyter.stderr_output_646.png", "application_vnd.jupyter.stderr_output_434.png" ]
import os
import random
from multiprocessing import Pool
import numpy as np
import cv2
import albumentations as A
import matplotlib.pyplot as plt
from tqdm import tqdm

INPUT_PATH = '../input/happy-whale-and-dolphin'
IMG_SIZE = 600

train_files = os.listdir(os.path.join(INPUT_PATH, 'train_images'))
test_files = os.listdir(os.path.join(INPUT_PATH, 'test_images'))
all_files = [os.path.join(INPUT_PATH, 'train_images', f) for f in train_files] + [os.path.join(INPUT_PATH, 'test_images', f) for f in test_files]
show_images = random.sample(all_files, 5)

def show_orig_norm_images(img_files, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    nrows, ncols = len(img_files), 2
    fig, ax = plt.subplots(nrows, ncols, figsize=(20, 31))
    for i in range(len(img_files)):
        img = cv2.imread(img_files[i])
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        norm_img = A.Normalize(mean=mean, std=std)(image=img)['image']
        ax[i, 0].grid(False)
        ax[i, 0].axis('off')
        ax[i, 0].title.set_text(f'{os.path.basename(img_files[i])}: original')
        ax[i, 0].imshow(img)
        ax[i, 1].grid(False)
        ax[i, 1].axis('off')
        ax[i, 1].title.set_text(f'{os.path.basename(img_files[i])}: normalized')
        ax[i, 1].imshow(norm_img)
    plt.tight_layout()
    plt.show()

show_orig_norm_images(show_images)

np.set_printoptions(precision=3)

def process_file(fp):
    img = cv2.imread(fp)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = img / 255
    return np.mean(img, axis=(0, 1)), np.std(img, axis=(0, 1))

# accumulate per-image channel means and stds over all files in parallel
mean, std = np.zeros(3), np.zeros(3)
n, done = len(all_files), 0
with Pool(os.cpu_count()) as p:
    pbar = tqdm(p.imap(process_file, all_files), total=n)
    for m, s in pbar:
        done += 1
        mean += m
        std += s
        pbar.set_description(f'{mean / done} {std / done}')
mean, std = mean / n, std / n
print(mean, std)
code
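One caveat worth knowing: averaging per-image standard deviations only approximates the dataset-wide std. An exact per-channel std (for equally sized images; otherwise weight by pixel count) accumulates first and second moments instead. A sketch with a hypothetical helper:

import numpy as np
import cv2

def process_file_moments(fp):
    # hypothetical variant of process_file returning E[x] and E[x^2] per channel
    img = cv2.cvtColor(cv2.imread(fp), cv2.COLOR_BGR2RGB) / 255
    return np.mean(img, axis=(0, 1)), np.mean(img ** 2, axis=(0, 1))

# given sums over all n images: mean = sum_m / n; std = np.sqrt(sum_m2 / n - mean ** 2)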
88080932/cell_21
[ "text_plain_output_1.png" ]
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

X_train, y_train = train_df.drop('quality', axis=1), train_df['quality']
X_val, y_val = val_df.drop('quality', axis=1), val_df['quality']
X_test, y_test = test_df.drop('quality', axis=1), test_df['quality']
X_train.shape, y_train.shape, X_val.shape, y_val.shape, X_test.shape, y_test.shape

scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_val_scaled = scaler.transform(X_val)
X_test_scaled = scaler.transform(X_test)

# shift quality labels 3..8 down to 0..5 for sparse categorical cross-entropy
y_train_scaled = y_train - 3
y_val_scaled = y_val - 3
y_test_scaled = y_test - 3

model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(6, activation='softmax')
])
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              optimizer=tf.keras.optimizers.Adam(),
              metrics=['accuracy'])
history = model.fit(X_train_scaled, y_train_scaled, epochs=30, validation_data=(X_val_scaled, y_val_scaled))

y_pred = tf.argmax(model.predict(X_val_scaled), axis=1) + 3

# plot confusion matrix
cm = confusion_matrix(y_val, y_pred)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=[3, 4, 5, 6, 7, 8])
fig, ax = plt.subplots(figsize=(10, 10))
disp.plot(ax=ax)
plt.show()
code
88080932/cell_13
[ "text_plain_output_1.png" ]
X_train, y_train = train_df.drop('quality', axis=1), train_df['quality']
X_val, y_val = val_df.drop('quality', axis=1), val_df['quality']
X_test, y_test = test_df.drop('quality', axis=1), test_df['quality']
X_train.shape, y_train.shape, X_val.shape, y_val.shape, X_test.shape, y_test.shape
code
88080932/cell_9
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd

wine_quality_df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv', index_col='Id')
# 20% test, then 1/8 of the remaining 80% (10% overall) for validation -> 70/10/20 split
train_df, test_df = train_test_split(wine_quality_df, test_size=0.2, stratify=wine_quality_df['quality'])
train_df, val_df = train_test_split(train_df, test_size=1 / 8, stratify=train_df['quality'])
len(train_df), len(val_df), len(test_df)
code
88080932/cell_4
[ "image_output_1.png" ]
import pandas as pd

wine_quality_df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv', index_col='Id')
wine_quality_df.info()
code
88080932/cell_30
[ "text_plain_output_1.png" ]
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from sklearn.preprocessing import StandardScaler
from sklearn.utils.class_weight import compute_class_weight
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

X_train, y_train = train_df.drop('quality', axis=1), train_df['quality']
X_val, y_val = val_df.drop('quality', axis=1), val_df['quality']
X_test, y_test = test_df.drop('quality', axis=1), test_df['quality']
X_train.shape, y_train.shape, X_val.shape, y_val.shape, X_test.shape, y_test.shape

scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_val_scaled = scaler.transform(X_val)
X_test_scaled = scaler.transform(X_test)

y_train_scaled = y_train - 3
y_val_scaled = y_val - 3
y_test_scaled = y_test - 3

# baseline: two hidden layers, unweighted classes
model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(6, activation='softmax')
])
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              optimizer=tf.keras.optimizers.Adam(),
              metrics=['accuracy'])
history = model.fit(X_train_scaled, y_train_scaled, epochs=30, validation_data=(X_val_scaled, y_val_scaled))
y_pred = tf.argmax(model.predict(X_val_scaled), axis=1) + 3

# plot confusion matrix for the baseline
cm = confusion_matrix(y_val, y_pred)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=[3, 4, 5, 6, 7, 8])
fig, ax = plt.subplots(figsize=(10, 10))
disp.plot(ax=ax)
plt.show()

# retrain with balanced class weights and a deeper network
weights = compute_class_weight(class_weight='balanced', classes=np.unique(y_train), y=y_train)
weights_dict = {i: weights[i] for i in range(len(weights))}
model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.8),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(6, activation='softmax')
])
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
              metrics=['accuracy'])
history = model.fit(X_train_scaled, y_train_scaled, epochs=400, validation_data=(X_val_scaled, y_val_scaled), class_weight=weights_dict)
y_pred = tf.argmax(model.predict(X_val_scaled), axis=1) + 3

# confusion matrix for the weighted model
cm = confusion_matrix(y_val, y_pred)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=[3, 4, 5, 6, 7, 8])
fig, ax = plt.subplots(figsize=(10, 10))
disp.plot(ax=ax)
plt.show()
code
88080932/cell_20
[ "text_html_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import numpy as np
import tensorflow as tf

X_train, y_train = train_df.drop('quality', axis=1), train_df['quality']
X_val, y_val = val_df.drop('quality', axis=1), val_df['quality']
X_test, y_test = test_df.drop('quality', axis=1), test_df['quality']
X_train.shape, y_train.shape, X_val.shape, y_val.shape, X_test.shape, y_test.shape

scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_val_scaled = scaler.transform(X_val)
X_test_scaled = scaler.transform(X_test)

y_train_scaled = y_train - 3
y_val_scaled = y_val - 3
y_test_scaled = y_test - 3

model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(6, activation='softmax')
])
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              optimizer=tf.keras.optimizers.Adam(),
              metrics=['accuracy'])
history = model.fit(X_train_scaled, y_train_scaled, epochs=30, validation_data=(X_val_scaled, y_val_scaled))

y_pred = tf.argmax(model.predict(X_val_scaled), axis=1) + 3
print(f'true values: \n{np.array(y_val[:20])} \n\npredicted values: \n{y_pred[:20]}')
code
88080932/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

wine_quality_df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv', index_col='Id')
wine_quality_df['quality'].value_counts()
code
88080932/cell_29
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler
from sklearn.utils.class_weight import compute_class_weight
import numpy as np
import tensorflow as tf

X_train, y_train = train_df.drop('quality', axis=1), train_df['quality']
X_val, y_val = val_df.drop('quality', axis=1), val_df['quality']
X_test, y_test = test_df.drop('quality', axis=1), test_df['quality']
X_train.shape, y_train.shape, X_val.shape, y_val.shape, X_test.shape, y_test.shape

scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_val_scaled = scaler.transform(X_val)
X_test_scaled = scaler.transform(X_test)

y_train_scaled = y_train - 3
y_val_scaled = y_val - 3
y_test_scaled = y_test - 3

model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(6, activation='softmax')
])
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              optimizer=tf.keras.optimizers.Adam(),
              metrics=['accuracy'])
history = model.fit(X_train_scaled, y_train_scaled, epochs=30, validation_data=(X_val_scaled, y_val_scaled))
y_pred = tf.argmax(model.predict(X_val_scaled), axis=1) + 3

# rebalance classes and train a deeper model for longer
weights = compute_class_weight(class_weight='balanced', classes=np.unique(y_train), y=y_train)
weights_dict = {i: weights[i] for i in range(len(weights))}
model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.8),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(6, activation='softmax')
])
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
              metrics=['accuracy'])
history = model.fit(X_train_scaled, y_train_scaled, epochs=400, validation_data=(X_val_scaled, y_val_scaled), class_weight=weights_dict)
y_pred = tf.argmax(model.predict(X_val_scaled), axis=1) + 3
print(f'true values: \n{np.array(y_val[:20])} \n\npredicted values: \n{y_pred[:20]}')
code
88080932/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.preprocessing import StandardScaler
from sklearn.utils.class_weight import compute_class_weight
import numpy as np
import tensorflow as tf
X_train, y_train = (train_df.drop('quality', axis=1), train_df['quality'])
X_val, y_val = (val_df.drop('quality', axis=1), val_df['quality'])
X_test, y_test = (test_df.drop('quality', axis=1), test_df['quality'])
(X_train.shape, y_train.shape, X_val.shape, y_val.shape, X_test.shape, y_test.shape)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_val_scaled = scaler.transform(X_val)
X_test_scaled = scaler.transform(X_test)
y_train_scaled = y_train - 3
y_val_scaled = y_val - 3
y_test_scaled = y_test - 3
model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(6, activation='softmax')
])
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
history = model.fit(X_train_scaled, y_train_scaled, epochs=30, validation_data=(X_val_scaled, y_val_scaled))
y_pred = tf.argmax(model.predict(X_val_scaled), axis=1) + 3
weights = compute_class_weight(class_weight='balanced', classes=np.unique(y_train), y=y_train)
weights_dict = {i: weights[i] for i in range(len(weights))}
print(weights_dict)
code
88080932/cell_11
[ "text_plain_output_1.png" ]
train_df.describe()
code
88080932/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import pandas as pd
import tensorflow as tf
wine_quality_df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv', index_col='Id')
X_train, y_train = (train_df.drop('quality', axis=1), train_df['quality'])
X_val, y_val = (val_df.drop('quality', axis=1), val_df['quality'])
X_test, y_test = (test_df.drop('quality', axis=1), test_df['quality'])
(X_train.shape, y_train.shape, X_val.shape, y_val.shape, X_test.shape, y_test.shape)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_val_scaled = scaler.transform(X_val)
X_test_scaled = scaler.transform(X_test)
y_train_scaled = y_train - 3
y_val_scaled = y_val - 3
y_test_scaled = y_test - 3
model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(6, activation='softmax')
])
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
history = model.fit(X_train_scaled, y_train_scaled, epochs=30, validation_data=(X_val_scaled, y_val_scaled))
pd.DataFrame(history.history).plot()
code
88080932/cell_18
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
X_train, y_train = (train_df.drop('quality', axis=1), train_df['quality'])
X_val, y_val = (val_df.drop('quality', axis=1), val_df['quality'])
X_test, y_test = (test_df.drop('quality', axis=1), test_df['quality'])
(X_train.shape, y_train.shape, X_val.shape, y_val.shape, X_test.shape, y_test.shape)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_val_scaled = scaler.transform(X_val)
X_test_scaled = scaler.transform(X_test)
y_train_scaled = y_train - 3
y_val_scaled = y_val - 3
y_test_scaled = y_test - 3
model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(6, activation='softmax')
])
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
history = model.fit(X_train_scaled, y_train_scaled, epochs=30, validation_data=(X_val_scaled, y_val_scaled))
code
88080932/cell_32
[ "text_plain_output_1.png" ]
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from sklearn.preprocessing import StandardScaler
from sklearn.utils.class_weight import compute_class_weight
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
X_train, y_train = (train_df.drop('quality', axis=1), train_df['quality'])
X_val, y_val = (val_df.drop('quality', axis=1), val_df['quality'])
X_test, y_test = (test_df.drop('quality', axis=1), test_df['quality'])
(X_train.shape, y_train.shape, X_val.shape, y_val.shape, X_test.shape, y_test.shape)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_val_scaled = scaler.transform(X_val)
X_test_scaled = scaler.transform(X_test)
y_train_scaled = y_train - 3
y_val_scaled = y_val - 3
y_test_scaled = y_test - 3
model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(6, activation='softmax')
])
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
history = model.fit(X_train_scaled, y_train_scaled, epochs=30, validation_data=(X_val_scaled, y_val_scaled))
y_pred = tf.argmax(model.predict(X_val_scaled), axis=1) + 3

# plot confusion matrix for the unweighted model on the validation set
cm = confusion_matrix(y_val, y_pred)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=[3, 4, 5, 6, 7, 8])
fig, ax = plt.subplots(figsize=(10, 10))
disp.plot(ax=ax)
plt.show()

weights = compute_class_weight(class_weight='balanced', classes=np.unique(y_train), y=y_train)
weights_dict = {i: weights[i] for i in range(len(weights))}
model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.8),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(6, activation='softmax')
])
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), metrics=['accuracy'])
history = model.fit(X_train_scaled, y_train_scaled, epochs=400, validation_data=(X_val_scaled, y_val_scaled), class_weight=weights_dict)
y_pred = tf.argmax(model.predict(X_val_scaled), axis=1) + 3

# plot confusion matrix for the class-weighted model on the validation set
cm = confusion_matrix(y_val, y_pred)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=[3, 4, 5, 6, 7, 8])
fig, ax = plt.subplots(figsize=(10, 10))
disp.plot(ax=ax)
plt.show()

y_pred_test = tf.argmax(model.predict(X_test_scaled), axis=1) + 3
# plot confusion matrix on the held-out test set
cm = confusion_matrix(y_test, y_pred_test)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=[3, 4, 5, 6, 7, 8])
fig, ax = plt.subplots(figsize=(10, 10))
disp.plot(ax=ax)
plt.show()
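# Reading note: sklearn's confusion_matrix puts true labels on the rows and
# predictions on the columns, so off-diagonal mass within a row shows where
# that true quality score is being misclassified.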
code
88080932/cell_28
[ "image_output_1.png" ]
from sklearn.preprocessing import StandardScaler
from sklearn.utils.class_weight import compute_class_weight
import numpy as np
import pandas as pd
import tensorflow as tf
wine_quality_df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv', index_col='Id')
X_train, y_train = (train_df.drop('quality', axis=1), train_df['quality'])
X_val, y_val = (val_df.drop('quality', axis=1), val_df['quality'])
X_test, y_test = (test_df.drop('quality', axis=1), test_df['quality'])
(X_train.shape, y_train.shape, X_val.shape, y_val.shape, X_test.shape, y_test.shape)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_val_scaled = scaler.transform(X_val)
X_test_scaled = scaler.transform(X_test)
y_train_scaled = y_train - 3
y_val_scaled = y_val - 3
y_test_scaled = y_test - 3
model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(6, activation='softmax')
])
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
history = model.fit(X_train_scaled, y_train_scaled, epochs=30, validation_data=(X_val_scaled, y_val_scaled))
y_pred = tf.argmax(model.predict(X_val_scaled), axis=1) + 3
weights = compute_class_weight(class_weight='balanced', classes=np.unique(y_train), y=y_train)
weights_dict = {i: weights[i] for i in range(len(weights))}
model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.8),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(6, activation='softmax')
])
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), metrics=['accuracy'])
history = model.fit(X_train_scaled, y_train_scaled, epochs=400, validation_data=(X_val_scaled, y_val_scaled), class_weight=weights_dict)
pd.DataFrame(history.history).plot()
code
88080932/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
wine_quality_df = pd.read_csv('/kaggle/input/wine-quality-dataset/WineQT.csv', index_col='Id')
wine_quality_df.head()
code
88080932/cell_31
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.preprocessing import StandardScaler
from sklearn.utils.class_weight import compute_class_weight
import numpy as np
import tensorflow as tf
X_train, y_train = (train_df.drop('quality', axis=1), train_df['quality'])
X_val, y_val = (val_df.drop('quality', axis=1), val_df['quality'])
X_test, y_test = (test_df.drop('quality', axis=1), test_df['quality'])
(X_train.shape, y_train.shape, X_val.shape, y_val.shape, X_test.shape, y_test.shape)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_val_scaled = scaler.transform(X_val)
X_test_scaled = scaler.transform(X_test)
y_train_scaled = y_train - 3
y_val_scaled = y_val - 3
y_test_scaled = y_test - 3
model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(6, activation='softmax')
])
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
history = model.fit(X_train_scaled, y_train_scaled, epochs=30, validation_data=(X_val_scaled, y_val_scaled))
y_pred = tf.argmax(model.predict(X_val_scaled), axis=1) + 3
weights = compute_class_weight(class_weight='balanced', classes=np.unique(y_train), y=y_train)
weights_dict = {i: weights[i] for i in range(len(weights))}
model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.8),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(6, activation='softmax')
])
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), metrics=['accuracy'])
history = model.fit(X_train_scaled, y_train_scaled, epochs=400, validation_data=(X_val_scaled, y_val_scaled), class_weight=weights_dict)
y_pred = tf.argmax(model.predict(X_val_scaled), axis=1) + 3
y_pred_test = tf.argmax(model.predict(X_test_scaled), axis=1) + 3
print(f'true values: \n{np.array(y_test[:20])} \n\npredicted values: \n{y_pred_test[:20]}')
code
88080932/cell_24
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.preprocessing import StandardScaler
from sklearn.utils.class_weight import compute_class_weight
import numpy as np
import tensorflow as tf
X_train, y_train = (train_df.drop('quality', axis=1), train_df['quality'])
X_val, y_val = (val_df.drop('quality', axis=1), val_df['quality'])
X_test, y_test = (test_df.drop('quality', axis=1), test_df['quality'])
(X_train.shape, y_train.shape, X_val.shape, y_val.shape, X_test.shape, y_test.shape)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_val_scaled = scaler.transform(X_val)
X_test_scaled = scaler.transform(X_test)
y_train_scaled = y_train - 3
y_val_scaled = y_val - 3
y_test_scaled = y_test - 3
model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(6, activation='softmax')
])
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
history = model.fit(X_train_scaled, y_train_scaled, epochs=30, validation_data=(X_val_scaled, y_val_scaled))
y_pred = tf.argmax(model.predict(X_val_scaled), axis=1) + 3
weights = compute_class_weight(class_weight='balanced', classes=np.unique(y_train), y=y_train)
for label, weight in zip(np.unique(y_train), weights):
    print(f'label: {label}, number of instances: {len(y_train[y_train == label])}, weight: {weight}')
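# Note: the 'balanced' heuristic assigns each class the weight
# n_samples / (n_classes * count(class)), so the rarest quality scores
# receive the largest weights.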
code
88080932/cell_10
[ "text_html_output_1.png" ]
train_df.corr()['quality'].plot.bar()
code
88080932/cell_27
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler
from sklearn.utils.class_weight import compute_class_weight
import numpy as np
import tensorflow as tf
X_train, y_train = (train_df.drop('quality', axis=1), train_df['quality'])
X_val, y_val = (val_df.drop('quality', axis=1), val_df['quality'])
X_test, y_test = (test_df.drop('quality', axis=1), test_df['quality'])
(X_train.shape, y_train.shape, X_val.shape, y_val.shape, X_test.shape, y_test.shape)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_val_scaled = scaler.transform(X_val)
X_test_scaled = scaler.transform(X_test)
y_train_scaled = y_train - 3
y_val_scaled = y_val - 3
y_test_scaled = y_test - 3
model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(6, activation='softmax')
])
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
history = model.fit(X_train_scaled, y_train_scaled, epochs=30, validation_data=(X_val_scaled, y_val_scaled))
y_pred = tf.argmax(model.predict(X_val_scaled), axis=1) + 3
weights = compute_class_weight(class_weight='balanced', classes=np.unique(y_train), y=y_train)
weights_dict = {i: weights[i] for i in range(len(weights))}
model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.8),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(100, activation='relu'),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(6, activation='softmax')
])
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(), optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), metrics=['accuracy'])
history = model.fit(X_train_scaled, y_train_scaled, epochs=400, validation_data=(X_val_scaled, y_val_scaled), class_weight=weights_dict)
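# Note: with class_weight, Keras scales each sample's loss contribution by the
# weight of its class, so mistakes on rare quality scores are penalized more
# heavily throughout the 400 epochs.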
code
106210100/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
from google.cloud import bigquery
client = bigquery.Client()
dataset_ref = client.dataset('san_francisco_bikeshare', project='bigquery-public-data')
dataset = client.get_dataset(dataset_ref)
tables = list(client.list_tables(dataset))
table_ref_regions = dataset_ref.table('bikeshare_regions')
table_ref_station_info = dataset_ref.table('bikeshare_station_info')
table_ref_station_status = dataset_ref.table('bikeshare_station_status')
table_ref_trips = dataset_ref.table('bikeshare_trips')
bikeshare_regions = client.get_table(table_ref_regions)
bikeshare_station_info = client.get_table(table_ref_station_info)
bikeshare_station_status = client.get_table(table_ref_station_status)
bikeshare_trips = client.get_table(table_ref_trips)
client.list_rows(bikeshare_trips, max_results=5).to_dataframe()
query1 = """
    SELECT r.name AS region, COUNT(si.station_id) AS station_count
    FROM `bigquery-public-data.san_francisco_bikeshare.bikeshare_station_info` AS si
    INNER JOIN `bigquery-public-data.san_francisco_bikeshare.bikeshare_regions` AS r
    USING (region_id)
    GROUP BY region
    ORDER BY station_count DESC;
    """
query_job1 = client.query(query1)
query_result1 = query_job1.to_dataframe()
query2a = """
    SELECT start_station_name, COUNT(start_station_name) AS start_station_count
    FROM `bigquery-public-data.san_francisco_bikeshare.bikeshare_trips`
    GROUP BY start_station_name
    ORDER BY start_station_count DESC;
    """
query_job2a = client.query(query2a)
query_result2a = query_job2a.to_dataframe()
query2b = """
    SELECT end_station_name, COUNT(end_station_name) AS end_station_count
    FROM `bigquery-public-data.san_francisco_bikeshare.bikeshare_trips`
    GROUP BY end_station_name
    ORDER BY end_station_count DESC;
    """
query_job2b = client.query(query2b)
query_result2b = query_job2b.to_dataframe()
query2c = """
    WITH s AS (
        SELECT start_station_name, COUNT(start_station_name) AS start_station_count
        FROM `bigquery-public-data.san_francisco_bikeshare.bikeshare_trips`
        GROUP BY start_station_name
        ORDER BY start_station_count DESC
    ),
    e AS (
        SELECT end_station_name, COUNT(end_station_name) AS end_station_count
        FROM `bigquery-public-data.san_francisco_bikeshare.bikeshare_trips`
        GROUP BY end_station_name
        ORDER BY end_station_count DESC
    )
    SELECT s.start_station_name AS station_name, s.start_station_count AS starting_count, e.end_station_count AS ending_count
    FROM s
    INNER JOIN e
        ON s.start_station_name = e.end_station_name
    ORDER BY start_station_count DESC, end_station_count DESC;
    """
query_job2c = client.query(query2c)
query_result2c = query_job2c.to_dataframe()
print(query_result2c)
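# Note: the INNER JOIN in query2c keeps only stations that appear both as a
# start and as an end station; a FULL OUTER JOIN would also retain stations
# seen in just one of the two roles.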
code
106210100/cell_9
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
from google.cloud import bigquery
client = bigquery.Client()
dataset_ref = client.dataset('san_francisco_bikeshare', project='bigquery-public-data')
dataset = client.get_dataset(dataset_ref)
tables = list(client.list_tables(dataset))
table_ref_regions = dataset_ref.table('bikeshare_regions')
table_ref_station_info = dataset_ref.table('bikeshare_station_info')
table_ref_station_status = dataset_ref.table('bikeshare_station_status')
table_ref_trips = dataset_ref.table('bikeshare_trips')
bikeshare_regions = client.get_table(table_ref_regions)
bikeshare_station_info = client.get_table(table_ref_station_info)
bikeshare_station_status = client.get_table(table_ref_station_status)
bikeshare_trips = client.get_table(table_ref_trips)
query1 = """
    SELECT r.name AS region, COUNT(si.station_id) AS station_count
    FROM `bigquery-public-data.san_francisco_bikeshare.bikeshare_station_info` AS si
    INNER JOIN `bigquery-public-data.san_francisco_bikeshare.bikeshare_regions` AS r
    USING (region_id)
    GROUP BY region
    ORDER BY station_count DESC;
    """
query_job1 = client.query(query1)
query_result1 = query_job1.to_dataframe()
print(query_result1)
code
106210100/cell_4
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
from google.cloud import bigquery
client = bigquery.Client()
dataset_ref = client.dataset('san_francisco_bikeshare', project='bigquery-public-data')
dataset = client.get_dataset(dataset_ref)
tables = list(client.list_tables(dataset))
for table in tables:
    print(table.table_id)
code
106210100/cell_6
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
from google.cloud import bigquery
client = bigquery.Client()
dataset_ref = client.dataset('san_francisco_bikeshare', project='bigquery-public-data')
dataset = client.get_dataset(dataset_ref)
tables = list(client.list_tables(dataset))
table_ref_regions = dataset_ref.table('bikeshare_regions')
table_ref_station_info = dataset_ref.table('bikeshare_station_info')
table_ref_station_status = dataset_ref.table('bikeshare_station_status')
table_ref_trips = dataset_ref.table('bikeshare_trips')
bikeshare_regions = client.get_table(table_ref_regions)
bikeshare_station_info = client.get_table(table_ref_station_info)
bikeshare_station_status = client.get_table(table_ref_station_status)
bikeshare_trips = client.get_table(table_ref_trips)
print('bikeshare_trips SCHEMA:', '\n', bikeshare_trips.schema)
code
106210100/cell_2
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
from google.cloud import bigquery
client = bigquery.Client()
code
106210100/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
from google.cloud import bigquery
client = bigquery.Client()
dataset_ref = client.dataset('san_francisco_bikeshare', project='bigquery-public-data')
dataset = client.get_dataset(dataset_ref)
tables = list(client.list_tables(dataset))
table_ref_regions = dataset_ref.table('bikeshare_regions')
table_ref_station_info = dataset_ref.table('bikeshare_station_info')
table_ref_station_status = dataset_ref.table('bikeshare_station_status')
table_ref_trips = dataset_ref.table('bikeshare_trips')
bikeshare_regions = client.get_table(table_ref_regions)
bikeshare_station_info = client.get_table(table_ref_station_info)
bikeshare_station_status = client.get_table(table_ref_station_status)
bikeshare_trips = client.get_table(table_ref_trips)
client.list_rows(bikeshare_trips, max_results=5).to_dataframe()
query1 = """
    SELECT r.name AS region, COUNT(si.station_id) AS station_count
    FROM `bigquery-public-data.san_francisco_bikeshare.bikeshare_station_info` AS si
    INNER JOIN `bigquery-public-data.san_francisco_bikeshare.bikeshare_regions` AS r
    USING (region_id)
    GROUP BY region
    ORDER BY station_count DESC;
    """
query_job1 = client.query(query1)
query_result1 = query_job1.to_dataframe()
query2a = """
    SELECT start_station_name, COUNT(start_station_name) AS start_station_count
    FROM `bigquery-public-data.san_francisco_bikeshare.bikeshare_trips`
    GROUP BY start_station_name
    ORDER BY start_station_count DESC;
    """
query_job2a = client.query(query2a)
query_result2a = query_job2a.to_dataframe()
print(query_result2a)
code
106210100/cell_7
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
from google.cloud import bigquery
client = bigquery.Client()
dataset_ref = client.dataset('san_francisco_bikeshare', project='bigquery-public-data')
dataset = client.get_dataset(dataset_ref)
tables = list(client.list_tables(dataset))
table_ref_regions = dataset_ref.table('bikeshare_regions')
table_ref_station_info = dataset_ref.table('bikeshare_station_info')
table_ref_station_status = dataset_ref.table('bikeshare_station_status')
table_ref_trips = dataset_ref.table('bikeshare_trips')
bikeshare_regions = client.get_table(table_ref_regions)
bikeshare_station_info = client.get_table(table_ref_station_info)
bikeshare_station_status = client.get_table(table_ref_station_status)
bikeshare_trips = client.get_table(table_ref_trips)
client.list_rows(bikeshare_trips, max_results=5).to_dataframe()
code
106210100/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
from google.cloud import bigquery
client = bigquery.Client()
dataset_ref = client.dataset('san_francisco_bikeshare', project='bigquery-public-data')
dataset = client.get_dataset(dataset_ref)
tables = list(client.list_tables(dataset))
table_ref_regions = dataset_ref.table('bikeshare_regions')
table_ref_station_info = dataset_ref.table('bikeshare_station_info')
table_ref_station_status = dataset_ref.table('bikeshare_station_status')
table_ref_trips = dataset_ref.table('bikeshare_trips')
bikeshare_regions = client.get_table(table_ref_regions)
bikeshare_station_info = client.get_table(table_ref_station_info)
bikeshare_station_status = client.get_table(table_ref_station_status)
bikeshare_trips = client.get_table(table_ref_trips)
client.list_rows(bikeshare_trips, max_results=5).to_dataframe()
query1 = """
    SELECT r.name AS region, COUNT(si.station_id) AS station_count
    FROM `bigquery-public-data.san_francisco_bikeshare.bikeshare_station_info` AS si
    INNER JOIN `bigquery-public-data.san_francisco_bikeshare.bikeshare_regions` AS r
    USING (region_id)
    GROUP BY region
    ORDER BY station_count DESC;
    """
query_job1 = client.query(query1)
query_result1 = query_job1.to_dataframe()
query2a = """
    SELECT start_station_name, COUNT(start_station_name) AS start_station_count
    FROM `bigquery-public-data.san_francisco_bikeshare.bikeshare_trips`
    GROUP BY start_station_name
    ORDER BY start_station_count DESC;
    """
query_job2a = client.query(query2a)
query_result2a = query_job2a.to_dataframe()
query2b = """
    SELECT end_station_name, COUNT(end_station_name) AS end_station_count
    FROM `bigquery-public-data.san_francisco_bikeshare.bikeshare_trips`
    GROUP BY end_station_name
    ORDER BY end_station_count DESC;
    """
query_job2b = client.query(query2b)
query_result2b = query_job2b.to_dataframe()
print(query_result2b)
code
90104537/cell_13
[ "image_output_1.png" ]
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
params = {'legend.fontsize': 'x-large', 'figure.figsize': (15, 10), 'axes.labelsize': 'x-large', 'axes.titlesize': 'x-large', 'xtick.labelsize': 'x-large', 'ytick.labelsize': 'x-large'}
pylab.rcParams.update(params)
plt.style.use('dark_background')
df = pd.read_csv('../input/step-count-from-phone-app/data.csv')
df['start'] = pd.to_datetime(df['start'])
df['end'] = pd.to_datetime(df['end'])
df_temp = df.copy()
# Round timestamps to whole days before aggregating per user
df_temp['start'] = df_temp['start'].dt.round('24h')
df_temp = df_temp.groupby(['user_id', 'start']).sum().reset_index()

g = sns.barplot(data=df.groupby([df.start.dt.year, 'user_id']).mean().reset_index(), x='start', y='value', hue='user_id')
plt.xlabel('Year')
plt.ylabel('Avg steps number')
plt.title('Average steps number by year')

f = sns.barplot(data=df.groupby([df.start.dt.month, 'user_id']).mean().reset_index(), x='start', y='value', hue='user_id')
plt.xlabel('Month')
plt.ylabel('Avg steps number')
plt.title('Average steps number by month')
f.set_xticklabels(['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])

f = sns.barplot(data=df.groupby([df.start.dt.weekday, 'user_id']).mean().reset_index(), x='start', y='value', hue='user_id')
plt.xlabel('Weekday')
plt.ylabel('Avg steps number')
plt.title('Average steps number per weekday')
f.set_xticklabels(['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'])

g = sns.barplot(data=df.groupby([df.start.dt.hour, 'user_id']).mean().reset_index(), x='start', y='value', hue='user_id')
plt.xlabel('Hour')
plt.ylabel('Avg steps number')
plt.title('Average number of steps by hour')
code
90104537/cell_4
[ "image_output_1.png" ]
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
params = {'legend.fontsize': 'x-large', 'figure.figsize': (15, 10), 'axes.labelsize': 'x-large', 'axes.titlesize': 'x-large', 'xtick.labelsize': 'x-large', 'ytick.labelsize': 'x-large'}
pylab.rcParams.update(params)
plt.style.use('dark_background')
df = pd.read_csv('../input/step-count-from-phone-app/data.csv')
df['start'] = pd.to_datetime(df['start'])
df['end'] = pd.to_datetime(df['end'])
df_temp = df.copy()
df_temp['start'] = df_temp['start'].dt.round('24h')
df_temp = df_temp.groupby(['user_id', 'start']).sum().reset_index()
sns.lineplot(x='start', y='value', data=df_temp, hue='user_id')
plt.xlabel('Date')
plt.ylabel('Steps Count')
plt.title('Steps number per day')
code
90104537/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
params = {'legend.fontsize': 'x-large', 'figure.figsize': (15, 10), 'axes.labelsize': 'x-large', 'axes.titlesize': 'x-large', 'xtick.labelsize': 'x-large', 'ytick.labelsize': 'x-large'}
pylab.rcParams.update(params)
plt.style.use('dark_background')
df = pd.read_csv('../input/step-count-from-phone-app/data.csv')
df['start'] = pd.to_datetime(df['start'])
df['end'] = pd.to_datetime(df['end'])
df_temp = df.copy()
df_temp['start'] = df_temp['start'].dt.round('24h')
df_temp = df_temp.groupby(['user_id', 'start']).sum().reset_index()
g = sns.barplot(data=df.groupby([df.start.dt.year, 'user_id']).mean().reset_index(), x='start', y='value', hue='user_id')
plt.xlabel('Year')
plt.ylabel('Avg steps number')
plt.title('Average steps number by year')
code
90104537/cell_8
[ "image_output_1.png" ]
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
params = {'legend.fontsize': 'x-large', 'figure.figsize': (15, 10), 'axes.labelsize': 'x-large', 'axes.titlesize': 'x-large', 'xtick.labelsize': 'x-large', 'ytick.labelsize': 'x-large'}
pylab.rcParams.update(params)
plt.style.use('dark_background')
df = pd.read_csv('../input/step-count-from-phone-app/data.csv')
df['start'] = pd.to_datetime(df['start'])
df['end'] = pd.to_datetime(df['end'])
df_temp = df.copy()
df_temp['start'] = df_temp['start'].dt.round('24h')
df_temp = df_temp.groupby(['user_id', 'start']).sum().reset_index()

g = sns.barplot(data=df.groupby([df.start.dt.year, 'user_id']).mean().reset_index(), x='start', y='value', hue='user_id')
plt.xlabel('Year')
plt.ylabel('Avg steps number')
plt.title('Average steps number by year')

f = sns.barplot(data=df.groupby([df.start.dt.month, 'user_id']).mean().reset_index(), x='start', y='value', hue='user_id')
plt.xlabel('Month')
plt.ylabel('Avg steps number')
plt.title('Average steps number by month')
f.set_xticklabels(['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])
code
90104537/cell_10
[ "image_output_1.png" ]
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
params = {'legend.fontsize': 'x-large', 'figure.figsize': (15, 10), 'axes.labelsize': 'x-large', 'axes.titlesize': 'x-large', 'xtick.labelsize': 'x-large', 'ytick.labelsize': 'x-large'}
pylab.rcParams.update(params)
plt.style.use('dark_background')
df = pd.read_csv('../input/step-count-from-phone-app/data.csv')
df['start'] = pd.to_datetime(df['start'])
df['end'] = pd.to_datetime(df['end'])
df_temp = df.copy()
df_temp['start'] = df_temp['start'].dt.round('24h')
df_temp = df_temp.groupby(['user_id', 'start']).sum().reset_index()

g = sns.barplot(data=df.groupby([df.start.dt.year, 'user_id']).mean().reset_index(), x='start', y='value', hue='user_id')
plt.xlabel('Year')
plt.ylabel('Avg steps number')
plt.title('Average steps number by year')

f = sns.barplot(data=df.groupby([df.start.dt.month, 'user_id']).mean().reset_index(), x='start', y='value', hue='user_id')
plt.xlabel('Month')
plt.ylabel('Avg steps number')
plt.title('Average steps number by month')
f.set_xticklabels(['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])

f = sns.barplot(data=df.groupby([df.start.dt.weekday, 'user_id']).mean().reset_index(), x='start', y='value', hue='user_id')
plt.xlabel('Weekday')
plt.ylabel('Avg steps number')
plt.title('Average steps number per weekday')
f.set_xticklabels(['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'])
code
90104537/cell_12
[ "image_output_1.png" ]
import matplotlib.pylab as pylab
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
params = {'legend.fontsize': 'x-large', 'figure.figsize': (15, 10), 'axes.labelsize': 'x-large', 'axes.titlesize': 'x-large', 'xtick.labelsize': 'x-large', 'ytick.labelsize': 'x-large'}
pylab.rcParams.update(params)
plt.style.use('dark_background')
df = pd.read_csv('../input/step-count-from-phone-app/data.csv')
df['start'] = pd.to_datetime(df['start'])
df['end'] = pd.to_datetime(df['end'])
df_temp = df.copy()
df_temp['start'] = df_temp['start'].dt.round('24h')
df_temp = df_temp.groupby(['user_id', 'start']).sum().reset_index()

g = sns.barplot(data=df.groupby([df.start.dt.year, 'user_id']).mean().reset_index(), x='start', y='value', hue='user_id')
plt.xlabel('Year')
plt.ylabel('Avg steps number')
plt.title('Average steps number by year')

f = sns.barplot(data=df.groupby([df.start.dt.month, 'user_id']).mean().reset_index(), x='start', y='value', hue='user_id')
plt.xlabel('Month')
plt.ylabel('Avg steps number')
plt.title('Average steps number by month')
f.set_xticklabels(['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])

f = sns.barplot(data=df.groupby([df.start.dt.weekday, 'user_id']).mean().reset_index(), x='start', y='value', hue='user_id')
plt.xlabel('Weekday')
plt.ylabel('Avg steps number')
plt.title('Average steps number per weekday')
f.set_xticklabels(['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'])

sns.catplot(x='start', y='value', col='user_id', data=df.groupby([df.start.dt.hour, 'user_id']).mean().reset_index(), kind='bar', col_wrap=3)
code
32071924/cell_13
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd
train_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test_df = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
submission = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/submission.csv')
train_df.isna().sum()
test_df.isna().sum()
all_data = pd.concat([train_df, test_df], axis=0, sort=False)
all_data['Province_State'].fillna('None', inplace=True)
all_data['ConfirmedCases'].fillna(0, inplace=True)
all_data['Fatalities'].fillna(0, inplace=True)
# Sentinel -1 marks rows that came from the other split, so train/test can be separated again below
all_data['Id'].fillna(-1, inplace=True)
all_data['ForecastId'].fillna(-1, inplace=True)
le = LabelEncoder()
all_data['Date'] = pd.to_datetime(all_data['Date'])
# LabelEncoder sorts the unique dates, so Day_num becomes an ordinal day index
all_data['Day_num'] = le.fit_transform(all_data.Date)
all_data['Day'] = all_data['Date'].dt.day
all_data['Month'] = all_data['Date'].dt.month
all_data['Year'] = all_data['Date'].dt.year
train = all_data[all_data['ForecastId'] == -1.0]
test = all_data[all_data['ForecastId'] != -1.0]
print(train.shape)
train.head()
code