Dataset fields:
path — string (length 13 to 17)
screenshot_names — sequence (length 1 to 873)
code — string (length 0 to 40.4k)
cell_type — string (1 unique value: "code")
2042602/cell_9
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
clf = LinearRegression()
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
plt.scatter(y_test, predictions)
code
2042602/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
from matplotlib import style
from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
style.use('fivethirtyeight')
import sklearn
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import seaborn as sns
from sklearn import metrics
from subprocess import check_output
data = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
data = data.dropna()
test = test.dropna()
data.describe()
code
2042602/cell_2
[ "text_plain_output_1.png" ]
from matplotlib import style
from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
style.use('fivethirtyeight')
import sklearn
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import seaborn as sns
from sklearn import metrics
from subprocess import check_output
data = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
print(data.shape)
print(test.shape)
code
2042602/cell_11
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LinearRegression
clf = LinearRegression()
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
print(metrics.mean_absolute_error(y_test, predictions))
code
2042602/cell_1
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from matplotlib import style
from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
style.use('fivethirtyeight')
import sklearn
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import seaborn as sns
from sklearn import metrics
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
data = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
data.describe()
code
2042602/cell_7
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
clf = LinearRegression()
clf.fit(X_train, y_train)
code
2042602/cell_8
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
clf = LinearRegression()
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
print(predictions)
code
2042602/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
from matplotlib import style
from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import style
style.use('fivethirtyeight')
import sklearn
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import seaborn as sns
from sklearn import metrics
from subprocess import check_output
data = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
data = data.dropna()
test = test.dropna()
print(data.shape)
print(test.shape)
code
2042602/cell_10
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
import seaborn as sns
clf = LinearRegression()
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
sns.distplot(y_test - predictions)
code
2042602/cell_12
[ "text_html_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LinearRegression
clf = LinearRegression()
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
print(metrics.mean_squared_error(y_test, predictions))
code
72109830/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd df_water = pd.read_csv('../input/water-potability/water_potability.csv') dict_cl_min = {'ph': 6.5, 'Hardness': 151, 'Solids': 0, 'Chloramines': 0, 'Sulfate': 0, 'Conductivity': 0, 'Organic_carbon': 0, 'Trihalomethanes': 0, 'Turbidity': 0} dict_cl_max = {'ph': 8.5, 'Hardness': 300, 'Solids': 1200, 'Chloramines': 4, 'Sulfate': 250, 'Conductivity': 400, 'Organic_carbon': 10, 'Trihalomethanes': 80, 'Turbidity': 5} set_cl_primary = {'Chloramines', 'Conductivity', 'Organic_carbon', 'Trihalomethanes', 'Turbidity'} set_cl_secondary = {'ph', 'Hardness', 'Solids', 'Sulfate'} df_cl_filter_applied_by_col = pd.DataFrame() for col, min_val in dict_cl_min.items(): df_cl_filter_applied_by_col[col] = df_water[col] >= min_val for col, max_val in dict_cl_max.items(): df_cl_filter_applied_by_col[col] = df_cl_filter_applied_by_col[col] & (df_water[col] <= max_val) df_cl_filter_applied_all = df_cl_filter_applied_by_col.all(axis=1) print('all filters result:', df_cl_filter_applied_all.value_counts(False), sep='\r\n') df_cl_filter_applied_primary = df_cl_filter_applied_by_col[set_cl_primary].all(axis=1) print('primary filters result:', df_cl_filter_applied_primary.value_counts(False), sep='\r\n') df_cl_filter_applied_secondary = df_cl_filter_applied_by_col[set_cl_secondary].all(axis=1) print('secondary filters result:', df_cl_filter_applied_secondary.value_counts(False), sep='\r\n') print(df_water[df_cl_filter_applied_primary])
code
72109830/cell_3
[ "text_html_output_1.png" ]
import pandas as pd
df_water = pd.read_csv('../input/water-potability/water_potability.csv')
df_water.describe()
code
72109830/cell_5
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd df_water = pd.read_csv('../input/water-potability/water_potability.csv') dict_cl_min = {'ph': 6.5, 'Hardness': 151, 'Solids': 0, 'Chloramines': 0, 'Sulfate': 0, 'Conductivity': 0, 'Organic_carbon': 0, 'Trihalomethanes': 0, 'Turbidity': 0} dict_cl_max = {'ph': 8.5, 'Hardness': 300, 'Solids': 1200, 'Chloramines': 4, 'Sulfate': 250, 'Conductivity': 400, 'Organic_carbon': 10, 'Trihalomethanes': 80, 'Turbidity': 5} set_cl_primary = {'Chloramines', 'Conductivity', 'Organic_carbon', 'Trihalomethanes', 'Turbidity'} set_cl_secondary = {'ph', 'Hardness', 'Solids', 'Sulfate'} df_cl_filter_applied_by_col = pd.DataFrame() for col, min_val in dict_cl_min.items(): df_cl_filter_applied_by_col[col] = df_water[col] >= min_val for col, max_val in dict_cl_max.items(): df_cl_filter_applied_by_col[col] = df_cl_filter_applied_by_col[col] & (df_water[col] <= max_val) print(df_cl_filter_applied_by_col[col].value_counts()) df_cl_filter_applied_by_col.head()
code
73080358/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
X = features.copy()
print(X.shape)
X_test = test.copy()
print(X_test.shape)
categorical_cols = [cname for cname in features.columns if features[cname].dtype == 'object']
numerical_cols = [cname for cname in features.columns if features[cname].dtype in ['int64', 'float64']]
my_cols = categorical_cols + numerical_cols
code
73080358/cell_6
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OrdinalEncoder
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
X = features.copy()
X_test = test.copy()
categorical_cols = [cname for cname in features.columns if features[cname].dtype == 'object']
numerical_cols = [cname for cname in features.columns if features[cname].dtype in ['int64', 'float64']]
my_cols = categorical_cols + numerical_cols
ordinal_encoder = OrdinalEncoder()
print('Before OEing: ', X.shape, X_test.shape)
X[categorical_cols] = ordinal_encoder.fit_transform(features[categorical_cols])
X_test[categorical_cols] = ordinal_encoder.transform(test[categorical_cols])
print('After OEing: ', X.shape, X_test.shape)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=17)
print(X_train.shape)
print(X_valid.shape)
code
73080358/cell_14
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OrdinalEncoder
from tpot import TPOTRegressor
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
X = features.copy()
X_test = test.copy()
categorical_cols = [cname for cname in features.columns if features[cname].dtype == 'object']
numerical_cols = [cname for cname in features.columns if features[cname].dtype in ['int64', 'float64']]
my_cols = categorical_cols + numerical_cols
ordinal_encoder = OrdinalEncoder()
X[categorical_cols] = ordinal_encoder.fit_transform(features[categorical_cols])
X_test[categorical_cols] = ordinal_encoder.transform(test[categorical_cols])
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=17)
scoring_function = 'accuracy'
tpot_rgr = TPOTRegressor(scoring=scoring_function, verbosity=2, random_state=42, cv=5, n_jobs=-1)
tpot_rgr.fit(X_train, y_train)
print(tpot_rgr.score(X_valid, y_valid))
code
73080358/cell_10
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OrdinalEncoder
from sklearn.preprocessing import StandardScaler
import pandas as pd
train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0)
test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0)
y = train['target']
features = train.drop(['target'], axis=1)
X = features.copy()
X_test = test.copy()
categorical_cols = [cname for cname in features.columns if features[cname].dtype == 'object']
numerical_cols = [cname for cname in features.columns if features[cname].dtype in ['int64', 'float64']]
my_cols = categorical_cols + numerical_cols
ordinal_encoder = OrdinalEncoder()
X[categorical_cols] = ordinal_encoder.fit_transform(features[categorical_cols])
X_test[categorical_cols] = ordinal_encoder.transform(test[categorical_cols])
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=17)
print('before Scaling:', X['cont0'].mean(), X_test['cont0'].mean())
scaler = StandardScaler()
X[numerical_cols] = scaler.fit_transform(X[numerical_cols])
X_test[numerical_cols] = scaler.fit_transform(X_test[numerical_cols])
print('after Scaling:', X['cont0'].mean(), X_test['cont0'].mean())
code
74052792/cell_6
[ "image_output_1.png" ]
from collections import Counter import numpy as np import pandas as pd import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import string import os from collections import Counter from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, VotingClassifier from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.neural_network import MLPClassifier from sklearn.svm import SVC from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder from sklearn.pipeline import Pipeline sns.set(style='white', context='notebook', palette='magma') markdown_data = pd.read_csv('../input/titanic/train.csv') final_approval_data = pd.read_csv('../input/titanic/test.csv') passenger_id_final = final_approval_data['PassengerId'] def detect_outliers(df, n, features): """ Takes a dataframe df of features and returns a list of the indices corresponding to the observations containing more than n outliers according to the Tukey method. """ outlier_indices = [] for col in features: Q1 = np.percentile(df[col], 25) Q3 = np.percentile(df[col], 75) IQR = Q3 - Q1 outlier_step = 1.5 * IQR outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index outlier_indices.extend(outlier_list_col) outlier_indices = Counter(outlier_indices) multiple_outliers = list((k for k, v in outlier_indices.items() if v > n)) return multiple_outliers Outliers_to_drop = detect_outliers(markdown_data, 2, ['Age', 'SibSp', 'Parch', 'Fare']) markdown_data.loc[Outliers_to_drop] markdown_data = markdown_data.drop(Outliers_to_drop).reset_index(drop=True) sns.heatmap(markdown_data.corr(), annot=True)
code
74052792/cell_11
[ "text_html_output_1.png" ]
from collections import Counter from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder import numpy as np import pandas as pd import seaborn as sns import string import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import string import os from collections import Counter from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, VotingClassifier from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.linear_model import LogisticRegression from sklearn.neighbors import KNeighborsClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.neural_network import MLPClassifier from sklearn.svm import SVC from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder from sklearn.pipeline import Pipeline sns.set(style='white', context='notebook', palette='magma') markdown_data = pd.read_csv('../input/titanic/train.csv') final_approval_data = pd.read_csv('../input/titanic/test.csv') passenger_id_final = final_approval_data['PassengerId'] def detect_outliers(df, n, features): """ Takes a dataframe df of features and returns a list of the indices corresponding to the observations containing more than n outliers according to the Tukey method. """ outlier_indices = [] for col in features: Q1 = np.percentile(df[col], 25) Q3 = np.percentile(df[col], 75) IQR = Q3 - Q1 outlier_step = 1.5 * IQR outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index outlier_indices.extend(outlier_list_col) outlier_indices = Counter(outlier_indices) multiple_outliers = list((k for k, v in outlier_indices.items() if v > n)) return multiple_outliers Outliers_to_drop = detect_outliers(markdown_data, 2, ['Age', 'SibSp', 'Parch', 'Fare']) markdown_data.loc[Outliers_to_drop] markdown_data = markdown_data.drop(Outliers_to_drop).reset_index(drop=True) train_len = len(markdown_data) data = pd.concat([markdown_data, final_approval_data], axis=0).reset_index(drop=True) data.isnull().sum() all_data = [markdown_data, final_approval_data] for dataset in all_data: dataset['Embarked'] = dataset['Embarked'].fillna(dataset['Embarked'].mode()[0]) dataset['Fare'] = dataset['Fare'].fillna(dataset['Fare'].median()) dataset['Cabin'] = dataset['Cabin'].fillna('M') index_NaN_age = list(dataset['Age'][dataset['Age'].isnull()].index) for i in index_NaN_age: age_med = dataset['Age'].median() age_pred = dataset['Age'][(dataset['SibSp'] == dataset.iloc[i]['SibSp']) & (dataset['Pclass'] == dataset.iloc[i]['Pclass'])].median() if not np.isnan(age_pred): dataset.loc[i, 'Age'] = age_pred else: dataset.loc[i, 'Age'] = age_med def is_fare(col): col.loc[(col <= 20) & (col >= 100)] = 0 return col def is_age(col): col.loc[(col <= 20) & (col >= 50)] = 0 return col def extract_surname(data): families = [] for i in range(len(data)): name = data.iloc[i] if '(' in name: name_no_bracket = name.split('(')[0] else: name_no_bracket = name family = name_no_bracket.split(',')[0] title = name_no_bracket.split(',')[1].strip().split(' ')[0] for c in string.punctuation: family = family.replace(c, '').strip() families.append(family) return families all_data = [markdown_data, final_approval_data] for dataset in all_data: dataset['Family_size'] = dataset['SibSp'] + dataset['Parch'] + 1 dataset['Family'] = extract_surname(dataset['Name']) dataset['Deck'] = 
dataset['Cabin'].map(lambda str: str[0]) dataset.loc[dataset[dataset['Deck'] == 'T'].index, 'Deck'] = 'A' dataset['Deck'] = dataset['Deck'].replace(['A', 'B', 'C'], 'ABC') dataset['Deck'] = dataset['Deck'].replace(['D', 'E'], 'DE') dataset['Deck'] = dataset['Deck'].replace(['F', 'G'], 'FG') dataset['Ticket_Frequency'] = dataset.groupby('Ticket')['Ticket'].transform('count') dataset['1_Class'] = dataset['Pclass'].map(lambda s: 1 if s == 1 else 0) dataset['2_Class'] = dataset['Pclass'].map(lambda s: 1 if s == 2 else 0) dataset['3_Class'] = dataset['Pclass'].map(lambda s: 1 if s == 3 else 0) dataset['FamilyG1'] = dataset['Family_size'].map(lambda s: 1 if s == 1 else 0) dataset['FamilyG2'] = dataset['Family_size'].map(lambda s: 1 if (s >= 2) & (s <= 4) else 0) dataset['FamilyG3'] = dataset['Family_size'].map(lambda s: 1 if (s >= 5) & (s <= 6) else 0) dataset['FamilyG4'] = dataset['Family_size'].map(lambda s: 1 if s > 6 else 0) dataset['Age'] = pd.qcut(dataset['Age'], q=10, duplicates='drop') dataset['Fare'] = pd.qcut(dataset['Fare'], q=13, duplicates='drop') dataset['Male'] = dataset['Sex'].map(lambda s: 1 if s == 'male' else 0) dataset['Female'] = dataset['Sex'].map(lambda s: 1 if s == 'female' else 0) dataset['S_embarked'] = dataset['Embarked'].map(lambda s: 1 if s == 'S' else 0) dataset['C_embarked'] = dataset['Embarked'].map(lambda s: 1 if s == 'C' else 0) dataset['Q_embarked'] = dataset['Embarked'].map(lambda s: 1 if s == 'Q' else 0) dataset['Title'] = dataset['Name'].str.split(', ', expand=True)[1].str.split('.', expand=True)[0] dataset['Title'] = dataset['Title'].replace(['Miss', 'Mrs', 'Ms', 'Mlle', 'Lady', 'Mme', 'the Countess', 'Dona'], 'Miss/Mrs/Ms') dataset['Title'] = dataset['Title'].replace(['Dr', 'Col', 'Major', 'Jonkheer', 'Capt', 'Sir', 'Don', 'Rev'], 'Dr/Military/Noble/Clergy') markdown_data = pd.concat([markdown_data, pd.get_dummies(markdown_data['Title'])], axis=1) final_approval_data = pd.concat([final_approval_data, pd.get_dummies(final_approval_data['Title'])], axis=1) markdown_data = pd.concat([markdown_data, pd.get_dummies(markdown_data['Deck'])], axis=1) final_approval_data = pd.concat([final_approval_data, pd.get_dummies(final_approval_data['Deck'])], axis=1) markdown_data['Age'] = LabelEncoder().fit_transform(markdown_data['Age']) final_approval_data['Age'] = LabelEncoder().fit_transform(final_approval_data['Age']) markdown_data['Fare'] = LabelEncoder().fit_transform(markdown_data['Fare']) final_approval_data['Fare'] = LabelEncoder().fit_transform(final_approval_data['Fare']) non_unique_families = [x for x in markdown_data['Family'].unique() if x in final_approval_data['Family'].unique()] non_unique_tickets = [x for x in markdown_data['Ticket'].unique() if x in final_approval_data['Ticket'].unique()] df_family_survival_rate = markdown_data.groupby('Family')['Survived', 'Family_size'].median() df_ticket_survival_rate = markdown_data.groupby('Ticket')['Survived', 'Ticket_Frequency'].median() family_rates = {} ticket_rates = {} for i in range(len(df_family_survival_rate)): if df_family_survival_rate.index[i] in non_unique_families and df_family_survival_rate.iloc[i, 1] > 1: family_rates[df_family_survival_rate.index[i]] = df_family_survival_rate.iloc[i, 0] for i in range(len(df_ticket_survival_rate)): if df_ticket_survival_rate.index[i] in non_unique_tickets and df_ticket_survival_rate.iloc[i, 1] > 1: ticket_rates[df_ticket_survival_rate.index[i]] = df_ticket_survival_rate.iloc[i, 0]
code
74052792/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
from collections import Counter import numpy as np import pandas as pd markdown_data = pd.read_csv('../input/titanic/train.csv') final_approval_data = pd.read_csv('../input/titanic/test.csv') passenger_id_final = final_approval_data['PassengerId'] def detect_outliers(df, n, features): """ Takes a dataframe df of features and returns a list of the indices corresponding to the observations containing more than n outliers according to the Tukey method. """ outlier_indices = [] for col in features: Q1 = np.percentile(df[col], 25) Q3 = np.percentile(df[col], 75) IQR = Q3 - Q1 outlier_step = 1.5 * IQR outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index outlier_indices.extend(outlier_list_col) outlier_indices = Counter(outlier_indices) multiple_outliers = list((k for k, v in outlier_indices.items() if v > n)) return multiple_outliers Outliers_to_drop = detect_outliers(markdown_data, 2, ['Age', 'SibSp', 'Parch', 'Fare']) markdown_data.loc[Outliers_to_drop]
code
74052792/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
from collections import Counter import numpy as np import pandas as pd markdown_data = pd.read_csv('../input/titanic/train.csv') final_approval_data = pd.read_csv('../input/titanic/test.csv') passenger_id_final = final_approval_data['PassengerId'] def detect_outliers(df, n, features): """ Takes a dataframe df of features and returns a list of the indices corresponding to the observations containing more than n outliers according to the Tukey method. """ outlier_indices = [] for col in features: Q1 = np.percentile(df[col], 25) Q3 = np.percentile(df[col], 75) IQR = Q3 - Q1 outlier_step = 1.5 * IQR outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index outlier_indices.extend(outlier_list_col) outlier_indices = Counter(outlier_indices) multiple_outliers = list((k for k, v in outlier_indices.items() if v > n)) return multiple_outliers Outliers_to_drop = detect_outliers(markdown_data, 2, ['Age', 'SibSp', 'Parch', 'Fare']) markdown_data.loc[Outliers_to_drop] markdown_data = markdown_data.drop(Outliers_to_drop).reset_index(drop=True) train_len = len(markdown_data) data = pd.concat([markdown_data, final_approval_data], axis=0).reset_index(drop=True) data.isnull().sum()
code
50239687/cell_13
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data = pd.read_csv('../input/titanic/train_and_test2.csv') data = data.rename(columns={'2urvived': 'Survived'}) data = data.drop(columns=['Passengerid', 'zero']) for i in range(1, 19): data = data.drop(columns=f'zero.{i}') data = data.dropna() fig = plt.figure(figsize = (20, 20)) ax_1 = fig.add_subplot(3, 3, 1) ax_2 = fig.add_subplot(3, 3, 2) ax_3 = fig.add_subplot(3, 3, 3) ax_4 = fig.add_subplot(3, 3, 4) ax_5 = fig.add_subplot(3, 3, 5) ax_6 = fig.add_subplot(3, 3, 6) ax_7 = fig.add_subplot(3, 3, 7) ax_8 = fig.add_subplot(3, 3, 8) ax_9 = fig.add_subplot(3, 3, 9) sns.lineplot(data = data, x = "Age", y = "Survived", ax = ax_1) sns.lineplot(data = data, x = "Sex", y = "Survived", ax = ax_2) sns.lineplot(data = data, x = "Fare", y = "Survived", ax = ax_3) sns.lineplot(data = data, x = "Pclass", y = "Survived", ax = ax_4) sns.lineplot(data = data, x = "sibsp", y = "Survived", ax = ax_5) sns.lineplot(data = data, x = "Parch", y = "Survived", ax = ax_6) sns.lineplot(data = data, x = "Embarked", y = "Survived", ax = ax_7) sns.lineplot(data = data, x = "Age", y = "Fare", ax = ax_8) sns.lineplot(data = data, x = "Sex", y = "Fare", ax = ax_9) plt.show() fig = plt.figure(figsize = (20, 20)) ax_1 = fig.add_subplot(3, 3, 1) ax_2 = fig.add_subplot(3, 3, 2) ax_3 = fig.add_subplot(3, 3, 3) ax_4 = fig.add_subplot(3, 3, 4) ax_5 = fig.add_subplot(3, 3, 5) ax_6 = fig.add_subplot(3, 3, 6) ax_7 = fig.add_subplot(3, 3, 7) ax_8 = fig.add_subplot(3, 3, 8) ax_9 = fig.add_subplot(3, 3, 9) sns.barplot(data = data, x = "Age", y = "Survived", ax = ax_1) sns.barplot(data = data, x = "Sex", y = "Survived", ax = ax_2) sns.barplot(data = data, x = "Fare", y = "Survived", ax = ax_3) sns.barplot(data = data, x = "Pclass", y = "Survived", ax = ax_4) sns.barplot(data = data, x = "sibsp", y = "Survived", ax = ax_5) sns.barplot(data = data, x = "Parch", y = "Survived", ax = ax_6) sns.barplot(data = data, x = "Embarked", y = "Survived", ax = ax_7) sns.barplot(data = data, x = "Age", y = "Fare", ax = ax_8) sns.barplot(data = data, x = "Sex", y = "Fare", ax = ax_9) plt.show() sns.pairplot(data) plt.show()
code
50239687/cell_9
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/titanic/train_and_test2.csv')
data = data.rename(columns={'2urvived': 'Survived'})
data = data.drop(columns=['Passengerid', 'zero'])
for i in range(1, 19):
    data = data.drop(columns=f'zero.{i}')
data = data.dropna()
print(data.size // 8)
code
50239687/cell_6
[ "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/titanic/train_and_test2.csv')
data = data.rename(columns={'2urvived': 'Survived'})
data = data.drop(columns=['Passengerid', 'zero'])
for i in range(1, 19):
    data = data.drop(columns=f'zero.{i}')
data.head(10)
code
50239687/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data = pd.read_csv('../input/titanic/train_and_test2.csv') data = data.rename(columns={'2urvived': 'Survived'}) data = data.drop(columns=['Passengerid', 'zero']) for i in range(1, 19): data = data.drop(columns=f'zero.{i}') data = data.dropna() fig = plt.figure(figsize=(20, 20)) ax_1 = fig.add_subplot(3, 3, 1) ax_2 = fig.add_subplot(3, 3, 2) ax_3 = fig.add_subplot(3, 3, 3) ax_4 = fig.add_subplot(3, 3, 4) ax_5 = fig.add_subplot(3, 3, 5) ax_6 = fig.add_subplot(3, 3, 6) ax_7 = fig.add_subplot(3, 3, 7) ax_8 = fig.add_subplot(3, 3, 8) ax_9 = fig.add_subplot(3, 3, 9) sns.lineplot(data=data, x='Age', y='Survived', ax=ax_1) sns.lineplot(data=data, x='Sex', y='Survived', ax=ax_2) sns.lineplot(data=data, x='Fare', y='Survived', ax=ax_3) sns.lineplot(data=data, x='Pclass', y='Survived', ax=ax_4) sns.lineplot(data=data, x='sibsp', y='Survived', ax=ax_5) sns.lineplot(data=data, x='Parch', y='Survived', ax=ax_6) sns.lineplot(data=data, x='Embarked', y='Survived', ax=ax_7) sns.lineplot(data=data, x='Age', y='Fare', ax=ax_8) sns.lineplot(data=data, x='Sex', y='Fare', ax=ax_9) plt.show()
code
50239687/cell_1
[ "image_output_1.png" ]
from IPython.display import Image
import os
from IPython.display import Image
Image(filename='../input/titlecw/title.png')
code
50239687/cell_7
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/titanic/train_and_test2.csv')
data = data.rename(columns={'2urvived': 'Survived'})
data = data.drop(columns=['Passengerid', 'zero'])
for i in range(1, 19):
    data = data.drop(columns=f'zero.{i}')
sns.heatmap(data.isna())
plt.show()
code
50239687/cell_8
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/titanic/train_and_test2.csv')
data = data.rename(columns={'2urvived': 'Survived'})
data = data.drop(columns=['Passengerid', 'zero'])
for i in range(1, 19):
    data = data.drop(columns=f'zero.{i}')
print(data.size // 8)
code
50239687/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data = pd.read_csv('../input/titanic/train_and_test2.csv') data = data.rename(columns={'2urvived': 'Survived'}) data = data.drop(columns=['Passengerid', 'zero']) for i in range(1, 19): data = data.drop(columns=f'zero.{i}') data = data.dropna() fig = plt.figure(figsize = (20, 20)) ax_1 = fig.add_subplot(3, 3, 1) ax_2 = fig.add_subplot(3, 3, 2) ax_3 = fig.add_subplot(3, 3, 3) ax_4 = fig.add_subplot(3, 3, 4) ax_5 = fig.add_subplot(3, 3, 5) ax_6 = fig.add_subplot(3, 3, 6) ax_7 = fig.add_subplot(3, 3, 7) ax_8 = fig.add_subplot(3, 3, 8) ax_9 = fig.add_subplot(3, 3, 9) sns.lineplot(data = data, x = "Age", y = "Survived", ax = ax_1) sns.lineplot(data = data, x = "Sex", y = "Survived", ax = ax_2) sns.lineplot(data = data, x = "Fare", y = "Survived", ax = ax_3) sns.lineplot(data = data, x = "Pclass", y = "Survived", ax = ax_4) sns.lineplot(data = data, x = "sibsp", y = "Survived", ax = ax_5) sns.lineplot(data = data, x = "Parch", y = "Survived", ax = ax_6) sns.lineplot(data = data, x = "Embarked", y = "Survived", ax = ax_7) sns.lineplot(data = data, x = "Age", y = "Fare", ax = ax_8) sns.lineplot(data = data, x = "Sex", y = "Fare", ax = ax_9) plt.show() fig = plt.figure(figsize = (20, 20)) ax_1 = fig.add_subplot(3, 3, 1) ax_2 = fig.add_subplot(3, 3, 2) ax_3 = fig.add_subplot(3, 3, 3) ax_4 = fig.add_subplot(3, 3, 4) ax_5 = fig.add_subplot(3, 3, 5) ax_6 = fig.add_subplot(3, 3, 6) ax_7 = fig.add_subplot(3, 3, 7) ax_8 = fig.add_subplot(3, 3, 8) ax_9 = fig.add_subplot(3, 3, 9) sns.barplot(data = data, x = "Age", y = "Survived", ax = ax_1) sns.barplot(data = data, x = "Sex", y = "Survived", ax = ax_2) sns.barplot(data = data, x = "Fare", y = "Survived", ax = ax_3) sns.barplot(data = data, x = "Pclass", y = "Survived", ax = ax_4) sns.barplot(data = data, x = "sibsp", y = "Survived", ax = ax_5) sns.barplot(data = data, x = "Parch", y = "Survived", ax = ax_6) sns.barplot(data = data, x = "Embarked", y = "Survived", ax = ax_7) sns.barplot(data = data, x = "Age", y = "Fare", ax = ax_8) sns.barplot(data = data, x = "Sex", y = "Fare", ax = ax_9) plt.show() titles = ['Age', 'Fare', 'Sex', 'sibsp', 'Parch', 'Pclass', 'Embarked', 'Survived'] for i in titles: print(f'\nExpectancy of {i} =', round(data[f'{i}'].mean(), 3)) print(f'Standard deviation of {i} =', round(data[f'{i}'].std(), 3)) print('\n')
code
50239687/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data = pd.read_csv('../input/titanic/train_and_test2.csv') data = data.rename(columns={'2urvived': 'Survived'}) data = data.drop(columns=['Passengerid', 'zero']) for i in range(1, 19): data = data.drop(columns=f'zero.{i}') data = data.dropna() fig = plt.figure(figsize = (20, 20)) ax_1 = fig.add_subplot(3, 3, 1) ax_2 = fig.add_subplot(3, 3, 2) ax_3 = fig.add_subplot(3, 3, 3) ax_4 = fig.add_subplot(3, 3, 4) ax_5 = fig.add_subplot(3, 3, 5) ax_6 = fig.add_subplot(3, 3, 6) ax_7 = fig.add_subplot(3, 3, 7) ax_8 = fig.add_subplot(3, 3, 8) ax_9 = fig.add_subplot(3, 3, 9) sns.lineplot(data = data, x = "Age", y = "Survived", ax = ax_1) sns.lineplot(data = data, x = "Sex", y = "Survived", ax = ax_2) sns.lineplot(data = data, x = "Fare", y = "Survived", ax = ax_3) sns.lineplot(data = data, x = "Pclass", y = "Survived", ax = ax_4) sns.lineplot(data = data, x = "sibsp", y = "Survived", ax = ax_5) sns.lineplot(data = data, x = "Parch", y = "Survived", ax = ax_6) sns.lineplot(data = data, x = "Embarked", y = "Survived", ax = ax_7) sns.lineplot(data = data, x = "Age", y = "Fare", ax = ax_8) sns.lineplot(data = data, x = "Sex", y = "Fare", ax = ax_9) plt.show() fig = plt.figure(figsize = (20, 20)) ax_1 = fig.add_subplot(3, 3, 1) ax_2 = fig.add_subplot(3, 3, 2) ax_3 = fig.add_subplot(3, 3, 3) ax_4 = fig.add_subplot(3, 3, 4) ax_5 = fig.add_subplot(3, 3, 5) ax_6 = fig.add_subplot(3, 3, 6) ax_7 = fig.add_subplot(3, 3, 7) ax_8 = fig.add_subplot(3, 3, 8) ax_9 = fig.add_subplot(3, 3, 9) sns.barplot(data = data, x = "Age", y = "Survived", ax = ax_1) sns.barplot(data = data, x = "Sex", y = "Survived", ax = ax_2) sns.barplot(data = data, x = "Fare", y = "Survived", ax = ax_3) sns.barplot(data = data, x = "Pclass", y = "Survived", ax = ax_4) sns.barplot(data = data, x = "sibsp", y = "Survived", ax = ax_5) sns.barplot(data = data, x = "Parch", y = "Survived", ax = ax_6) sns.barplot(data = data, x = "Embarked", y = "Survived", ax = ax_7) sns.barplot(data = data, x = "Age", y = "Fare", ax = ax_8) sns.barplot(data = data, x = "Sex", y = "Fare", ax = ax_9) plt.show() sns.heatmap(data=data.corr(), linewidths=0.5, annot=True) plt.show()
code
50239687/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns data = pd.read_csv('../input/titanic/train_and_test2.csv') data = data.rename(columns={'2urvived': 'Survived'}) data = data.drop(columns=['Passengerid', 'zero']) for i in range(1, 19): data = data.drop(columns=f'zero.{i}') data = data.dropna() fig = plt.figure(figsize = (20, 20)) ax_1 = fig.add_subplot(3, 3, 1) ax_2 = fig.add_subplot(3, 3, 2) ax_3 = fig.add_subplot(3, 3, 3) ax_4 = fig.add_subplot(3, 3, 4) ax_5 = fig.add_subplot(3, 3, 5) ax_6 = fig.add_subplot(3, 3, 6) ax_7 = fig.add_subplot(3, 3, 7) ax_8 = fig.add_subplot(3, 3, 8) ax_9 = fig.add_subplot(3, 3, 9) sns.lineplot(data = data, x = "Age", y = "Survived", ax = ax_1) sns.lineplot(data = data, x = "Sex", y = "Survived", ax = ax_2) sns.lineplot(data = data, x = "Fare", y = "Survived", ax = ax_3) sns.lineplot(data = data, x = "Pclass", y = "Survived", ax = ax_4) sns.lineplot(data = data, x = "sibsp", y = "Survived", ax = ax_5) sns.lineplot(data = data, x = "Parch", y = "Survived", ax = ax_6) sns.lineplot(data = data, x = "Embarked", y = "Survived", ax = ax_7) sns.lineplot(data = data, x = "Age", y = "Fare", ax = ax_8) sns.lineplot(data = data, x = "Sex", y = "Fare", ax = ax_9) plt.show() fig = plt.figure(figsize=(20, 20)) ax_1 = fig.add_subplot(3, 3, 1) ax_2 = fig.add_subplot(3, 3, 2) ax_3 = fig.add_subplot(3, 3, 3) ax_4 = fig.add_subplot(3, 3, 4) ax_5 = fig.add_subplot(3, 3, 5) ax_6 = fig.add_subplot(3, 3, 6) ax_7 = fig.add_subplot(3, 3, 7) ax_8 = fig.add_subplot(3, 3, 8) ax_9 = fig.add_subplot(3, 3, 9) sns.barplot(data=data, x='Age', y='Survived', ax=ax_1) sns.barplot(data=data, x='Sex', y='Survived', ax=ax_2) sns.barplot(data=data, x='Fare', y='Survived', ax=ax_3) sns.barplot(data=data, x='Pclass', y='Survived', ax=ax_4) sns.barplot(data=data, x='sibsp', y='Survived', ax=ax_5) sns.barplot(data=data, x='Parch', y='Survived', ax=ax_6) sns.barplot(data=data, x='Embarked', y='Survived', ax=ax_7) sns.barplot(data=data, x='Age', y='Fare', ax=ax_8) sns.barplot(data=data, x='Sex', y='Fare', ax=ax_9) plt.show()
code
50239687/cell_5
[ "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/titanic/train_and_test2.csv')
data.head(10)
code
122258520/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np import pandas as pd import tensorflow as tf train = pd.read_csv('/kaggle/input/godaddy-microbusiness-density-forecasting/train.csv') test = pd.read_csv('/kaggle/input/godaddy-microbusiness-density-forecasting/test.csv') revealed_test = pd.read_csv('/kaggle/input/godaddy-microbusiness-density-forecasting/revealed_test.csv') census = pd.read_csv('/kaggle/input/godaddy-microbusiness-density-forecasting/census_starter.csv') revealed_test_dates = revealed_test.first_day_of_month.unique() train = pd.concat([train, revealed_test]) train.index = np.arange(0, len(train)) dates = train.first_day_of_month.unique() dates unique_cfips = list(train.cfips.unique()) def smape(y_true, y_pred): denominator = (y_true + tf.abs(y_pred)) / 200.0 diff = tf.abs(y_true - y_pred) / denominator diff = tf.where(denominator == 0, 0.0, diff) return tf.reduce_mean(diff) def cauclate_smape(item): cfips = item.iloc[0].cfips y_true = tf.constant(item['microbusiness_density'], dtype=tf.float64) y_pred = tf.constant(item['prediction'], dtype=tf.float64) return smape(y_true, y_pred).numpy() train['prediction'] = 0 for i in range(len(dates) - 5): date = dates[i] df = train[train.first_day_of_month == date].copy() last_value_dict = dict() for j in range(len(df)): last_value_dict[df.iloc[j].cfips] = df.iloc[j].microbusiness_density validate_dates = dates[i + 3:i + 6] for date in validate_dates: df = train[train.first_day_of_month == date].copy() train.loc[train.first_day_of_month == date, 'prediction'] = df.cfips.apply(lambda cfips: last_value_dict[cfips]) smapes = train[train.first_day_of_month.isin(validate_dates)].groupby('first_day_of_month').apply(cauclate_smape) print(f'Last Value Date:{date} Validation Date:{validate_dates}') print(smapes) print('Validation Score') print(np.mean(smapes))
code
122258520/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
train = pd.read_csv('/kaggle/input/godaddy-microbusiness-density-forecasting/train.csv')
test = pd.read_csv('/kaggle/input/godaddy-microbusiness-density-forecasting/test.csv')
revealed_test = pd.read_csv('/kaggle/input/godaddy-microbusiness-density-forecasting/revealed_test.csv')
census = pd.read_csv('/kaggle/input/godaddy-microbusiness-density-forecasting/census_starter.csv')
revealed_test_dates = revealed_test.first_day_of_month.unique()
train = pd.concat([train, revealed_test])
train.index = np.arange(0, len(train))
train.head()
code
122258520/cell_10
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
train = pd.read_csv('/kaggle/input/godaddy-microbusiness-density-forecasting/train.csv')
test = pd.read_csv('/kaggle/input/godaddy-microbusiness-density-forecasting/test.csv')
revealed_test = pd.read_csv('/kaggle/input/godaddy-microbusiness-density-forecasting/revealed_test.csv')
census = pd.read_csv('/kaggle/input/godaddy-microbusiness-density-forecasting/census_starter.csv')
revealed_test_dates = revealed_test.first_day_of_month.unique()
train = pd.concat([train, revealed_test])
train.index = np.arange(0, len(train))
COLS = ['GEO_ID', 'NAME', 'S0101_C01_026E']
df2020 = pd.read_csv('/kaggle/input/census-data-for-godaddy/ACSST5Y2020.S0101-Data.csv', usecols=COLS)
df2020 = df2020.iloc[1:]
df2020['S0101_C01_026E'] = df2020['S0101_C01_026E'].astype('int')
df2021 = pd.read_csv('/kaggle/input/census-data-for-godaddy/ACSST5Y2021.S0101-Data.csv', usecols=COLS)
df2021 = df2021.iloc[1:]
df2021['S0101_C01_026E'] = df2021['S0101_C01_026E'].astype('int')
df2020['cfips'] = df2020.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
adult2020 = df2020.set_index('cfips').S0101_C01_026E.to_dict()
df2021['cfips'] = df2021.GEO_ID.apply(lambda x: int(x.split('US')[-1]))
adult2021 = df2021.set_index('cfips').S0101_C01_026E.to_dict()
code
122258520/cell_12
[ "text_plain_output_1.png" ]
from tqdm.notebook import tqdm import numpy as np import pandas as pd import tensorflow as tf train = pd.read_csv('/kaggle/input/godaddy-microbusiness-density-forecasting/train.csv') test = pd.read_csv('/kaggle/input/godaddy-microbusiness-density-forecasting/test.csv') revealed_test = pd.read_csv('/kaggle/input/godaddy-microbusiness-density-forecasting/revealed_test.csv') census = pd.read_csv('/kaggle/input/godaddy-microbusiness-density-forecasting/census_starter.csv') revealed_test_dates = revealed_test.first_day_of_month.unique() train = pd.concat([train, revealed_test]) train.index = np.arange(0, len(train)) dates = train.first_day_of_month.unique() dates unique_cfips = list(train.cfips.unique()) def smape(y_true, y_pred): denominator = (y_true + tf.abs(y_pred)) / 200.0 diff = tf.abs(y_true - y_pred) / denominator diff = tf.where(denominator == 0, 0.0, diff) return tf.reduce_mean(diff) def cauclate_smape(item): cfips = item.iloc[0].cfips y_true = tf.constant(item['microbusiness_density'], dtype=tf.float64) y_pred = tf.constant(item['prediction'], dtype=tf.float64) return smape(y_true, y_pred).numpy() train['prediction'] = 0 for i in range(len(dates) - 5): date = dates[i] df = train[train.first_day_of_month == date].copy() last_value_dict = dict() for j in range(len(df)): last_value_dict[df.iloc[j].cfips] = df.iloc[j].microbusiness_density validate_dates = dates[i + 3:i + 6] for date in validate_dates: df = train[train.first_day_of_month == date].copy() train.loc[train.first_day_of_month == date, 'prediction'] = df.cfips.apply(lambda cfips: last_value_dict[cfips]) smapes = train[train.first_day_of_month.isin(validate_dates)].groupby('first_day_of_month').apply(cauclate_smape) COLS = ['GEO_ID', 'NAME', 'S0101_C01_026E'] df2020 = pd.read_csv('/kaggle/input/census-data-for-godaddy/ACSST5Y2020.S0101-Data.csv', usecols=COLS) df2020 = df2020.iloc[1:] df2020['S0101_C01_026E'] = df2020['S0101_C01_026E'].astype('int') df2021 = pd.read_csv('/kaggle/input/census-data-for-godaddy/ACSST5Y2021.S0101-Data.csv', usecols=COLS) df2021 = df2021.iloc[1:] df2021['S0101_C01_026E'] = df2021['S0101_C01_026E'].astype('int') df2020['cfips'] = df2020.GEO_ID.apply(lambda x: int(x.split('US')[-1])) adult2020 = df2020.set_index('cfips').S0101_C01_026E.to_dict() df2021['cfips'] = df2021.GEO_ID.apply(lambda x: int(x.split('US')[-1])) adult2021 = df2021.set_index('cfips').S0101_C01_026E.to_dict() zero_cfips = list(train[train.microbusiness_density == 0].cfips.unique()) for cfips in zero_cfips: df = train[train.cfips == cfips].copy() df['microbusiness_density_imputation'] = df.iloc[-1][CFG.target_field] train.loc[train.cfips == cfips, CFG.target_field] = df['microbusiness_density_imputation'] train['active'].replace(0, 1, inplace=True) for cfips in tqdm(test.cfips.unique()): test.loc[test.cfips == cfips, CFG.target_field] = train[train.cfips == cfips].iloc[-1][CFG.target_field] test['adult2020'] = test.cfips.map(adult2020) test['adult2021'] = test.cfips.map(adult2021) test.microbusiness_density = test.microbusiness_density * test.adult2020 / test.adult2021 test[['row_id', CFG.target_field]].to_csv('submission.csv', index=False)
code
122258520/cell_5
[ "application_vnd.jupyter.stderr_output_2.png", "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
train = pd.read_csv('/kaggle/input/godaddy-microbusiness-density-forecasting/train.csv')
test = pd.read_csv('/kaggle/input/godaddy-microbusiness-density-forecasting/test.csv')
revealed_test = pd.read_csv('/kaggle/input/godaddy-microbusiness-density-forecasting/revealed_test.csv')
census = pd.read_csv('/kaggle/input/godaddy-microbusiness-density-forecasting/census_starter.csv')
revealed_test_dates = revealed_test.first_day_of_month.unique()
train = pd.concat([train, revealed_test])
train.index = np.arange(0, len(train))
dates = train.first_day_of_month.unique()
dates
code
105190901/cell_13
[ "text_plain_output_1.png" ]
data = get_data(80)
code
105190901/cell_15
[ "text_html_output_1.png" ]
import pandas as pd # to show result
df = pd.DataFrame(data=data)
df.head(1)
code
105190901/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd # to show result
df = pd.DataFrame(data=data)
print(f'Number of rows is {df.shape[0]}')
print(f'Number of Nones is {df.isna().sum().sum()} in a column {df.columns[df.isna().any()].tolist()[0]}')
code
18105662/cell_13
[ "text_plain_output_1.png" ]
from concurrent.futures import ProcessPoolExecutor as PoolExecutor, as_completed from google.cloud import automl_v1beta1 from tqdm import tqdm import operator import os import pandas as pd model_id = 'ICN8032497920993558639' score_threshold = 1e-06 gcp_service_account_json = '/kaggle/input/gcloudserviceaccountkey/kaggle-playground-170215-4ece6a076f22.json' gcp_project_id = 'kaggle-playground-170215' def get_prediction(file_path, project_id, model_id): name = 'projects/{}/locations/us-central1/models/{}'.format(project_id, model_id) with open(file_path, 'rb') as ff: content = ff.read() payload = {'image': {'image_bytes': content}} params = {'score_threshold': str(score_threshold)} request = prediction_client.predict(name, payload, params) return request def make_int(s): try: int(s) return int(s) except ValueError: return 1109 def process(i, df_sample_submission, project_id): id_code = df_sample_submission.index[i] if id_code in df_solution.index: return None exp_len = id_code.find('_') experiment = id_code[0:exp_len] plate = id_code[exp_len + 1:exp_len + 2] well = id_code[exp_len + 3:] pred_dict = {} res = [] for site in range(1, 3): file_path = '../input/recursion_rgb_512/testrgb512/testRGB512/{}_{}_{}_s{}.png'.format(experiment, plate, well, site) try: prediction_request = get_prediction(file_path, project_id, model_id) except Exception as e: return None for prediction in prediction_request.payload: label = make_int(prediction.display_name) if label <= 1108: pred_dict[label] = float(prediction.classification.score) sirna_prediction = max(pred_dict.items(), key=operator.itemgetter(1))[0] confidence = pred_dict[sirna_prediction] res.append({'id_code': id_code, 'site': site, 'sirna_prediction': sirna_prediction, 'confidence': confidence}) return res def generated_predictions_with_pool_executor(max_workers, gcp_project_id): results = [] df_sample_submission = pd.read_csv('../input/recursion-cellular-image-classification/sample_submission.csv', index_col=[0]) with PoolExecutor(max_workers=max_workers) as executor: futures_list = [executor.submit(process, i, df_sample_submission, gcp_project_id) for i in range(len(df_sample_submission))] for f in tqdm(as_completed(futures_list), total=len(futures_list)): results.append(f.result()) nb_escaped = 0 for r in results: if r is None: nb_escaped += 1 continue for site in r: df_solution.loc[site['id_code'], ['site{}_sirna'.format(site['site']), 'site{}_confidence'.format(site['site'])]] = [site['sirna_prediction'], site['confidence']] df_solution.to_csv('./submissions/submission_{}.csv'.format(model_id)) solution_file_path = ',/submissions/submission_{}.csv'.format(model_id) if os.path.exists(solution_file_path): df_solution = pd.read_csv(solution_file_path, index_col=[0]) else: df_solution = pd.DataFrame(columns=['site1_sirna', 'site1_confidence', 'site2_sirna', 'site2_confidence']) df_solution.index.name = 'id_code' prediction_client = automl_v1beta1.PredictionServiceClient.from_service_account_json(gcp_service_account_json) generated_predictions_with_pool_executor(20, gcp_project_id) generated_predictions_with_pool_executor(5, gcp_project_id) generated_predictions_with_pool_executor(5, gcp_project_id)
code
18105662/cell_4
[ "text_plain_output_5.png", "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_4.png", "application_vnd.jupyter.stderr_output_6.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
# AutoML package
!pip install google-cloud-automl
code
34134222/cell_2
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34134222/cell_7
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
master_df = pd.read_csv('/kaggle/input/tft-match-data/TFT_Master_MatchData.csv')
time_last = master_df[['gameId', 'gameDuration']].drop_duplicates().gameDuration.agg(['min', 'mean', 'max']).to_frame()
time_last.gameDuration = time_last.gameDuration.apply(lambda x: round(x / 60))
time_last.rename(columns={'gameDuration': 'gameDuration (min)'}, inplace=True)
time_last
code
34134222/cell_8
[ "text_html_output_2.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.express as px
master_df = pd.read_csv('/kaggle/input/tft-match-data/TFT_Master_MatchData.csv')
time_last = master_df[['gameId', 'gameDuration']].drop_duplicates().gameDuration.agg(['min', 'mean', 'max']).to_frame()
time_last.gameDuration = time_last.gameDuration.apply(lambda x: round(x / 60))
time_last.rename(columns={'gameDuration': 'gameDuration (min)'}, inplace=True)
time_last
px.bar(time_last, x=time_last.index, y=time_last.values)
code
34134222/cell_5
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
master_df = pd.read_csv('/kaggle/input/tft-match-data/TFT_Master_MatchData.csv')
master_df.head(8)
code
74070921/cell_4
[ "text_plain_output_1.png" ]
from configparser import ConfigParser from configparser import ConfigParser import matplotlib.pyplot as plt import matplotlib.pyplot as plt import os import os import os import tensorflow as tf import tensorflow as tf import numpy as np import pandas as pd import os # -*- coding: utf-8 -*- # @Author: Yulin Liu # @Date: 2018-10-10 14:23:23 # @Last Modified by: Yulin Liu # @Last Modified time: 2018-10-10 22:20:47 import numpy as np import tensorflow as tf import os from configparser import ConfigParser # from rnn_encoder_decoder import LSTM_model import matplotlib.pyplot as plt class visual_graph: def __init__(self, conf_path, restored_model_path): self.restored_model_path = restored_model_path self.conf_path = conf_path self.load_configs() def load_configs(self): parser = ConfigParser(os.environ) parser.read(self.conf_path) config_header = 'nn' self.n_input = parser.getint(config_header, 'n_input') self.n_channels = parser.getint('convolution', 'n_channels') self.n_controled_var = parser.getint('lstm', 'n_controled_var') self.n_encode = parser.getint(config_header, 'n_encode') self.state_size = parser.getint('lstm', 'n_cell_dim') self.n_layer = parser.getint('lstm', 'n_lstm_layers') # Number of contextual samples to include self.batch_size = parser.getint(config_header, 'batch_size') def define_placeholder(self): # define placeholder self.input_encode_tensor = tf.placeholder(dtype = tf.float32, shape = [None, None, self.n_encode], name = 'encode_tensor') self.seq_len_encode = tf.placeholder(dtype = tf.int32, shape = [None], name = 'seq_length_encode') self.input_tensor = tf.placeholder(dtype = tf.float32, shape = [None, None, self.n_input, self.n_input, self.n_channels], name = 'decode_feature_map') self.input_decode_coords_tensor = tf.placeholder(dtype = tf.float32, shape = [None, None, self.n_controled_var], name = 'decode_coords') self.target = tf.placeholder(dtype = tf.float32, shape = [None, None, self.n_controled_var], name = 'target') self.target_end = tf.placeholder(dtype = tf.float32, shape = [None, None, 1], name = 'target_end') self.target_end_neg = tf.placeholder(dtype = tf.float32, shape = [None, None, 1], name = 'target_end_neg') self.seq_length = tf.placeholder(dtype = tf.int32, shape = [None], name = 'seq_length_decode') return def launchGraph(self): self.define_placeholder() self.MODEL = LSTM_model(conf_path = self.conf_path, batch_x = self.input_encode_tensor, seq_length = self.seq_len_encode, n_input = self.n_encode, batch_x_decode = self.input_tensor, batch_xcoords_decode = self.input_decode_coords_tensor, seq_length_decode = self.seq_length, n_input_decode = self.n_input, target = self.target, train = False, weight_summary = False) return def feed_fwd_convlayer(self, feed_input): with tf.device('/cpu:0'): self.graph = tf.Graph() self.launchGraph() self.sess = tf.Session() self.saver = tf.train.Saver() self.saver.restore(self.sess, self.restored_model_path) self.sess.graph.finalize() self.weights = self._return_weights() conv1_out, conv2_out, conv3_out = self._feed_fwd_convlayer(feed_input) self.sess.close() return conv1_out, conv2_out, conv3_out def _return_weights(self): weight_list = tf.trainable_variables() weights = {} for v in weight_list: weights[v.name] = self.sess.run(v) return weights def _feed_fwd_convlayer(self, feed_input): # feed_input should have the shape of [?, ?, 20, 20, 4] conv1_out = self.sess.run(self.MODEL.conv1, feed_dict={self.input_tensor: feed_input}) conv2_out = self.sess.run(self.MODEL.conv2, feed_dict={self.input_tensor: feed_input}) 
conv3_out = self.sess.run(self.MODEL.conv3, feed_dict={self.input_tensor: feed_input}) return conv1_out, conv2_out, conv3_out def visualize_raw_weights(weight_var, fig_size = (8, 4)): n_layers = weight_var.shape[3] n_channels = weight_var.shape[2] fig, axs = plt.subplots(n_channels, n_layers, figsize=fig_size, facecolor='w', edgecolor='k') axs = axs.ravel() for i in range(n_channels): for j in range(n_layers): axs[n_layers * i + j].imshow(weight_var[:, :, i, j], cmap = 'bwr', vmax = weight_var.max(), vmin = weight_var.min()) axs[n_layers * i + j].set_axis_off() plt.show() return fig def visualize_conv_layers(conv_layer, nrow, ncol, fig_size): print(conv_layer.shape) # n_layers = weight_var.shape[3] # n_channels = weight_var.shape[2] fig, axs = plt.subplots(nrow, ncol, figsize=fig_size, facecolor='w', edgecolor='k') fig.subplots_adjust(wspace = 0.01, hspace = 0.01) axs = axs.ravel() for i in range(nrow): for j in range(ncol): axs[ncol * i + j].imshow(conv_layer[j, :, :, i], cmap = 'bwr', vmax = conv_layer[:, :, :, i].max(), vmin = conv_layer[:, :, :, i].min(), origin = 'lower') axs[ncol * i + j].set_axis_off() plt.show() return fig ''' Example Code: ''' ''' tf.reset_default_graph() restored_model_path = 'visual_network/model.ckpt-99' config_path = 'configs/encoder_decoder_nn.ini' visual_graph_class = visual_graph(config_path, restored_model_path) visual_graph_class.restore_model() weights = visual_graph_class.weights visualize_raw_weights(weight_var=weights['wc1:0'], fig_size = (8, 2)) visualize_raw_weights(weight_var=weights['wc2:0'], fig_size = (8,4)) visualize_raw_weights(weight_var=weights['wc3:0'], fig_size = (8,4)) ''' import numpy as np import tensorflow as tf import os from configparser import ConfigParser import matplotlib.pyplot as plt class visual_graph: def __init__(self, conf_path, restored_model_path): self.restored_model_path = restored_model_path self.conf_path = conf_path self.load_configs() def load_configs(self): parser = ConfigParser(os.environ) parser.read(self.conf_path) config_header = 'nn' self.n_input = parser.getint(config_header, 'n_input') self.n_channels = parser.getint('convolution', 'n_channels') self.n_controled_var = parser.getint('input_dimension', 'n_controled_var') self.n_coords_var = parser.getint('input_dimension', 'n_coords_var') self.n_encode = parser.getint(config_header, 'n_encode') self.state_size = parser.getint('lstm', 'n_cell_dim') self.n_layer = parser.getint('lstm', 'n_lstm_layers') self.batch_size = parser.getint(config_header, 'batch_size') def define_placeholder(self): self.input_encode_tensor = tf.placeholder(dtype=tf.float32, shape=[None, None, self.n_encode], name='encode_tensor') self.seq_len_encode = tf.placeholder(dtype=tf.int32, shape=[None], name='seq_length_encode') self.input_tensor = tf.placeholder(dtype=tf.float32, shape=[None, None, self.n_input, self.n_input, self.n_channels], name='decode_feature_map') self.input_decode_coords_tensor = tf.placeholder(dtype=tf.float32, shape=[None, None, self.n_controled_var + self.n_coords_var + 1], name='decode_coords') self.target = tf.placeholder(dtype=tf.float32, shape=[None, None, self.n_controled_var + self.n_coords_var], name='target') self.seq_length = tf.placeholder(dtype=tf.int32, shape=[None], name='seq_length_decode') return def launchGraph(self): self.define_placeholder() self.MODEL = LSTM_model(conf_path=self.conf_path, batch_x=self.input_encode_tensor, seq_length=self.seq_len_encode, n_input=self.n_encode, batch_x_decode=self.input_tensor, 
batch_xcoords_decode=self.input_decode_coords_tensor, seq_length_decode=self.seq_length, n_input_decode=self.n_input, target=self.target, train=False, weight_summary=False) return def feed_fwd_convlayer(self, feed_input): with tf.device('/cpu:0'): self.graph = tf.Graph() self.launchGraph() self.sess = tf.Session() self.saver = tf.train.Saver() self.saver.restore(self.sess, self.restored_model_path) self.sess.graph.finalize() self.weights = self._return_weights() conv1_out, conv2_out, conv3_out, dense_out = self._feed_fwd_convlayer(feed_input) self.sess.close() return (conv1_out, conv2_out, conv3_out, dense_out) def _return_weights(self): weight_list = tf.trainable_variables() weights = {} for v in weight_list: weights[v.name] = self.sess.run(v) return weights def _feed_fwd_convlayer(self, feed_input): conv1_out = self.sess.run(self.MODEL.conv1, feed_dict={self.input_tensor: feed_input}) conv2_out = self.sess.run(self.MODEL.conv2, feed_dict={self.input_tensor: feed_input}) conv3_out = self.sess.run(self.MODEL.conv3, feed_dict={self.input_tensor: feed_input}) dense_out = self.sess.run(self.MODEL.fc1, feed_dict={self.input_tensor: feed_input}) return (conv1_out, conv2_out, conv3_out, dense_out) def visualize_raw_weights(weight_var, fig_size=(8, 4)): n_layers = weight_var.shape[3] n_channels = weight_var.shape[2] fig, axs = plt.subplots(n_channels, n_layers, figsize=fig_size, facecolor='w', edgecolor='k') axs = axs.ravel() for i in range(n_channels): for j in range(n_layers): axs[n_layers * i + j].imshow(weight_var[:, :, i, j], cmap='bwr', vmax=weight_var.max(), vmin=weight_var.min()) axs[n_layers * i + j].set_axis_off() plt.show() return fig def visualize_conv_layers(conv_layer, nrow, ncol, fig_size): print(conv_layer.shape) fig, axs = plt.subplots(nrow, ncol, figsize=fig_size, facecolor='w', edgecolor='k') fig.subplots_adjust(wspace=0.01, hspace=0.01) axs = axs.ravel() for i in range(nrow): for j in range(ncol): axs[ncol * i + j].imshow(conv_layer[j, :, :, i], cmap='bwr', vmax=conv_layer[:, :, :, i].max(), vmin=conv_layer[:, :, :, i].min(), origin='lower') axs[ncol * i + j].set_axis_off() plt.show() return fig '\nExample Code:\n' "\ntf.reset_default_graph()\nrestored_model_path = 'visual_network/model.ckpt-99'\nconfig_path = 'configs/encoder_decoder_nn.ini'\nvisual_graph_class = visual_graph(config_path, restored_model_path)\nvisual_graph_class.restore_model()\nweights = visual_graph_class.weights\nvisualize_raw_weights(weight_var=weights['wc1:0'], fig_size = (8, 2))\nvisualize_raw_weights(weight_var=weights['wc2:0'], fig_size = (8,4))\nvisualize_raw_weights(weight_var=weights['wc3:0'], fig_size = (8,4))\n"
code
74070921/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
74070921/cell_3
[ "text_plain_output_1.png" ]
from configparser import ConfigParser import matplotlib.pyplot as plt import os import os import tensorflow as tf import numpy as np import pandas as pd import os import numpy as np import tensorflow as tf import os from configparser import ConfigParser import matplotlib.pyplot as plt class visual_graph: def __init__(self, conf_path, restored_model_path): self.restored_model_path = restored_model_path self.conf_path = conf_path self.load_configs() def load_configs(self): parser = ConfigParser(os.environ) parser.read(self.conf_path) config_header = 'nn' self.n_input = parser.getint(config_header, 'n_input') self.n_channels = parser.getint('convolution', 'n_channels') self.n_controled_var = parser.getint('lstm', 'n_controled_var') self.n_encode = parser.getint(config_header, 'n_encode') self.state_size = parser.getint('lstm', 'n_cell_dim') self.n_layer = parser.getint('lstm', 'n_lstm_layers') self.batch_size = parser.getint(config_header, 'batch_size') def define_placeholder(self): self.input_encode_tensor = tf.placeholder(dtype=tf.float32, shape=[None, None, self.n_encode], name='encode_tensor') self.seq_len_encode = tf.placeholder(dtype=tf.int32, shape=[None], name='seq_length_encode') self.input_tensor = tf.placeholder(dtype=tf.float32, shape=[None, None, self.n_input, self.n_input, self.n_channels], name='decode_feature_map') self.input_decode_coords_tensor = tf.placeholder(dtype=tf.float32, shape=[None, None, self.n_controled_var], name='decode_coords') self.target = tf.placeholder(dtype=tf.float32, shape=[None, None, self.n_controled_var], name='target') self.target_end = tf.placeholder(dtype=tf.float32, shape=[None, None, 1], name='target_end') self.target_end_neg = tf.placeholder(dtype=tf.float32, shape=[None, None, 1], name='target_end_neg') self.seq_length = tf.placeholder(dtype=tf.int32, shape=[None], name='seq_length_decode') return def launchGraph(self): self.define_placeholder() self.MODEL = LSTM_model(conf_path=self.conf_path, batch_x=self.input_encode_tensor, seq_length=self.seq_len_encode, n_input=self.n_encode, batch_x_decode=self.input_tensor, batch_xcoords_decode=self.input_decode_coords_tensor, seq_length_decode=self.seq_length, n_input_decode=self.n_input, target=self.target, train=False, weight_summary=False) return def feed_fwd_convlayer(self, feed_input): with tf.device('/cpu:0'): self.graph = tf.Graph() self.launchGraph() self.sess = tf.Session() self.saver = tf.train.Saver() self.saver.restore(self.sess, self.restored_model_path) self.sess.graph.finalize() self.weights = self._return_weights() conv1_out, conv2_out, conv3_out = self._feed_fwd_convlayer(feed_input) self.sess.close() return (conv1_out, conv2_out, conv3_out) def _return_weights(self): weight_list = tf.trainable_variables() weights = {} for v in weight_list: weights[v.name] = self.sess.run(v) return weights def _feed_fwd_convlayer(self, feed_input): conv1_out = self.sess.run(self.MODEL.conv1, feed_dict={self.input_tensor: feed_input}) conv2_out = self.sess.run(self.MODEL.conv2, feed_dict={self.input_tensor: feed_input}) conv3_out = self.sess.run(self.MODEL.conv3, feed_dict={self.input_tensor: feed_input}) return (conv1_out, conv2_out, conv3_out) def visualize_raw_weights(weight_var, fig_size=(8, 4)): n_layers = weight_var.shape[3] n_channels = weight_var.shape[2] fig, axs = plt.subplots(n_channels, n_layers, figsize=fig_size, facecolor='w', edgecolor='k') axs = axs.ravel() for i in range(n_channels): for j in range(n_layers): axs[n_layers * i + j].imshow(weight_var[:, :, i, j], cmap='bwr', 
vmax=weight_var.max(), vmin=weight_var.min()) axs[n_layers * i + j].set_axis_off() plt.show() return fig def visualize_conv_layers(conv_layer, nrow, ncol, fig_size): print(conv_layer.shape) fig, axs = plt.subplots(nrow, ncol, figsize=fig_size, facecolor='w', edgecolor='k') fig.subplots_adjust(wspace=0.01, hspace=0.01) axs = axs.ravel() for i in range(nrow): for j in range(ncol): axs[ncol * i + j].imshow(conv_layer[j, :, :, i], cmap='bwr', vmax=conv_layer[:, :, :, i].max(), vmin=conv_layer[:, :, :, i].min(), origin='lower') axs[ncol * i + j].set_axis_off() plt.show() return fig '\nExample Code:\n' "\ntf.reset_default_graph()\nrestored_model_path = 'visual_network/model.ckpt-99'\nconfig_path = 'configs/encoder_decoder_nn.ini'\nvisual_graph_class = visual_graph(config_path, restored_model_path)\nvisual_graph_class.restore_model()\nweights = visual_graph_class.weights\nvisualize_raw_weights(weight_var=weights['wc1:0'], fig_size = (8, 2))\nvisualize_raw_weights(weight_var=weights['wc2:0'], fig_size = (8,4))\nvisualize_raw_weights(weight_var=weights['wc3:0'], fig_size = (8,4))\n"
code
16147633/cell_13
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/fashion-mnist_train.csv') test_df = pd.read_csv('../input/fashion-mnist_test.csv') (train_df.shape, test_df.shape) train_df = train_df.astype('float32') test_df = test_df.astype('float32') train_df.dtypes y_train = X = train_df['label'] X_train = train_df.drop(columns='label', axis=1) X_train.head()
code
16147633/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/fashion-mnist_train.csv') test_df = pd.read_csv('../input/fashion-mnist_test.csv') (train_df.shape, test_df.shape) train_df = train_df.astype('float32') test_df = test_df.astype('float32') test_df.dtypes
code
16147633/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/fashion-mnist_train.csv') train_df.head()
code
16147633/cell_19
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/fashion-mnist_train.csv') test_df = pd.read_csv('../input/fashion-mnist_test.csv') (train_df.shape, test_df.shape) train_df = train_df.astype('float32') test_df = test_df.astype('float32') train_df.dtypes test_df.dtypes y_train = X = train_df['label'] X_train = train_df.drop(columns='label', axis=1) y_test = X = test_df['label'] X_test = test_df.drop(columns='label', axis=1) X_train.shape X_train = np.array(X_train) X_train = X_train.reshape(X_train.shape[0], 28, 28, 1) X_test = np.array(X_test) X_test = X_test.reshape(X_test.shape[0], 28, 28, 1) X_train[0]
code
16147633/cell_1
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import tensorflow as tf import keras import os print(os.listdir('../input'))
code
16147633/cell_8
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/fashion-mnist_train.csv') test_df = pd.read_csv('../input/fashion-mnist_test.csv') (train_df.shape, test_df.shape) train_df = train_df.astype('float32') test_df = test_df.astype('float32') train_df.dtypes
code
16147633/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/fashion-mnist_train.csv') test_df = pd.read_csv('../input/fashion-mnist_test.csv') (train_df.shape, test_df.shape) train_df = train_df.astype('float32') test_df = test_df.astype('float32') train_df.dtypes y_train = X = train_df['label'] X_train = train_df.drop(columns='label', axis=1) X_train.shape
code
16147633/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/fashion-mnist_train.csv') test_df = pd.read_csv('../input/fashion-mnist_test.csv') test_df.head()
code
16147633/cell_14
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/fashion-mnist_train.csv') test_df = pd.read_csv('../input/fashion-mnist_test.csv') (train_df.shape, test_df.shape) train_df = train_df.astype('float32') test_df = test_df.astype('float32') train_df.dtypes y_train = X = train_df['label'] X_train = train_df.drop(columns='label', axis=1) y_train.head()
code
16147633/cell_22
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt plt.matshow(X_train[0])
code
16147633/cell_27
[ "text_plain_output_1.png" ]
from keras.layers import Dense, Dropout, Flatten from keras.models import Sequential from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.layers.normalization import BatchNormalization model = Sequential() model.add(Flatten(input_shape=[28 * 28]))
code
16147633/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('../input/fashion-mnist_train.csv') test_df = pd.read_csv('../input/fashion-mnist_test.csv') (train_df.shape, test_df.shape)
code
105187784/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd house_rent_df = pd.read_csv('/content/gdrive/MyDrive/Colab Notebooks/House_Rent_Dataset.csv')
code
50221247/cell_9
[ "text_html_output_1.png" ]
from tensorflow.keras.layers import Dense, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D, Input, AveragePooling2D import tensorflow as tf model = tf.keras.models.Sequential([Input(shape=X_train.shape[1:]), Conv2D(32, 5, activation='relu', padding='same'), Conv2D(32, 5, activation='relu', padding='same'), MaxPooling2D(pool_size=(2, 2)), BatchNormalization(), Dropout(0.3), Conv2D(64, 5, activation='relu', padding='same'), Conv2D(64, 5, activation='relu', padding='same'), MaxPooling2D(pool_size=(2, 2)), BatchNormalization(), Dropout(0.3), Flatten(), Dense(256, activation='selu', kernel_initializer='lecun_normal'), BatchNormalization(), Dropout(0.4), Dense(10, activation='softmax', kernel_initializer='glorot_normal')]) checkpoint = tf.keras.callbacks.ModelCheckpoint('model.h5', save_best_only=True) early_stopping = tf.keras.callbacks.EarlyStopping(patience=20) model.compile(optimizer='nadam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) history = model.fit(X_train, y_train, epochs=200, validation_data=(X_valid, y_valid), callbacks=[checkpoint, early_stopping]) model = tf.keras.models.load_model('model.h5') model.evaluate(X_test, y_test)
code
50221247/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
50221247/cell_7
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from tensorflow.keras.layers import Dense, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D, Input, AveragePooling2D import tensorflow as tf model = tf.keras.models.Sequential([Input(shape=X_train.shape[1:]), Conv2D(32, 5, activation='relu', padding='same'), Conv2D(32, 5, activation='relu', padding='same'), MaxPooling2D(pool_size=(2, 2)), BatchNormalization(), Dropout(0.3), Conv2D(64, 5, activation='relu', padding='same'), Conv2D(64, 5, activation='relu', padding='same'), MaxPooling2D(pool_size=(2, 2)), BatchNormalization(), Dropout(0.3), Flatten(), Dense(256, activation='selu', kernel_initializer='lecun_normal'), BatchNormalization(), Dropout(0.4), Dense(10, activation='softmax', kernel_initializer='glorot_normal')]) checkpoint = tf.keras.callbacks.ModelCheckpoint('model.h5', save_best_only=True) early_stopping = tf.keras.callbacks.EarlyStopping(patience=20) model.compile(optimizer='nadam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) history = model.fit(X_train, y_train, epochs=200, validation_data=(X_valid, y_valid), callbacks=[checkpoint, early_stopping])
code
50221247/cell_8
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import Dense, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D, Input, AveragePooling2D import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tensorflow as tf data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') data_sub = pd.read_csv('/kaggle/input/digit-recognizer/test.csv') model = tf.keras.models.Sequential([Input(shape=X_train.shape[1:]), Conv2D(32, 5, activation='relu', padding='same'), Conv2D(32, 5, activation='relu', padding='same'), MaxPooling2D(pool_size=(2, 2)), BatchNormalization(), Dropout(0.3), Conv2D(64, 5, activation='relu', padding='same'), Conv2D(64, 5, activation='relu', padding='same'), MaxPooling2D(pool_size=(2, 2)), BatchNormalization(), Dropout(0.3), Flatten(), Dense(256, activation='selu', kernel_initializer='lecun_normal'), BatchNormalization(), Dropout(0.4), Dense(10, activation='softmax', kernel_initializer='glorot_normal')]) checkpoint = tf.keras.callbacks.ModelCheckpoint('model.h5', save_best_only=True) early_stopping = tf.keras.callbacks.EarlyStopping(patience=20) model.compile(optimizer='nadam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) history = model.fit(X_train, y_train, epochs=200, validation_data=(X_valid, y_valid), callbacks=[checkpoint, early_stopping]) figure = plt.figure(figsize=(15, 10)) plt.plot(pd.DataFrame(history.history)) plt.grid() plt.ylabel('Loss / accuracy value') plt.xlabel('Epoch') plt.title('Loss and accuracy curves') plt.legend(pd.DataFrame(history.history)) plt.show()
code
50221247/cell_14
[ "image_output_1.png" ]
from tensorflow.keras.layers import Dense, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D, Input, AveragePooling2D import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tensorflow as tf data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') data_sub = pd.read_csv('/kaggle/input/digit-recognizer/test.csv') X = data.iloc[:, 1:] y = data.iloc[:, 0] X = np.array(X).reshape(-1, 28, 28, 1) model = tf.keras.models.Sequential([Input(shape=X_train.shape[1:]), Conv2D(32, 5, activation='relu', padding='same'), Conv2D(32, 5, activation='relu', padding='same'), MaxPooling2D(pool_size=(2, 2)), BatchNormalization(), Dropout(0.3), Conv2D(64, 5, activation='relu', padding='same'), Conv2D(64, 5, activation='relu', padding='same'), MaxPooling2D(pool_size=(2, 2)), BatchNormalization(), Dropout(0.3), Flatten(), Dense(256, activation='selu', kernel_initializer='lecun_normal'), BatchNormalization(), Dropout(0.4), Dense(10, activation='softmax', kernel_initializer='glorot_normal')]) checkpoint = tf.keras.callbacks.ModelCheckpoint('model.h5', save_best_only=True) early_stopping = tf.keras.callbacks.EarlyStopping(patience=20) model.compile(optimizer='nadam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) history = model.fit(X_train, y_train, epochs=200, validation_data=(X_valid, y_valid), callbacks=[checkpoint, early_stopping]) figure = plt.figure(figsize=(15,10)) plt.plot(pd.DataFrame(history.history)) plt.grid() plt.ylabel('Loss / accuracy value') plt.xlabel('Epoch') plt.title('Loss and accuracy curves') plt.legend(pd.DataFrame(history.history)) plt.show() model = tf.keras.models.load_model('model.h5') model.evaluate(X_test, y_test) data_sub = np.array(data_sub).reshape(-1, 28, 28, 1) / 255.0 preds = model.predict(data_sub) np.argmax(preds[0]) labels = [np.argmax(x) for x in preds] ids = [x + 1 for x in range(len(preds))] sub = pd.DataFrame() sub['ImageId'] = ids sub['Label'] = labels sub.to_csv('mnist_submission.csv', index=False) pd.read_csv('mnist_submission.csv')
code
50221247/cell_12
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import Dense, Flatten, BatchNormalization, Dropout, Conv2D, MaxPooling2D, Input, AveragePooling2D import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tensorflow as tf data = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') data_sub = pd.read_csv('/kaggle/input/digit-recognizer/test.csv') X = data.iloc[:, 1:] y = data.iloc[:, 0] X = np.array(X).reshape(-1, 28, 28, 1) model = tf.keras.models.Sequential([Input(shape=X_train.shape[1:]), Conv2D(32, 5, activation='relu', padding='same'), Conv2D(32, 5, activation='relu', padding='same'), MaxPooling2D(pool_size=(2, 2)), BatchNormalization(), Dropout(0.3), Conv2D(64, 5, activation='relu', padding='same'), Conv2D(64, 5, activation='relu', padding='same'), MaxPooling2D(pool_size=(2, 2)), BatchNormalization(), Dropout(0.3), Flatten(), Dense(256, activation='selu', kernel_initializer='lecun_normal'), BatchNormalization(), Dropout(0.4), Dense(10, activation='softmax', kernel_initializer='glorot_normal')]) checkpoint = tf.keras.callbacks.ModelCheckpoint('model.h5', save_best_only=True) early_stopping = tf.keras.callbacks.EarlyStopping(patience=20) model.compile(optimizer='nadam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) history = model.fit(X_train, y_train, epochs=200, validation_data=(X_valid, y_valid), callbacks=[checkpoint, early_stopping]) model = tf.keras.models.load_model('model.h5') model.evaluate(X_test, y_test) data_sub = np.array(data_sub).reshape(-1, 28, 28, 1) / 255.0 preds = model.predict(data_sub) np.argmax(preds[0])
code
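A small self-contained sketch (not from the notebook; the probability values are invented) of the np.argmax step used in the cell above and in the submission cell below to turn per-class softmax outputs into digit labels:
import numpy as np
# fake softmax outputs for three samples over four classes
preds = np.array([[0.1, 0.7, 0.1, 0.1],
                  [0.05, 0.05, 0.8, 0.1],
                  [0.6, 0.2, 0.1, 0.1]])
print(np.argmax(preds[0]))        # label of a single sample -> 1
print(np.argmax(preds, axis=1))   # all labels at once -> [1 2 0]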
32074095/cell_30
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) employees = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv') employees.set_index('Attrition') employees employees = employees.set_index('Attrition') employees employees = employees.reset_index() employees.shape employees.iloc[0:3, 0:4] employees[employees.Attrition == 'No'] employees_select = employees[['Attrition', 'Age', 'DistanceFromHome', 'WorkLifeBalance', 'EnvironmentSatisfaction', 'DailyRate', 'YearsAtCompany', 'YearsSinceLastPromotion']] employees_select employees_select.groupby('Attrition').mean() employees_select.groupby('Attrition').mean().iloc[:, 0:4].sort_values('Attrition', ascending=False).plot.barh()
code
32074095/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) employees = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv') employees.head()
code
32074095/cell_26
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) employees = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv') employees.set_index('Attrition') employees employees = employees.set_index('Attrition') employees employees = employees.reset_index() employees.shape employees.iloc[0:3, 0:4] employees[employees.Attrition == 'No'] employees_attrition = employees[employees.DistanceFromHome <= 10.632911] employees_attrition.groupby('Attrition').Attrition.size()['No'] / employees.groupby('Attrition').Attrition.size()['No'] employees_attrition = employees[employees.DistanceFromHome >= 10.632911] employees_attrition.groupby('Attrition').Attrition.size()['Yes'] / employees.groupby('Attrition').Attrition.size()['Yes']
code
32074095/cell_11
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) employees = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv') employees.set_index('Attrition') employees employees = employees.set_index('Attrition') employees employees = employees.reset_index() employees.shape
code
32074095/cell_19
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) employees = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv') employees.set_index('Attrition') employees employees = employees.set_index('Attrition') employees employees = employees.reset_index() employees.shape employees.iloc[0:3, 0:4] employees[employees.Attrition == 'No'] employees_select = employees[['Attrition', 'Age', 'DistanceFromHome', 'WorkLifeBalance', 'EnvironmentSatisfaction', 'DailyRate', 'YearsAtCompany', 'YearsSinceLastPromotion']] employees_select
code
32074095/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
32074095/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) employees = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv') employees.set_index('Attrition') employees employees = employees.set_index('Attrition') employees
code
32074095/cell_15
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) employees = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv') employees.set_index('Attrition') employees employees = employees.set_index('Attrition') employees employees = employees.reset_index() employees.shape employees.iloc[0:3, 0:4]
code
32074095/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) employees = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv') employees.set_index('Attrition') employees employees = employees.set_index('Attrition') employees employees = employees.reset_index() employees.shape employees.iloc[0:3, 0:4] employees[employees.Attrition == 'No']
code
32074095/cell_24
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) employees = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv') employees.set_index('Attrition') employees employees = employees.set_index('Attrition') employees employees = employees.reset_index() employees.shape employees.iloc[0:3, 0:4] employees[employees.Attrition == 'No'] employees_attrition = employees[employees.DistanceFromHome <= 10.632911] employees_attrition.groupby('Attrition').Attrition.size()['No'] / employees.groupby('Attrition').Attrition.size()['No']
code
32074095/cell_22
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) employees = pd.read_csv('/kaggle/input/ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv') employees.set_index('Attrition') employees employees = employees.set_index('Attrition') employees employees = employees.reset_index() employees.shape employees.iloc[0:3, 0:4] employees[employees.Attrition == 'No'] employees_select = employees[['Attrition', 'Age', 'DistanceFromHome', 'WorkLifeBalance', 'EnvironmentSatisfaction', 'DailyRate', 'YearsAtCompany', 'YearsSinceLastPromotion']] employees_select employees_select.groupby('Attrition').mean()
code
104120345/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/data-on-covid19-coronavirus/owid-covid-data.csv') data.info()
code
104120345/cell_4
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/data-on-covid19-coronavirus/owid-covid-data.csv') data.head()
code
104120345/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
104120345/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/data-on-covid19-coronavirus/owid-covid-data.csv') print(len(data))
code
104120345/cell_3
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/data-on-covid19-coronavirus/owid-covid-data.csv') data.describe()
code
2012241/cell_6
[ "image_output_1.png" ]
from subprocess import check_output import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd import matplotlib.pyplot as plt plt.style.use('fivethirtyeight') import seaborn as sns import math from IPython.display import HTML from subprocess import check_output State_time_series = pd.read_csv('../input/State_time_series.csv', parse_dates=True) State_time_series.Date = pd.to_datetime(State_time_series.Date) states = set(State_time_series[~State_time_series['ZHVI_AllHomes'].isnull() & ~State_time_series['MedianSoldPrice_AllHomes'].isnull()]['RegionName'].values) State_time_series_year = State_time_series[State_time_series['RegionName'].isin(states)].copy() highest_cost_states = State_time_series_year[['RegionName', 'ZHVI_AllHomes']].groupby('RegionName').max().sort_values(by=['ZHVI_AllHomes'], ascending=False)[:5].index.values.tolist() State_time_series_year = State_time_series_year[State_time_series_year.RegionName.isin(highest_cost_states)] State_time_series_year.year = State_time_series_year.Date.dt.year States_year_SalePrices = State_time_series_year.groupby([State_time_series_year.year, State_time_series_year.RegionName])['MedianSoldPrice_AllHomes'].mean().dropna().reset_index(name='SoldPrice') States_year_SalePrices.pivot(index='Date', columns='RegionName', values='SoldPrice').plot(figsize=(15, 8), linewidth=3, fontsize=14) plt.legend(fontsize=14) plt.ylabel('MedianSoldPrice_AllHomes') plt.xlabel('Year') plt.title('Top 5 US states and their Median Sold Prices over the years 1996 to 2016') plt.gca().get_xaxis().get_major_formatter().set_useOffset(False)
code
2012241/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output import matplotlib.pyplot as plt import numpy as np import pandas as pd import matplotlib.pyplot as plt plt.style.use('fivethirtyeight') import seaborn as sns import math from IPython.display import HTML from subprocess import check_output print(check_output(['ls', '../input']).decode('utf8'))
code
90123938/cell_9
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D import matplotlib.pyplot as plt # import the plotting toolkit import numpy as np # import the NumPy math toolbox def to_categorical(y, num_classes=None, dtype='float32'): y = np.array(y, dtype='int') input_shape = y.shape if input_shape and input_shape[-1] == 1 and (len(input_shape) > 1): input_shape = tuple(input_shape[:-1]) y = y.ravel() if not num_classes: num_classes = np.max(y) + 1 n = y.shape[0] categorical = np.zeros((n, num_classes), dtype=dtype) categorical[np.arange(n), y] = 1 output_shape = input_shape + (num_classes,) categorical = np.reshape(categorical, output_shape) return categorical X_train = X_train_image.reshape(60000, 28, 28, 1) X_test = X_test_image.reshape(10000, 28, 28, 1) y_train = to_categorical(y_train_lable, 10) y_test = to_categorical(y_test_lable, 10) from keras import models from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D model = models.Sequential() model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1))) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(10, activation='softmax')) model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) model.fit(X_train, y_train, validation_split=0.3, epochs=5, batch_size=128) score = model.evaluate(X_test, y_test) pred = model.predict(X_test[0].reshape(1, 28, 28, 1)) print(pred[0], 'converting the format gives:', pred.argmax()) import matplotlib.pyplot as plt plt.imshow(X_test[0].reshape(28, 28), cmap='Greys')
code
90123938/cell_4
[ "text_plain_output_1.png" ]
import numpy as np # import the NumPy math toolbox def to_categorical(y, num_classes=None, dtype='float32'): y = np.array(y, dtype='int') input_shape = y.shape if input_shape and input_shape[-1] == 1 and (len(input_shape) > 1): input_shape = tuple(input_shape[:-1]) y = y.ravel() if not num_classes: num_classes = np.max(y) + 1 n = y.shape[0] categorical = np.zeros((n, num_classes), dtype=dtype) categorical[np.arange(n), y] = 1 output_shape = input_shape + (num_classes,) categorical = np.reshape(categorical, output_shape) return categorical X_train = X_train_image.reshape(60000, 28, 28, 1) X_test = X_test_image.reshape(10000, 28, 28, 1) y_train = to_categorical(y_train_lable, 10) y_test = to_categorical(y_test_lable, 10) print('Feature tensor shape:', X_train_image.shape) print('First data sample:\n', X_train_image[0])
code
90123938/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D from keras import models from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D model = models.Sequential() model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1))) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(10, activation='softmax')) model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
code
90123938/cell_1
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd from keras.datasets import mnist (X_train_image, y_train_lable), (X_test_image, y_test_lable) = mnist.load_data()
code
90123938/cell_7
[ "text_plain_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D import numpy as np # import the NumPy math toolbox def to_categorical(y, num_classes=None, dtype='float32'): y = np.array(y, dtype='int') input_shape = y.shape if input_shape and input_shape[-1] == 1 and (len(input_shape) > 1): input_shape = tuple(input_shape[:-1]) y = y.ravel() if not num_classes: num_classes = np.max(y) + 1 n = y.shape[0] categorical = np.zeros((n, num_classes), dtype=dtype) categorical[np.arange(n), y] = 1 output_shape = input_shape + (num_classes,) categorical = np.reshape(categorical, output_shape) return categorical X_train = X_train_image.reshape(60000, 28, 28, 1) X_test = X_test_image.reshape(10000, 28, 28, 1) y_train = to_categorical(y_train_lable, 10) y_test = to_categorical(y_test_lable, 10) from keras import models from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D model = models.Sequential() model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1))) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(10, activation='softmax')) model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) model.fit(X_train, y_train, validation_split=0.3, epochs=5, batch_size=128)
code
90123938/cell_8
[ "text_plain_output_1.png" ]
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D import numpy as np # import the NumPy math toolbox def to_categorical(y, num_classes=None, dtype='float32'): y = np.array(y, dtype='int') input_shape = y.shape if input_shape and input_shape[-1] == 1 and (len(input_shape) > 1): input_shape = tuple(input_shape[:-1]) y = y.ravel() if not num_classes: num_classes = np.max(y) + 1 n = y.shape[0] categorical = np.zeros((n, num_classes), dtype=dtype) categorical[np.arange(n), y] = 1 output_shape = input_shape + (num_classes,) categorical = np.reshape(categorical, output_shape) return categorical X_train = X_train_image.reshape(60000, 28, 28, 1) X_test = X_test_image.reshape(10000, 28, 28, 1) y_train = to_categorical(y_train_lable, 10) y_test = to_categorical(y_test_lable, 10) from keras import models from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D model = models.Sequential() model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1))) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(10, activation='softmax')) model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) model.fit(X_train, y_train, validation_split=0.3, epochs=5, batch_size=128) score = model.evaluate(X_test, y_test) print('Test set prediction accuracy:', score[1])
code
90123938/cell_3
[ "text_plain_output_1.png" ]
import numpy as np # import the NumPy math toolbox def to_categorical(y, num_classes=None, dtype='float32'): y = np.array(y, dtype='int') input_shape = y.shape if input_shape and input_shape[-1] == 1 and (len(input_shape) > 1): input_shape = tuple(input_shape[:-1]) y = y.ravel() if not num_classes: num_classes = np.max(y) + 1 n = y.shape[0] categorical = np.zeros((n, num_classes), dtype=dtype) categorical[np.arange(n), y] = 1 output_shape = input_shape + (num_classes,) categorical = np.reshape(categorical, output_shape) return categorical X_train = X_train_image.reshape(60000, 28, 28, 1) X_test = X_test_image.reshape(10000, 28, 28, 1) y_train = to_categorical(y_train_lable, 10) y_test = to_categorical(y_test_lable, 10) print('Dataset tensor shape:', X_train.shape) print('First data label:', y_train[0])
code
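A minimal standalone sketch of what the to_categorical helper defined in the cells above computes — plain one-hot encoding; the toy labels and class count here are assumptions, not data from the notebook:
import numpy as np
# toy labels standing in for y_train_lable
y = np.array([0, 2, 1, 2])
num_classes = 3
one_hot = np.zeros((y.shape[0], num_classes), dtype='float32')
one_hot[np.arange(y.shape[0]), y] = 1
print(one_hot)
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]
#  [0. 0. 1.]]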
90123938/cell_5
[ "text_plain_output_1.png" ]
print('Label of the first data sample:', y_train_lable[0])
code
32071949/cell_3
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
def convert_news_text_to_specter_json(news_text_file): d = {} with open(news_text_file, 'r') as f: print('test') for i, l in enumerate(f): print(l) if i == 0: d['paper_id'] = l elif i == 1: d['url'] = l elif i == 2: d['title'] = l elif i == 3: d['abstract'] = l elif i == 4: d['body_text'] = l print(d) convert_news_text_to_specter_json('/kaggle/input/news-test-articles/002.txt')
code
2036996/cell_9
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import LogisticRegression from sklearn.metrics import log_loss from sklearn.naive_bayes import MultinomialNB import numpy as np import pandas as pd import xgboost as xgb train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') sampleSubmission = pd.read_csv('../input/sample_submission.csv') col = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate'] trainTxt = train['comment_text'] testTxt = test['comment_text'] trainTxt = trainTxt.fillna('unknown') testTxt = testTxt.fillna('unknown') combinedTxt = pd.concat([trainTxt, testTxt], axis=0) vect = TfidfVectorizer(decode_error='ignore', use_idf=True, smooth_idf=True, min_df=10, ngram_range=(1, 3), lowercase=True, stop_words='english') combinedDtm = vect.fit_transform(combinedTxt) trainDtm = combinedDtm[:train.shape[0]] testDtm = vect.transform(testTxt) lrpreds = np.zeros((test.shape[0], len(col))) nbpreds = np.zeros((test.shape[0], len(col))) svmpreds = np.zeros((test.shape[0], len(col))) xgbpreds = np.zeros((test.shape[0], len(col))) loss = [] for i, j in enumerate(col): lr = LogisticRegression(C=4) lr.fit(trainDtm, train[j]) lrpreds[:, i] = lr.predict_proba(testDtm)[:, 1] train_preds = lr.predict_proba(trainDtm)[:, 1] loss.append(log_loss(train[j], train_preds)) np.mean(loss) loss = [] for i, j in enumerate(col): nb = MultinomialNB() nb.fit(trainDtm, train[j]) nbpreds[:, i] = nb.predict_proba(testDtm)[:, 1] train_preds = nb.predict_proba(trainDtm)[:, 1] loss.append(log_loss(train[j], train_preds)) np.mean(loss) loss = [] for i, j in enumerate(col): xg = xgb.XGBClassifier(max_depth=7, n_estimators=200, colsample_bytree=0.8, subsample=0.8, nthread=10, learning_rate=0.1) xg.fit(trainDtm, train[j]) xgbpreds[:, i] = xg.predict_proba(testDtm)[:, 1] train_preds = xg.predict_proba(trainDtm)[:, 1] loss.append(log_loss(train[j], train_preds)) np.mean(loss)
code
2036996/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output import pandas as pd import numpy as np from sklearn.linear_model import LogisticRegression from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics import log_loss from sklearn.naive_bayes import MultinomialNB from sklearn import svm import xgboost as xgb from subprocess import check_output print(check_output(['ls', '../input']).decode('utf8'))
code
2036996/cell_7
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import LogisticRegression from sklearn.metrics import log_loss import numpy as np import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') sampleSubmission = pd.read_csv('../input/sample_submission.csv') col = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate'] trainTxt = train['comment_text'] testTxt = test['comment_text'] trainTxt = trainTxt.fillna('unknown') testTxt = testTxt.fillna('unknown') combinedTxt = pd.concat([trainTxt, testTxt], axis=0) vect = TfidfVectorizer(decode_error='ignore', use_idf=True, smooth_idf=True, min_df=10, ngram_range=(1, 3), lowercase=True, stop_words='english') combinedDtm = vect.fit_transform(combinedTxt) trainDtm = combinedDtm[:train.shape[0]] testDtm = vect.transform(testTxt) lrpreds = np.zeros((test.shape[0], len(col))) nbpreds = np.zeros((test.shape[0], len(col))) svmpreds = np.zeros((test.shape[0], len(col))) xgbpreds = np.zeros((test.shape[0], len(col))) loss = [] for i, j in enumerate(col): lr = LogisticRegression(C=4) lr.fit(trainDtm, train[j]) lrpreds[:, i] = lr.predict_proba(testDtm)[:, 1] train_preds = lr.predict_proba(trainDtm)[:, 1] loss.append(log_loss(train[j], train_preds)) np.mean(loss)
code
2036996/cell_8
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import LogisticRegression from sklearn.metrics import log_loss from sklearn.naive_bayes import MultinomialNB import numpy as np import pandas as pd train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') sampleSubmission = pd.read_csv('../input/sample_submission.csv') col = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate'] trainTxt = train['comment_text'] testTxt = test['comment_text'] trainTxt = trainTxt.fillna('unknown') testTxt = testTxt.fillna('unknown') combinedTxt = pd.concat([trainTxt, testTxt], axis=0) vect = TfidfVectorizer(decode_error='ignore', use_idf=True, smooth_idf=True, min_df=10, ngram_range=(1, 3), lowercase=True, stop_words='english') combinedDtm = vect.fit_transform(combinedTxt) trainDtm = combinedDtm[:train.shape[0]] testDtm = vect.transform(testTxt) lrpreds = np.zeros((test.shape[0], len(col))) nbpreds = np.zeros((test.shape[0], len(col))) svmpreds = np.zeros((test.shape[0], len(col))) xgbpreds = np.zeros((test.shape[0], len(col))) loss = [] for i, j in enumerate(col): lr = LogisticRegression(C=4) lr.fit(trainDtm, train[j]) lrpreds[:, i] = lr.predict_proba(testDtm)[:, 1] train_preds = lr.predict_proba(trainDtm)[:, 1] loss.append(log_loss(train[j], train_preds)) np.mean(loss) loss = [] for i, j in enumerate(col): nb = MultinomialNB() nb.fit(trainDtm, train[j]) nbpreds[:, i] = nb.predict_proba(testDtm)[:, 1] train_preds = nb.predict_proba(trainDtm)[:, 1] loss.append(log_loss(train[j], train_preds)) np.mean(loss)
code
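A toy, self-contained sketch of the TfidfVectorizer step used in the cells above (the corpus here is invented, not the competition data):
from sklearn.feature_extraction.text import TfidfVectorizer
corpus = ['you are a toxic troll', 'have a nice day', 'what a nice and kind comment']
vect = TfidfVectorizer(lowercase=True, stop_words='english')
dtm = vect.fit_transform(corpus)      # sparse document-term matrix
print(dtm.shape)                      # (3, n_terms_kept)
print(dtm.toarray().round(2))         # dense view of the tf-idf weights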
88079861/cell_21
[ "image_output_1.png" ]
from sklearn.utils import shuffle import cv2 import matplotlib.pyplot as plt import numpy as np import os import seaborn as sns seed = 42 np.random.seed = seed labels = ['NORMAL', 'PNEUMONIA'] folders = ['train', 'test', 'val'] def load_images_from_directory(main_dirictory, foldername): total_labels = [] images = [] total_normal = 0 total_pneumonia = 0 path = os.path.join(main_dirictory, foldername) for lab in labels: full_path = os.path.join(path, lab) for image in os.listdir(full_path): img = cv2.imread(full_path + '/' + image) img = cv2.resize(img, (150, 150)) images.append(img) if lab == 'NORMAL': label = 0 total_normal += 1 elif lab == 'PNEUMONIA': label = 1 total_pneumonia += 1 total_labels.append(label) return shuffle(images, total_labels, random_state=756349782) def get_Label(number): labels = {0: 'NORMAL', 1: 'PNEUMONIA'} return labels[number] def plot_predection(model_name): images = [] count = 0 for i, files in enumerate(val_images): img = cv2.resize(files, (150, 150)) img = np.expand_dims(files, axis=0) feature = model_name.predict(img) predection = np.argmax(feature, axis=1) plt.xticks([]) plt.yticks([]) count += 1 if count == 30: break def freezing_layers(model_name): for layer in model_name.layers: layer.trainable = False train_images = np.asarray(train_images, np.float32) / 255 train_labels = np.asarray(train_labels) test_images = np.asarray(test_images, np.float32) / 255 test_labels = np.asarray(test_labels) for i in range(30): plt.xticks([]) plt.yticks([]) plt.figure(figsize=(15, 10)) plt.suptitle('Test Images', fontsize=20) for i in range(30): plt.subplot(5, 6, i + 1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.xlabel(get_Label(test_labels[i])) plt.imshow(test_images[i], cmap=plt.cm.binary)
code