path: string (lengths 13–17)
screenshot_names: sequence of strings (lengths 1–873)
code: string (lengths 0–40.4k)
cell_type: string (1 distinct value)
32068084/cell_28
[ "application_vnd.jupyter.stderr_output_7.png", "text_plain_output_4.png", "text_plain_output_6.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_5.png", "text_plain_output_8.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from lightgbm import LGBMClassifier
from sklearn import decomposition
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
# assumed imports, missing from this snippet: 'ss' below is presumably StandardScaler,
# and HistGradientBoostingClassifier needs the experimental enable on older scikit-learn
from sklearn.preprocessing import StandardScaler as ss
from sklearn.experimental import enable_hist_gradient_boosting  # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
X_test_final.shape
sc = ss()
# X_train, y_train, X_valid and y_valid come from a train/validation split defined in an earlier cell (not shown)
X_train = sc.fit_transform(X_train)
X_valid = sc.transform(X_valid)
X_test = sc.transform(X_test_final)
pca = decomposition.PCA(0.95)
pca.fit(X_train)
pca.n_components_
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
X_valid_pca = pca.transform(X_valid)
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, y_train)
y_pred = decision_tree.predict(X_valid)
acc_decision_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_decision_tree
extra_tree = DecisionTreeClassifier()
extra_tree.fit(X_train, y_train)
y_pred = extra_tree.predict(X_valid)
acc_extra_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_extra_tree
rfc = RandomForestClassifier(criterion='entropy', n_estimators=1000, min_samples_split=8, random_state=42, verbose=5)
rfc.fit(X_train, y_train)
y_pred = rfc.predict(X_valid)
acc_rfc = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_rfc
GB = GradientBoostingClassifier(n_estimators=100, learning_rate=0.075, max_depth=13, max_features=0.5, min_samples_leaf=14, verbose=5)
GB.fit(X_train, y_train)
y_pred = GB.predict(X_valid)
acc_GB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_GB
HGB = HistGradientBoostingClassifier(learning_rate=0.075, loss='categorical_crossentropy', max_depth=8, min_samples_leaf=15)
HGB = HGB.fit(X_train_pca, y_train)
y_pred = HGB.predict(X_valid_pca)
acc_HGB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_HGB
LGB = LGBMClassifier(objective='multiclass', learning_rate=0.75, num_iterations=100, num_leaves=50, random_state=123, max_depth=8)
LGB.fit(X_train, y_train)
y_pred = LGB.predict(X_valid)
acc_LGB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_LGB
code
32068084/cell_15
[ "text_plain_output_1.png" ]
from sklearn import decomposition
import pandas as pd
# assumed import, missing from this snippet: 'ss' below is presumably StandardScaler
from sklearn.preprocessing import StandardScaler as ss
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
X_test_final.shape
sc = ss()
# X_train and X_valid come from a train/validation split defined in an earlier cell (not shown)
X_train = sc.fit_transform(X_train)
X_valid = sc.transform(X_valid)
X_test = sc.transform(X_test_final)
pca = decomposition.PCA(0.95)
pca.fit(X_train)
code
32068084/cell_16
[ "text_plain_output_1.png" ]
from sklearn import decomposition
import pandas as pd
# assumed import, missing from this snippet: 'ss' below is presumably StandardScaler
from sklearn.preprocessing import StandardScaler as ss
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
X_test_final.shape
sc = ss()
# X_train and X_valid come from a train/validation split defined in an earlier cell (not shown)
X_train = sc.fit_transform(X_train)
X_valid = sc.transform(X_valid)
X_test = sc.transform(X_test_final)
pca = decomposition.PCA(0.95)
pca.fit(X_train)
pca.n_components_
code
32068084/cell_35
[ "text_plain_output_1.png" ]
from lightgbm import LGBMClassifier
from sklearn import decomposition
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
import pandas as pd
# assumed imports, missing from this snippet: 'ss' below is presumably StandardScaler,
# and HistGradientBoostingClassifier needs the experimental enable on older scikit-learn
from sklearn.preprocessing import StandardScaler as ss
from sklearn.experimental import enable_hist_gradient_boosting  # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
X_test_final.shape
sc = ss()
# X_train, y_train, X_valid and y_valid come from a train/validation split defined in an earlier cell (not shown)
X_train = sc.fit_transform(X_train)
X_valid = sc.transform(X_valid)
X_test = sc.transform(X_test_final)
pca = decomposition.PCA(0.95)
pca.fit(X_train)
pca.n_components_
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
X_valid_pca = pca.transform(X_valid)
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, y_train)
y_pred = decision_tree.predict(X_valid)
acc_decision_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_decision_tree
extra_tree = DecisionTreeClassifier()
extra_tree.fit(X_train, y_train)
y_pred = extra_tree.predict(X_valid)
acc_extra_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_extra_tree
rfc = RandomForestClassifier(criterion='entropy', n_estimators=1000, min_samples_split=8, random_state=42, verbose=5)
rfc.fit(X_train, y_train)
y_pred = rfc.predict(X_valid)
acc_rfc = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_rfc
GB = GradientBoostingClassifier(n_estimators=100, learning_rate=0.075, max_depth=13, max_features=0.5, min_samples_leaf=14, verbose=5)
GB.fit(X_train, y_train)
y_pred = GB.predict(X_valid)
acc_GB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_GB
HGB = HistGradientBoostingClassifier(learning_rate=0.075, loss='categorical_crossentropy', max_depth=8, min_samples_leaf=15)
HGB = HGB.fit(X_train_pca, y_train)
y_pred = HGB.predict(X_valid_pca)
acc_HGB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_HGB
LGB = LGBMClassifier(objective='multiclass', learning_rate=0.75, num_iterations=100, num_leaves=50, random_state=123, max_depth=8)
LGB.fit(X_train, y_train)
y_pred = LGB.predict(X_valid)
acc_LGB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_LGB
AB = AdaBoostClassifier(n_estimators=100, learning_rate=0.075)
AB.fit(X_train, y_train)
y_pred = AB.predict(X_valid)
acc_AB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_AB
BC = BaggingClassifier(n_estimators=100)
BC.fit(X_train_pca, y_train)
y_pred = BC.predict(X_valid_pca)
acc_BC = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_BC
xgb = XGBClassifier(n_estimators=1000, learning_rate=0.05, n_jobs=5)
xgb.fit(X_train, y_train, early_stopping_rounds=5, eval_set=[(X_valid, y_valid)], verbose=False)
y_pred = xgb.predict(X_valid)
acc_xgb = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_xgb
ETC = ExtraTreesClassifier(n_estimators=100)
ETC.fit(X_train, y_train)
y_pred = ETC.predict(X_valid)
acc_ETC = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_ETC
code
32068084/cell_31
[ "text_plain_output_1.png" ]
from lightgbm import LGBMClassifier
from sklearn import decomposition
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
# assumed imports, missing from this snippet: 'ss' below is presumably StandardScaler,
# and HistGradientBoostingClassifier needs the experimental enable on older scikit-learn
from sklearn.preprocessing import StandardScaler as ss
from sklearn.experimental import enable_hist_gradient_boosting  # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
X_test_final.shape
sc = ss()
# X_train, y_train, X_valid and y_valid come from a train/validation split defined in an earlier cell (not shown)
X_train = sc.fit_transform(X_train)
X_valid = sc.transform(X_valid)
X_test = sc.transform(X_test_final)
pca = decomposition.PCA(0.95)
pca.fit(X_train)
pca.n_components_
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
X_valid_pca = pca.transform(X_valid)
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, y_train)
y_pred = decision_tree.predict(X_valid)
acc_decision_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_decision_tree
extra_tree = DecisionTreeClassifier()
extra_tree.fit(X_train, y_train)
y_pred = extra_tree.predict(X_valid)
acc_extra_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_extra_tree
rfc = RandomForestClassifier(criterion='entropy', n_estimators=1000, min_samples_split=8, random_state=42, verbose=5)
rfc.fit(X_train, y_train)
y_pred = rfc.predict(X_valid)
acc_rfc = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_rfc
GB = GradientBoostingClassifier(n_estimators=100, learning_rate=0.075, max_depth=13, max_features=0.5, min_samples_leaf=14, verbose=5)
GB.fit(X_train, y_train)
y_pred = GB.predict(X_valid)
acc_GB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_GB
HGB = HistGradientBoostingClassifier(learning_rate=0.075, loss='categorical_crossentropy', max_depth=8, min_samples_leaf=15)
HGB = HGB.fit(X_train_pca, y_train)
y_pred = HGB.predict(X_valid_pca)
acc_HGB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_HGB
LGB = LGBMClassifier(objective='multiclass', learning_rate=0.75, num_iterations=100, num_leaves=50, random_state=123, max_depth=8)
LGB.fit(X_train, y_train)
y_pred = LGB.predict(X_valid)
acc_LGB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_LGB
AB = AdaBoostClassifier(n_estimators=100, learning_rate=0.075)
AB.fit(X_train, y_train)
y_pred = AB.predict(X_valid)
acc_AB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_AB
BC = BaggingClassifier(n_estimators=100)
BC.fit(X_train_pca, y_train)
y_pred = BC.predict(X_valid_pca)
acc_BC = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_BC
code
32068084/cell_27
[ "text_plain_output_1.png" ]
from sklearn import decomposition
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
# assumed imports, missing from this snippet: 'ss' below is presumably StandardScaler,
# and HistGradientBoostingClassifier needs the experimental enable on older scikit-learn
from sklearn.preprocessing import StandardScaler as ss
from sklearn.experimental import enable_hist_gradient_boosting  # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
X_test_final.shape
sc = ss()
# X_train, y_train, X_valid and y_valid come from a train/validation split defined in an earlier cell (not shown)
X_train = sc.fit_transform(X_train)
X_valid = sc.transform(X_valid)
X_test = sc.transform(X_test_final)
pca = decomposition.PCA(0.95)
pca.fit(X_train)
pca.n_components_
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
X_valid_pca = pca.transform(X_valid)
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, y_train)
y_pred = decision_tree.predict(X_valid)
acc_decision_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_decision_tree
extra_tree = DecisionTreeClassifier()
extra_tree.fit(X_train, y_train)
y_pred = extra_tree.predict(X_valid)
acc_extra_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_extra_tree
rfc = RandomForestClassifier(criterion='entropy', n_estimators=1000, min_samples_split=8, random_state=42, verbose=5)
rfc.fit(X_train, y_train)
y_pred = rfc.predict(X_valid)
acc_rfc = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_rfc
GB = GradientBoostingClassifier(n_estimators=100, learning_rate=0.075, max_depth=13, max_features=0.5, min_samples_leaf=14, verbose=5)
GB.fit(X_train, y_train)
y_pred = GB.predict(X_valid)
acc_GB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_GB
HGB = HistGradientBoostingClassifier(learning_rate=0.075, loss='categorical_crossentropy', max_depth=8, min_samples_leaf=15)
HGB = HGB.fit(X_train_pca, y_train)
y_pred = HGB.predict(X_valid_pca)
acc_HGB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_HGB
code
32068084/cell_37
[ "text_plain_output_1.png" ]
from lightgbm import LGBMClassifier
from sklearn import decomposition
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
import pandas as pd
# assumed imports, missing from this snippet: 'ss' below is presumably StandardScaler,
# and HistGradientBoostingClassifier needs the experimental enable on older scikit-learn
from sklearn.preprocessing import StandardScaler as ss
from sklearn.experimental import enable_hist_gradient_boosting  # noqa
from sklearn.ensemble import HistGradientBoostingClassifier
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
X_test_final.shape
sc = ss()
# X_train, y_train, X_valid and y_valid come from a train/validation split defined in an earlier cell (not shown)
X_train = sc.fit_transform(X_train)
X_valid = sc.transform(X_valid)
X_test = sc.transform(X_test_final)
pca = decomposition.PCA(0.95)
pca.fit(X_train)
pca.n_components_
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
X_valid_pca = pca.transform(X_valid)
decision_tree = DecisionTreeClassifier()
decision_tree.fit(X_train, y_train)
y_pred = decision_tree.predict(X_valid)
acc_decision_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_decision_tree
extra_tree = DecisionTreeClassifier()
extra_tree.fit(X_train, y_train)
y_pred = extra_tree.predict(X_valid)
acc_extra_tree = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_extra_tree
rfc = RandomForestClassifier(criterion='entropy', n_estimators=1000, min_samples_split=8, random_state=42, verbose=5)
rfc.fit(X_train, y_train)
y_pred = rfc.predict(X_valid)
acc_rfc = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_rfc
GB = GradientBoostingClassifier(n_estimators=100, learning_rate=0.075, max_depth=13, max_features=0.5, min_samples_leaf=14, verbose=5)
GB.fit(X_train, y_train)
y_pred = GB.predict(X_valid)
acc_GB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_GB
HGB = HistGradientBoostingClassifier(learning_rate=0.075, loss='categorical_crossentropy', max_depth=8, min_samples_leaf=15)
HGB = HGB.fit(X_train_pca, y_train)
y_pred = HGB.predict(X_valid_pca)
acc_HGB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_HGB
LGB = LGBMClassifier(objective='multiclass', learning_rate=0.75, num_iterations=100, num_leaves=50, random_state=123, max_depth=8)
LGB.fit(X_train, y_train)
y_pred = LGB.predict(X_valid)
acc_LGB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_LGB
AB = AdaBoostClassifier(n_estimators=100, learning_rate=0.075)
AB.fit(X_train, y_train)
y_pred = AB.predict(X_valid)
acc_AB = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_AB
BC = BaggingClassifier(n_estimators=100)
BC.fit(X_train_pca, y_train)
y_pred = BC.predict(X_valid_pca)
acc_BC = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_BC
xgb = XGBClassifier(n_estimators=1000, learning_rate=0.05, n_jobs=5)
xgb.fit(X_train, y_train, early_stopping_rounds=5, eval_set=[(X_valid, y_valid)], verbose=False)
y_pred = xgb.predict(X_valid)
acc_xgb = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_xgb
ETC = ExtraTreesClassifier(n_estimators=100)
ETC.fit(X_train, y_train)
y_pred = ETC.predict(X_valid)
acc_ETC = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_ETC
LG = LogisticRegression(solver='lbfgs', multi_class='multinomial')
LG.fit(X_train, y_train)
y_pred = LG.predict(X_valid)
acc_LG = round(accuracy_score(y_valid, y_pred) * 100, 2)
acc_LG
code
32068084/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
train_df_final = pd.read_csv('../input/pumpitup-challenge-dataset/train_df_final.csv')
X_test_final = pd.read_csv('../input/pumpitup-challenge-dataset/X_test_final.csv')
train_df_final.shape
code
104131375/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/anime-quotes/AnimeQuotes.csv')
data.shape
genre = pd.DataFrame()
data2 = pd.read_csv('../input/anime-quotes-dataset/lessreal-data.csv', delimiter=';', skiprows=0, low_memory=False)
data2.shape
data2.isnull().sum()
data2.drop(data2.iloc[:, 4:11], axis=1, inplace=True)
data2.drop(['ID'], axis=1)
code
104131375/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/anime-quotes/AnimeQuotes.csv')
data.shape
data.head()
code
104131375/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/anime-quotes/AnimeQuotes.csv')
data.shape
genre = pd.DataFrame()
plt.plot(genre)
plt.xlabel('Anime')
plt.ylabel('Number')
plt.title('Top 10 Animes')
plt.xticks(rotation=70)
plt.grid(True)
plt.show()
code
104131375/cell_19
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/anime-quotes/AnimeQuotes.csv')
data.shape
genre = pd.DataFrame()
data2 = pd.read_csv('../input/anime-quotes-dataset/lessreal-data.csv', delimiter=';', skiprows=0, low_memory=False)
data2.shape
data2.isnull().sum()
data2.drop(data2.iloc[:, 4:11], axis=1, inplace=True)
data2.head()
code
104131375/cell_7
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/anime-quotes/AnimeQuotes.csv')
data.shape
data.info()
code
104131375/cell_8
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/anime-quotes/AnimeQuotes.csv')
data.shape
data.Anime.value_counts().head(10)
code
104131375/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/anime-quotes/AnimeQuotes.csv')
data.shape
genre = pd.DataFrame()
data2 = pd.read_csv('../input/anime-quotes-dataset/lessreal-data.csv', delimiter=';', skiprows=0, low_memory=False)
data2.shape
code
104131375/cell_16
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/anime-quotes/AnimeQuotes.csv')
data.shape
genre = pd.DataFrame()
data2 = pd.read_csv('../input/anime-quotes-dataset/lessreal-data.csv', delimiter=';', skiprows=0, low_memory=False)
data2.shape
data2.info()
code
104131375/cell_17
[ "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/anime-quotes/AnimeQuotes.csv')
data.shape
genre = pd.DataFrame()
data2 = pd.read_csv('../input/anime-quotes-dataset/lessreal-data.csv', delimiter=';', skiprows=0, low_memory=False)
data2.shape
data2.isnull().sum()
code
104131375/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/anime-quotes/AnimeQuotes.csv')
data.shape
genre = pd.DataFrame()
data2 = pd.read_csv('../input/anime-quotes-dataset/lessreal-data.csv', delimiter=';', skiprows=0, low_memory=False)
data2.head()
code
104131375/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/anime-quotes/AnimeQuotes.csv')
data.shape
genre = pd.DataFrame()
genre
code
104131375/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/anime-quotes/AnimeQuotes.csv')
data.shape
code
2022777/cell_4
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
train.isnull().sum()
gender_pivot = train.pivot_table(index='Sex', values='Survived')
class_pivot = train.pivot_table(index='Pclass', values='Survived')
family_cols = ['SibSp', 'Parch', 'Survived']
family = train[family_cols].copy()
family['familysize'] = family[['SibSp', 'Parch']].sum(axis=1)
familySize = family[['SibSp', 'Parch']].sum(axis=1)
family['isalone'] = np.where(familySize >= 1, 1, 0)
family_pivot = family.pivot_table(index='familysize', values='Survived')
isalone_pivot = family.pivot_table(index='isalone', values='Survived')
train['Fare'] = train['Fare'].fillna(train['Fare'].mean())
train['Embarked'] = train['Embarked'].fillna('S')
holdout['Fare'] = holdout['Fare'].fillna(train['Fare'].mean())
holdout['Embarked'] = holdout['Embarked'].fillna('S')
train['Age'] = train['Age'].fillna(-0.5)
holdout['Age'] = holdout['Age'].fillna(-0.5)
train.head(2)
holdout.head(2)
code
2022777/cell_6
[ "text_plain_output_1.png" ]
from keras.layers import Dense
from keras.models import Sequential
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
train.isnull().sum()
gender_pivot = train.pivot_table(index='Sex', values='Survived')
class_pivot = train.pivot_table(index='Pclass', values='Survived')
family_cols = ['SibSp', 'Parch', 'Survived']
family = train[family_cols].copy()
family['familysize'] = family[['SibSp', 'Parch']].sum(axis=1)
familySize = family[['SibSp', 'Parch']].sum(axis=1)
family['isalone'] = np.where(familySize >= 1, 1, 0)
family_pivot = family.pivot_table(index='familysize', values='Survived')
isalone_pivot = family.pivot_table(index='isalone', values='Survived')
train['Fare'] = train['Fare'].fillna(train['Fare'].mean())
train['Embarked'] = train['Embarked'].fillna('S')
holdout['Fare'] = holdout['Fare'].fillna(train['Fare'].mean())
holdout['Embarked'] = holdout['Embarked'].fillna('S')
train['Age'] = train['Age'].fillna(-0.5)
holdout['Age'] = holdout['Age'].fillna(-0.5)
cuts = [-1, 0, 5, 12, 18, 35, 60, 100]
labels = ['Missing', 'Infant', 'Child', 'Teenager', 'Young Adult', 'Adult', 'Senior']
train['Age_categories'] = pd.cut(train['Age'], cuts, labels=labels)
holdout['Age_categories'] = pd.cut(holdout['Age'], cuts, labels=labels)
fare_cuts = [-1, 12, 50, 100, 1000]
fare_labels = ['0-12', '12-50', '50-100', '100+']
train['Fare_categories'] = pd.cut(train['Fare'], fare_cuts, labels=fare_labels)
holdout['Fare_categories'] = pd.cut(holdout['Fare'], fare_cuts, labels=fare_labels)
train['Cabin_type'] = train['Cabin'].str[0]
train['Cabin_type'] = train['Cabin_type'].fillna('Unknown')
train = train.drop('Cabin', axis=1)
holdout['Cabin_type'] = holdout['Cabin'].str[0]
holdout['Cabin_type'] = holdout['Cabin_type'].fillna('Unknown')
holdout = holdout.drop('Cabin', axis=1)
titles = {'Mr': 'Mr', 'Mme': 'Mrs', 'Ms': 'Mrs', 'Mrs': 'Mrs', 'Master': 'Master', 'Mlle': 'Miss', 'Miss': 'Miss', 'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Dr': 'Officer', 'Rev': 'Officer', 'Jonkheer': 'Royalty', 'Don': 'Royalty', 'Sir': 'Royalty', 'Countess': 'Royalty', 'Dona': 'Royalty', 'Lady': 'Royalty'}
train_titles = train['Name'].str.extract(' ([A-Za-z]+)\\.', expand=False)
train['Title'] = train_titles.map(titles)
holdout_titles = holdout['Name'].str.extract(' ([A-Za-z]+)\\.', expand=False)
holdout['Title'] = holdout_titles.map(titles)
familySize_train = train[['SibSp', 'Parch']].sum(axis=1)
train['isalone'] = np.where(familySize_train >= 1, 1, 0)
familySize_holdout = holdout[['SibSp', 'Parch']].sum(axis=1)
holdout['isalone'] = np.where(familySize_holdout >= 1, 1, 0)

def get_dummies(df, column_name):
    dummies = pd.get_dummies(df[column_name], prefix=column_name)
    df = pd.concat([df, dummies], axis=1)
    return df

columnNames = ['Age_categories', 'Pclass', 'Sex', 'Fare_categories', 'Title', 'Cabin_type', 'Embarked']
for column in columnNames:
    dummies_train = pd.get_dummies(train[column], prefix=column)
    train = pd.concat([train, dummies_train], axis=1)
    dummies_holdout = pd.get_dummies(holdout[column], prefix=column)
    holdout = pd.concat([holdout, dummies_holdout], axis=1)

columns = ['Age_categories_Missing', 'Age_categories_Infant', 'Age_categories_Child', 'Age_categories_Teenager', 'Age_categories_Young Adult', 'Age_categories_Adult', 'Age_categories_Senior', 'Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex_female', 'Sex_male', 'Embarked_C', 'Embarked_Q', 'Embarked_S', 'Fare_categories_0-12', 'Fare_categories_12-50', 'Fare_categories_50-100', 'Fare_categories_100+', 'Title_Master', 'Title_Miss', 'Title_Mr', 'Title_Mrs', 'Title_Officer', 'Title_Royalty', 'Cabin_type_A', 'Cabin_type_B', 'Cabin_type_C', 'Cabin_type_D', 'Cabin_type_E', 'Cabin_type_F', 'Cabin_type_G', 'Cabin_type_Unknown', 'isalone']

def get_model(df, features):
    train_X = df[features]
    train_y = df['Survived']
    cv = ShuffleSplit(n_splits=10, test_size=0.3, train_size=0.6, random_state=0)
    model_params = [
        {'name': 'RandomForestClassifier',
         'estimator': RandomForestClassifier(random_state=0),
         'hyperparameters': {'n_estimators': [20, 25, 35, 40, 45, 50, 55, 60, 65, 70, 75], 'criterion': ['entropy', 'gini'], 'max_features': ['log2', 'sqrt'], 'min_samples_leaf': [1, 5, 8], 'min_samples_split': [2, 3, 5]}},
        {'name': 'DecisionTreeClassifier',
         'estimator': tree.DecisionTreeClassifier(),
         'hyperparameters': {'criterion': ['entropy', 'gini'], 'max_depth': [None, 2, 4, 6, 8, 10, 12, 14, 16], 'min_samples_split': [2, 3, 4, 5, 10, 0.03, 0.05, 0.1], 'max_features': [None, 'auto'], 'min_samples_leaf': [1, 2, 3, 4, 5, 10, 12, 0.5, 0.03, 0.05, 0.1]}},
        {'name': 'KernelSVMClassifier',
         'estimator': SVC(random_state=0),
         'hyperparameters': {'kernel': ['rbf'], 'C': np.logspace(-9, 3, 13), 'gamma': np.logspace(-9, 3, 13)}},
        {'name': 'KNeighborsClassifier',
         'estimator': KNeighborsClassifier(),
         'hyperparameters': {'n_neighbors': range(1, 20, 2), 'weights': ['distance', 'uniform'], 'algorithm': ['ball_tree', 'kd_tree', 'brute'], 'p': [1, 2]}},
        {'name': 'LogisticRegressionClassifier',
         'estimator': LogisticRegression(),
         'hyperparameters': {'solver': ['newton-cg', 'lbfgs', 'liblinear']}},
    ]
    models = []
    for model in model_params:
        print(model['name'])
        grid = GridSearchCV(model['estimator'], param_grid=model['hyperparameters'], cv=10)
        grid.fit(train_X, train_y)
        model_att = {'model': grid.best_estimator_, 'best_params': grid.best_params_, 'best_score': grid.best_score_, 'grid': grid}
        models.append(model_att)
        print('Evaluated model and its params: ')
        print(grid.best_params_)
        print(grid.best_score_)
    return models

def ann_model(df, features):
    classifier = Sequential()
    classifier.add(Dense(input_dim=len(features), units=15, activation='relu', kernel_initializer='uniform'))
    classifier.add(Dense(units=15, kernel_initializer='uniform', activation='relu'))
    classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
    classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    train_X = df[features]
    train_y = df['Survived']
    classifier.fit(np.array(train_X), np.array(train_y), batch_size=10, epochs=100)
    return classifier

def get_features(df, columns, model=None):
    newDf = df.copy()
    newDf = newDf.select_dtypes(['number'])
    newDf = newDf.dropna(axis=1, how='any')
    all_X = newDf[columns]
    all_y = df['Survived']
    cv = StratifiedShuffleSplit(n_splits=10, test_size=0.3, train_size=0.6, random_state=0)
    if model is None:
        classifier = tree.DecisionTreeClassifier(criterion='entropy', max_depth=10, max_features='auto', max_leaf_nodes=None, min_impurity_decrease=0.0, min_samples_leaf=10, min_samples_split=3)
    else:
        classifier = model
    selector = RFECV(classifier, scoring='roc_auc', cv=cv, step=1)
    selector.fit(all_X, all_y)
    rfecv_columns = all_X.columns[selector.support_]
    return rfecv_columns

models = get_model(train, columns)
best_grid = models[0]['grid']
best_classifier = models[0]['model']
best_params = models[0]['best_params']
rfecv_features = get_features(train, columns, best_classifier)
print(len(rfecv_features))
print(rfecv_features)
models = get_model(train, rfecv_features)
best_classifier = models[0]['model']
predictions = best_classifier.predict(holdout[rfecv_features])
sub = {'PassengerId': holdout['PassengerId'], 'Survived': predictions}
submission = pd.DataFrame(sub)
submission.to_csv(path_or_buf='Submission.csv', index=False, header=True)
code
2022777/cell_2
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
train.isnull().sum()
train.describe()
holdout.describe()
code
2022777/cell_1
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn.feature_selection
from sklearn.feature_selection import RFECV
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import minmax_scale
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import ShuffleSplit
import keras
from keras.models import Sequential
from keras.layers import Dense
from sklearn import tree

print(check_output(['ls', '.']).decode('utf8'))
code
2022777/cell_3
[ "image_output_4.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
train.isnull().sum()
gender_pivot = train.pivot_table(index='Sex', values='Survived')
gender_pivot.plot.bar()
plt.show()
class_pivot = train.pivot_table(index='Pclass', values='Survived')
class_pivot.plot.bar()
plt.show()
family_cols = ['SibSp', 'Parch', 'Survived']
family = train[family_cols].copy()
family['familysize'] = family[['SibSp', 'Parch']].sum(axis=1)
familySize = family[['SibSp', 'Parch']].sum(axis=1)
family['isalone'] = np.where(familySize >= 1, 1, 0)
family_pivot = family.pivot_table(index='familysize', values='Survived')
isalone_pivot = family.pivot_table(index='isalone', values='Survived')
isalone_pivot.plot.bar(ylim=(0, 1), yticks=np.arange(0, 1, 0.1))
family_pivot.plot.bar(ylim=(0, 1), yticks=np.arange(0, 1, 0.1))
plt.show()
code
2022777/cell_5
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.csv')
holdout = pd.read_csv('../input/test.csv')
train.isnull().sum()
gender_pivot = train.pivot_table(index='Sex', values='Survived')
class_pivot = train.pivot_table(index='Pclass', values='Survived')
family_cols = ['SibSp', 'Parch', 'Survived']
family = train[family_cols].copy()
family['familysize'] = family[['SibSp', 'Parch']].sum(axis=1)
familySize = family[['SibSp', 'Parch']].sum(axis=1)
family['isalone'] = np.where(familySize >= 1, 1, 0)
family_pivot = family.pivot_table(index='familysize', values='Survived')
isalone_pivot = family.pivot_table(index='isalone', values='Survived')
train['Fare'] = train['Fare'].fillna(train['Fare'].mean())
train['Embarked'] = train['Embarked'].fillna('S')
holdout['Fare'] = holdout['Fare'].fillna(train['Fare'].mean())
holdout['Embarked'] = holdout['Embarked'].fillna('S')
train['Age'] = train['Age'].fillna(-0.5)
holdout['Age'] = holdout['Age'].fillna(-0.5)
cuts = [-1, 0, 5, 12, 18, 35, 60, 100]
labels = ['Missing', 'Infant', 'Child', 'Teenager', 'Young Adult', 'Adult', 'Senior']
train['Age_categories'] = pd.cut(train['Age'], cuts, labels=labels)
holdout['Age_categories'] = pd.cut(holdout['Age'], cuts, labels=labels)
fare_cuts = [-1, 12, 50, 100, 1000]
fare_labels = ['0-12', '12-50', '50-100', '100+']
train['Fare_categories'] = pd.cut(train['Fare'], fare_cuts, labels=fare_labels)
holdout['Fare_categories'] = pd.cut(holdout['Fare'], fare_cuts, labels=fare_labels)
train['Cabin_type'] = train['Cabin'].str[0]
train['Cabin_type'] = train['Cabin_type'].fillna('Unknown')
train = train.drop('Cabin', axis=1)
holdout['Cabin_type'] = holdout['Cabin'].str[0]
holdout['Cabin_type'] = holdout['Cabin_type'].fillna('Unknown')
holdout = holdout.drop('Cabin', axis=1)
titles = {'Mr': 'Mr', 'Mme': 'Mrs', 'Ms': 'Mrs', 'Mrs': 'Mrs', 'Master': 'Master', 'Mlle': 'Miss', 'Miss': 'Miss', 'Capt': 'Officer', 'Col': 'Officer', 'Major': 'Officer', 'Dr': 'Officer', 'Rev': 'Officer', 'Jonkheer': 'Royalty', 'Don': 'Royalty', 'Sir': 'Royalty', 'Countess': 'Royalty', 'Dona': 'Royalty', 'Lady': 'Royalty'}
train_titles = train['Name'].str.extract(' ([A-Za-z]+)\\.', expand=False)
train['Title'] = train_titles.map(titles)
holdout_titles = holdout['Name'].str.extract(' ([A-Za-z]+)\\.', expand=False)
holdout['Title'] = holdout_titles.map(titles)
familySize_train = train[['SibSp', 'Parch']].sum(axis=1)
train['isalone'] = np.where(familySize_train >= 1, 1, 0)
familySize_holdout = holdout[['SibSp', 'Parch']].sum(axis=1)
holdout['isalone'] = np.where(familySize_holdout >= 1, 1, 0)

def get_dummies(df, column_name):
    dummies = pd.get_dummies(df[column_name], prefix=column_name)
    df = pd.concat([df, dummies], axis=1)
    return df

columnNames = ['Age_categories', 'Pclass', 'Sex', 'Fare_categories', 'Title', 'Cabin_type', 'Embarked']
for column in columnNames:
    dummies_train = pd.get_dummies(train[column], prefix=column)
    train = pd.concat([train, dummies_train], axis=1)
    dummies_holdout = pd.get_dummies(holdout[column], prefix=column)
    holdout = pd.concat([holdout, dummies_holdout], axis=1)

train.head(5)
holdout.head(5)
print(holdout.columns)
code
48166874/cell_7
[ "text_plain_output_1.png" ]
from transformers import AutoModelWithLMHead, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('mrm8488/t5-base-finetuned-wikiSQL')
model = AutoModelWithLMHead.from_pretrained('mrm8488/t5-base-finetuned-wikiSQL')
code
48166874/cell_14
[ "text_plain_output_5.png", "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png" ]
import random
from datasets import load_dataset
from transformers import AutoModelWithLMHead, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('mrm8488/t5-base-finetuned-wikiSQL')
model = AutoModelWithLMHead.from_pretrained('mrm8488/t5-base-finetuned-wikiSQL')

def get_sql(query):
    input_text = 'translate English to SQL: %s </s>' % query
    features = tokenizer([input_text], return_tensors='pt')
    output = model.generate(input_ids=features['input_ids'], attention_mask=features['attention_mask'])
    return tokenizer.decode(output[0])

valid_dataset = load_dataset('wikisql', split='validation')
for idx in random.sample(range(len(valid_dataset)), 200):
    print(f"Text: {valid_dataset[idx]['question']}")
    print(f"Pred SQL: {get_sql(valid_dataset[idx]['question'])}")
    print(f"True SQL: {valid_dataset[idx]['sql']['human_readable']}\n")
code
48166874/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
from datasets import load_dataset

valid_dataset = load_dataset('wikisql', split='validation')
code
48166874/cell_5
[ "text_plain_output_1.png" ]
from transformers import AutoModelWithLMHead, AutoTokenizer
from datasets import load_dataset
import random, warnings

warnings.filterwarnings('ignore')
code
2007202/cell_21
[ "text_html_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
categorical = train.dtypes[train.dtypes == 'object'].index
train.isnull().sum()
# extract the title ("Initial") from each passenger's name
train['Initial'] = train.Name.str.extract('([A-Za-z]+)\\.')
test['Initial'] = test.Name.str.extract('([A-Za-z]+)\\.')
train.groupby('Initial')['Age'].mean()
code
2007202/cell_13
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
categorical = train.dtypes[train.dtypes == 'object'].index
train.isnull().sum()
sns.barplot(x='Embarked', y='Survived', hue='Sex', data=train)
code
2007202/cell_9
[ "image_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
categorical = train.dtypes[train.dtypes == 'object'].index
train.isnull().sum()
train[['Pclass', 'Survived']].groupby(['Pclass']).mean().sort_values(by='Survived', ascending=False)
code
2007202/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.describe()
code
2007202/cell_30
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
categorical = train.dtypes[train.dtypes == 'object'].index
train.isnull().sum()
# extract the title ("Initial") from each passenger's name
train['Initial'] = train.Name.str.extract('([A-Za-z]+)\\.')
test['Initial'] = test.Name.str.extract('([A-Za-z]+)\\.')
train.groupby('Initial')['Age'].mean()
# fill missing ages with the mean age of each title group
train.loc[train.Age.isnull() & (train.Initial == 'Mr'), 'Age'] = 33
train.loc[train.Age.isnull() & (train.Initial == 'Mrs'), 'Age'] = 36
train.loc[train.Age.isnull() & (train.Initial == 'Master'), 'Age'] = 5
train.loc[train.Age.isnull() & (train.Initial == 'Miss'), 'Age'] = 22
train.loc[train.Age.isnull() & (train.Initial == 'Other'), 'Age'] = 46
test.loc[test.Age.isnull() & (test.Initial == 'Mr'), 'Age'] = 33
test.loc[test.Age.isnull() & (test.Initial == 'Mrs'), 'Age'] = 36
test.loc[test.Age.isnull() & (test.Initial == 'Master'), 'Age'] = 5
test.loc[test.Age.isnull() & (test.Initial == 'Miss'), 'Age'] = 22
test.loc[test.Age.isnull() & (test.Initial == 'Other'), 'Age'] = 46
sns.distplot(train['Fare'], bins=50)
code
2007202/cell_6
[ "text_html_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
categorical = train.dtypes[train.dtypes == 'object'].index
print(categorical)
train[categorical].describe()
code
2007202/cell_29
[ "image_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
categorical = train.dtypes[train.dtypes == 'object'].index
train.isnull().sum()
# extract the title ("Initial") from each passenger's name
train['Initial'] = train.Name.str.extract('([A-Za-z]+)\\.')
test['Initial'] = test.Name.str.extract('([A-Za-z]+)\\.')
train.groupby('Initial')['Age'].mean()
train.loc[train.Age.isnull() & (train.Initial == 'Mr'), 'Age'] = 33
train.loc[train.Age.isnull() & (train.Initial == 'Mrs'), 'Age'] = 36
train.loc[train.Age.isnull() & (train.Initial == 'Master'), 'Age'] = 5
train.loc[train.Age.isnull() & (train.Initial == 'Miss'), 'Age'] = 22
train.loc[train.Age.isnull() & (train.Initial == 'Other'), 'Age'] = 46
test.loc[test.Age.isnull() & (test.Initial == 'Mr'), 'Age'] = 33
test.loc[test.Age.isnull() & (test.Initial == 'Mrs'), 'Age'] = 36
test.loc[test.Age.isnull() & (test.Initial == 'Master'), 'Age'] = 5
test.loc[test.Age.isnull() & (test.Initial == 'Miss'), 'Age'] = 22
test.loc[test.Age.isnull() & (test.Initial == 'Other'), 'Age'] = 46
train['Fare'].sort_values().unique()
code
2007202/cell_39
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
categorical = train.dtypes[train.dtypes == 'object'].index
train.isnull().sum()
# extract the title ("Initial") from each passenger's name
train['Initial'] = train.Name.str.extract('([A-Za-z]+)\\.')
test['Initial'] = test.Name.str.extract('([A-Za-z]+)\\.')
train.groupby('Initial')['Age'].mean()
train.loc[train.Age.isnull() & (train.Initial == 'Mr'), 'Age'] = 33
train.loc[train.Age.isnull() & (train.Initial == 'Mrs'), 'Age'] = 36
train.loc[train.Age.isnull() & (train.Initial == 'Master'), 'Age'] = 5
train.loc[train.Age.isnull() & (train.Initial == 'Miss'), 'Age'] = 22
train.loc[train.Age.isnull() & (train.Initial == 'Other'), 'Age'] = 46
test.loc[test.Age.isnull() & (test.Initial == 'Mr'), 'Age'] = 33
test.loc[test.Age.isnull() & (test.Initial == 'Mrs'), 'Age'] = 36
test.loc[test.Age.isnull() & (test.Initial == 'Master'), 'Age'] = 5
test.loc[test.Age.isnull() & (test.Initial == 'Miss'), 'Age'] = 22
test.loc[test.Age.isnull() & (test.Initial == 'Other'), 'Age'] = 46
train = train.drop(['Ticket', 'Name'], axis=1)
test = test.drop(['Ticket', 'Name'], axis=1)
train.head()
code
2007202/cell_41
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
categorical = train.dtypes[train.dtypes == 'object'].index
train.isnull().sum()
# extract the title ("Initial") from each passenger's name
train['Initial'] = train.Name.str.extract('([A-Za-z]+)\\.')
test['Initial'] = test.Name.str.extract('([A-Za-z]+)\\.')
train.groupby('Initial')['Age'].mean()
train.loc[train.Age.isnull() & (train.Initial == 'Mr'), 'Age'] = 33
train.loc[train.Age.isnull() & (train.Initial == 'Mrs'), 'Age'] = 36
train.loc[train.Age.isnull() & (train.Initial == 'Master'), 'Age'] = 5
train.loc[train.Age.isnull() & (train.Initial == 'Miss'), 'Age'] = 22
train.loc[train.Age.isnull() & (train.Initial == 'Other'), 'Age'] = 46
test.loc[test.Age.isnull() & (test.Initial == 'Mr'), 'Age'] = 33
test.loc[test.Age.isnull() & (test.Initial == 'Mrs'), 'Age'] = 36
test.loc[test.Age.isnull() & (test.Initial == 'Master'), 'Age'] = 5
test.loc[test.Age.isnull() & (test.Initial == 'Miss'), 'Age'] = 22
test.loc[test.Age.isnull() & (test.Initial == 'Other'), 'Age'] = 46
train = train.drop(['Ticket', 'Name'], axis=1)
test = test.drop(['Ticket', 'Name'], axis=1)
sns.barplot(x='Age', y='Survived', hue='Sex', data=train)
code
2007202/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
categorical = train.dtypes[train.dtypes == 'object'].index
train.isnull().sum()
train[['Parch', 'Survived']].groupby(['Parch']).mean().sort_values(by='Survived', ascending=False)
code
2007202/cell_19
[ "text_html_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
categorical = train.dtypes[train.dtypes == 'object'].index
train.isnull().sum()
# extract the title ("Initial") from each passenger's name
train['Initial'] = train.Name.str.extract('([A-Za-z]+)\\.')
test['Initial'] = test.Name.str.extract('([A-Za-z]+)\\.')
# collapse rare titles into a small set of categories
train['Initial'].replace(['Mlle', 'Mme', 'Ms', 'Dr', 'Major', 'Lady', 'Countess', 'Jonkheer', 'Col', 'Rev', 'Capt', 'Sir', 'Don'], ['Miss', 'Miss', 'Miss', 'Mr', 'Mr', 'Mrs', 'Mrs', 'Other', 'Other', 'Other', 'Mr', 'Mr', 'Mr'], inplace=True)
test['Initial'].replace(['Mr', 'Mrs', 'Miss', 'Master', 'Don', 'Rev', 'Dr', 'Mme'], ['Mr', 'Mrs', 'Miss', 'Master', 'Mr', 'Other', 'Mr', 'Mrs'], inplace=True)
print(train['Initial'].unique())
print(test['Initial'].unique())
code
2007202/cell_7
[ "image_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
categorical = train.dtypes[train.dtypes == 'object'].index
train.isnull().sum()
code
2007202/cell_18
[ "text_html_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
categorical = train.dtypes[train.dtypes == 'object'].index
train.isnull().sum()
# extract the title ("Initial") from each passenger's name
train['Initial'] = train.Name.str.extract('([A-Za-z]+)\\.')
test['Initial'] = test.Name.str.extract('([A-Za-z]+)\\.')
print(train['Initial'].unique())
print(test['Initial'].unique())
code
2007202/cell_3
[ "image_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head()
code
2007202/cell_17
[ "text_html_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
categorical = train.dtypes[train.dtypes == 'object'].index
train.isnull().sum()
# extract the title ("Initial") from each passenger's name
train['Initial'] = train.Name.str.extract('([A-Za-z]+)\\.')
test['Initial'] = test.Name.str.extract('([A-Za-z]+)\\.')
code
2007202/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
categorical = train.dtypes[train.dtypes == 'object'].index
train.isnull().sum()
sns.barplot(x='Pclass', y='Survived', hue='Sex', data=train)
code
2007202/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
categorical = train.dtypes[train.dtypes == 'object'].index
train.isnull().sum()
train[['Sex', 'Survived']].groupby(['Sex']).mean().sort_values(by='Survived', ascending=False)
code
2007202/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
categorical = train.dtypes[train.dtypes == 'object'].index
train.isnull().sum()
train[['SibSp', 'Survived']].groupby(['SibSp']).mean().sort_values(by='Survived', ascending=False)
code
2007202/cell_5
[ "text_html_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.info()
code
2007202/cell_36
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
categorical = train.dtypes[train.dtypes == 'object'].index
train.isnull().sum()
# extract the title ("Initial") from each passenger's name
train['Initial'] = train.Name.str.extract('([A-Za-z]+)\\.')
test['Initial'] = test.Name.str.extract('([A-Za-z]+)\\.')
train.groupby('Initial')['Age'].mean()
train.loc[train.Age.isnull() & (train.Initial == 'Mr'), 'Age'] = 33
train.loc[train.Age.isnull() & (train.Initial == 'Mrs'), 'Age'] = 36
train.loc[train.Age.isnull() & (train.Initial == 'Master'), 'Age'] = 5
train.loc[train.Age.isnull() & (train.Initial == 'Miss'), 'Age'] = 22
train.loc[train.Age.isnull() & (train.Initial == 'Other'), 'Age'] = 46
test.loc[test.Age.isnull() & (test.Initial == 'Mr'), 'Age'] = 33
test.loc[test.Age.isnull() & (test.Initial == 'Mrs'), 'Age'] = 36
test.loc[test.Age.isnull() & (test.Initial == 'Master'), 'Age'] = 5
test.loc[test.Age.isnull() & (test.Initial == 'Miss'), 'Age'] = 22
test.loc[test.Age.isnull() & (test.Initial == 'Other'), 'Age'] = 46
# bin ages into named categories
bins = (0, 5, 12, 18, 25, 35, 60, 120)
group_names = ['Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
categories = pd.cut(train['Age'], bins, labels=group_names)
train['Age'] = categories
categories = pd.cut(test['Age'], bins, labels=group_names)
test['Age'] = categories
# bin fares into quartiles
group_names = ['1Q', '2Q', '3Q', '4Q']
quartiles = pd.qcut(train['Fare'], 4, labels=group_names)
train['Fare'] = quartiles
quartiles_test = pd.qcut(test['Fare'], 4, labels=group_names)
test['Fare'] = quartiles_test
train = train.drop(['Ticket', 'Name'], axis=1)
test = test.drop(['Ticket', 'Name'], axis=1)
pd.crosstab(index=train['Embarked'], columns='count')
code
32063079/cell_8
[ "image_output_1.png" ]
from ipywidgets import interact, interact_manual, fixed
from pathlib import Path
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_squared_error
import cufflinks as cf
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

pd.set_option('display.max_rows', 500)
pd.set_option('use_inf_as_na', True)
cf.set_config_file(offline=True, theme='solar')
path = Path('../input/novel-corona-virus-2019-dataset/')
master_df = pd.read_csv(path / 'covid_19_data.csv')
recovered_df = pd.read_csv(path / 'time_series_covid_19_recovered.csv').drop(columns=['Lat', 'Long']).groupby('Country/Region').sum()
deaths_df = pd.read_csv(path / 'time_series_covid_19_deaths.csv').drop(columns=['Lat', 'Long']).groupby('Country/Region').sum()
confirmed_df = pd.read_csv(path / 'time_series_covid_19_confirmed.csv').drop(columns=['Lat', 'Long']).groupby('Country/Region').sum()
sorted_country_list = confirmed_df.sort_values(by=confirmed_df.columns[-1], ascending=False).index.to_list()

@interact(country=sorted_country_list, threshold=(0, 1000, 10))
def log_lin_visualise(country, threshold=100):
    y = confirmed_df.filter(items=[country], axis=0).values.squeeze(0)
    y = np.log(y[y > threshold])
    x = np.arange(1, y.shape[0] + 1)
    lr = LinearRegression(normalize=True)
    lr.fit(X=x.reshape(-1, 1), y=y)
    y_fitted = lr.predict(X=x.reshape(-1, 1))
    print(f'r2_score = {round(r2_score(y, y_fitted), 2)}')
    print(f'mean_squared_error = {round(mean_squared_error(y, y_fitted), 2)}')
    plt.figure(figsize=(10, 5))
    plt.plot(x, y, label='Actual')
    plt.plot(x, y_fitted, label='Linear Regression')
    plt.xlabel(f'Days Since {threshold}th Case')
    plt.ylabel('Natural Logarithm of Confirmed Cases')
    plt.legend()
    plt.title(country)
    plt.show()

plt.close()
code
32063079/cell_15
[ "text_plain_output_1.png" ]
from ipywidgets import interact, interact_manual, fixed
from pathlib import Path
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_squared_error
import arviz as az
import cufflinks as cf
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymc3 as pm

pd.set_option('display.max_rows', 500)
pd.set_option('use_inf_as_na', True)
cf.set_config_file(offline=True, theme='solar')
path = Path('../input/novel-corona-virus-2019-dataset/')
master_df = pd.read_csv(path / 'covid_19_data.csv')
recovered_df = pd.read_csv(path / 'time_series_covid_19_recovered.csv').drop(columns=['Lat', 'Long']).groupby('Country/Region').sum()
deaths_df = pd.read_csv(path / 'time_series_covid_19_deaths.csv').drop(columns=['Lat', 'Long']).groupby('Country/Region').sum()
confirmed_df = pd.read_csv(path / 'time_series_covid_19_confirmed.csv').drop(columns=['Lat', 'Long']).groupby('Country/Region').sum()
sorted_country_list = confirmed_df.sort_values(by=confirmed_df.columns[-1], ascending=False).index.to_list()

@interact(country=sorted_country_list, threshold=(0, 1000, 10))
def log_lin_visualise(country, threshold=100):
    y = confirmed_df.filter(items=[country], axis=0).values.squeeze(0)
    y = np.log(y[y > threshold])
    x = np.arange(1, y.shape[0] + 1)
    lr = LinearRegression(normalize=True)
    lr.fit(X=x.reshape(-1, 1), y=y)
    y_fitted = lr.predict(X=x.reshape(-1, 1))

plt.close()
country = 'US'
threshold = 100
y = confirmed_df.filter(items=[country], axis=0).values.squeeze(0)
y = np.log(y[y > threshold])
x = np.arange(1, y.shape[0] + 1)
with pm.Model() as unpooled_model:
    α = pm.Normal(name='α', mu=int(np.log(threshold)), sd=10)
    β = pm.Normal(name='β')
    σ = pm.HalfNormal(name='σ', sd=10)
    μ = pm.Deterministic(name='μ', var=α + β * x)
    pm.Normal(name=country, mu=μ, sd=σ, observed=y)
pm.model_to_graphviz(unpooled_model)
with unpooled_model:
    prior = pm.sample_prior_predictive()
    trace = pm.sample()
    pred = pm.sample_posterior_predictive(trace)
unpooled = az.from_pymc3(trace=trace, prior=prior, posterior_predictive=pred)
prior_vars = ['α', 'β', 'σ']
unpooled
az.summary(unpooled)
az.plot_trace(data=unpooled, var_names=prior_vars)
code
32063079/cell_16
[ "text_html_output_1.png" ]
from ipywidgets import interact, interact_manual, fixed
from pathlib import Path
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_squared_error
import arviz as az
import cufflinks as cf
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymc3 as pm

pd.set_option('display.max_rows', 500)
pd.set_option('use_inf_as_na', True)
cf.set_config_file(offline=True, theme='solar')
path = Path('../input/novel-corona-virus-2019-dataset/')
master_df = pd.read_csv(path / 'covid_19_data.csv')
recovered_df = pd.read_csv(path / 'time_series_covid_19_recovered.csv').drop(columns=['Lat', 'Long']).groupby('Country/Region').sum()
deaths_df = pd.read_csv(path / 'time_series_covid_19_deaths.csv').drop(columns=['Lat', 'Long']).groupby('Country/Region').sum()
confirmed_df = pd.read_csv(path / 'time_series_covid_19_confirmed.csv').drop(columns=['Lat', 'Long']).groupby('Country/Region').sum()
sorted_country_list = confirmed_df.sort_values(by=confirmed_df.columns[-1], ascending=False).index.to_list()

@interact(country=sorted_country_list, threshold=(0, 1000, 10))
def log_lin_visualise(country, threshold=100):
    y = confirmed_df.filter(items=[country], axis=0).values.squeeze(0)
    y = np.log(y[y > threshold])
    x = np.arange(1, y.shape[0] + 1)
    lr = LinearRegression(normalize=True)
    lr.fit(X=x.reshape(-1, 1), y=y)
    y_fitted = lr.predict(X=x.reshape(-1, 1))

plt.close()
country = 'US'
threshold = 100
y = confirmed_df.filter(items=[country], axis=0).values.squeeze(0)
y = np.log(y[y > threshold])
x = np.arange(1, y.shape[0] + 1)
with pm.Model() as unpooled_model:
    α = pm.Normal(name='α', mu=int(np.log(threshold)), sd=10)
    β = pm.Normal(name='β')
    σ = pm.HalfNormal(name='σ', sd=10)
    μ = pm.Deterministic(name='μ', var=α + β * x)
    pm.Normal(name=country, mu=μ, sd=σ, observed=y)
pm.model_to_graphviz(unpooled_model)
with unpooled_model:
    prior = pm.sample_prior_predictive()
    trace = pm.sample()
    pred = pm.sample_posterior_predictive(trace)
unpooled = az.from_pymc3(trace=trace, prior=prior, posterior_predictive=pred)
prior_vars = ['α', 'β', 'σ']
unpooled
az.summary(unpooled)
az.plot_posterior(data=unpooled, var_names=prior_vars, group='posterior')
code
32063079/cell_3
[ "image_output_1.png" ]
from pathlib import Path
import cufflinks as cf
import pandas as pd

pd.set_option('display.max_rows', 500)
pd.set_option('use_inf_as_na', True)
cf.set_config_file(offline=True, theme='solar')
path = Path('../input/novel-corona-virus-2019-dataset/')
code
32063079/cell_17
[ "image_output_1.png" ]
from ipywidgets import interact, interact_manual, fixed
from pathlib import Path
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_squared_error
import arviz as az
import cufflinks as cf
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymc3 as pm

pd.set_option('display.max_rows', 500)
pd.set_option('use_inf_as_na', True)
cf.set_config_file(offline=True, theme='solar')
path = Path('../input/novel-corona-virus-2019-dataset/')
master_df = pd.read_csv(path / 'covid_19_data.csv')
recovered_df = pd.read_csv(path / 'time_series_covid_19_recovered.csv').drop(columns=['Lat', 'Long']).groupby('Country/Region').sum()
deaths_df = pd.read_csv(path / 'time_series_covid_19_deaths.csv').drop(columns=['Lat', 'Long']).groupby('Country/Region').sum()
confirmed_df = pd.read_csv(path / 'time_series_covid_19_confirmed.csv').drop(columns=['Lat', 'Long']).groupby('Country/Region').sum()
sorted_country_list = confirmed_df.sort_values(by=confirmed_df.columns[-1], ascending=False).index.to_list()

@interact(country=sorted_country_list, threshold=(0, 1000, 10))
def log_lin_visualise(country, threshold=100):
    y = confirmed_df.filter(items=[country], axis=0).values.squeeze(0)
    y = np.log(y[y > threshold])
    x = np.arange(1, y.shape[0] + 1)
    lr = LinearRegression(normalize=True)
    lr.fit(X=x.reshape(-1, 1), y=y)
    y_fitted = lr.predict(X=x.reshape(-1, 1))

plt.close()
country = 'US'
threshold = 100
y = confirmed_df.filter(items=[country], axis=0).values.squeeze(0)
y = np.log(y[y > threshold])
x = np.arange(1, y.shape[0] + 1)
with pm.Model() as unpooled_model:
    α = pm.Normal(name='α', mu=int(np.log(threshold)), sd=10)
    β = pm.Normal(name='β')
    σ = pm.HalfNormal(name='σ', sd=10)
    μ = pm.Deterministic(name='μ', var=α + β * x)
    pm.Normal(name=country, mu=μ, sd=σ, observed=y)
pm.model_to_graphviz(unpooled_model)
with unpooled_model:
    prior = pm.sample_prior_predictive()
    trace = pm.sample()
    pred = pm.sample_posterior_predictive(trace)
unpooled = az.from_pymc3(trace=trace, prior=prior, posterior_predictive=pred)
prior_vars = ['α', 'β', 'σ']
unpooled
az.summary(unpooled)
az.plot_posterior(data=unpooled, var_names=prior_vars, group='prior')
code
32063079/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
from ipywidgets import interact, interact_manual, fixed
from pathlib import Path
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_squared_error
import arviz as az
import cufflinks as cf
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymc3 as pm

pd.set_option('display.max_rows', 500)
pd.set_option('use_inf_as_na', True)
cf.set_config_file(offline=True, theme='solar')
path = Path('../input/novel-corona-virus-2019-dataset/')
master_df = pd.read_csv(path / 'covid_19_data.csv')
recovered_df = pd.read_csv(path / 'time_series_covid_19_recovered.csv').drop(columns=['Lat', 'Long']).groupby('Country/Region').sum()
deaths_df = pd.read_csv(path / 'time_series_covid_19_deaths.csv').drop(columns=['Lat', 'Long']).groupby('Country/Region').sum()
confirmed_df = pd.read_csv(path / 'time_series_covid_19_confirmed.csv').drop(columns=['Lat', 'Long']).groupby('Country/Region').sum()
sorted_country_list = confirmed_df.sort_values(by=confirmed_df.columns[-1], ascending=False).index.to_list()

@interact(country=sorted_country_list, threshold=(0, 1000, 10))
def log_lin_visualise(country, threshold=100):
    y = confirmed_df.filter(items=[country], axis=0).values.squeeze(0)
    y = np.log(y[y > threshold])
    x = np.arange(1, y.shape[0] + 1)
    lr = LinearRegression(normalize=True)
    lr.fit(X=x.reshape(-1, 1), y=y)
    y_fitted = lr.predict(X=x.reshape(-1, 1))

plt.close()
country = 'US'
threshold = 100
y = confirmed_df.filter(items=[country], axis=0).values.squeeze(0)
y = np.log(y[y > threshold])
x = np.arange(1, y.shape[0] + 1)
with pm.Model() as unpooled_model:
    α = pm.Normal(name='α', mu=int(np.log(threshold)), sd=10)
    β = pm.Normal(name='β')
    σ = pm.HalfNormal(name='σ', sd=10)
    μ = pm.Deterministic(name='μ', var=α + β * x)
    pm.Normal(name=country, mu=μ, sd=σ, observed=y)
pm.model_to_graphviz(unpooled_model)
with unpooled_model:
    prior = pm.sample_prior_predictive()
    trace = pm.sample()
    pred = pm.sample_posterior_predictive(trace)
unpooled = az.from_pymc3(trace=trace, prior=prior, posterior_predictive=pred)
prior_vars = ['α', 'β', 'σ']
unpooled
az.summary(unpooled)
code
32063079/cell_12
[ "text_plain_output_1.png" ]
from ipywidgets import interact, interact_manual, fixed from pathlib import Path from sklearn.linear_model import LinearRegression from sklearn.metrics import r2_score, mean_squared_error import cufflinks as cf import matplotlib.pyplot as plt import numpy as np import pandas as pd import pymc3 as pm pd.set_option('display.max_rows', 500) pd.set_option('use_inf_as_na', True) cf.set_config_file(offline=True, theme='solar') path = Path('../input/novel-corona-virus-2019-dataset/') master_df = pd.read_csv(path / 'covid_19_data.csv') recovered_df = pd.read_csv(path / 'time_series_covid_19_recovered.csv').drop(columns=['Lat', 'Long']).groupby('Country/Region').sum() deaths_df = pd.read_csv(path / 'time_series_covid_19_deaths.csv').drop(columns=['Lat', 'Long']).groupby('Country/Region').sum() confirmed_df = pd.read_csv(path / 'time_series_covid_19_confirmed.csv').drop(columns=['Lat', 'Long']).groupby('Country/Region').sum() sorted_country_list = confirmed_df.sort_values(by=confirmed_df.columns[-1], ascending=False).index.to_list() @interact(country=sorted_country_list, threshold=(0, 1000, 10)) def log_lin_visualise(country, threshold=100): y = confirmed_df.filter(items=[country], axis=0).values.squeeze(0) y = np.log(y[y > threshold]) x = np.arange(1, y.shape[0] + 1) lr = LinearRegression(normalize=True) lr.fit(X=x.reshape(-1, 1), y=y) y_fitted = lr.predict(X=x.reshape(-1, 1)) plt.close() country = 'US' threshold = 100 y = confirmed_df.filter(items=[country], axis=0).values.squeeze(0) y = np.log(y[y > threshold]) x = np.arange(1, y.shape[0] + 1) with pm.Model() as unpooled_model: α = pm.Normal(name='α', mu=int(np.log(threshold)), sd=10) β = pm.Normal(name='β') σ = pm.HalfNormal(name='σ', sd=10) μ = pm.Deterministic(name='μ', var=α + β * x) pm.Normal(name=country, mu=μ, sd=σ, observed=y) pm.model_to_graphviz(unpooled_model) with unpooled_model: prior = pm.sample_prior_predictive() trace = pm.sample() pred = pm.sample_posterior_predictive(trace)
code
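The unpooled model above fits log cases as α + β·x, so the posterior slope converts directly into a doubling time via doubling time = ln 2 / β. A minimal follow-up sketch, assuming the `trace` sampled above (only the variable names from the model are reused; everything else is illustrative):
import numpy as np

# posterior samples of the daily log-growth rate
beta_samples = trace['β']

# a constant slope of β per day doubles the count every ln(2)/β days
doubling_days = np.log(2) / beta_samples
print('posterior mean doubling time: {:.1f} days'.format(doubling_days.mean()))
print('94% interval: {:.1f} to {:.1f} days'.format(np.percentile(doubling_days, 3), np.percentile(doubling_days, 97)))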
1002861/cell_4
[ "image_output_1.png" ]
from collections import Counter import matplotlib.pyplot as plt import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import tensorflow as tf import tensorflow as tf import numpy as np video_lvl_record = '../input/video_level/train-1.tfrecord' frame_lvl_record = '../input/frame_level/train-1.tfrecord' vid_ids = [] labels = [] mean_rgb = [] mean_audio = [] for example in tf.python_io.tf_record_iterator(video_lvl_record): tf_example = tf.train.Example.FromString(example) vid_ids.append(tf_example.features.feature['video_id'].bytes_list.value[0].decode(encoding='UTF-8')) labels.append(tf_example.features.feature['labels'].int64_list.value) mean_rgb.append(tf_example.features.feature['mean_rgb'].float_list.value) mean_audio.append(tf_example.features.feature['mean_audio'].float_list.value) n = 20 from collections import Counter label_mapping = pd.Series.from_csv('../input/label_names.csv', header=0) label_dict = label_mapping.to_dict() top_n = Counter([item for sublist in labels for item in sublist]).most_common(n) top_n_labels = [int(i[0]) for i in top_n] top_n_label_count = [int(i[1]) for i in top_n] top_n_label_names = [label_dict[x] for x in top_n_labels] top_labels = pd.DataFrame(data=top_n_labels, columns=['label_num']) top_labels['count'] = top_n_label_count top_labels['label_name'] = top_n_label_names top_labels = top_labels.drop('label_num', axis=1) import seaborn as sns import matplotlib.pyplot as plt ax = sns.barplot(x='label_name', y='count', data=top_labels) ax.set(xlabel='Label Name', ylabel='Label Count') ax.set_xticklabels(ax.xaxis.get_majorticklabels(), rotation=45) _ = plt.show()
code
1002861/cell_6
[ "text_plain_output_1.png" ]
import tensorflow as tf import tensorflow as tf import numpy as np video_lvl_record = '../input/video_level/train-1.tfrecord' frame_lvl_record = '../input/frame_level/train-1.tfrecord' vid_ids = [] labels = [] mean_rgb = [] mean_audio = [] for example in tf.python_io.tf_record_iterator(video_lvl_record): tf_example = tf.train.Example.FromString(example) vid_ids.append(tf_example.features.feature['video_id'].bytes_list.value[0].decode(encoding='UTF-8')) labels.append(tf_example.features.feature['labels'].int64_list.value) mean_rgb.append(tf_example.features.feature['mean_rgb'].float_list.value) mean_audio.append(tf_example.features.feature['mean_audio'].float_list.value) test = labels[:5] label_test = [item for sublist in test for item in sublist] label_test
code
1002861/cell_2
[ "text_plain_output_1.png" ]
from subprocess import check_output import numpy as np import pandas as pd from subprocess import check_output print(check_output(['ls', '../input']).decode('utf8'))
code
1002861/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import tensorflow as tf import tensorflow as tf import numpy as np video_lvl_record = '../input/video_level/train-1.tfrecord' frame_lvl_record = '../input/frame_level/train-1.tfrecord' vid_ids = [] labels = [] mean_rgb = [] mean_audio = [] for example in tf.python_io.tf_record_iterator(video_lvl_record): tf_example = tf.train.Example.FromString(example) vid_ids.append(tf_example.features.feature['video_id'].bytes_list.value[0].decode(encoding='UTF-8')) labels.append(tf_example.features.feature['labels'].int64_list.value) mean_rgb.append(tf_example.features.feature['mean_rgb'].float_list.value) mean_audio.append(tf_example.features.feature['mean_audio'].float_list.value)
code
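frame_lvl_record is opened above but never read; a sketch of how the per-frame features could be inspected with the same TF1 record iterator, assuming the standard YouTube-8M SequenceExample layout (the 'rgb' feature list holds one quantized embedding per frame; the cap of 10 videos is only to keep the loop cheap):
frame_counts = []
for example in tf.python_io.tf_record_iterator(frame_lvl_record):
    seq_example = tf.train.SequenceExample.FromString(example)
    # each entry in the 'rgb' feature list is one frame's quantized embedding
    rgb_frames = seq_example.feature_lists.feature_list['rgb'].feature
    frame_counts.append(len(rgb_frames))
    if len(frame_counts) >= 10:
        break
print(frame_counts)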
17131741/cell_3
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
df = pd.read_csv('../input/raw_lemonade_data.csv')
df['Date'] = pd.to_datetime(df['Date'])
# strip the currency symbol and spaces before casting; both replacements need .str,
# otherwise the second replace runs on the Series itself and does nothing useful
df['Price'] = df.Price.str.replace('$', '', regex=False).str.replace(' ', '', regex=False)
df['Price'] = df.Price.astype(np.float64)
df = df.set_index(df['Date'])
df = df.drop('Date', 1)
df['Revenue'] = df.Price * df.Sales
df = df[['Revenue', 'Temperature', 'Rainfall', 'Flyers']]
df.head()
code
128033347/cell_21
[ "text_plain_output_1.png" ]
from sklearn import metrics from sklearn.linear_model import LinearRegression from sklearn.linear_model import LinearRegression import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv') train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv') train.dropna(inplace=True) xtrain = train.x.values.reshape(-1, 1) ytrain = train.y.values.reshape(-1, 1) lm = LinearRegression() lm.fit(xtrain, ytrain) xtest = test.x.values.reshape(-1, 1) ytest = test.y.values.reshape(-1, 1) prediction = lm.predict(xtest) from sklearn import metrics print('MAE:', metrics.mean_absolute_error(ytest, prediction)) print('MSE:', metrics.mean_squared_error(ytest, prediction)) print('RMSE:', np.sqrt(metrics.mean_squared_error(ytest, prediction)))
code
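Beyond the MAE/MSE/RMSE printed in the cell above, a one-line R² check reports the fraction of variance the fitted line explains; a sketch, assuming ytest and prediction from that cell:
from sklearn import metrics

# 1.0 is a perfect fit; values near 0 mean the model explains little variance
print('R2:', metrics.r2_score(ytest, prediction))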
128033347/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv') train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv') train.dropna(inplace=True) train.info()
code
128033347/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv') train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv') train.dropna(inplace=True) sns.scatterplot(train, x='x', y='y')
code
128033347/cell_19
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv') train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv') train.dropna(inplace=True) xtrain = train.x.values.reshape(-1, 1) ytrain = train.y.values.reshape(-1, 1) lm = LinearRegression() lm.fit(xtrain, ytrain) xtest = test.x.values.reshape(-1, 1) ytest = test.y.values.reshape(-1, 1) prediction = lm.predict(xtest) sns.distplot(ytest - prediction, bins=50)
code
128033347/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
128033347/cell_7
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv') train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv') train.info()
code
128033347/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv')
train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv')
train.dropna(inplace=True)
xtrain = train.x.values.reshape(-1, 1)
ytrain = train.y.values.reshape(-1, 1)
lm = LinearRegression()
lm.fit(xtrain, ytrain)
xtest = test.x.values.reshape(-1, 1)
ytest = test.y.values.reshape(-1, 1)
prediction = lm.predict(xtest)
plt.scatter(prediction, ytest)
# label the axes by calling the functions; assigning to plt.xlabel would overwrite them
plt.xlabel('prediction')
plt.ylabel('y test')
code
128033347/cell_15
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv') train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv') train.dropna(inplace=True) xtrain = train.x.values.reshape(-1, 1) ytrain = train.y.values.reshape(-1, 1) lm = LinearRegression() lm.fit(xtrain, ytrain) print(lm.coef_)
code
128033347/cell_3
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import seaborn as sns
code
128033347/cell_14
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv') train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv') train.dropna(inplace=True) xtrain = train.x.values.reshape(-1, 1) ytrain = train.y.values.reshape(-1, 1) lm = LinearRegression() lm.fit(xtrain, ytrain)
code
128033347/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) test = pd.read_csv('/kaggle/input/random-linear-regression/test.csv') train = pd.read_csv('/kaggle/input/random-linear-regression/train.csv') test.describe()
code
73097245/cell_13
[ "text_html_output_1.png" ]
from numpy.linalg import norm from scipy.sparse import coo_matrix from surprise import Dataset,Reader,SVD import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) from surprise import Dataset, Reader, SVD reader = Reader() ratings = pd.read_csv('../input/the-movies-dataset/ratings_small.csv') rows = ratings.userId.unique() columns = ratings.movieId.unique() columns = np.sort(columns) myData = np.array([0.0 for i in range(671 * 9066)]) mydf = pd.DataFrame(myData.reshape(671, -1)) mydf.columns = columns mydf.index = rows mydf for i in range(100004): mydf.loc[ratings.loc[i, 'userId'], ratings.loc[i, 'movieId']] = ratings.loc[i, 'rating'] mydf user_list = list(mydf.index) movie_list = list(mydf.columns) from scipy.sparse import coo_matrix R = coo_matrix(mydf.values) M, N = R.shape K = 3 P = np.random.rand(M, K) Q = np.random.rand(K, N) R.data R.row R.col from numpy.linalg import norm def error(R, P, Q, lamda=0.02): ratings = R.data rows = R.row cols = R.col e = 0 for ui in range(len(ratings)): rui = ratings[ui] u = rows[ui] i = cols[ui] if rui > 0: e = e + pow(rui - np.dot(P[u, :], Q[:, i]), 2) + lamda * (pow(norm(P[u, :]), 2) + pow(norm(Q[:, i]), 2)) return e error(R, P, Q)
code
73097245/cell_9
[ "text_plain_output_1.png" ]
from scipy.sparse import coo_matrix from surprise import Dataset,Reader,SVD import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) from surprise import Dataset, Reader, SVD reader = Reader() ratings = pd.read_csv('../input/the-movies-dataset/ratings_small.csv') rows = ratings.userId.unique() columns = ratings.movieId.unique() columns = np.sort(columns) myData = np.array([0.0 for i in range(671 * 9066)]) mydf = pd.DataFrame(myData.reshape(671, -1)) mydf.columns = columns mydf.index = rows mydf for i in range(100004): mydf.loc[ratings.loc[i, 'userId'], ratings.loc[i, 'movieId']] = ratings.loc[i, 'rating'] mydf user_list = list(mydf.index) movie_list = list(mydf.columns) from scipy.sparse import coo_matrix R = coo_matrix(mydf.values) print('R Shape::', R.shape) print('R Columns::', R.col) print('R Rows::', R.row)
code
73097245/cell_6
[ "text_plain_output_1.png" ]
from surprise import Dataset,Reader,SVD import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) from surprise import Dataset, Reader, SVD reader = Reader() ratings = pd.read_csv('../input/the-movies-dataset/ratings_small.csv') rows = ratings.userId.unique() columns = ratings.movieId.unique() columns = np.sort(columns) ratings
code
73097245/cell_11
[ "text_plain_output_1.png" ]
from scipy.sparse import coo_matrix from surprise import Dataset,Reader,SVD import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) from surprise import Dataset, Reader, SVD reader = Reader() ratings = pd.read_csv('../input/the-movies-dataset/ratings_small.csv') rows = ratings.userId.unique() columns = ratings.movieId.unique() columns = np.sort(columns) myData = np.array([0.0 for i in range(671 * 9066)]) mydf = pd.DataFrame(myData.reshape(671, -1)) mydf.columns = columns mydf.index = rows mydf for i in range(100004): mydf.loc[ratings.loc[i, 'userId'], ratings.loc[i, 'movieId']] = ratings.loc[i, 'rating'] mydf user_list = list(mydf.index) movie_list = list(mydf.columns) from scipy.sparse import coo_matrix R = coo_matrix(mydf.values) M, N = R.shape K = 3 P = np.random.rand(M, K) Q = np.random.rand(K, N) R.data R.row R.col
code
73097245/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
73097245/cell_7
[ "text_plain_output_1.png" ]
from surprise import Dataset,Reader,SVD import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) from surprise import Dataset, Reader, SVD reader = Reader() ratings = pd.read_csv('../input/the-movies-dataset/ratings_small.csv') rows = ratings.userId.unique() columns = ratings.movieId.unique() columns = np.sort(columns) myData = np.array([0.0 for i in range(671 * 9066)]) mydf = pd.DataFrame(myData.reshape(671, -1)) mydf.columns = columns mydf.index = rows mydf for i in range(100004): mydf.loc[ratings.loc[i, 'userId'], ratings.loc[i, 'movieId']] = ratings.loc[i, 'rating'] mydf
code
73097245/cell_3
[ "text_html_output_1.png" ]
from surprise import Dataset,Reader,SVD import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) from surprise import Dataset, Reader, SVD reader = Reader() ratings = pd.read_csv('../input/the-movies-dataset/ratings_small.csv') rows = ratings.userId.unique() columns = ratings.movieId.unique() print(len(rows)) print(len(columns)) columns = np.sort(columns)
code
73097245/cell_14
[ "text_html_output_1.png" ]
from numpy.linalg import norm from scipy.sparse import coo_matrix from surprise import Dataset,Reader,SVD import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) from surprise import Dataset, Reader, SVD reader = Reader() ratings = pd.read_csv('../input/the-movies-dataset/ratings_small.csv') rows = ratings.userId.unique() columns = ratings.movieId.unique() columns = np.sort(columns) myData = np.array([0.0 for i in range(671 * 9066)]) mydf = pd.DataFrame(myData.reshape(671, -1)) mydf.columns = columns mydf.index = rows mydf for i in range(100004): mydf.loc[ratings.loc[i, 'userId'], ratings.loc[i, 'movieId']] = ratings.loc[i, 'rating'] mydf user_list = list(mydf.index) movie_list = list(mydf.columns) from scipy.sparse import coo_matrix R = coo_matrix(mydf.values) M, N = R.shape K = 3 P = np.random.rand(M, K) Q = np.random.rand(K, N) R.data R.row R.col from numpy.linalg import norm def error(R, P, Q, lamda=0.02): ratings = R.data rows = R.row cols = R.col e = 0 for ui in range(len(ratings)): rui = ratings[ui] u = rows[ui] i = cols[ui] if rui > 0: e = e + pow(rui - np.dot(P[u, :], Q[:, i]), 2) + lamda * (pow(norm(P[u, :]), 2) + pow(norm(Q[:, i]), 2)) return e rmse = np.sqrt(error(R, P, Q) / len(R.data)) rmse
code
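The cells above only evaluate randomly initialized factors P and Q; learning them is the missing step. A minimal stochastic-gradient sketch against the same regularized squared error, assuming R, P, Q and error from above (the learning rate and epoch count are illustrative choices, not from the source):
def sgd_epoch(R, P, Q, lamda=0.02, lr=0.01):
    # one pass over the observed ratings, nudging the user and item factors
    for rui, u, i in zip(R.data, R.row, R.col):
        eui = rui - np.dot(P[u, :], Q[:, i])
        pu = P[u, :].copy()
        P[u, :] += lr * (eui * Q[:, i] - lamda * pu)
        Q[:, i] += lr * (eui * pu - lamda * Q[:, i])
    return P, Q

for epoch in range(10):
    P, Q = sgd_epoch(R, P, Q)
    print(epoch, np.sqrt(error(R, P, Q) / len(R.data)))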
73097245/cell_5
[ "text_plain_output_1.png" ]
from surprise import Dataset,Reader,SVD import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) from surprise import Dataset, Reader, SVD reader = Reader() ratings = pd.read_csv('../input/the-movies-dataset/ratings_small.csv') rows = ratings.userId.unique() columns = ratings.movieId.unique() columns = np.sort(columns) myData = np.array([0.0 for i in range(671 * 9066)]) mydf = pd.DataFrame(myData.reshape(671, -1)) mydf.columns = columns mydf.index = rows mydf
code
17118943/cell_23
[ "text_plain_output_1.png" ]
import keras as K
import numpy as np
import pandas as pd
import tensorflow as tf
np.random.seed(4)
tf.set_random_seed(13)
dfBoston = pd.read_csv('../input/boston_mm_tab.csv', header=None)
dfBoston
X = dfBoston[np.arange(0, 13)]
y = dfBoston[13]
# assumed split (the notebook's train/test split cell is not shown here)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
tf.logging.set_verbosity(tf.logging.ERROR)
init = K.initializers.RandomUniform(seed=1)
simple_sgd = K.optimizers.SGD(lr=0.01)
model = K.models.Sequential()
model.add(K.layers.Dense(units=10, input_dim=13, kernel_initializer=init, activation='tanh'))
model.add(K.layers.Dense(units=10, kernel_initializer=init, activation='tanh'))
model.add(K.layers.Dense(units=1, kernel_initializer=init, activation=None))
model.compile(loss='mean_squared_error', optimizer=simple_sgd, metrics=['mse'])
batch_size = 8
max_epochs = 500
h = model.fit(X_train, y_train, batch_size=batch_size, epochs=max_epochs, shuffle=True, verbose=1)
y_pred = model.predict(X_train)
y_d = np.array(y_train).reshape(-1, 1)
results = abs(y_pred - y_d) < np.abs(0.15 * y_d)
results
acc = np.sum(results) / len(results)
y_pred = model.predict(X_test)
y_d = np.array(y_test).reshape(-1, 1)
results = abs(y_pred - y_d) < np.abs(0.15 * y_d)
results
acc = np.sum(results) / len(results)
eval = model.evaluate(X_train, y_train, verbose=0)
eval = model.evaluate(X_test, y_test, verbose=0)
np.set_printoptions(precision=4)
unknown = np.full(shape=(1, 13), fill_value=0.6, dtype=np.float32)
unknown[0][3] = -1.0
predicted = model.predict(unknown)
print('Using the model to predict the median house price for the features: ')
print(unknown)
print('\nThe predicted median price [dollars]: ')
print(predicted * 10000)
code
17118943/cell_19
[ "text_plain_output_1.png" ]
import keras as K
import numpy as np
import pandas as pd
import tensorflow as tf
np.random.seed(4)
tf.set_random_seed(13)
dfBoston = pd.read_csv('../input/boston_mm_tab.csv', header=None)
dfBoston
X = dfBoston[np.arange(0, 13)]
y = dfBoston[13]
# assumed split (the notebook's train/test split cell is not shown here)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
tf.logging.set_verbosity(tf.logging.ERROR)
init = K.initializers.RandomUniform(seed=1)
simple_sgd = K.optimizers.SGD(lr=0.01)
model = K.models.Sequential()
model.add(K.layers.Dense(units=10, input_dim=13, kernel_initializer=init, activation='tanh'))
model.add(K.layers.Dense(units=10, kernel_initializer=init, activation='tanh'))
model.add(K.layers.Dense(units=1, kernel_initializer=init, activation=None))
model.compile(loss='mean_squared_error', optimizer=simple_sgd, metrics=['mse'])
batch_size = 8
max_epochs = 500
h = model.fit(X_train, y_train, batch_size=batch_size, epochs=max_epochs, shuffle=True, verbose=1)
y_pred = model.predict(X_train)
y_d = np.array(y_train).reshape(-1, 1)
results = abs(y_pred - y_d) < np.abs(0.15 * y_d)
results
acc = np.sum(results) / len(results)
y_pred = model.predict(X_test)
y_d = np.array(y_test).reshape(-1, 1)
results = abs(y_pred - y_d) < np.abs(0.15 * y_d)
results
acc = np.sum(results) / len(results)
eval = model.evaluate(X_train, y_train, verbose=0)
print('Mean error on the training set {0:.4f}'.format(eval[0]))
eval = model.evaluate(X_test, y_test, verbose=0)
print('Mean error on the test set {0:.4f}'.format(eval[0]))
code
17118943/cell_15
[ "application_vnd.jupyter.stderr_output_1.png" ]
import keras as K
import numpy as np
import tensorflow as tf
np.random.seed(4)
tf.set_random_seed(13)
# assumed data preparation (the notebook's loading and split cells are not shown here)
import pandas as pd
from sklearn.model_selection import train_test_split
dfBoston = pd.read_csv('../input/boston_mm_tab.csv', header=None)
X = dfBoston[np.arange(0, 13)]
y = dfBoston[13]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
tf.logging.set_verbosity(tf.logging.ERROR)
init = K.initializers.RandomUniform(seed=1)
simple_sgd = K.optimizers.SGD(lr=0.01)
model = K.models.Sequential()
model.add(K.layers.Dense(units=10, input_dim=13, kernel_initializer=init, activation='tanh'))
model.add(K.layers.Dense(units=10, kernel_initializer=init, activation='tanh'))
model.add(K.layers.Dense(units=1, kernel_initializer=init, activation=None))
model.compile(loss='mean_squared_error', optimizer=simple_sgd, metrics=['mse'])
batch_size = 8
max_epochs = 500
print('Starting training... ')
h = model.fit(X_train, y_train, batch_size=batch_size, epochs=max_epochs, shuffle=True, verbose=1)
print('Training finished \n')
code
17118943/cell_3
[ "text_plain_output_1.png" ]
import numpy as np import keras as K import tensorflow as tf import pandas as pd import seaborn as sns import os from matplotlib import pyplot as plt os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
code
17118943/cell_17
[ "text_plain_output_1.png" ]
import keras as K
import numpy as np
import pandas as pd
import tensorflow as tf
np.random.seed(4)
tf.set_random_seed(13)
dfBoston = pd.read_csv('../input/boston_mm_tab.csv', header=None)
dfBoston
X = dfBoston[np.arange(0, 13)]
y = dfBoston[13]
# assumed split (the notebook's train/test split cell is not shown here)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
tf.logging.set_verbosity(tf.logging.ERROR)
init = K.initializers.RandomUniform(seed=1)
simple_sgd = K.optimizers.SGD(lr=0.01)
model = K.models.Sequential()
model.add(K.layers.Dense(units=10, input_dim=13, kernel_initializer=init, activation='tanh'))
model.add(K.layers.Dense(units=10, kernel_initializer=init, activation='tanh'))
model.add(K.layers.Dense(units=1, kernel_initializer=init, activation=None))
model.compile(loss='mean_squared_error', optimizer=simple_sgd, metrics=['mse'])
batch_size = 8
max_epochs = 500
h = model.fit(X_train, y_train, batch_size=batch_size, epochs=max_epochs, shuffle=True, verbose=1)
y_pred = model.predict(X_train)
y_d = np.array(y_train).reshape(-1, 1)
results = abs(y_pred - y_d) < np.abs(0.15 * y_d)
results
acc = np.sum(results) / len(results)
print('Hit rate on the training set (%): {0:.4f}'.format(acc * 100))
y_pred = model.predict(X_test)
y_d = np.array(y_test).reshape(-1, 1)
results = abs(y_pred - y_d) < np.abs(0.15 * y_d)
results
acc = np.sum(results) / len(results)
print('Hit rate on the test set (%): {0:.4f}'.format(acc * 100))
code
72086533/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd df_train = pd.read_csv('../input/tabulardata-kfolds-created/train_folds.csv') df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv') sample_submission = pd.read_csv('../input/tabular-playground-series-aug-2021/sample_submission.csv') useful_features = [c for c in df_train.columns if c not in ('id', 'loss', 'kfold')] df_train[useful_features]
code
72086533/cell_10
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_squared_error from xgboost import XGBRegressor import pandas as pd df_train = pd.read_csv('../input/tabulardata-kfolds-created/train_folds.csv') df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv') sample_submission = pd.read_csv('../input/tabular-playground-series-aug-2021/sample_submission.csv') useful_features = [c for c in df_train.columns if c not in ('id', 'loss', 'kfold')] df_train[useful_features] from xgboost import XGBRegressor import numpy as np final_preds = [] for fold in range(5): xtrain = df_train[df_train.kfold != fold] xvalid = df_train[df_train.kfold == fold] ytrain = xtrain['loss'] xtrain = xtrain[useful_features] yvalid = xvalid['loss'] xvalid = xvalid[useful_features] model = XGBRegressor(n_estimators=500, random_state=fold) model.fit(xtrain, ytrain, early_stopping_rounds=5, eval_set=[(xvalid, yvalid)], verbose=False) preds_valid = model.predict(xvalid) test = df_test[useful_features] test_preds = model.predict(test) final_preds.append(test_preds) print(mean_squared_error(yvalid, preds_valid, squared=False))
code
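final_preds above collects one test prediction vector per fold; the usual next step blends them and writes a submission. A sketch, assuming sample_submission from the loading cell (its loss column name matches the competition file):
# average the five fold models' predictions row-wise
blended = np.mean(np.column_stack(final_preds), axis=1)
sample_submission.loss = blended
sample_submission.to_csv('submission.csv', index=False)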
72086533/cell_5
[ "text_html_output_1.png" ]
import pandas as pd df_train = pd.read_csv('../input/tabulardata-kfolds-created/train_folds.csv') df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv') sample_submission = pd.read_csv('../input/tabular-playground-series-aug-2021/sample_submission.csv') sample_submission.head()
code
2011179/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd
movies = pd.read_csv('../input/movie.csv')
tags = pd.read_csv('../input/tag.csv')
ratings = pd.read_csv('../input/rating.csv')
movies.isnull().values.any()
# assumed preparation (the notebook's intermediate cells are not shown here):
# pull the release year out of the title and attach each movie's mean rating
movies['year'] = movies['title'].str.extract(r'\((\d{4})\)', expand=False).astype(float)
movies = movies.merge(ratings.groupby('movieId', as_index=False)['rating'].mean(), on='movieId')
movies.isnull().values.any()
movies = movies.dropna()
ind_animation = 'Animation'
ind_children = 'Children'
animation1 = movies['genres'].str.contains(ind_animation)
animation0 = ~movies['genres'].str.contains(ind_animation)
children1 = movies['genres'].str.contains(ind_children)
children0 = ~movies['genres'].str.contains(ind_children)
both = movies[animation1 & children1]
just_anim = movies[animation1 & children0]
just_chil = movies[animation0 & children1]
just_anim_plt = just_anim[['rating', 'year']]
just_anim_plt = just_anim_plt.groupby(['year'], as_index=False).mean()
just_anim_plt.head(15)
code
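The cell above tabulates the yearly mean rating only for the Animation-only subset; plotting the same aggregation for all three subsets makes the comparison visible. A sketch, assuming the rating and year columns attached above:
import matplotlib.pyplot as plt

for name, frame in [('Animation & Children', both), ('Animation only', just_anim), ('Children only', just_chil)]:
    yearly = frame[['rating', 'year']].groupby('year', as_index=False).mean()
    plt.plot(yearly['year'], yearly['rating'], label=name)
plt.xlabel('year')
plt.ylabel('mean rating')
plt.legend()
plt.show()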
2011179/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd movies = pd.read_csv('../input/movie.csv') tags = pd.read_csv('../input/tag.csv') ratings = pd.read_csv('../input/rating.csv') movies.isnull().values.any() movies.head()
code
2011179/cell_20
[ "text_html_output_1.png" ]
import pandas as pd movies = pd.read_csv('../input/movie.csv') tags = pd.read_csv('../input/tag.csv') ratings = pd.read_csv('../input/rating.csv') movies.isnull().values.any() movies.isnull().values.any() movies = movies.dropna() ind_animation = 'Animation' ind_children = 'Children' animation1 = movies['genres'].str.contains(ind_animation) animation0 = ~movies['genres'].str.contains(ind_animation) children1 = movies['genres'].str.contains(ind_children) children0 = ~movies['genres'].str.contains(ind_children) both = movies[animation1 & children1] just_anim = movies[animation1 & children0] just_chil = movies[animation0 & children1] print('The dataset which includes both Animation and Children genres has {0} rows.'.format(len(both))) print('The dataset which includes just Animation genre has {0} rows.'.format(len(just_anim))) print('The dataset which includes just Children genre has {0} rows.'.format(len(just_chil)))
code
2011179/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd movies = pd.read_csv('../input/movie.csv') tags = pd.read_csv('../input/tag.csv') ratings = pd.read_csv('../input/rating.csv') tags.head()
code
2011179/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd movies = pd.read_csv('../input/movie.csv') tags = pd.read_csv('../input/tag.csv') ratings = pd.read_csv('../input/rating.csv') movies.isnull().values.any() movies.isnull().values.any() movies = movies.dropna() ind_animation = 'Animation' ind_children = 'Children' animation1 = movies['genres'].str.contains(ind_animation) animation0 = ~movies['genres'].str.contains(ind_animation) children1 = movies['genres'].str.contains(ind_children) children0 = ~movies['genres'].str.contains(ind_children) both = movies[animation1 & children1] just_anim = movies[animation1 & children0] just_chil = movies[animation0 & children1] just_chil.head()
code
2011179/cell_7
[ "text_html_output_1.png" ]
import pandas as pd movies = pd.read_csv('../input/movie.csv') tags = pd.read_csv('../input/tag.csv') ratings = pd.read_csv('../input/rating.csv') ratings.head()
code
2011179/cell_18
[ "text_html_output_1.png" ]
import pandas as pd movies = pd.read_csv('../input/movie.csv') tags = pd.read_csv('../input/tag.csv') ratings = pd.read_csv('../input/rating.csv') movies.isnull().values.any() movies.isnull().values.any() movies = movies.dropna() ind_animation = 'Animation' ind_children = 'Children' animation1 = movies['genres'].str.contains(ind_animation) animation0 = ~movies['genres'].str.contains(ind_animation) children1 = movies['genres'].str.contains(ind_children) children0 = ~movies['genres'].str.contains(ind_children) both = movies[animation1 & children1] just_anim = movies[animation1 & children0] just_chil = movies[animation0 & children1] just_anim.head()
code
2011179/cell_8
[ "text_html_output_1.png" ]
import pandas as pd movies = pd.read_csv('../input/movie.csv') tags = pd.read_csv('../input/tag.csv') ratings = pd.read_csv('../input/rating.csv') movies.isnull().values.any()
code
2011179/cell_3
[ "text_html_output_1.png" ]
from subprocess import check_output import numpy as np import pandas as pd import matplotlib.pyplot as plt from subprocess import check_output print(check_output(['ls', '../input']).decode('utf8'))
code
2011179/cell_17
[ "text_html_output_1.png" ]
import pandas as pd movies = pd.read_csv('../input/movie.csv') tags = pd.read_csv('../input/tag.csv') ratings = pd.read_csv('../input/rating.csv') movies.isnull().values.any() movies.isnull().values.any() movies = movies.dropna() ind_animation = 'Animation' ind_children = 'Children' animation1 = movies['genres'].str.contains(ind_animation) animation0 = ~movies['genres'].str.contains(ind_animation) children1 = movies['genres'].str.contains(ind_children) children0 = ~movies['genres'].str.contains(ind_children) both = movies[animation1 & children1] just_anim = movies[animation1 & children0] just_chil = movies[animation0 & children1] both.head()
code