Column            Type             Range
path              stringlengths    13 .. 17
screenshot_names  sequencelengths  1 .. 873
code              stringlengths    0 .. 40.4k
cell_type         stringclasses    1 value
72068232/cell_12
[ "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/emotion-detection-from-text/tweet_emotions.csv')
df.content.iloc[-10:]
df.sentiment.value_counts()
code
105200230/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd
import csv
import numpy as np
import re
from tqdm.notebook import tqdm
from fuzzywuzzy import fuzz

BANK_FILE = '../input/payment-id-ndsc-2020/bank_statement.csv'
CHECKOUT_FILE = '../input/payment-id-ndsc-2020/checkout.csv'
bank = pd.read_csv(BANK_FILE)
checkout = pd.read_csv(CHECKOUT_FILE)
print(bank.columns)
print(checkout.columns)
code
105200230/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
import csv
import numpy as np
import re
from tqdm.notebook import tqdm
from fuzzywuzzy import fuzz

BANK_FILE = '../input/payment-id-ndsc-2020/bank_statement.csv'
CHECKOUT_FILE = '../input/payment-id-ndsc-2020/checkout.csv'
bank = pd.read_csv(BANK_FILE)
checkout = pd.read_csv(CHECKOUT_FILE)
print(len(bank))
print(len(checkout))
bank = bank.sort_values(by=['stmt_amount'], ascending=True)
checkout = checkout.sort_values(by=['ckt_amount'], ascending=True)
code
105200230/cell_5
[ "text_plain_output_1.png" ]
from fuzzywuzzy import fuzz
from tqdm.notebook import tqdm
import csv
import numpy as np
import pandas as pd
import re

BANK_FILE = '../input/payment-id-ndsc-2020/bank_statement.csv'
CHECKOUT_FILE = '../input/payment-id-ndsc-2020/checkout.csv'
bank = pd.read_csv(BANK_FILE)
checkout = pd.read_csv(CHECKOUT_FILE)
bank = bank.sort_values(by=['stmt_amount'], ascending=True)
checkout = checkout.sort_values(by=['ckt_amount'], ascending=True)
grouped_bank = bank.groupby('stmt_amount')
grouped_checkout = checkout.groupby('ckt_amount')
amount_list = list(set(bank['stmt_amount'].tolist()))
len(amount_list)
amount_list.sort()
local_bank = bank
local_checkout = checkout
pair_list = []
paired_data = []
pbar = tqdm(total=len(bank))
skipped_amounts = []
matchless = []
matchless_count = 0
for amount in amount_list:
    bank_group = grouped_bank.get_group(amount)
    checkout_group = grouped_checkout.get_group(amount)
    skip_list = []
    matchless_count += len(checkout_group)
    matchless_min_score = 120
    # repeatedly lower the fuzzy-match threshold until the rows in this
    # amount bucket have been paired off
    while len(checkout_group) > 0 or len(bank_group) > 0:
        matchless_min_score -= 20
        for bank_index, bank_row in bank_group.iterrows():
            current = bank_index
            for checkout_index, checkout_row in checkout_group.iterrows():
                pbar.desc = 'Purge {} {} With {} data {} miss'.format(matchless_min_score, amount, len(bank_group), len(checkout_group))
                fuzz_result = fuzz.token_set_ratio(checkout_row['buyer_name'], bank_row['desc'])
                if fuzz_result >= matchless_min_score:
                    pbar.update(1)
                    checkout_group = checkout_group.drop(checkout_index)
                    bank_group = bank_group.drop(bank_index)
                    paired_data.append([bank_row['stmt_id'], checkout_row['ckt_id']])
                    matchless_count -= 1
                    break
with open('paired_data_long.csv', 'w', newline='') as file:
    writer = csv.writer(file)
    writer.writerow(['stmt_id', 'ckt_id'])
    writer.writerows(paired_data)
code
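The pairing loop above hinges entirely on fuzz.token_set_ratio, which tokenizes both strings, lowercases them, and compares token sets, so word order and extra tokens barely hurt the score, and a buyer name buried in a longer statement description can still score 100. A minimal sketch with made-up strings (not from the competition data):

from fuzzywuzzy import fuzz

# token_set_ratio ignores token order and duplicated tokens
print(fuzz.token_set_ratio('Budi Santoso', 'transfer from budi santoso ref 120001'))  # 100
print(fuzz.token_set_ratio('Budi Santoso', 'transfer from siti aminah ref 120002'))   # much lower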
18100538/cell_4
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression

pd.set_option('display.max_rows', 1200)
pd.set_option('display.max_columns', 1000)
cr = pd.read_csv('../input/Loan payments data.csv')
cr.profile_report(style={'full_width': True})
code
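cr.profile_report(...) uses the DataFrame accessor that old pandas_profiling registers on import. Depending on the installed version, the equivalent explicit form builds the report object directly; a sketch assuming the same pandas_profiling package (output filename is illustrative):

import pandas as pd
import pandas_profiling as pdp

cr = pd.read_csv('../input/Loan payments data.csv')
report = pdp.ProfileReport(cr)                 # explicit constructor instead of the accessor
report.to_file('loan_payments_profile.html')   # write the HTML report to disk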
18100538/cell_3
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np
import pandas_profiling as pdp
from sklearn.linear_model import LogisticRegression

pd.set_option('display.max_rows', 1200)
pd.set_option('display.max_columns', 1000)
cr = pd.read_csv('../input/Loan payments data.csv')
cr.head()
code
2036992/cell_42
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import DecisionTreeClassifier
import pandas as pd

X_train = df_train.drop('xAttack', axis=1)
Y_train = df_train.loc[:, ['xAttack']]
X_test = df_test.drop('xAttack', axis=1)
Y_test = df_test.loc[:, ['xAttack']]
le = preprocessing.LabelEncoder()
enc = OneHotEncoder()
lb = preprocessing.LabelBinarizer()
# note: fit_transform is re-fit on the test split, which assumes the same
# category sets in train and test
X_train['protocol_type'] = le.fit_transform(X_train['protocol_type'])
X_test['protocol_type'] = le.fit_transform(X_test['protocol_type'])
Y_train['xAttack'] = le.fit_transform(Y_train['xAttack'])
lb.fit_transform(Y_train['xAttack'])
Y_test['xAttack'] = le.fit_transform(Y_test['xAttack'])
lb.fit_transform(Y_test['xAttack'])
con_list = ['protocol_type', 'service', 'flag', 'land', 'logged_in', 'su_attempted', 'is_host_login', 'is_guest_login']
con_train = X_train.drop(con_list, axis=1)
stdtrain = con_train.std(axis=0)
std_X_train = stdtrain.to_frame()
X_train = X_train.drop(['num_outbound_cmds'], axis=1)
X_test = X_test.drop(['num_outbound_cmds'], axis=1)
df_train = pd.concat([X_train, Y_train], axis=1)
stdrop_list = ['urgent', 'num_shells', 'root_shell', 'num_failed_logins', 'num_access_files', 'dst_host_srv_diff_host_rate', 'diff_srv_rate', 'dst_host_diff_srv_rate', 'wrong_fragment']
X_test_stdrop = X_test.drop(stdrop_list, axis=1)
X_train_stdrop = X_train.drop(stdrop_list, axis=1)
df_train_stdrop = pd.concat([X_train_stdrop, Y_train], axis=1)
AB = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(), n_estimators=100, learning_rate=1.0)
RF = RandomForestClassifier(n_estimators=10, criterion='entropy', max_features='auto', bootstrap=True)
ET = ExtraTreesClassifier(n_estimators=10, criterion='gini', max_features='auto', bootstrap=False)
GB = GradientBoostingClassifier(loss='deviance', learning_rate=0.1, n_estimators=200, max_features='auto')
y_train = Y_train['xAttack'].ravel()
x_train = X_train.values
x_test = X_test.values
ET.fit(X_train, y_train)  # fit on the 1-D label array rather than the (n, 1) DataFrame
code
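One recurring pattern in the 2036992 cells is le.fit_transform applied separately to the train and test columns, which can silently assign different integer codes when the two splits contain different category sets. A minimal sketch (toy protocol values, not the real NSL-KDD column) of the safer fit-once/transform-later pattern:

from sklearn import preprocessing

le = preprocessing.LabelEncoder()
train_vals = ['tcp', 'udp', 'icmp', 'tcp']  # toy values for illustration
test_vals = ['udp', 'tcp']
train_codes = le.fit_transform(train_vals)  # learn the mapping on train only
test_codes = le.transform(test_vals)        # reuse the same mapping on test
print(train_codes, test_codes)              # [1 2 0 1] [2 1]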
2036992/cell_21
[ "text_html_output_1.png" ]
from sklearn import preprocessing
from sklearn.preprocessing import OneHotEncoder
import pandas as pd

X_train = df_train.drop('xAttack', axis=1)
Y_train = df_train.loc[:, ['xAttack']]
X_test = df_test.drop('xAttack', axis=1)
Y_test = df_test.loc[:, ['xAttack']]
le = preprocessing.LabelEncoder()
enc = OneHotEncoder()
lb = preprocessing.LabelBinarizer()
X_train['protocol_type'] = le.fit_transform(X_train['protocol_type'])
X_test['protocol_type'] = le.fit_transform(X_test['protocol_type'])
Y_train['xAttack'] = le.fit_transform(Y_train['xAttack'])
lb.fit_transform(Y_train['xAttack'])
Y_test['xAttack'] = le.fit_transform(Y_test['xAttack'])
lb.fit_transform(Y_test['xAttack'])
con_list = ['protocol_type', 'service', 'flag', 'land', 'logged_in', 'su_attempted', 'is_host_login', 'is_guest_login']
con_train = X_train.drop(con_list, axis=1)
stdtrain = con_train.std(axis=0)
std_X_train = stdtrain.to_frame()
X_train = X_train.drop(['num_outbound_cmds'], axis=1)
X_test = X_test.drop(['num_outbound_cmds'], axis=1)
df_train = pd.concat([X_train, Y_train], axis=1)
df_train.head()
X_train.head()
code
2036992/cell_23
[ "text_html_output_1.png" ]
from sklearn import preprocessing
from sklearn.preprocessing import OneHotEncoder
import pandas as pd

X_train = df_train.drop('xAttack', axis=1)
Y_train = df_train.loc[:, ['xAttack']]
X_test = df_test.drop('xAttack', axis=1)
Y_test = df_test.loc[:, ['xAttack']]
le = preprocessing.LabelEncoder()
enc = OneHotEncoder()
lb = preprocessing.LabelBinarizer()
X_train['protocol_type'] = le.fit_transform(X_train['protocol_type'])
X_test['protocol_type'] = le.fit_transform(X_test['protocol_type'])
Y_train['xAttack'] = le.fit_transform(Y_train['xAttack'])
lb.fit_transform(Y_train['xAttack'])
Y_test['xAttack'] = le.fit_transform(Y_test['xAttack'])
lb.fit_transform(Y_test['xAttack'])
con_list = ['protocol_type', 'service', 'flag', 'land', 'logged_in', 'su_attempted', 'is_host_login', 'is_guest_login']
con_train = X_train.drop(con_list, axis=1)
stdtrain = con_train.std(axis=0)
std_X_train = stdtrain.to_frame()
X_train = X_train.drop(['num_outbound_cmds'], axis=1)
X_test = X_test.drop(['num_outbound_cmds'], axis=1)
df_train = pd.concat([X_train, Y_train], axis=1)
stdrop_list = ['urgent', 'num_shells', 'root_shell', 'num_failed_logins', 'num_access_files', 'dst_host_srv_diff_host_rate', 'diff_srv_rate', 'dst_host_diff_srv_rate', 'wrong_fragment']
X_test_stdrop = X_test.drop(stdrop_list, axis=1)
X_train_stdrop = X_train.drop(stdrop_list, axis=1)
df_train_stdrop = pd.concat([X_train_stdrop, Y_train], axis=1)
df_train_stdrop.head()
code
2036992/cell_44
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import DecisionTreeClassifier
import pandas as pd

X_train = df_train.drop('xAttack', axis=1)
Y_train = df_train.loc[:, ['xAttack']]
X_test = df_test.drop('xAttack', axis=1)
Y_test = df_test.loc[:, ['xAttack']]
le = preprocessing.LabelEncoder()
enc = OneHotEncoder()
lb = preprocessing.LabelBinarizer()
X_train['protocol_type'] = le.fit_transform(X_train['protocol_type'])
X_test['protocol_type'] = le.fit_transform(X_test['protocol_type'])
Y_train['xAttack'] = le.fit_transform(Y_train['xAttack'])
lb.fit_transform(Y_train['xAttack'])
Y_test['xAttack'] = le.fit_transform(Y_test['xAttack'])
lb.fit_transform(Y_test['xAttack'])
con_list = ['protocol_type', 'service', 'flag', 'land', 'logged_in', 'su_attempted', 'is_host_login', 'is_guest_login']
con_train = X_train.drop(con_list, axis=1)
stdtrain = con_train.std(axis=0)
std_X_train = stdtrain.to_frame()
X_train = X_train.drop(['num_outbound_cmds'], axis=1)
X_test = X_test.drop(['num_outbound_cmds'], axis=1)
df_train = pd.concat([X_train, Y_train], axis=1)
stdrop_list = ['urgent', 'num_shells', 'root_shell', 'num_failed_logins', 'num_access_files', 'dst_host_srv_diff_host_rate', 'diff_srv_rate', 'dst_host_diff_srv_rate', 'wrong_fragment']
X_test_stdrop = X_test.drop(stdrop_list, axis=1)
X_train_stdrop = X_train.drop(stdrop_list, axis=1)
df_train_stdrop = pd.concat([X_train_stdrop, Y_train], axis=1)
AB = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(), n_estimators=100, learning_rate=1.0)
RF = RandomForestClassifier(n_estimators=10, criterion='entropy', max_features='auto', bootstrap=True)
ET = ExtraTreesClassifier(n_estimators=10, criterion='gini', max_features='auto', bootstrap=False)
GB = GradientBoostingClassifier(loss='deviance', learning_rate=0.1, n_estimators=200, max_features='auto')
y_train = Y_train['xAttack'].ravel()
x_train = X_train.values
x_test = X_test.values
GB.fit(X_train, y_train)  # fit on the 1-D label array rather than the (n, 1) DataFrame
code
2036992/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
df_train.head()
code
2036992/cell_40
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import DecisionTreeClassifier
import pandas as pd

X_train = df_train.drop('xAttack', axis=1)
Y_train = df_train.loc[:, ['xAttack']]
X_test = df_test.drop('xAttack', axis=1)
Y_test = df_test.loc[:, ['xAttack']]
le = preprocessing.LabelEncoder()
enc = OneHotEncoder()
lb = preprocessing.LabelBinarizer()
X_train['protocol_type'] = le.fit_transform(X_train['protocol_type'])
X_test['protocol_type'] = le.fit_transform(X_test['protocol_type'])
Y_train['xAttack'] = le.fit_transform(Y_train['xAttack'])
lb.fit_transform(Y_train['xAttack'])
Y_test['xAttack'] = le.fit_transform(Y_test['xAttack'])
lb.fit_transform(Y_test['xAttack'])
con_list = ['protocol_type', 'service', 'flag', 'land', 'logged_in', 'su_attempted', 'is_host_login', 'is_guest_login']
con_train = X_train.drop(con_list, axis=1)
stdtrain = con_train.std(axis=0)
std_X_train = stdtrain.to_frame()
X_train = X_train.drop(['num_outbound_cmds'], axis=1)
X_test = X_test.drop(['num_outbound_cmds'], axis=1)
df_train = pd.concat([X_train, Y_train], axis=1)
stdrop_list = ['urgent', 'num_shells', 'root_shell', 'num_failed_logins', 'num_access_files', 'dst_host_srv_diff_host_rate', 'diff_srv_rate', 'dst_host_diff_srv_rate', 'wrong_fragment']
X_test_stdrop = X_test.drop(stdrop_list, axis=1)
X_train_stdrop = X_train.drop(stdrop_list, axis=1)
df_train_stdrop = pd.concat([X_train_stdrop, Y_train], axis=1)
AB = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(), n_estimators=100, learning_rate=1.0)
RF = RandomForestClassifier(n_estimators=10, criterion='entropy', max_features='auto', bootstrap=True)
ET = ExtraTreesClassifier(n_estimators=10, criterion='gini', max_features='auto', bootstrap=False)
GB = GradientBoostingClassifier(loss='deviance', learning_rate=0.1, n_estimators=200, max_features='auto')
y_train = Y_train['xAttack'].ravel()
x_train = X_train.values
x_test = X_test.values
RF.fit(X_train, y_train)  # fit on the 1-D label array rather than the (n, 1) DataFrame
code
2036992/cell_29
[ "text_html_output_1.png" ]
from sklearn import linear_model
from sklearn import preprocessing
from sklearn.preprocessing import OneHotEncoder
import pandas as pd

X_train = df_train.drop('xAttack', axis=1)
Y_train = df_train.loc[:, ['xAttack']]
X_test = df_test.drop('xAttack', axis=1)
Y_test = df_test.loc[:, ['xAttack']]
le = preprocessing.LabelEncoder()
enc = OneHotEncoder()
lb = preprocessing.LabelBinarizer()
X_train['protocol_type'] = le.fit_transform(X_train['protocol_type'])
X_test['protocol_type'] = le.fit_transform(X_test['protocol_type'])
Y_train['xAttack'] = le.fit_transform(Y_train['xAttack'])
lb.fit_transform(Y_train['xAttack'])
Y_test['xAttack'] = le.fit_transform(Y_test['xAttack'])
lb.fit_transform(Y_test['xAttack'])
con_list = ['protocol_type', 'service', 'flag', 'land', 'logged_in', 'su_attempted', 'is_host_login', 'is_guest_login']
con_train = X_train.drop(con_list, axis=1)
stdtrain = con_train.std(axis=0)
std_X_train = stdtrain.to_frame()
X_train = X_train.drop(['num_outbound_cmds'], axis=1)
X_test = X_test.drop(['num_outbound_cmds'], axis=1)
df_train = pd.concat([X_train, Y_train], axis=1)
stdrop_list = ['urgent', 'num_shells', 'root_shell', 'num_failed_logins', 'num_access_files', 'dst_host_srv_diff_host_rate', 'diff_srv_rate', 'dst_host_diff_srv_rate', 'wrong_fragment']
X_test_stdrop = X_test.drop(stdrop_list, axis=1)
X_train_stdrop = X_train.drop(stdrop_list, axis=1)
df_train_stdrop = pd.concat([X_train_stdrop, Y_train], axis=1)
LR = linear_model.LinearRegression()
LR.fit(X_train, Y_train)
lr_score = LR.score(X_test, Y_test)  # coefficient of determination R^2, not an accuracy
print('Linear regression processing ...')
print('Linear regression Score: %.2f %%' % (lr_score * 100))
code
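LR.score above returns the coefficient of determination R², not a classification accuracy, so printing it as a raw percentage without scaling understates it. A quick equivalence check on toy data:

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score

X = np.array([[1.0], [2.0], [3.0], [4.0]])
y = np.array([2.0, 4.1, 5.9, 8.2])
model = LinearRegression().fit(X, y)
# .score() is exactly the r2_score of the model's predictions
assert np.isclose(model.score(X, y), r2_score(y, model.predict(X)))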
2036992/cell_39
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import DecisionTreeClassifier
import pandas as pd

X_train = df_train.drop('xAttack', axis=1)
Y_train = df_train.loc[:, ['xAttack']]
X_test = df_test.drop('xAttack', axis=1)
Y_test = df_test.loc[:, ['xAttack']]
le = preprocessing.LabelEncoder()
enc = OneHotEncoder()
lb = preprocessing.LabelBinarizer()
X_train['protocol_type'] = le.fit_transform(X_train['protocol_type'])
X_test['protocol_type'] = le.fit_transform(X_test['protocol_type'])
Y_train['xAttack'] = le.fit_transform(Y_train['xAttack'])
lb.fit_transform(Y_train['xAttack'])
Y_test['xAttack'] = le.fit_transform(Y_test['xAttack'])
lb.fit_transform(Y_test['xAttack'])
con_list = ['protocol_type', 'service', 'flag', 'land', 'logged_in', 'su_attempted', 'is_host_login', 'is_guest_login']
con_train = X_train.drop(con_list, axis=1)
stdtrain = con_train.std(axis=0)
std_X_train = stdtrain.to_frame()
X_train = X_train.drop(['num_outbound_cmds'], axis=1)
X_test = X_test.drop(['num_outbound_cmds'], axis=1)
df_train = pd.concat([X_train, Y_train], axis=1)
stdrop_list = ['urgent', 'num_shells', 'root_shell', 'num_failed_logins', 'num_access_files', 'dst_host_srv_diff_host_rate', 'diff_srv_rate', 'dst_host_diff_srv_rate', 'wrong_fragment']
X_test_stdrop = X_test.drop(stdrop_list, axis=1)
X_train_stdrop = X_train.drop(stdrop_list, axis=1)
df_train_stdrop = pd.concat([X_train_stdrop, Y_train], axis=1)
AB = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(), n_estimators=100, learning_rate=1.0)
RF = RandomForestClassifier(n_estimators=10, criterion='entropy', max_features='auto', bootstrap=True)
ET = ExtraTreesClassifier(n_estimators=10, criterion='gini', max_features='auto', bootstrap=False)
GB = GradientBoostingClassifier(loss='deviance', learning_rate=0.1, n_estimators=200, max_features='auto')
y_train = Y_train['xAttack'].ravel()
x_train = X_train.values
x_test = X_test.values
AB.fit(X_train, y_train)
AB_feature = AB.feature_importances_
AB_feature
ab_score = AB.score(X_test, Y_test)
print('AdaBoostClassifier processing ...')
print('AdaBoostClassifier Score: %.3f %%' % (ab_score * 100))
code
2036992/cell_41
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import DecisionTreeClassifier
import pandas as pd

X_train = df_train.drop('xAttack', axis=1)
Y_train = df_train.loc[:, ['xAttack']]
X_test = df_test.drop('xAttack', axis=1)
Y_test = df_test.loc[:, ['xAttack']]
le = preprocessing.LabelEncoder()
enc = OneHotEncoder()
lb = preprocessing.LabelBinarizer()
X_train['protocol_type'] = le.fit_transform(X_train['protocol_type'])
X_test['protocol_type'] = le.fit_transform(X_test['protocol_type'])
Y_train['xAttack'] = le.fit_transform(Y_train['xAttack'])
lb.fit_transform(Y_train['xAttack'])
Y_test['xAttack'] = le.fit_transform(Y_test['xAttack'])
lb.fit_transform(Y_test['xAttack'])
con_list = ['protocol_type', 'service', 'flag', 'land', 'logged_in', 'su_attempted', 'is_host_login', 'is_guest_login']
con_train = X_train.drop(con_list, axis=1)
stdtrain = con_train.std(axis=0)
std_X_train = stdtrain.to_frame()
X_train = X_train.drop(['num_outbound_cmds'], axis=1)
X_test = X_test.drop(['num_outbound_cmds'], axis=1)
df_train = pd.concat([X_train, Y_train], axis=1)
stdrop_list = ['urgent', 'num_shells', 'root_shell', 'num_failed_logins', 'num_access_files', 'dst_host_srv_diff_host_rate', 'diff_srv_rate', 'dst_host_diff_srv_rate', 'wrong_fragment']
X_test_stdrop = X_test.drop(stdrop_list, axis=1)
X_train_stdrop = X_train.drop(stdrop_list, axis=1)
df_train_stdrop = pd.concat([X_train_stdrop, Y_train], axis=1)
AB = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(), n_estimators=100, learning_rate=1.0)
RF = RandomForestClassifier(n_estimators=10, criterion='entropy', max_features='auto', bootstrap=True)
ET = ExtraTreesClassifier(n_estimators=10, criterion='gini', max_features='auto', bootstrap=False)
GB = GradientBoostingClassifier(loss='deviance', learning_rate=0.1, n_estimators=200, max_features='auto')
y_train = Y_train['xAttack'].ravel()
x_train = X_train.values
x_test = X_test.values
RF.fit(X_train, y_train)
RF_feature = RF.feature_importances_
RF_feature
rf_score = RF.score(X_test, Y_test)
print('RandomForestClassifier processing ...')
print('RandomForestClassifier Score: %.3f %%' % (rf_score * 100))
code
2036992/cell_7
[ "text_html_output_1.png" ]
df_test.head()
code
2036992/cell_45
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import DecisionTreeClassifier
import pandas as pd

X_train = df_train.drop('xAttack', axis=1)
Y_train = df_train.loc[:, ['xAttack']]
X_test = df_test.drop('xAttack', axis=1)
Y_test = df_test.loc[:, ['xAttack']]
le = preprocessing.LabelEncoder()
enc = OneHotEncoder()
lb = preprocessing.LabelBinarizer()
X_train['protocol_type'] = le.fit_transform(X_train['protocol_type'])
X_test['protocol_type'] = le.fit_transform(X_test['protocol_type'])
Y_train['xAttack'] = le.fit_transform(Y_train['xAttack'])
lb.fit_transform(Y_train['xAttack'])
Y_test['xAttack'] = le.fit_transform(Y_test['xAttack'])
lb.fit_transform(Y_test['xAttack'])
con_list = ['protocol_type', 'service', 'flag', 'land', 'logged_in', 'su_attempted', 'is_host_login', 'is_guest_login']
con_train = X_train.drop(con_list, axis=1)
stdtrain = con_train.std(axis=0)
std_X_train = stdtrain.to_frame()
X_train = X_train.drop(['num_outbound_cmds'], axis=1)
X_test = X_test.drop(['num_outbound_cmds'], axis=1)
df_train = pd.concat([X_train, Y_train], axis=1)
stdrop_list = ['urgent', 'num_shells', 'root_shell', 'num_failed_logins', 'num_access_files', 'dst_host_srv_diff_host_rate', 'diff_srv_rate', 'dst_host_diff_srv_rate', 'wrong_fragment']
X_test_stdrop = X_test.drop(stdrop_list, axis=1)
X_train_stdrop = X_train.drop(stdrop_list, axis=1)
df_train_stdrop = pd.concat([X_train_stdrop, Y_train], axis=1)
AB = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(), n_estimators=100, learning_rate=1.0)
RF = RandomForestClassifier(n_estimators=10, criterion='entropy', max_features='auto', bootstrap=True)
ET = ExtraTreesClassifier(n_estimators=10, criterion='gini', max_features='auto', bootstrap=False)
GB = GradientBoostingClassifier(loss='deviance', learning_rate=0.1, n_estimators=200, max_features='auto')
y_train = Y_train['xAttack'].ravel()
x_train = X_train.values
x_test = X_test.values
GB.fit(X_train, y_train)
GB_feature = GB.feature_importances_
GB_feature
gb_score = GB.score(X_test, Y_test)
print('GradientBoostingClassifier processing ...')
print('GradientBoostingClassifier Score: %.3f %%' % (gb_score * 100))
code
2036992/cell_49
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import DecisionTreeClassifier
import pandas as pd

X_train = df_train.drop('xAttack', axis=1)
Y_train = df_train.loc[:, ['xAttack']]
X_test = df_test.drop('xAttack', axis=1)
Y_test = df_test.loc[:, ['xAttack']]
le = preprocessing.LabelEncoder()
enc = OneHotEncoder()
lb = preprocessing.LabelBinarizer()
X_train['protocol_type'] = le.fit_transform(X_train['protocol_type'])
X_test['protocol_type'] = le.fit_transform(X_test['protocol_type'])
Y_train['xAttack'] = le.fit_transform(Y_train['xAttack'])
lb.fit_transform(Y_train['xAttack'])
Y_test['xAttack'] = le.fit_transform(Y_test['xAttack'])
lb.fit_transform(Y_test['xAttack'])
con_list = ['protocol_type', 'service', 'flag', 'land', 'logged_in', 'su_attempted', 'is_host_login', 'is_guest_login']
con_train = X_train.drop(con_list, axis=1)
stdtrain = con_train.std(axis=0)
std_X_train = stdtrain.to_frame()
X_train = X_train.drop(['num_outbound_cmds'], axis=1)
X_test = X_test.drop(['num_outbound_cmds'], axis=1)
df_train = pd.concat([X_train, Y_train], axis=1)
stdrop_list = ['urgent', 'num_shells', 'root_shell', 'num_failed_logins', 'num_access_files', 'dst_host_srv_diff_host_rate', 'diff_srv_rate', 'dst_host_diff_srv_rate', 'wrong_fragment']
X_test_stdrop = X_test.drop(stdrop_list, axis=1)
X_train_stdrop = X_train.drop(stdrop_list, axis=1)
df_train_stdrop = pd.concat([X_train_stdrop, Y_train], axis=1)
AB = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(), n_estimators=100, learning_rate=1.0)
RF = RandomForestClassifier(n_estimators=10, criterion='entropy', max_features='auto', bootstrap=True)
ET = ExtraTreesClassifier(n_estimators=10, criterion='gini', max_features='auto', bootstrap=False)
GB = GradientBoostingClassifier(loss='deviance', learning_rate=0.1, n_estimators=200, max_features='auto')
y_train = Y_train['xAttack'].ravel()
x_train = X_train.values
x_test = X_test.values
AB.fit(X_train, y_train)
AB_feature = AB.feature_importances_
AB_feature
ab_score = AB.score(X_test, Y_test)
RF.fit(X_train, y_train)
RF_feature = RF.feature_importances_
RF_feature
rf_score = RF.score(X_test, Y_test)
ET.fit(X_train, y_train)
ET_feature = ET.feature_importances_
ET_feature
et_score = ET.score(X_test, Y_test)
GB.fit(X_train, y_train)
GB_feature = GB.feature_importances_
GB_feature
gb_score = GB.score(X_test, Y_test)
cols = X_train.columns.values
feature_df = pd.DataFrame({'features': cols, 'AdaBoost': AB_feature, 'RandomForest': RF_feature, 'ExtraTree': ET_feature, 'GradientBoost': GB_feature})
feature_df.head(8)
code
2036992/cell_18
[ "text_html_output_1.png" ]
from sklearn import preprocessing
from sklearn.preprocessing import OneHotEncoder

X_train = df_train.drop('xAttack', axis=1)
Y_train = df_train.loc[:, ['xAttack']]
X_test = df_test.drop('xAttack', axis=1)
Y_test = df_test.loc[:, ['xAttack']]
le = preprocessing.LabelEncoder()
enc = OneHotEncoder()
lb = preprocessing.LabelBinarizer()
X_train['protocol_type'] = le.fit_transform(X_train['protocol_type'])
X_test['protocol_type'] = le.fit_transform(X_test['protocol_type'])
con_list = ['protocol_type', 'service', 'flag', 'land', 'logged_in', 'su_attempted', 'is_host_login', 'is_guest_login']
con_train = X_train.drop(con_list, axis=1)
stdtrain = con_train.std(axis=0)
std_X_train = stdtrain.to_frame()
std_X_train.nsmallest(10, columns=0).head(10)
code
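The cell above screens for near-constant columns by per-feature standard deviation; a toy illustration of the same idea (hypothetical two-column frame, not the NSL-KDD data):

import pandas as pd

# Columns whose standard deviation is ~0 carry almost no signal and are
# candidates for dropping, which is what the stdrop_list cells act on.
toy = pd.DataFrame({'a': [0, 0, 0, 0], 'b': [1, 5, 3, 9]})
stds = toy.std(axis=0)
print(stds.nsmallest(1))  # column 'a' has std 0.0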
2036992/cell_51
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import DecisionTreeClassifier
import pandas as pd

X_train = df_train.drop('xAttack', axis=1)
Y_train = df_train.loc[:, ['xAttack']]
X_test = df_test.drop('xAttack', axis=1)
Y_test = df_test.loc[:, ['xAttack']]
le = preprocessing.LabelEncoder()
enc = OneHotEncoder()
lb = preprocessing.LabelBinarizer()
X_train['protocol_type'] = le.fit_transform(X_train['protocol_type'])
X_test['protocol_type'] = le.fit_transform(X_test['protocol_type'])
Y_train['xAttack'] = le.fit_transform(Y_train['xAttack'])
lb.fit_transform(Y_train['xAttack'])
Y_test['xAttack'] = le.fit_transform(Y_test['xAttack'])
lb.fit_transform(Y_test['xAttack'])
con_list = ['protocol_type', 'service', 'flag', 'land', 'logged_in', 'su_attempted', 'is_host_login', 'is_guest_login']
con_train = X_train.drop(con_list, axis=1)
stdtrain = con_train.std(axis=0)
std_X_train = stdtrain.to_frame()
X_train = X_train.drop(['num_outbound_cmds'], axis=1)
X_test = X_test.drop(['num_outbound_cmds'], axis=1)
df_train = pd.concat([X_train, Y_train], axis=1)
stdrop_list = ['urgent', 'num_shells', 'root_shell', 'num_failed_logins', 'num_access_files', 'dst_host_srv_diff_host_rate', 'diff_srv_rate', 'dst_host_diff_srv_rate', 'wrong_fragment']
X_test_stdrop = X_test.drop(stdrop_list, axis=1)
X_train_stdrop = X_train.drop(stdrop_list, axis=1)
df_train_stdrop = pd.concat([X_train_stdrop, Y_train], axis=1)
AB = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(), n_estimators=100, learning_rate=1.0)
RF = RandomForestClassifier(n_estimators=10, criterion='entropy', max_features='auto', bootstrap=True)
ET = ExtraTreesClassifier(n_estimators=10, criterion='gini', max_features='auto', bootstrap=False)
GB = GradientBoostingClassifier(loss='deviance', learning_rate=0.1, n_estimators=200, max_features='auto')
y_train = Y_train['xAttack'].ravel()
x_train = X_train.values
x_test = X_test.values
AB.fit(X_train, y_train)
AB_feature = AB.feature_importances_
AB_feature
ab_score = AB.score(X_test, Y_test)
RF.fit(X_train, y_train)
RF_feature = RF.feature_importances_
RF_feature
rf_score = RF.score(X_test, Y_test)
ET.fit(X_train, y_train)
ET_feature = ET.feature_importances_
ET_feature
et_score = ET.score(X_test, Y_test)
GB.fit(X_train, y_train)
GB_feature = GB.feature_importances_
GB_feature
gb_score = GB.score(X_test, Y_test)
cols = X_train.columns.values
feature_df = pd.DataFrame({'features': cols, 'AdaBoost': AB_feature, 'RandomForest': RF_feature, 'ExtraTree': ET_feature, 'GradientBoost': GB_feature})
from matplotlib.ticker import MaxNLocator
from collections import namedtuple
graph = feature_df.plot.bar(figsize=(18, 10), title='Feature distribution', grid=True, legend=True, fontsize=15, xticks=feature_df.index)
graph.set_xticklabels(feature_df.features, rotation=80)
code
2036992/cell_28
[ "text_html_output_1.png" ]
from sklearn import linear_model
from sklearn import preprocessing
from sklearn.preprocessing import OneHotEncoder
import pandas as pd

X_train = df_train.drop('xAttack', axis=1)
Y_train = df_train.loc[:, ['xAttack']]
X_test = df_test.drop('xAttack', axis=1)
Y_test = df_test.loc[:, ['xAttack']]
le = preprocessing.LabelEncoder()
enc = OneHotEncoder()
lb = preprocessing.LabelBinarizer()
X_train['protocol_type'] = le.fit_transform(X_train['protocol_type'])
X_test['protocol_type'] = le.fit_transform(X_test['protocol_type'])
Y_train['xAttack'] = le.fit_transform(Y_train['xAttack'])
lb.fit_transform(Y_train['xAttack'])
Y_test['xAttack'] = le.fit_transform(Y_test['xAttack'])
lb.fit_transform(Y_test['xAttack'])
con_list = ['protocol_type', 'service', 'flag', 'land', 'logged_in', 'su_attempted', 'is_host_login', 'is_guest_login']
con_train = X_train.drop(con_list, axis=1)
stdtrain = con_train.std(axis=0)
std_X_train = stdtrain.to_frame()
X_train = X_train.drop(['num_outbound_cmds'], axis=1)
X_test = X_test.drop(['num_outbound_cmds'], axis=1)
df_train = pd.concat([X_train, Y_train], axis=1)
stdrop_list = ['urgent', 'num_shells', 'root_shell', 'num_failed_logins', 'num_access_files', 'dst_host_srv_diff_host_rate', 'diff_srv_rate', 'dst_host_diff_srv_rate', 'wrong_fragment']
X_test_stdrop = X_test.drop(stdrop_list, axis=1)
X_train_stdrop = X_train.drop(stdrop_list, axis=1)
df_train_stdrop = pd.concat([X_train_stdrop, Y_train], axis=1)
LR = linear_model.LinearRegression()
LR.fit(X_train, Y_train)
code
2036992/cell_16
[ "text_html_output_1.png" ]
from sklearn import preprocessing
from sklearn.preprocessing import OneHotEncoder

X_train = df_train.drop('xAttack', axis=1)
Y_train = df_train.loc[:, ['xAttack']]
X_test = df_test.drop('xAttack', axis=1)
Y_test = df_test.loc[:, ['xAttack']]
le = preprocessing.LabelEncoder()
enc = OneHotEncoder()
lb = preprocessing.LabelBinarizer()
X_train['protocol_type'] = le.fit_transform(X_train['protocol_type'])
X_test['protocol_type'] = le.fit_transform(X_test['protocol_type'])
Y_train['xAttack'] = le.fit_transform(Y_train['xAttack'])
lb.fit_transform(Y_train['xAttack'])
Y_test['xAttack'] = le.fit_transform(Y_test['xAttack'])
lb.fit_transform(Y_test['xAttack'])
Y_train.describe()
code
2036992/cell_38
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import DecisionTreeClassifier
import pandas as pd

X_train = df_train.drop('xAttack', axis=1)
Y_train = df_train.loc[:, ['xAttack']]
X_test = df_test.drop('xAttack', axis=1)
Y_test = df_test.loc[:, ['xAttack']]
le = preprocessing.LabelEncoder()
enc = OneHotEncoder()
lb = preprocessing.LabelBinarizer()
X_train['protocol_type'] = le.fit_transform(X_train['protocol_type'])
X_test['protocol_type'] = le.fit_transform(X_test['protocol_type'])
Y_train['xAttack'] = le.fit_transform(Y_train['xAttack'])
lb.fit_transform(Y_train['xAttack'])
Y_test['xAttack'] = le.fit_transform(Y_test['xAttack'])
lb.fit_transform(Y_test['xAttack'])
con_list = ['protocol_type', 'service', 'flag', 'land', 'logged_in', 'su_attempted', 'is_host_login', 'is_guest_login']
con_train = X_train.drop(con_list, axis=1)
stdtrain = con_train.std(axis=0)
std_X_train = stdtrain.to_frame()
X_train = X_train.drop(['num_outbound_cmds'], axis=1)
X_test = X_test.drop(['num_outbound_cmds'], axis=1)
df_train = pd.concat([X_train, Y_train], axis=1)
stdrop_list = ['urgent', 'num_shells', 'root_shell', 'num_failed_logins', 'num_access_files', 'dst_host_srv_diff_host_rate', 'diff_srv_rate', 'dst_host_diff_srv_rate', 'wrong_fragment']
X_test_stdrop = X_test.drop(stdrop_list, axis=1)
X_train_stdrop = X_train.drop(stdrop_list, axis=1)
df_train_stdrop = pd.concat([X_train_stdrop, Y_train], axis=1)
AB = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(), n_estimators=100, learning_rate=1.0)
RF = RandomForestClassifier(n_estimators=10, criterion='entropy', max_features='auto', bootstrap=True)
ET = ExtraTreesClassifier(n_estimators=10, criterion='gini', max_features='auto', bootstrap=False)
GB = GradientBoostingClassifier(loss='deviance', learning_rate=0.1, n_estimators=200, max_features='auto')
y_train = Y_train['xAttack'].ravel()
x_train = X_train.values
x_test = X_test.values
AB.fit(X_train, y_train)  # fit on the 1-D label array rather than the (n, 1) DataFrame
code
2036992/cell_43
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearn.tree import DecisionTreeClassifier
import pandas as pd

X_train = df_train.drop('xAttack', axis=1)
Y_train = df_train.loc[:, ['xAttack']]
X_test = df_test.drop('xAttack', axis=1)
Y_test = df_test.loc[:, ['xAttack']]
le = preprocessing.LabelEncoder()
enc = OneHotEncoder()
lb = preprocessing.LabelBinarizer()
X_train['protocol_type'] = le.fit_transform(X_train['protocol_type'])
X_test['protocol_type'] = le.fit_transform(X_test['protocol_type'])
Y_train['xAttack'] = le.fit_transform(Y_train['xAttack'])
lb.fit_transform(Y_train['xAttack'])
Y_test['xAttack'] = le.fit_transform(Y_test['xAttack'])
lb.fit_transform(Y_test['xAttack'])
con_list = ['protocol_type', 'service', 'flag', 'land', 'logged_in', 'su_attempted', 'is_host_login', 'is_guest_login']
con_train = X_train.drop(con_list, axis=1)
stdtrain = con_train.std(axis=0)
std_X_train = stdtrain.to_frame()
X_train = X_train.drop(['num_outbound_cmds'], axis=1)
X_test = X_test.drop(['num_outbound_cmds'], axis=1)
df_train = pd.concat([X_train, Y_train], axis=1)
stdrop_list = ['urgent', 'num_shells', 'root_shell', 'num_failed_logins', 'num_access_files', 'dst_host_srv_diff_host_rate', 'diff_srv_rate', 'dst_host_diff_srv_rate', 'wrong_fragment']
X_test_stdrop = X_test.drop(stdrop_list, axis=1)
X_train_stdrop = X_train.drop(stdrop_list, axis=1)
df_train_stdrop = pd.concat([X_train_stdrop, Y_train], axis=1)
AB = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(), n_estimators=100, learning_rate=1.0)
RF = RandomForestClassifier(n_estimators=10, criterion='entropy', max_features='auto', bootstrap=True)
ET = ExtraTreesClassifier(n_estimators=10, criterion='gini', max_features='auto', bootstrap=False)
GB = GradientBoostingClassifier(loss='deviance', learning_rate=0.1, n_estimators=200, max_features='auto')
y_train = Y_train['xAttack'].ravel()
x_train = X_train.values
x_test = X_test.values
ET.fit(X_train, y_train)
ET_feature = ET.feature_importances_
ET_feature
et_score = ET.score(X_test, Y_test)
print('ExtraTreesClassifier processing ...')
print('ExtraTreesClassifier Score: %.3f %%' % (et_score * 100))
code
2036992/cell_14
[ "text_html_output_1.png" ]
from sklearn import preprocessing
from sklearn.preprocessing import OneHotEncoder

X_train = df_train.drop('xAttack', axis=1)
Y_train = df_train.loc[:, ['xAttack']]
X_test = df_test.drop('xAttack', axis=1)
Y_test = df_test.loc[:, ['xAttack']]
le = preprocessing.LabelEncoder()
enc = OneHotEncoder()
lb = preprocessing.LabelBinarizer()
X_train['protocol_type'] = le.fit_transform(X_train['protocol_type'])
X_test['protocol_type'] = le.fit_transform(X_test['protocol_type'])
X_train.head()
code
1005471/cell_13
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_json('../input/train.json')
display_count = 3
target = 'interest_level'
train_data.iloc[0]

def get_value_counts(col, df):
    return pd.DataFrame(df[col].value_counts())

global_bar_chart_settings = {'height': 4, 'width': 8, 'bar_width': 0.9, 'title': 'Number of occurrences of ', 'ylabel': 'Occurrence', 'alpha': None, 'lbl_fontsize': 15, 'title_fontsize': 20}

def plot_bar(chart_settings, df, column):
    # start from the global defaults, then apply per-chart overrides
    width = global_bar_chart_settings['width']
    height = global_bar_chart_settings['height']
    alpha = global_bar_chart_settings['alpha']
    title = global_bar_chart_settings['title']
    bar_width = global_bar_chart_settings['bar_width']
    ylabel = global_bar_chart_settings['ylabel']
    lbl_fontsize = global_bar_chart_settings['lbl_fontsize']
    title_fontsize = global_bar_chart_settings['title_fontsize']
    chart_keys = chart_settings.keys()
    if 'width' in chart_keys:
        width = chart_settings['width']
    if 'height' in chart_keys:
        height = chart_settings['height']
    if 'title' in chart_keys:
        title = chart_settings['title']
    if 'bar_width' in chart_keys:
        bar_width = chart_settings['bar_width']
    if 'lbl_fontsize' in chart_keys:
        lbl_fontsize = chart_settings['lbl_fontsize']
    if 'title_fontsize' in chart_keys:
        title_fontsize = chart_settings['title_fontsize']
    fig, ax = plt.subplots(figsize=(width, height))
    ind = np.arange(len(df.index))
    values = df[column]
    rects = ax.bar(ind, values, bar_width, alpha=alpha)
    ax.set_ylabel(ylabel, fontsize=lbl_fontsize)
    ax.set_title(title + column, fontsize=title_fontsize)
    ax.set_xticks(ind + 0.1 / 2)
    ax.set_xticklabels(df.index)
    plt.show()

uniq_interest_levels = list(train_data[target].unique())
interest_level_groups = train_data.groupby(target)
uniq_interest_levels
title = 'Number of occurrences in high interest level for '
title = 'Number of occurrences in medium interest level for '
plot_bar({'title': title, 'title_fontsize': 15}, get_value_counts('bathrooms', interest_level_groups.get_group('medium')), 'bathrooms')
code
1005471/cell_9
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_json('../input/train.json')
display_count = 3
target = 'interest_level'
train_data.iloc[0]
bathroom_df = train_data[['bathrooms', 'interest_level']]

def get_value_counts(col, df):
    return pd.DataFrame(df[col].value_counts())

global_bar_chart_settings = {'height': 4, 'width': 8, 'bar_width': 0.9, 'title': 'Number of occurrences of ', 'ylabel': 'Occurrence', 'alpha': None, 'lbl_fontsize': 15, 'title_fontsize': 20}

def plot_bar(chart_settings, df, column):
    # start from the global defaults, then apply per-chart overrides
    width = global_bar_chart_settings['width']
    height = global_bar_chart_settings['height']
    alpha = global_bar_chart_settings['alpha']
    title = global_bar_chart_settings['title']
    bar_width = global_bar_chart_settings['bar_width']
    ylabel = global_bar_chart_settings['ylabel']
    lbl_fontsize = global_bar_chart_settings['lbl_fontsize']
    title_fontsize = global_bar_chart_settings['title_fontsize']
    chart_keys = chart_settings.keys()
    if 'width' in chart_keys:
        width = chart_settings['width']
    if 'height' in chart_keys:
        height = chart_settings['height']
    if 'title' in chart_keys:
        title = chart_settings['title']
    if 'bar_width' in chart_keys:
        bar_width = chart_settings['bar_width']
    if 'lbl_fontsize' in chart_keys:
        lbl_fontsize = chart_settings['lbl_fontsize']
    if 'title_fontsize' in chart_keys:
        title_fontsize = chart_settings['title_fontsize']
    fig, ax = plt.subplots(figsize=(width, height))
    ind = np.arange(len(df.index))
    values = df[column]
    rects = ax.bar(ind, values, bar_width, alpha=alpha)
    ax.set_ylabel(ylabel, fontsize=lbl_fontsize)
    ax.set_title(title + column, fontsize=title_fontsize)
    ax.set_xticks(ind + 0.1 / 2)
    ax.set_xticklabels(df.index)
    plt.show()

plot_bar({}, get_value_counts('bathrooms', bathroom_df), 'bathrooms')
code
1005471/cell_4
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_json('../input/train.json')
display_count = 3
target = 'interest_level'
train_data.iloc[0]
code
1005471/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_json('../input/train.json')
display_count = 3
target = 'interest_level'
train_data.iloc[0]
uniq_interest_levels = list(train_data[target].unique())
interest_level_groups = train_data.groupby(target)
uniq_interest_levels
code
1005471/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd

print(check_output(['ls', '../input']).decode('utf8'))
code
1005471/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_json('../input/train.json')
display_count = 3
target = 'interest_level'
train_data.iloc[0]

def get_value_counts(col, df):
    return pd.DataFrame(df[col].value_counts())

global_bar_chart_settings = {'height': 4, 'width': 8, 'bar_width': 0.9, 'title': 'Number of occurrences of ', 'ylabel': 'Occurrence', 'alpha': None, 'lbl_fontsize': 15, 'title_fontsize': 20}

def plot_bar(chart_settings, df, column):
    # start from the global defaults, then apply per-chart overrides
    width = global_bar_chart_settings['width']
    height = global_bar_chart_settings['height']
    alpha = global_bar_chart_settings['alpha']
    title = global_bar_chart_settings['title']
    bar_width = global_bar_chart_settings['bar_width']
    ylabel = global_bar_chart_settings['ylabel']
    lbl_fontsize = global_bar_chart_settings['lbl_fontsize']
    title_fontsize = global_bar_chart_settings['title_fontsize']
    chart_keys = chart_settings.keys()
    if 'width' in chart_keys:
        width = chart_settings['width']
    if 'height' in chart_keys:
        height = chart_settings['height']
    if 'title' in chart_keys:
        title = chart_settings['title']
    if 'bar_width' in chart_keys:
        bar_width = chart_settings['bar_width']
    if 'lbl_fontsize' in chart_keys:
        lbl_fontsize = chart_settings['lbl_fontsize']
    if 'title_fontsize' in chart_keys:
        title_fontsize = chart_settings['title_fontsize']
    fig, ax = plt.subplots(figsize=(width, height))
    ind = np.arange(len(df.index))
    values = df[column]
    rects = ax.bar(ind, values, bar_width, alpha=alpha)
    ax.set_ylabel(ylabel, fontsize=lbl_fontsize)
    ax.set_title(title + column, fontsize=title_fontsize)
    ax.set_xticks(ind + 0.1 / 2)
    ax.set_xticklabels(df.index)
    plt.show()

uniq_interest_levels = list(train_data[target].unique())
interest_level_groups = train_data.groupby(target)
uniq_interest_levels
title = 'Number of occurrences in high interest level for '
title = 'Number of occurrences in medium interest level for '
title = 'Number of occurrences in low interest level for '
plot_bar({'title': title, 'title_fontsize': 15}, get_value_counts('bathrooms', interest_level_groups.get_group('low')), 'bathrooms')
code
1005471/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_json('../input/train.json')
display_count = 3
target = 'interest_level'
train_data.iloc[0]

def get_value_counts(col, df):
    return pd.DataFrame(df[col].value_counts())

global_bar_chart_settings = {'height': 4, 'width': 8, 'bar_width': 0.9, 'title': 'Number of occurrences of ', 'ylabel': 'Occurrence', 'alpha': None, 'lbl_fontsize': 15, 'title_fontsize': 20}

def plot_bar(chart_settings, df, column):
    # start from the global defaults, then apply per-chart overrides
    width = global_bar_chart_settings['width']
    height = global_bar_chart_settings['height']
    alpha = global_bar_chart_settings['alpha']
    title = global_bar_chart_settings['title']
    bar_width = global_bar_chart_settings['bar_width']
    ylabel = global_bar_chart_settings['ylabel']
    lbl_fontsize = global_bar_chart_settings['lbl_fontsize']
    title_fontsize = global_bar_chart_settings['title_fontsize']
    chart_keys = chart_settings.keys()
    if 'width' in chart_keys:
        width = chart_settings['width']
    if 'height' in chart_keys:
        height = chart_settings['height']
    if 'title' in chart_keys:
        title = chart_settings['title']
    if 'bar_width' in chart_keys:
        bar_width = chart_settings['bar_width']
    if 'lbl_fontsize' in chart_keys:
        lbl_fontsize = chart_settings['lbl_fontsize']
    if 'title_fontsize' in chart_keys:
        title_fontsize = chart_settings['title_fontsize']
    fig, ax = plt.subplots(figsize=(width, height))
    ind = np.arange(len(df.index))
    values = df[column]
    rects = ax.bar(ind, values, bar_width, alpha=alpha)
    ax.set_ylabel(ylabel, fontsize=lbl_fontsize)
    ax.set_title(title + column, fontsize=title_fontsize)
    ax.set_xticks(ind + 0.1 / 2)
    ax.set_xticklabels(df.index)
    plt.show()

uniq_interest_levels = list(train_data[target].unique())
interest_level_groups = train_data.groupby(target)
uniq_interest_levels
title = 'Number of occurrences in high interest level for '
plot_bar({'title': title, 'title_fontsize': 15}, get_value_counts('bathrooms', interest_level_groups.get_group('high')), 'bathrooms')
code
1005471/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_json('../input/train.json')
display_count = 3
target = 'interest_level'
train_data.iloc[0]
bathroom_df = train_data[['bathrooms', 'interest_level']]
bathroom_df.head(display_count)
code
32072743/cell_34
[ "text_plain_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('prediction_challenge/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
df_test.loc[df_test['Province_State'].isnull(), 'Province_State'] = 'None'
df_test.isnull().sum()
code
32072743/cell_23
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('prediction_challenge/covid19-global-forecasting-week-4/train.csv')
test_province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
df_train.tail()
code
32072743/cell_30
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('prediction_challenge/covid19-global-forecasting-week-4/train.csv')
test_province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
df_train.isnull().sum()
df_train.loc[df_train['Province_State'].isnull(), 'Province_State'] = 'None'
df_train.isnull().sum()
code
32072743/cell_44
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import matplotlib.pyplot as plt  # needed for the bar chart below

df_train = pd.read_csv('prediction_challenge/covid19-global-forecasting-week-4/train.csv')
test_province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
df_train.isnull().sum()
df_train.loc[df_train['Province_State'].isnull(), 'Province_State'] = 'None'
df_train.isnull().sum()
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
confirmed_cases_list = []
# enforce non-decreasing cumulative counts within each (province, country) group
for p_c in province_country_groups.groups.keys():
    confirmed_cases_list = province_country_groups.get_group(p_c)['ConfirmedCases'].tolist()
    corrected = False
    for i in range(len(confirmed_cases_list) - 1):
        if confirmed_cases_list[i] > confirmed_cases_list[i + 1]:
            try:
                if confirmed_cases_list[i] <= confirmed_cases_list[i + 2]:
                    confirmed_cases_list[i + 1] = confirmed_cases_list[i]
                elif confirmed_cases_list[i - 1] <= confirmed_cases_list[i + 1]:
                    confirmed_cases_list[i] = confirmed_cases_list[i - 1]
            except IndexError:
                confirmed_cases_list[i + 1] = confirmed_cases_list[i]
            corrected = True
    if corrected == True:
        df_train.loc[(df_train['Country_Region'] == p_c[1]) & (df_train['Province_State'] == p_c[0]), 'ConfirmedCases'] = confirmed_cases_list
date_range = df_train['Date']
day_groups = df_train.groupby('Date')
latest = day_groups.get_group(max(date_range))
worst_affected = latest.sort_values(by='ConfirmedCases', ascending=False).head(20)
worst_affected.drop(columns=['Id', 'Date'], inplace=True)
worst_affected
worst_affected_locations = [worst_affected['Province_State'].iloc[i] if worst_affected['Province_State'].iloc[i] != 'None' else worst_affected['Country_Region'].iloc[i] for i in range(len(worst_affected))]
worst_affected_locations
plt.figure(figsize=(18, 9))
plt.bar(worst_affected_locations, worst_affected['ConfirmedCases'])
plt.title('Number of Confirmed Cases in the 20 Worst Affected Locations')
plt.ylabel('Number of confirmed cases')
plt.xticks(rotation='vertical')
plt.show()
code
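The correction loop in the 32072743 cells enforces that cumulative case counts never decrease. A simplified sketch of the core idea, forward-fill only, without the look-ahead/look-back checks of the original (toy series, not the competition data):

# Cumulative counts should be non-decreasing; overwrite a dip with the
# previous value.
cases = [1, 5, 3, 7, 6, 9]
for i in range(len(cases) - 1):
    if cases[i] > cases[i + 1]:
        cases[i + 1] = cases[i]
print(cases)  # [1, 5, 5, 7, 7, 9]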
32072743/cell_40
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('prediction_challenge/covid19-global-forecasting-week-4/train.csv')
test_province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
df_train.isnull().sum()
df_train.loc[df_train['Province_State'].isnull(), 'Province_State'] = 'None'
df_train.isnull().sum()
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
confirmed_cases_list = []
for p_c in province_country_groups.groups.keys():
    confirmed_cases_list = province_country_groups.get_group(p_c)['ConfirmedCases'].tolist()
    corrected = False
    for i in range(len(confirmed_cases_list) - 1):
        if confirmed_cases_list[i] > confirmed_cases_list[i + 1]:
            try:
                if confirmed_cases_list[i] <= confirmed_cases_list[i + 2]:
                    confirmed_cases_list[i + 1] = confirmed_cases_list[i]
                elif confirmed_cases_list[i - 1] <= confirmed_cases_list[i + 1]:
                    confirmed_cases_list[i] = confirmed_cases_list[i - 1]
            except IndexError:
                confirmed_cases_list[i + 1] = confirmed_cases_list[i]
            corrected = True
    if corrected == True:
        df_train.loc[(df_train['Country_Region'] == p_c[1]) & (df_train['Province_State'] == p_c[0]), 'ConfirmedCases'] = confirmed_cases_list
date_range = df_train['Date']
day_groups = df_train.groupby('Date')
latest = day_groups.get_group(max(date_range))
code
32072743/cell_41
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('prediction_challenge/covid19-global-forecasting-week-4/train.csv')
test_province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
df_train.isnull().sum()
df_train.loc[df_train['Province_State'].isnull(), 'Province_State'] = 'None'
df_train.isnull().sum()
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
confirmed_cases_list = []
for p_c in province_country_groups.groups.keys():
    confirmed_cases_list = province_country_groups.get_group(p_c)['ConfirmedCases'].tolist()
    corrected = False
    for i in range(len(confirmed_cases_list) - 1):
        if confirmed_cases_list[i] > confirmed_cases_list[i + 1]:
            try:
                if confirmed_cases_list[i] <= confirmed_cases_list[i + 2]:
                    confirmed_cases_list[i + 1] = confirmed_cases_list[i]
                elif confirmed_cases_list[i - 1] <= confirmed_cases_list[i + 1]:
                    confirmed_cases_list[i] = confirmed_cases_list[i - 1]
            except IndexError:
                confirmed_cases_list[i + 1] = confirmed_cases_list[i]
            corrected = True
    if corrected == True:
        df_train.loc[(df_train['Country_Region'] == p_c[1]) & (df_train['Province_State'] == p_c[0]), 'ConfirmedCases'] = confirmed_cases_list
date_range = df_train['Date']
day_groups = df_train.groupby('Date')
latest = day_groups.get_group(max(date_range))
worst_affected = latest.sort_values(by='ConfirmedCases', ascending=False).head(20)
worst_affected.drop(columns=['Id', 'Date'], inplace=True)
worst_affected
code
32072743/cell_19
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('prediction_challenge/covid19-global-forecasting-week-4/train.csv')
# note: despite its name, this groups the training frame; the test file is
# not read in this cell
test_province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
print('Number of unique province_country groups in test file: {}'.format(len(test_province_country_groups.groups.keys())))
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
print('Number of unique province_country groups in training file: {}'.format(len(province_country_groups.groups.keys())))
code
32072743/cell_45
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import matplotlib.pyplot as plt  # needed for the bar chart below

df_train = pd.read_csv('prediction_challenge/covid19-global-forecasting-week-4/train.csv')
test_province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
df_train.isnull().sum()
df_train.loc[df_train['Province_State'].isnull(), 'Province_State'] = 'None'
df_train.isnull().sum()
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
confirmed_cases_list = []
for p_c in province_country_groups.groups.keys():
    confirmed_cases_list = province_country_groups.get_group(p_c)['ConfirmedCases'].tolist()
    corrected = False
    for i in range(len(confirmed_cases_list) - 1):
        if confirmed_cases_list[i] > confirmed_cases_list[i + 1]:
            try:
                if confirmed_cases_list[i] <= confirmed_cases_list[i + 2]:
                    confirmed_cases_list[i + 1] = confirmed_cases_list[i]
                elif confirmed_cases_list[i - 1] <= confirmed_cases_list[i + 1]:
                    confirmed_cases_list[i] = confirmed_cases_list[i - 1]
            except IndexError:
                confirmed_cases_list[i + 1] = confirmed_cases_list[i]
            corrected = True
    if corrected == True:
        df_train.loc[(df_train['Country_Region'] == p_c[1]) & (df_train['Province_State'] == p_c[0]), 'ConfirmedCases'] = confirmed_cases_list
date_range = df_train['Date']
day_groups = df_train.groupby('Date')
latest = day_groups.get_group(max(date_range))
worst_affected = latest.sort_values(by='ConfirmedCases', ascending=False).head(20)
worst_affected.drop(columns=['Id', 'Date'], inplace=True)
worst_affected
worst_affected_locations = [worst_affected['Province_State'].iloc[i] if worst_affected['Province_State'].iloc[i] != 'None' else worst_affected['Country_Region'].iloc[i] for i in range(len(worst_affected))]
worst_affected_locations
plt.figure(figsize=(18, 9))
plt.bar(worst_affected_locations, worst_affected['Fatalities'])
plt.title('Number of Fatalities in the 20 Worst Affected Locations')
plt.ylabel('Number of fatalities')
plt.xticks(rotation='vertical')
plt.show()
code
32072743/cell_49
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('prediction_challenge/covid19-global-forecasting-week-4/train.csv')
test_province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
df_train.isnull().sum()
df_train.loc[df_train['Province_State'].isnull(), 'Province_State'] = 'None'
df_train.isnull().sum()
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
confirmed_cases_list = []
for p_c in province_country_groups.groups.keys():
    confirmed_cases_list = province_country_groups.get_group(p_c)['ConfirmedCases'].tolist()
    corrected = False
    for i in range(len(confirmed_cases_list) - 1):
        if confirmed_cases_list[i] > confirmed_cases_list[i + 1]:
            try:
                if confirmed_cases_list[i] <= confirmed_cases_list[i + 2]:
                    confirmed_cases_list[i + 1] = confirmed_cases_list[i]
                elif confirmed_cases_list[i - 1] <= confirmed_cases_list[i + 1]:
                    confirmed_cases_list[i] = confirmed_cases_list[i - 1]
            except IndexError:
                confirmed_cases_list[i + 1] = confirmed_cases_list[i]
            corrected = True
    if corrected == True:
        df_train.loc[(df_train['Country_Region'] == p_c[1]) & (df_train['Province_State'] == p_c[0]), 'ConfirmedCases'] = confirmed_cases_list
date_range = df_train['Date']
day_groups = df_train.groupby('Date')
latest = day_groups.get_group(max(date_range))
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
code
32072743/cell_28
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('prediction_challenge/covid19-global-forecasting-week-4/train.csv')
test_province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
df_train.isnull().sum()
code
32072743/cell_16
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('prediction_challenge/covid19-global-forecasting-week-4/train.csv')
df_train.head()
code
32072743/cell_43
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('prediction_challenge/covid19-global-forecasting-week-4/train.csv')
test_province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
df_train.isnull().sum()
df_train.loc[df_train['Province_State'].isnull(), 'Province_State'] = 'None'
df_train.isnull().sum()
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
confirmed_cases_list = []
for p_c in province_country_groups.groups.keys():
    confirmed_cases_list = province_country_groups.get_group(p_c)['ConfirmedCases'].tolist()
    corrected = False
    for i in range(len(confirmed_cases_list) - 1):
        if confirmed_cases_list[i] > confirmed_cases_list[i + 1]:
            try:
                if confirmed_cases_list[i] <= confirmed_cases_list[i + 2]:
                    confirmed_cases_list[i + 1] = confirmed_cases_list[i]
                elif confirmed_cases_list[i - 1] <= confirmed_cases_list[i + 1]:
                    confirmed_cases_list[i] = confirmed_cases_list[i - 1]
            except IndexError:
                confirmed_cases_list[i + 1] = confirmed_cases_list[i]
            corrected = True
    if corrected == True:
        df_train.loc[(df_train['Country_Region'] == p_c[1]) & (df_train['Province_State'] == p_c[0]), 'ConfirmedCases'] = confirmed_cases_list
date_range = df_train['Date']
day_groups = df_train.groupby('Date')
latest = day_groups.get_group(max(date_range))
worst_affected = latest.sort_values(by='ConfirmedCases', ascending=False).head(20)
worst_affected.drop(columns=['Id', 'Date'], inplace=True)
worst_affected
worst_affected_locations = [worst_affected['Province_State'].iloc[i] if worst_affected['Province_State'].iloc[i] != 'None' else worst_affected['Country_Region'].iloc[i] for i in range(len(worst_affected))]
worst_affected_locations
code
32072743/cell_31
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('prediction_challenge/covid19-global-forecasting-week-4/train.csv')
test_province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
df_train.isnull().sum()
df_train.loc[df_train['Province_State'].isnull(), 'Province_State'] = 'None'
df_train.isnull().sum()
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
print('Number of unique province_country groups in training data: {}'.format(len(province_country_groups.groups.keys())))
code
32072743/cell_24
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('prediction_challenge/covid19-global-forecasting-week-4/train.csv')
test_province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
df_train.info()
code
32072743/cell_22
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('prediction_challenge/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
test_province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
df_train['Date'] = pd.to_datetime(df_train['Date'], format='%Y-%m-%d')
df_test['Date'] = pd.to_datetime(df_test['Date'], format='%Y-%m-%d')
code
32072743/cell_36
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
df_train = pd.read_csv('prediction_challenge/covid19-global-forecasting-week-4/train.csv')
test_province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
df_train.isnull().sum()
df_train.loc[df_train['Province_State'].isnull(), 'Province_State'] = 'None'
df_train.isnull().sum()
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
province_country_groups = df_train.groupby(['Province_State', 'Country_Region'])
confirmed_cases_list = []
for p_c in province_country_groups.groups.keys():
    confirmed_cases_list = province_country_groups.get_group(p_c)['ConfirmedCases'].tolist()
    corrected = False
    for i in range(len(confirmed_cases_list) - 1):
        if confirmed_cases_list[i] > confirmed_cases_list[i + 1]:
            try:
                if confirmed_cases_list[i] <= confirmed_cases_list[i + 2]:
                    print('Correcting low data point. Replaced {0} with {1} for country/province {2}'.format(confirmed_cases_list[i + 1], confirmed_cases_list[i], p_c))
                    confirmed_cases_list[i + 1] = confirmed_cases_list[i]
                elif confirmed_cases_list[i - 1] <= confirmed_cases_list[i + 1]:
                    print('Correcting high data point. Replaced {0} with {1} for country/province {2}'.format(confirmed_cases_list[i], confirmed_cases_list[i - 1], p_c))
                    confirmed_cases_list[i] = confirmed_cases_list[i - 1]
                else:
                    print('Not able to correct an erroneous point for country/province {0} automatically'.format(p_c))
            except IndexError:
                print('Correcting penultimate data point. Replaced {0} with {1} for country/province {2}'.format(confirmed_cases_list[i + 1], confirmed_cases_list[i], p_c))
                confirmed_cases_list[i + 1] = confirmed_cases_list[i]
            corrected = True
    if corrected == True:
        print('Correcting for country/province {0}'.format(p_c))
        df_train.loc[(df_train['Country_Region'] == p_c[1]) & (df_train['Province_State'] == p_c[0]), 'ConfirmedCases'] = confirmed_cases_list
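# Editor's sketch (not part of the original cell): the correction rule above
# applied by hand to a synthetic series; `demo` is a hypothetical name.
demo = [1, 5, 2, 6]
for i in range(len(demo) - 1):
    # the and short-circuits, so demo[i + 2] is never read out of range here
    if demo[i] > demo[i + 1] and demo[i] <= demo[i + 2]:
        demo[i + 1] = demo[i]  # raise the low point to restore monotonicity
print(demo)  # [1, 5, 5, 6]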
code
121148680/cell_4
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
filepath = '/kaggle/input/er-fast-track'
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv(f'{filepath}/heart.csv')
df.head(5)
code
122264339/cell_4
[ "text_plain_output_1.png" ]
a = 'The quick brown fox jumps over the lazy dog'
b = set(a)
b
a = 'The quick brown fox jumps over the lazy dog'
b = set(a)
count = 0
for i in b:
    count = count + 1
a = 'The quick brown fox jumps over the lazy dog'
a = a.lower()
a
code
122264339/cell_6
[ "text_plain_output_1.png" ]
a = 'The quick brown fox jumps over the lazy dog'
b = set(a)
b
a = 'The quick brown fox jumps over the lazy dog'
b = set(a)
count = 0
for i in b:
    count = count + 1
a = 'The quick brown fox jumps over the lazy dog'
a = a.lower()
a
a = 'The quick brown fox jumps over the lazy dog'
a = a.lower()
b = set(a)
b
a = 'The quick brown fox jumps over the lazy dog'
a = a.lower()
b = set(a)
count = 0  # reset before counting letters, otherwise the earlier count carries over
for i in b:
    if i.isalpha() == True:
        count = count + 1
if count == 26:
    print('The string is a pangram')
else:
    print('The string is not a pangram')
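# Editor's note: a standard-library equivalent of the check above (a sketch):
import string
print(set(string.ascii_lowercase) <= set(a))  # True when `a` uses every letter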
code
122264339/cell_2
[ "text_plain_output_1.png" ]
a = 'The quick brown fox jumps over the lazy dog'
b = set(a)
b
code
122264339/cell_8
[ "text_plain_output_1.png" ]
a = 'The quick brown fox jumps over the lazy dog'
b = set(a)
b
a = 'The quick brown fox jumps over the lazy dog'
b = set(a)
count = 0
for i in b:
    count = count + 1
a = 'The quick brown fox jumps over the lazy dog'
a = a.lower()
a
a = 'The quick brown fox jumps over the lazy dog'
a = a.lower()
b = set(a)
b
A = (3, 4, 5, 6)
B = (4, 5)
count = 0
for i in B:
    for j in A:
        if i == j:
            count = count + 1
            break
if count == len(B):
    print('B is a subset of A')
else:
    print('B is not a subset of A')
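# Editor's note: the built-in equivalent of the nested-loop membership count:
print(set(B).issubset(A))  # True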
code
122264339/cell_3
[ "text_plain_output_1.png" ]
a = 'The quick brown fox jumps over the lazy dog'
b = set(a)
b
a = 'The quick brown fox jumps over the lazy dog'
b = set(a)
count = 0
for i in b:
    count = count + 1
print(count)
code
122264339/cell_5
[ "text_plain_output_1.png" ]
a = 'The quick brown fox jumps over the lazy dog'
b = set(a)
b
a = 'The quick brown fox jumps over the lazy dog'
b = set(a)
count = 0
for i in b:
    count = count + 1
a = 'The quick brown fox jumps over the lazy dog'
a = a.lower()
a
a = 'The quick brown fox jumps over the lazy dog'
a = a.lower()
b = set(a)
b
code
129039294/cell_21
[ "text_plain_output_1.png" ]
import glob
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import glob
rating_files = glob.glob('/kaggle/input/netflix-prize-data/combined_data_*.txt')
rating_files
df_ratings = pd.concat([pd.read_csv(filename, header=None, names=['customer_id', 'rating', 'date'], parse_dates=['date']) for filename in rating_files])
df_ratings['movie_id'] = np.where(df_ratings['rating'].isna(), df_ratings['customer_id'], np.nan)
df_ratings['movie_id'] = df_ratings['movie_id'].str.split(':').str[0]
df_ratings['movie_id'] = df_ratings['movie_id'].fillna(method='ffill')
df_ratings.dropna(subset=['rating', 'date'], inplace=True)
df_ratings = df_ratings.astype({'customer_id': 'int', 'movie_id': 'int'})
df_ratings.isna().sum()
df_movies = pd.read_csv('/kaggle/working/modified_movie_titles.csv', header=None, names=['movie_id', 'year_of_release', 'title'], parse_dates=['year_of_release'], encoding='latin-1')
df_movies.isna().sum()
df_ratings.drop_duplicates(inplace=True)
df_movies.drop_duplicates(inplace=True)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
df_ratings['year'] = df_ratings['date'].dt.year
df_ratings['year'].value_counts().plot()
plt.title('Number of ratings per year')
plt.show()
code
129039294/cell_13
[ "text_plain_output_1.png" ]
import glob
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import glob
rating_files = glob.glob('/kaggle/input/netflix-prize-data/combined_data_*.txt')
rating_files
df_ratings = pd.concat([pd.read_csv(filename, header=None, names=['customer_id', 'rating', 'date'], parse_dates=['date']) for filename in rating_files])
df_movies = pd.read_csv('/kaggle/working/modified_movie_titles.csv', header=None, names=['movie_id', 'year_of_release', 'title'], parse_dates=['year_of_release'], encoding='latin-1')
df_movies.info()
code
129039294/cell_9
[ "text_html_output_1.png" ]
import glob
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import glob
rating_files = glob.glob('/kaggle/input/netflix-prize-data/combined_data_*.txt')
rating_files
df_ratings = pd.concat([pd.read_csv(filename, header=None, names=['customer_id', 'rating', 'date'], parse_dates=['date']) for filename in rating_files])
df_ratings['movie_id'] = np.where(df_ratings['rating'].isna(), df_ratings['customer_id'], np.nan)
df_ratings['movie_id'] = df_ratings['movie_id'].str.split(':').str[0]
df_ratings['movie_id'] = df_ratings['movie_id'].fillna(method='ffill')
df_ratings.dropna(subset=['rating', 'date'], inplace=True)
df_ratings = df_ratings.astype({'customer_id': 'int', 'movie_id': 'int'})
df_ratings.isna().sum()
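# Editor's note on the movie_id steps above: in the raw combined_data files a
# movie's ratings follow a header row of the form '<movie_id>:', which lands
# in the customer_id column; np.where copies those headers into movie_id, the
# split on ':' drops the colon, and the forward fill propagates each id down
# to its rating rows before the header rows themselves are dropped.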
code
129039294/cell_25
[ "image_output_1.png" ]
import glob
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import glob
rating_files = glob.glob('/kaggle/input/netflix-prize-data/combined_data_*.txt')
rating_files
df_ratings = pd.concat([pd.read_csv(filename, header=None, names=['customer_id', 'rating', 'date'], parse_dates=['date']) for filename in rating_files])
df_ratings['movie_id'] = np.where(df_ratings['rating'].isna(), df_ratings['customer_id'], np.nan)
df_ratings['movie_id'] = df_ratings['movie_id'].str.split(':').str[0]
df_ratings['movie_id'] = df_ratings['movie_id'].fillna(method='ffill')
df_ratings.dropna(subset=['rating', 'date'], inplace=True)
df_ratings = df_ratings.astype({'customer_id': 'int', 'movie_id': 'int'})
df_ratings.isna().sum()
df_movies = pd.read_csv('/kaggle/working/modified_movie_titles.csv', header=None, names=['movie_id', 'year_of_release', 'title'], parse_dates=['year_of_release'], encoding='latin-1')
df_movies.isna().sum()
df_ratings.drop_duplicates(inplace=True)
df_movies.drop_duplicates(inplace=True)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
pd.set_option('display.max_rows', 100)
df_movies.groupby('year_of_release').count()
df_ratings['year'] = df_ratings['date'].dt.year
df_movie_rating_count = df_ratings.groupby('movie_id').count()
pd.DataFrame({'percentile': np.arange(5, 100, 5), 'n_ratings': np.percentile(df_movie_rating_count['rating'], np.arange(5, 100, 5)).astype('int')})
code
129039294/cell_4
[ "image_output_1.png" ]
import glob
import glob
rating_files = glob.glob('/kaggle/input/netflix-prize-data/combined_data_*.txt')
rating_files
code
129039294/cell_20
[ "text_html_output_1.png" ]
import glob
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import glob
rating_files = glob.glob('/kaggle/input/netflix-prize-data/combined_data_*.txt')
rating_files
df_ratings = pd.concat([pd.read_csv(filename, header=None, names=['customer_id', 'rating', 'date'], parse_dates=['date']) for filename in rating_files])
df_ratings['movie_id'] = np.where(df_ratings['rating'].isna(), df_ratings['customer_id'], np.nan)
df_ratings['movie_id'] = df_ratings['movie_id'].str.split(':').str[0]
df_ratings['movie_id'] = df_ratings['movie_id'].fillna(method='ffill')
df_ratings.dropna(subset=['rating', 'date'], inplace=True)
df_ratings = df_ratings.astype({'customer_id': 'int', 'movie_id': 'int'})
df_ratings.isna().sum()
df_movies = pd.read_csv('/kaggle/working/modified_movie_titles.csv', header=None, names=['movie_id', 'year_of_release', 'title'], parse_dates=['year_of_release'], encoding='latin-1')
df_movies.isna().sum()
df_ratings.drop_duplicates(inplace=True)
df_movies.drop_duplicates(inplace=True)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
pd.set_option('display.max_rows', 100)
df_movies.groupby('year_of_release').count()
code
129039294/cell_19
[ "text_plain_output_1.png" ]
import glob
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import glob
rating_files = glob.glob('/kaggle/input/netflix-prize-data/combined_data_*.txt')
rating_files
df_ratings = pd.concat([pd.read_csv(filename, header=None, names=['customer_id', 'rating', 'date'], parse_dates=['date']) for filename in rating_files])
df_ratings['movie_id'] = np.where(df_ratings['rating'].isna(), df_ratings['customer_id'], np.nan)
df_ratings['movie_id'] = df_ratings['movie_id'].str.split(':').str[0]
df_ratings['movie_id'] = df_ratings['movie_id'].fillna(method='ffill')
df_ratings.dropna(subset=['rating', 'date'], inplace=True)
df_ratings = df_ratings.astype({'customer_id': 'int', 'movie_id': 'int'})
df_ratings.isna().sum()
df_movies = pd.read_csv('/kaggle/working/modified_movie_titles.csv', header=None, names=['movie_id', 'year_of_release', 'title'], parse_dates=['year_of_release'], encoding='latin-1')
df_movies.isna().sum()
df_ratings.drop_duplicates(inplace=True)
df_movies.drop_duplicates(inplace=True)
plt.xticks(rotation=90)
df_movies.groupby('year_of_release')['movie_id'].count().plot()
plt.xticks(rotation=90)
plt.title('Number of movies by year')
plt.show()
code
129039294/cell_7
[ "text_html_output_1.png" ]
import glob
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import glob
rating_files = glob.glob('/kaggle/input/netflix-prize-data/combined_data_*.txt')
rating_files
df_ratings = pd.concat([pd.read_csv(filename, header=None, names=['customer_id', 'rating', 'date'], parse_dates=['date']) for filename in rating_files])
df_ratings['movie_id'] = np.where(df_ratings['rating'].isna(), df_ratings['customer_id'], np.nan)
df_ratings['movie_id'] = df_ratings['movie_id'].str.split(':').str[0]
df_ratings['movie_id'] = df_ratings['movie_id'].fillna(method='ffill')
df_ratings.dropna(subset=['rating', 'date'], inplace=True)
df_ratings.head()
code
129039294/cell_18
[ "text_plain_output_1.png" ]
import glob
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import glob
rating_files = glob.glob('/kaggle/input/netflix-prize-data/combined_data_*.txt')
rating_files
df_ratings = pd.concat([pd.read_csv(filename, header=None, names=['customer_id', 'rating', 'date'], parse_dates=['date']) for filename in rating_files])
df_ratings['movie_id'] = np.where(df_ratings['rating'].isna(), df_ratings['customer_id'], np.nan)
df_ratings['movie_id'] = df_ratings['movie_id'].str.split(':').str[0]
df_ratings['movie_id'] = df_ratings['movie_id'].fillna(method='ffill')
df_ratings.dropna(subset=['rating', 'date'], inplace=True)
df_ratings = df_ratings.astype({'customer_id': 'int', 'movie_id': 'int'})
df_ratings.isna().sum()
df_movies = pd.read_csv('/kaggle/working/modified_movie_titles.csv', header=None, names=['movie_id', 'year_of_release', 'title'], parse_dates=['year_of_release'], encoding='latin-1')
df_movies.isna().sum()
df_ratings.drop_duplicates(inplace=True)
df_movies.drop_duplicates(inplace=True)
df_movies.groupby('year_of_release')['movie_id'].count().plot()
plt.xticks(rotation=90)
plt.title('Number of movies by year')
plt.show()
code
129039294/cell_28
[ "image_output_1.png" ]
import glob
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import glob
rating_files = glob.glob('/kaggle/input/netflix-prize-data/combined_data_*.txt')
rating_files
df_ratings = pd.concat([pd.read_csv(filename, header=None, names=['customer_id', 'rating', 'date'], parse_dates=['date']) for filename in rating_files])
df_ratings['movie_id'] = np.where(df_ratings['rating'].isna(), df_ratings['customer_id'], np.nan)
df_ratings['movie_id'] = df_ratings['movie_id'].str.split(':').str[0]
df_ratings['movie_id'] = df_ratings['movie_id'].fillna(method='ffill')
df_ratings.dropna(subset=['rating', 'date'], inplace=True)
df_ratings = df_ratings.astype({'customer_id': 'int', 'movie_id': 'int'})
df_ratings.isna().sum()
df_movies = pd.read_csv('/kaggle/working/modified_movie_titles.csv', header=None, names=['movie_id', 'year_of_release', 'title'], parse_dates=['year_of_release'], encoding='latin-1')
df_movies.isna().sum()
df_ratings.drop_duplicates(inplace=True)
df_movies.drop_duplicates(inplace=True)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
pd.set_option('display.max_rows', 100)
df_movies.groupby('year_of_release').count()
df_ratings['year'] = df_ratings['date'].dt.year
df_movie_rating_count = df_ratings.groupby('movie_id').count()
pd.DataFrame({'percentile': np.arange(5, 100, 5), 'n_ratings': np.percentile(df_movie_rating_count['rating'], np.arange(5, 100, 5)).astype('int')})
today = '2005-01-01'
today_dt = pd.to_datetime(today)
df_ratings['days_from_rating'] = (today_dt - df_ratings['date']).dt.days.clip(lower=0)
df_ratings[['date', 'days_from_rating']].head()
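# Editor's note: (today_dt - df_ratings['date']) is a Timedelta series;
# .dt.days extracts whole days and .clip(lower=0) floors any rating dated
# after `today` at 0 instead of leaving a negative age.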
code
129039294/cell_8
[ "text_html_output_1.png" ]
import glob
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import glob
rating_files = glob.glob('/kaggle/input/netflix-prize-data/combined_data_*.txt')
rating_files
df_ratings = pd.concat([pd.read_csv(filename, header=None, names=['customer_id', 'rating', 'date'], parse_dates=['date']) for filename in rating_files])
df_ratings['movie_id'] = np.where(df_ratings['rating'].isna(), df_ratings['customer_id'], np.nan)
df_ratings['movie_id'] = df_ratings['movie_id'].str.split(':').str[0]
df_ratings['movie_id'] = df_ratings['movie_id'].fillna(method='ffill')
df_ratings.dropna(subset=['rating', 'date'], inplace=True)
df_ratings = df_ratings.astype({'customer_id': 'int', 'movie_id': 'int'})
df_ratings.info()
code
129039294/cell_3
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
129039294/cell_17
[ "text_html_output_1.png" ]
import glob
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import glob
rating_files = glob.glob('/kaggle/input/netflix-prize-data/combined_data_*.txt')
rating_files
df_ratings = pd.concat([pd.read_csv(filename, header=None, names=['customer_id', 'rating', 'date'], parse_dates=['date']) for filename in rating_files])
df_ratings['movie_id'] = np.where(df_ratings['rating'].isna(), df_ratings['customer_id'], np.nan)
df_ratings['movie_id'] = df_ratings['movie_id'].str.split(':').str[0]
df_ratings['movie_id'] = df_ratings['movie_id'].fillna(method='ffill')
df_ratings.dropna(subset=['rating', 'date'], inplace=True)
df_ratings = df_ratings.astype({'customer_id': 'int', 'movie_id': 'int'})
df_ratings.isna().sum()
df_movies = pd.read_csv('/kaggle/working/modified_movie_titles.csv', header=None, names=['movie_id', 'year_of_release', 'title'], parse_dates=['year_of_release'], encoding='latin-1')
df_movies.isna().sum()
df_ratings.drop_duplicates(inplace=True)
df_movies.drop_duplicates(inplace=True)
df_ratings['rating'].value_counts(normalize=True).plot(kind='bar')
plt.title('Proportions of ratings per rating values')
plt.show()
code
129039294/cell_24
[ "image_output_1.png" ]
import glob
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import glob
rating_files = glob.glob('/kaggle/input/netflix-prize-data/combined_data_*.txt')
rating_files
df_ratings = pd.concat([pd.read_csv(filename, header=None, names=['customer_id', 'rating', 'date'], parse_dates=['date']) for filename in rating_files])
df_ratings['movie_id'] = np.where(df_ratings['rating'].isna(), df_ratings['customer_id'], np.nan)
df_ratings['movie_id'] = df_ratings['movie_id'].str.split(':').str[0]
df_ratings['movie_id'] = df_ratings['movie_id'].fillna(method='ffill')
df_ratings.dropna(subset=['rating', 'date'], inplace=True)
df_ratings = df_ratings.astype({'customer_id': 'int', 'movie_id': 'int'})
df_ratings.isna().sum()
df_movies = pd.read_csv('/kaggle/working/modified_movie_titles.csv', header=None, names=['movie_id', 'year_of_release', 'title'], parse_dates=['year_of_release'], encoding='latin-1')
df_movies.isna().sum()
df_ratings.drop_duplicates(inplace=True)
df_movies.drop_duplicates(inplace=True)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
df_ratings['year'] = df_ratings['date'].dt.year
df_movie_rating_count = df_ratings.groupby('movie_id').count()
sns.ecdfplot(data=df_movie_rating_count, x='rating')
plt.title('CDF of number of ratings by movies')
plt.show()
code
129039294/cell_14
[ "text_html_output_1.png" ]
import glob
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import glob
rating_files = glob.glob('/kaggle/input/netflix-prize-data/combined_data_*.txt')
rating_files
df_ratings = pd.concat([pd.read_csv(filename, header=None, names=['customer_id', 'rating', 'date'], parse_dates=['date']) for filename in rating_files])
df_movies = pd.read_csv('/kaggle/working/modified_movie_titles.csv', header=None, names=['movie_id', 'year_of_release', 'title'], parse_dates=['year_of_release'], encoding='latin-1')
df_movies.isna().sum()
code
129039294/cell_22
[ "text_plain_output_1.png" ]
import glob
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import glob
rating_files = glob.glob('/kaggle/input/netflix-prize-data/combined_data_*.txt')
rating_files
df_ratings = pd.concat([pd.read_csv(filename, header=None, names=['customer_id', 'rating', 'date'], parse_dates=['date']) for filename in rating_files])
df_ratings['movie_id'] = np.where(df_ratings['rating'].isna(), df_ratings['customer_id'], np.nan)
df_ratings['movie_id'] = df_ratings['movie_id'].str.split(':').str[0]
df_ratings['movie_id'] = df_ratings['movie_id'].fillna(method='ffill')
df_ratings.dropna(subset=['rating', 'date'], inplace=True)
df_ratings = df_ratings.astype({'customer_id': 'int', 'movie_id': 'int'})
df_ratings.isna().sum()
df_movies = pd.read_csv('/kaggle/working/modified_movie_titles.csv', header=None, names=['movie_id', 'year_of_release', 'title'], parse_dates=['year_of_release'], encoding='latin-1')
df_movies.isna().sum()
df_ratings.drop_duplicates(inplace=True)
df_movies.drop_duplicates(inplace=True)
plt.xticks(rotation=90)
plt.xticks(rotation=90)
df_ratings['year'] = df_ratings['date'].dt.year
df_movie_rating_count = df_ratings.groupby('movie_id').count()
sns.boxplot(data=df_movie_rating_count, x='rating')
plt.title('Number of ratings by movies')
plt.show()
code
129039294/cell_12
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import glob
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import glob
rating_files = glob.glob('/kaggle/input/netflix-prize-data/combined_data_*.txt')
rating_files
df_ratings = pd.concat([pd.read_csv(filename, header=None, names=['customer_id', 'rating', 'date'], parse_dates=['date']) for filename in rating_files])
df_movies = pd.read_csv('/kaggle/working/modified_movie_titles.csv', header=None, names=['movie_id', 'year_of_release', 'title'], parse_dates=['year_of_release'], encoding='latin-1')
df_movies.head()
code
129039294/cell_5
[ "image_output_1.png" ]
import glob
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import glob
rating_files = glob.glob('/kaggle/input/netflix-prize-data/combined_data_*.txt')
rating_files
df_ratings = pd.concat([pd.read_csv(filename, header=None, names=['customer_id', 'rating', 'date'], parse_dates=['date']) for filename in rating_files])
df_ratings.head()
code
16123001/cell_21
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
Health_df.apply(lambda x: len(x.unique()))
Health_df.isna().sum()
Health_df.dropna(subset=['Value'], inplace=True)
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder()
cat_col = Health_df.select_dtypes(exclude=np.number).drop(['Notes', 'Methods'], axis=1)
num_col = Health_df.select_dtypes(include=np.number)
cat_col = cat_col.apply(LabelEncoder().fit_transform)
final_df = pd.concat([cat_col, num_col], axis=1)
final_df.corr()
code
16123001/cell_13
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
Health_df.apply(lambda x: len(x.unique()))
Health_df.isna().sum()
Health_df.dropna(subset=['Value'], inplace=True)
Health_df['Indicator Category'].unique()
code
16123001/cell_4
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
Health_df.head()
code
16123001/cell_20
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
Health_df.apply(lambda x: len(x.unique()))
Health_df.isna().sum()
Health_df.dropna(subset=['Value'], inplace=True)
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder()
cat_col = Health_df.select_dtypes(exclude=np.number).drop(['Notes', 'Methods'], axis=1)
num_col = Health_df.select_dtypes(include=np.number)
cat_col = cat_col.apply(LabelEncoder().fit_transform)
final_df = pd.concat([cat_col, num_col], axis=1)
final_df.head()
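# Editor's note: .apply(LabelEncoder().fit_transform) refits the encoder on
# every column, so the resulting integer codes are only meaningful within a
# column and are not comparable across columns.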
code
16123001/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
Health_df.describe()
code
16123001/cell_29
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, mean_squared_log_error, r2_score
from sklearn.preprocessing import LabelEncoder
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
Health_df.apply(lambda x: len(x.unique()))
Health_df.isna().sum()
Health_df.dropna(subset=['Value'], inplace=True)
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder()
cat_col = Health_df.select_dtypes(exclude=np.number).drop(['Notes', 'Methods'], axis=1)
num_col = Health_df.select_dtypes(include=np.number)
lm = LinearRegression()
lm.fit(X_train, Y_train)
Y_train_predict = lm.predict(X_train)
Y_test_predict = lm.predict(X_test)
print('MSE Train:', mean_squared_error(Y_train, Y_train_predict))
print('MSE Test:', mean_squared_error(Y_test, Y_test_predict))
print('RMSE Train:', np.sqrt(mean_squared_error(Y_train, Y_train_predict)))
print('RMSE Test:', np.sqrt(mean_squared_error(Y_test, Y_test_predict)))
print('MAE Train', mean_absolute_error(Y_train, Y_train_predict))
print('MAE Test', mean_absolute_error(Y_test, Y_test_predict))
print('R2 Train', r2_score(Y_train, Y_train_predict))
print('R2 Test', r2_score(Y_test, Y_test_predict))
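# Editor's note: RMSE is simply sqrt(MSE), so those two pairs of printouts
# move together; a test value well above its train counterpart is the usual
# sign of overfitting.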
code
16123001/cell_26
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
lm = LinearRegression()
lm.fit(X_train, Y_train)
code
16123001/cell_11
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
Health_df.apply(lambda x: len(x.unique()))
Health_df.isna().sum()
Health_df.dropna(subset=['Value'], inplace=True)
list(Health_df['Source'].unique())
code
16123001/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
code
16123001/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
Health_df.apply(lambda x: len(x.unique()))
code
16123001/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
Health_df.apply(lambda x: len(x.unique()))
Health_df.isna().sum()
code
16123001/cell_3
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
Health_df.info()
code
16123001/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
Health_df.apply(lambda x: len(x.unique()))
Health_df.isna().sum()
Health_df.dropna(subset=['Value'], inplace=True)
Health_df[Health_df['Indicator Category'] == 'Demographics']
code
16123001/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
Health_df.apply(lambda x: len(x.unique()))
Health_df.isna().sum()
Health_df.dropna(subset=['Value'], inplace=True)
Health_df['Source'].value_counts()
code
16123001/cell_27
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression
lm = LinearRegression()
lm.fit(X_train, Y_train)
print('Intercept value:', lm.intercept_)
print('Coefficient values:', lm.coef_)
code
16123001/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
Health_df.apply(lambda x: len(x.unique()))
Health_df.isna().sum()
Health_df.dropna(subset=['Value'], inplace=True)
Health_df[Health_df['Value'] == 80977]
code
16123001/cell_5
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
Health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
list(Health_df['Indicator'].unique())
code
121149196/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
121149196/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
if df.isnull().values.any():
    print('There are empty cells in the dataframe')
else:
    print('There are no empty cells in the dataframe')
code
121149196/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/noshowappointments/KaggleV2-May-2016.csv')
duplicates_P = df[df.duplicated(['PatientId'])]
if duplicates_P.empty:
    print('There are no duplicates in the PatientId')
else:
    print(f'There are {len(duplicates_P)} duplicates in the PatientId')
    df.drop_duplicates(subset=['PatientId'], keep='first', inplace=True)
    print(f'Removed {len(duplicates_P)} duplicates')  # the drop removes exactly the flagged rows
duplicates_A = df.duplicated(subset='AppointmentID', keep='first')
if duplicates_A.sum() > 0:
    df.drop_duplicates(subset='AppointmentID', keep='first', inplace=True)
    print(f'{duplicates_A.sum()} duplicates found and removed.')
else:
    print('There are no duplicates in the AppointmentID')
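# Editor's sketch: a reusable variant of the two checks above; the helper
# name is hypothetical, not from the original notebook.
def report_and_drop_duplicates(frame, subset):
    # Rows flagged as duplicates when the first occurrence is kept.
    n_dup = frame.duplicated(subset=subset, keep='first').sum()
    if n_dup:
        frame.drop_duplicates(subset=subset, keep='first', inplace=True)
        print(f'{n_dup} duplicates found and removed for {subset}.')
    else:
        print(f'There are no duplicates in {subset}.')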
code
17134452/cell_21
[ "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.offline import iplot, init_notebook_mode
from plotly.tools import make_subplots
import advertools as adv
import pandas as pd
import plotly.graph_objs as go
import advertools as adv
import pandas as pd
pd.options.display.max_columns = None
from plotly.tools import make_subplots
import plotly.graph_objs as go
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
adv.__version__
column_key = pd.read_csv('../input/column_key.csv')
column_key
clubs = pd.read_csv('../input/clubs.csv')
serp_clubs = pd.read_csv('../input/serp_clubs.csv')
serp_clubs['totalResults'] = serp_clubs['totalResults'].astype('int')
serp_clubs['queryTime'] = pd.to_datetime(serp_clubs['queryTime'])
(serp_clubs.drop_duplicates(['searchTerms'])
           .groupby('searchTerms', as_index=False)
           .agg({'totalResults': 'sum'})
           .sort_values('totalResults', ascending=False)
           .reset_index(drop=True)
           .head(15)
           .style.format({'totalResults': '{:,}'}))
hl_domain_appearances = (serp_clubs.groupby(['hl', 'displayLink'])
                                   .agg({'rank': 'count'})
                                   .reset_index()
                                   .sort_values(['hl', 'rank'], ascending=False)
                                   .rename(columns={'rank': 'search_appearances'}))
hl_domain_appearances.groupby(['hl']).head(5)
fig = make_subplots(1, 7, print_grid=False, shared_yaxes=True)
for i, lang in enumerate(serp_clubs['hl'].unique()[:7]):
    df = serp_clubs[serp_clubs['hl'] == lang]
    fig.append_trace(go.Bar(y=df['displayLink'].value_counts().values[:8],
                            x=df['displayLink'].value_counts().index.str.replace('www.', '')[:8],
                            name=lang, orientation='v'),
                     row=1, col=i + 1)
fig.layout.margin = {'b': 150, 'r': 30}
fig.layout.legend.orientation = 'h'
fig.layout.legend.y = -0.5
fig.layout.legend.x = 0.15
fig.layout.title = 'Top Domains by Language of Search'
fig.layout.yaxis.title = 'Number of Appearances on SERPs'
fig.layout.plot_bgcolor = '#eeeeee'
fig.layout.paper_bgcolor = '#eeeeee'
iplot(fig)
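# Editor's note: shared_yaxes=True puts all seven language panels on one
# scale, so the SERP appearance counts are directly comparable across languages.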
code
17134452/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.offline import iplot, init_notebook_mode
import advertools as adv
import pandas as pd
import advertools as adv
import pandas as pd
pd.options.display.max_columns = None
from plotly.tools import make_subplots
import plotly.graph_objs as go
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
adv.__version__
column_key = pd.read_csv('../input/column_key.csv')
column_key
clubs = pd.read_csv('../input/clubs.csv')
serp_clubs = pd.read_csv('../input/serp_clubs.csv')
serp_clubs['totalResults'] = serp_clubs['totalResults'].astype('int')
serp_clubs['queryTime'] = pd.to_datetime(serp_clubs['queryTime'])
serp_clubs['displayLink'].value_counts()[:10]
code
17134452/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
lang_football = {'en': 'football', 'fr': 'football', 'de': 'fußball', 'es': 'fútbol', 'it': 'calcio', 'pt-BR': 'futebol', 'nl': 'voetbal'}
lang_football
len(lang_football)
code
17134452/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.offline import iplot, init_notebook_mode
import advertools as adv
import pandas as pd
import advertools as adv
import pandas as pd
pd.options.display.max_columns = None
from plotly.tools import make_subplots
import plotly.graph_objs as go
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
adv.__version__
column_key = pd.read_csv('../input/column_key.csv')
column_key
clubs = pd.read_csv('../input/clubs.csv')
clubs.head(10)
code
17134452/cell_20
[ "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.offline import iplot, init_notebook_mode
import advertools as adv
import pandas as pd
import advertools as adv
import pandas as pd
pd.options.display.max_columns = None
from plotly.tools import make_subplots
import plotly.graph_objs as go
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
adv.__version__
column_key = pd.read_csv('../input/column_key.csv')
column_key
clubs = pd.read_csv('../input/clubs.csv')
serp_clubs = pd.read_csv('../input/serp_clubs.csv')
serp_clubs['totalResults'] = serp_clubs['totalResults'].astype('int')
serp_clubs['queryTime'] = pd.to_datetime(serp_clubs['queryTime'])
(serp_clubs.drop_duplicates(['searchTerms'])
           .groupby('searchTerms', as_index=False)
           .agg({'totalResults': 'sum'})
           .sort_values('totalResults', ascending=False)
           .reset_index(drop=True)
           .head(15)
           .style.format({'totalResults': '{:,}'}))
hl_domain_appearances = (serp_clubs.groupby(['hl', 'displayLink'])
                                   .agg({'rank': 'count'})
                                   .reset_index()
                                   .sort_values(['hl', 'rank'], ascending=False)
                                   .rename(columns={'rank': 'search_appearances'}))
hl_domain_appearances.groupby(['hl']).head(5)
code
17134452/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
from plotly.offline import iplot, init_notebook_mode
import advertools as adv
import pandas as pd
import advertools as adv
import pandas as pd
pd.options.display.max_columns = None
from plotly.tools import make_subplots
import plotly.graph_objs as go
from plotly.offline import iplot, init_notebook_mode
init_notebook_mode()
adv.__version__
column_key = pd.read_csv('../input/column_key.csv')
column_key
clubs = pd.read_csv('../input/clubs.csv')
top_countries = clubs.groupby('Country').agg({'Total': 'sum'}).sort_values('Total', ascending=False).reset_index().head(10)
top_countries
(clubs.groupby(['Country'])
      .agg({'Club': 'count', 'Total': 'sum'})
      .sort_values('Club', ascending=False)
      .reset_index()
      .head(9)
      .set_axis(['country', 'num_clubs', 'total_wins'], axis=1, inplace=False)
      .assign(wins_per_club=lambda df: df['total_wins'].div(df['num_clubs']))
      .style.background_gradient(high=0.2))
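# Editor's note: set_axis renames the three aggregated columns before assign
# derives wins_per_club = total_wins / num_clubs, so the whole pipeline runs
# without naming any intermediate frame.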
code