path
stringlengths
13
17
screenshot_names
sequencelengths
1
873
code
stringlengths
0
40.4k
cell_type
stringclasses
1 value
320604/cell_7
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import sqlite3 import matplotlib.pyplot as plt import numpy as np import sqlite3 from sklearn import tree def sql_query(s): """Return results for a SQL query. Arguments: s (str) -- SQL query string Returns: (list) -- SQL query results """ conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() c.execute(s) result = c.fetchall() conn.close() return result def print_details(): """Print database details including table names and the number of rows. """ table_names = sql_query('SELECT name FROM sqlite_master ' + "WHERE type='table' " + 'ORDER BY name;')[0][0] num_rows = sql_query('SELECT COUNT(*) FROM loan;')[0][0] def print_column_names(): """Print the column names in the 'loan' table. Note that the "index" column name is specific to Python and is not part of the original SQLite database. """ conn = sqlite3.connect('../input/database.sqlite') conn.row_factory = sqlite3.Row c = conn.cursor() c.execute('SELECT * FROM loan LIMIT 2;') r = c.fetchone() i = 1 for k in r.keys(): i += 1 conn.close() emp_length_dict = {'n/a': 0, '< 1 year': 0, '1 year': 1, '2 years': 2, '3 years': 3, '4 years': 4, '5 years': 5, '6 years': 6, '7 years': 7, '8 years': 8, '9 years': 9, '10+ years': 10} home_ownership_dict = {'MORTGAGE': 0, 'OWN': 1, 'RENT': 2, 'OTHER': 3, 'NONE': 4, 'ANY': 5} features_dict = {'loan_amnt': 0, 'int_rate': 1, 'annual_inc': 2, 'delinq_2yrs': 3, 'open_acc': 4, 'dti': 5, 'emp_length': 6, 'funded_amnt': 7, 'tot_cur_bal': 8, 'home_ownership': 9} def get_data(s): """Return features and targets for a specific search term. Arguments: s (str) -- string to search for in loan "title" field Returns: (list of lists) -- [list of feature tuples, list of targets] (features) -- [(sample1 features), (sample2 features),...] (target) -- [sample1 target, sample2 target,...] 
""" data = sql_query('SELECT ' + 'loan_amnt,int_rate,annual_inc,' + 'loan_status,title,delinq_2yrs,' + 'open_acc,dti,emp_length,' + 'funded_amnt,tot_cur_bal,home_ownership ' + 'FROM loan ' + "WHERE application_type='INDIVIDUAL';") features_list = [] target_list = [] n = 0 n0 = 0 n1 = 0 for d in data: test0 = isinstance(d[0], float) test1 = isinstance(d[1], str) test2 = isinstance(d[2], float) test3 = isinstance(d[3], str) test4 = isinstance(d[4], str) test5 = isinstance(d[5], float) test6 = isinstance(d[6], float) test7 = isinstance(d[7], float) test8 = isinstance(d[8], str) test9 = isinstance(d[9], float) test10 = isinstance(d[10], float) if test0 and test1 and test2 and test3 and test4 and test5 and test6 and test7 and test8 and test9 and test10: try: d1_float = float(d[1].replace('%', '')) except: continue try: e = emp_length_dict[d[8]] except: continue try: h = home_ownership_dict[d[11]] except: continue if s.lower() in d[4].lower(): if d[3] == 'Fully Paid' or d[3] == 'Current': target = 0 n += 1 n0 += 1 elif 'Late' in d[3] or d[3] == 'Charged Off': target = 1 n += 1 n1 += 1 else: continue features = (d[0], float(d[1].replace('%', '')), d[2], d[5], d[6], d[7], emp_length_dict[d[8]], d[9], d[10], home_ownership_dict[d[11]]) features_list.append(features) target_list.append(target) else: pass result = [features_list, target_list] return result def create_scatter_plot(x0_data, y0_data, x1_data, y1_data, pt, pa, x_label, y_label, axis_type): ax = plt.gca() ax.set_axis_bgcolor('#BBBBBB') ax.set_axisbelow(True) plt.axis(pa) plt.xticks(fontsize=16) plt.yticks(fontsize=16) if axis_type == 'semilogx': plt.semilogx(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogx(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'semilogy': plt.semilogy(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogy(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'loglog': plt.loglog(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.loglog(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') plt.clf() def plot_two_fields(data, s, f1, f2, pa, x_label, y_label, axis_type): x0_list = [] y0_list = [] x1_list = [] y1_list = [] features_list = data[0] target_list = data[1] for i in range(len(features_list)): x = features_list[i][features_dict[f1]] y = features_list[i][features_dict[f2]] if target_list[i] == 0: x0_list.append(x) y0_list.append(y) elif target_list[i] == 1: x1_list.append(x) y1_list.append(y) else: pass cc_data = get_data('credit card') plot_two_fields(cc_data, 'credit card', 'loan_amnt', 'int_rate', [100.0, 100000.0, 5.0, 30.0], 'loan amount', 'interest rate', 'semilogx')
code
320604/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import sqlite3 import matplotlib.pyplot as plt import numpy as np import sqlite3 from sklearn import tree def sql_query(s): """Return results for a SQL query. Arguments: s (str) -- SQL query string Returns: (list) -- SQL query results """ conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() c.execute(s) result = c.fetchall() conn.close() return result def print_details(): """Print database details including table names and the number of rows. """ table_names = sql_query('SELECT name FROM sqlite_master ' + "WHERE type='table' " + 'ORDER BY name;')[0][0] num_rows = sql_query('SELECT COUNT(*) FROM loan;')[0][0] def print_column_names(): """Print the column names in the 'loan' table. Note that the "index" column name is specific to Python and is not part of the original SQLite database. """ conn = sqlite3.connect('../input/database.sqlite') conn.row_factory = sqlite3.Row c = conn.cursor() c.execute('SELECT * FROM loan LIMIT 2;') r = c.fetchone() i = 1 for k in r.keys(): i += 1 conn.close() emp_length_dict = {'n/a': 0, '< 1 year': 0, '1 year': 1, '2 years': 2, '3 years': 3, '4 years': 4, '5 years': 5, '6 years': 6, '7 years': 7, '8 years': 8, '9 years': 9, '10+ years': 10} home_ownership_dict = {'MORTGAGE': 0, 'OWN': 1, 'RENT': 2, 'OTHER': 3, 'NONE': 4, 'ANY': 5} features_dict = {'loan_amnt': 0, 'int_rate': 1, 'annual_inc': 2, 'delinq_2yrs': 3, 'open_acc': 4, 'dti': 5, 'emp_length': 6, 'funded_amnt': 7, 'tot_cur_bal': 8, 'home_ownership': 9} def get_data(s): """Return features and targets for a specific search term. Arguments: s (str) -- string to search for in loan "title" field Returns: (list of lists) -- [list of feature tuples, list of targets] (features) -- [(sample1 features), (sample2 features),...] (target) -- [sample1 target, sample2 target,...] 
""" data = sql_query('SELECT ' + 'loan_amnt,int_rate,annual_inc,' + 'loan_status,title,delinq_2yrs,' + 'open_acc,dti,emp_length,' + 'funded_amnt,tot_cur_bal,home_ownership ' + 'FROM loan ' + "WHERE application_type='INDIVIDUAL';") features_list = [] target_list = [] n = 0 n0 = 0 n1 = 0 for d in data: test0 = isinstance(d[0], float) test1 = isinstance(d[1], str) test2 = isinstance(d[2], float) test3 = isinstance(d[3], str) test4 = isinstance(d[4], str) test5 = isinstance(d[5], float) test6 = isinstance(d[6], float) test7 = isinstance(d[7], float) test8 = isinstance(d[8], str) test9 = isinstance(d[9], float) test10 = isinstance(d[10], float) if test0 and test1 and test2 and test3 and test4 and test5 and test6 and test7 and test8 and test9 and test10: try: d1_float = float(d[1].replace('%', '')) except: continue try: e = emp_length_dict[d[8]] except: continue try: h = home_ownership_dict[d[11]] except: continue if s.lower() in d[4].lower(): if d[3] == 'Fully Paid' or d[3] == 'Current': target = 0 n += 1 n0 += 1 elif 'Late' in d[3] or d[3] == 'Charged Off': target = 1 n += 1 n1 += 1 else: continue features = (d[0], float(d[1].replace('%', '')), d[2], d[5], d[6], d[7], emp_length_dict[d[8]], d[9], d[10], home_ownership_dict[d[11]]) features_list.append(features) target_list.append(target) else: pass result = [features_list, target_list] return result def create_scatter_plot(x0_data, y0_data, x1_data, y1_data, pt, pa, x_label, y_label, axis_type): ax = plt.gca() ax.set_axis_bgcolor('#BBBBBB') ax.set_axisbelow(True) plt.axis(pa) plt.xticks(fontsize=16) plt.yticks(fontsize=16) if axis_type == 'semilogx': plt.semilogx(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogx(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'semilogy': plt.semilogy(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogy(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'loglog': plt.loglog(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.loglog(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') plt.clf() def plot_two_fields(data, s, f1, f2, pa, x_label, y_label, axis_type): x0_list = [] y0_list = [] x1_list = [] y1_list = [] features_list = data[0] target_list = data[1] for i in range(len(features_list)): x = features_list[i][features_dict[f1]] y = features_list[i][features_dict[f2]] if target_list[i] == 0: x0_list.append(x) y0_list.append(y) elif target_list[i] == 1: x1_list.append(x) y1_list.append(y) else: pass medical_data = get_data('medical') plot_two_fields(medical_data, 'medical', 'home_ownership', 'funded_amnt', [-1, 6, 0.0, 35000.0], 'home ownership', 'funded amount', 'standard')
code
320604/cell_8
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import sqlite3 import matplotlib.pyplot as plt import numpy as np import sqlite3 from sklearn import tree def sql_query(s): """Return results for a SQL query. Arguments: s (str) -- SQL query string Returns: (list) -- SQL query results """ conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() c.execute(s) result = c.fetchall() conn.close() return result def print_details(): """Print database details including table names and the number of rows. """ table_names = sql_query('SELECT name FROM sqlite_master ' + "WHERE type='table' " + 'ORDER BY name;')[0][0] num_rows = sql_query('SELECT COUNT(*) FROM loan;')[0][0] def print_column_names(): """Print the column names in the 'loan' table. Note that the "index" column name is specific to Python and is not part of the original SQLite database. """ conn = sqlite3.connect('../input/database.sqlite') conn.row_factory = sqlite3.Row c = conn.cursor() c.execute('SELECT * FROM loan LIMIT 2;') r = c.fetchone() i = 1 for k in r.keys(): i += 1 conn.close() emp_length_dict = {'n/a': 0, '< 1 year': 0, '1 year': 1, '2 years': 2, '3 years': 3, '4 years': 4, '5 years': 5, '6 years': 6, '7 years': 7, '8 years': 8, '9 years': 9, '10+ years': 10} home_ownership_dict = {'MORTGAGE': 0, 'OWN': 1, 'RENT': 2, 'OTHER': 3, 'NONE': 4, 'ANY': 5} features_dict = {'loan_amnt': 0, 'int_rate': 1, 'annual_inc': 2, 'delinq_2yrs': 3, 'open_acc': 4, 'dti': 5, 'emp_length': 6, 'funded_amnt': 7, 'tot_cur_bal': 8, 'home_ownership': 9} def get_data(s): """Return features and targets for a specific search term. Arguments: s (str) -- string to search for in loan "title" field Returns: (list of lists) -- [list of feature tuples, list of targets] (features) -- [(sample1 features), (sample2 features),...] (target) -- [sample1 target, sample2 target,...] 
""" data = sql_query('SELECT ' + 'loan_amnt,int_rate,annual_inc,' + 'loan_status,title,delinq_2yrs,' + 'open_acc,dti,emp_length,' + 'funded_amnt,tot_cur_bal,home_ownership ' + 'FROM loan ' + "WHERE application_type='INDIVIDUAL';") features_list = [] target_list = [] n = 0 n0 = 0 n1 = 0 for d in data: test0 = isinstance(d[0], float) test1 = isinstance(d[1], str) test2 = isinstance(d[2], float) test3 = isinstance(d[3], str) test4 = isinstance(d[4], str) test5 = isinstance(d[5], float) test6 = isinstance(d[6], float) test7 = isinstance(d[7], float) test8 = isinstance(d[8], str) test9 = isinstance(d[9], float) test10 = isinstance(d[10], float) if test0 and test1 and test2 and test3 and test4 and test5 and test6 and test7 and test8 and test9 and test10: try: d1_float = float(d[1].replace('%', '')) except: continue try: e = emp_length_dict[d[8]] except: continue try: h = home_ownership_dict[d[11]] except: continue if s.lower() in d[4].lower(): if d[3] == 'Fully Paid' or d[3] == 'Current': target = 0 n += 1 n0 += 1 elif 'Late' in d[3] or d[3] == 'Charged Off': target = 1 n += 1 n1 += 1 else: continue features = (d[0], float(d[1].replace('%', '')), d[2], d[5], d[6], d[7], emp_length_dict[d[8]], d[9], d[10], home_ownership_dict[d[11]]) features_list.append(features) target_list.append(target) else: pass result = [features_list, target_list] return result def create_scatter_plot(x0_data, y0_data, x1_data, y1_data, pt, pa, x_label, y_label, axis_type): ax = plt.gca() ax.set_axis_bgcolor('#BBBBBB') ax.set_axisbelow(True) plt.axis(pa) plt.xticks(fontsize=16) plt.yticks(fontsize=16) if axis_type == 'semilogx': plt.semilogx(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogx(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'semilogy': plt.semilogy(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogy(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'loglog': plt.loglog(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.loglog(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') plt.clf() def plot_two_fields(data, s, f1, f2, pa, x_label, y_label, axis_type): x0_list = [] y0_list = [] x1_list = [] y1_list = [] features_list = data[0] target_list = data[1] for i in range(len(features_list)): x = features_list[i][features_dict[f1]] y = features_list[i][features_dict[f2]] if target_list[i] == 0: x0_list.append(x) y0_list.append(y) elif target_list[i] == 1: x1_list.append(x) y1_list.append(y) else: pass cc_data = get_data('credit card') plot_two_fields(cc_data, 'credit card', 'annual_inc', 'int_rate', [1000.0, 10000000.0, 5.0, 30.0], 'annual income', 'interest rate', 'semilogx')
code
320604/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import sqlite3 import matplotlib.pyplot as plt import numpy as np import sqlite3 from sklearn import tree def sql_query(s): """Return results for a SQL query. Arguments: s (str) -- SQL query string Returns: (list) -- SQL query results """ conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() c.execute(s) result = c.fetchall() conn.close() return result def print_details(): """Print database details including table names and the number of rows. """ table_names = sql_query('SELECT name FROM sqlite_master ' + "WHERE type='table' " + 'ORDER BY name;')[0][0] num_rows = sql_query('SELECT COUNT(*) FROM loan;')[0][0] def print_column_names(): """Print the column names in the 'loan' table. Note that the "index" column name is specific to Python and is not part of the original SQLite database. """ conn = sqlite3.connect('../input/database.sqlite') conn.row_factory = sqlite3.Row c = conn.cursor() c.execute('SELECT * FROM loan LIMIT 2;') r = c.fetchone() i = 1 for k in r.keys(): i += 1 conn.close() emp_length_dict = {'n/a': 0, '< 1 year': 0, '1 year': 1, '2 years': 2, '3 years': 3, '4 years': 4, '5 years': 5, '6 years': 6, '7 years': 7, '8 years': 8, '9 years': 9, '10+ years': 10} home_ownership_dict = {'MORTGAGE': 0, 'OWN': 1, 'RENT': 2, 'OTHER': 3, 'NONE': 4, 'ANY': 5} features_dict = {'loan_amnt': 0, 'int_rate': 1, 'annual_inc': 2, 'delinq_2yrs': 3, 'open_acc': 4, 'dti': 5, 'emp_length': 6, 'funded_amnt': 7, 'tot_cur_bal': 8, 'home_ownership': 9} def get_data(s): """Return features and targets for a specific search term. Arguments: s (str) -- string to search for in loan "title" field Returns: (list of lists) -- [list of feature tuples, list of targets] (features) -- [(sample1 features), (sample2 features),...] (target) -- [sample1 target, sample2 target,...] 
""" data = sql_query('SELECT ' + 'loan_amnt,int_rate,annual_inc,' + 'loan_status,title,delinq_2yrs,' + 'open_acc,dti,emp_length,' + 'funded_amnt,tot_cur_bal,home_ownership ' + 'FROM loan ' + "WHERE application_type='INDIVIDUAL';") features_list = [] target_list = [] n = 0 n0 = 0 n1 = 0 for d in data: test0 = isinstance(d[0], float) test1 = isinstance(d[1], str) test2 = isinstance(d[2], float) test3 = isinstance(d[3], str) test4 = isinstance(d[4], str) test5 = isinstance(d[5], float) test6 = isinstance(d[6], float) test7 = isinstance(d[7], float) test8 = isinstance(d[8], str) test9 = isinstance(d[9], float) test10 = isinstance(d[10], float) if test0 and test1 and test2 and test3 and test4 and test5 and test6 and test7 and test8 and test9 and test10: try: d1_float = float(d[1].replace('%', '')) except: continue try: e = emp_length_dict[d[8]] except: continue try: h = home_ownership_dict[d[11]] except: continue if s.lower() in d[4].lower(): if d[3] == 'Fully Paid' or d[3] == 'Current': target = 0 n += 1 n0 += 1 elif 'Late' in d[3] or d[3] == 'Charged Off': target = 1 n += 1 n1 += 1 else: continue features = (d[0], float(d[1].replace('%', '')), d[2], d[5], d[6], d[7], emp_length_dict[d[8]], d[9], d[10], home_ownership_dict[d[11]]) features_list.append(features) target_list.append(target) else: pass result = [features_list, target_list] return result def create_scatter_plot(x0_data, y0_data, x1_data, y1_data, pt, pa, x_label, y_label, axis_type): ax = plt.gca() ax.set_axis_bgcolor('#BBBBBB') ax.set_axisbelow(True) plt.axis(pa) plt.xticks(fontsize=16) plt.yticks(fontsize=16) if axis_type == 'semilogx': plt.semilogx(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogx(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'semilogy': plt.semilogy(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogy(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'loglog': plt.loglog(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.loglog(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') plt.clf() def plot_two_fields(data, s, f1, f2, pa, x_label, y_label, axis_type): x0_list = [] y0_list = [] x1_list = [] y1_list = [] features_list = data[0] target_list = data[1] for i in range(len(features_list)): x = features_list[i][features_dict[f1]] y = features_list[i][features_dict[f2]] if target_list[i] == 0: x0_list.append(x) y0_list.append(y) elif target_list[i] == 1: x1_list.append(x) y1_list.append(y) else: pass medical_data = get_data('medical') plot_two_fields(medical_data, 'medical', 'annual_inc', 'int_rate', [1000.0, 10000000.0, 5.0, 30.0], 'annual income', 'interest rate', 'semilogx')
code
320604/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import sqlite3 import matplotlib.pyplot as plt import numpy as np import sqlite3 from sklearn import tree def sql_query(s): """Return results for a SQL query. Arguments: s (str) -- SQL query string Returns: (list) -- SQL query results """ conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() c.execute(s) result = c.fetchall() conn.close() return result def print_details(): """Print database details including table names and the number of rows. """ table_names = sql_query('SELECT name FROM sqlite_master ' + "WHERE type='table' " + 'ORDER BY name;')[0][0] num_rows = sql_query('SELECT COUNT(*) FROM loan;')[0][0] def print_column_names(): """Print the column names in the 'loan' table. Note that the "index" column name is specific to Python and is not part of the original SQLite database. """ conn = sqlite3.connect('../input/database.sqlite') conn.row_factory = sqlite3.Row c = conn.cursor() c.execute('SELECT * FROM loan LIMIT 2;') r = c.fetchone() i = 1 for k in r.keys(): i += 1 conn.close() emp_length_dict = {'n/a': 0, '< 1 year': 0, '1 year': 1, '2 years': 2, '3 years': 3, '4 years': 4, '5 years': 5, '6 years': 6, '7 years': 7, '8 years': 8, '9 years': 9, '10+ years': 10} home_ownership_dict = {'MORTGAGE': 0, 'OWN': 1, 'RENT': 2, 'OTHER': 3, 'NONE': 4, 'ANY': 5} features_dict = {'loan_amnt': 0, 'int_rate': 1, 'annual_inc': 2, 'delinq_2yrs': 3, 'open_acc': 4, 'dti': 5, 'emp_length': 6, 'funded_amnt': 7, 'tot_cur_bal': 8, 'home_ownership': 9} def get_data(s): """Return features and targets for a specific search term. Arguments: s (str) -- string to search for in loan "title" field Returns: (list of lists) -- [list of feature tuples, list of targets] (features) -- [(sample1 features), (sample2 features),...] (target) -- [sample1 target, sample2 target,...] 
""" data = sql_query('SELECT ' + 'loan_amnt,int_rate,annual_inc,' + 'loan_status,title,delinq_2yrs,' + 'open_acc,dti,emp_length,' + 'funded_amnt,tot_cur_bal,home_ownership ' + 'FROM loan ' + "WHERE application_type='INDIVIDUAL';") features_list = [] target_list = [] n = 0 n0 = 0 n1 = 0 for d in data: test0 = isinstance(d[0], float) test1 = isinstance(d[1], str) test2 = isinstance(d[2], float) test3 = isinstance(d[3], str) test4 = isinstance(d[4], str) test5 = isinstance(d[5], float) test6 = isinstance(d[6], float) test7 = isinstance(d[7], float) test8 = isinstance(d[8], str) test9 = isinstance(d[9], float) test10 = isinstance(d[10], float) if test0 and test1 and test2 and test3 and test4 and test5 and test6 and test7 and test8 and test9 and test10: try: d1_float = float(d[1].replace('%', '')) except: continue try: e = emp_length_dict[d[8]] except: continue try: h = home_ownership_dict[d[11]] except: continue if s.lower() in d[4].lower(): if d[3] == 'Fully Paid' or d[3] == 'Current': target = 0 n += 1 n0 += 1 elif 'Late' in d[3] or d[3] == 'Charged Off': target = 1 n += 1 n1 += 1 else: continue features = (d[0], float(d[1].replace('%', '')), d[2], d[5], d[6], d[7], emp_length_dict[d[8]], d[9], d[10], home_ownership_dict[d[11]]) features_list.append(features) target_list.append(target) else: pass result = [features_list, target_list] return result def create_scatter_plot(x0_data, y0_data, x1_data, y1_data, pt, pa, x_label, y_label, axis_type): ax = plt.gca() ax.set_axis_bgcolor('#BBBBBB') ax.set_axisbelow(True) plt.axis(pa) plt.xticks(fontsize=16) plt.yticks(fontsize=16) if axis_type == 'semilogx': plt.semilogx(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogx(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'semilogy': plt.semilogy(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogy(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'loglog': plt.loglog(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.loglog(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') plt.clf() def plot_two_fields(data, s, f1, f2, pa, x_label, y_label, axis_type): x0_list = [] y0_list = [] x1_list = [] y1_list = [] features_list = data[0] target_list = data[1] for i in range(len(features_list)): x = features_list[i][features_dict[f1]] y = features_list[i][features_dict[f2]] if target_list[i] == 0: x0_list.append(x) y0_list.append(y) elif target_list[i] == 1: x1_list.append(x) y1_list.append(y) else: pass medical_data = get_data('medical') plot_two_fields(medical_data, 'medical', 'annual_inc', 'loan_amnt', [1000.0, 10000000.0, 0.0, 35000.0], 'annual income', 'loan amount', 'semilogx')
code
320604/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import sqlite3 import matplotlib.pyplot as plt import numpy as np import sqlite3 from sklearn import tree def sql_query(s): """Return results for a SQL query. Arguments: s (str) -- SQL query string Returns: (list) -- SQL query results """ conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() c.execute(s) result = c.fetchall() conn.close() return result def print_details(): """Print database details including table names and the number of rows. """ table_names = sql_query('SELECT name FROM sqlite_master ' + "WHERE type='table' " + 'ORDER BY name;')[0][0] num_rows = sql_query('SELECT COUNT(*) FROM loan;')[0][0] def print_column_names(): """Print the column names in the 'loan' table. Note that the "index" column name is specific to Python and is not part of the original SQLite database. """ conn = sqlite3.connect('../input/database.sqlite') conn.row_factory = sqlite3.Row c = conn.cursor() c.execute('SELECT * FROM loan LIMIT 2;') r = c.fetchone() i = 1 for k in r.keys(): i += 1 conn.close() emp_length_dict = {'n/a': 0, '< 1 year': 0, '1 year': 1, '2 years': 2, '3 years': 3, '4 years': 4, '5 years': 5, '6 years': 6, '7 years': 7, '8 years': 8, '9 years': 9, '10+ years': 10} home_ownership_dict = {'MORTGAGE': 0, 'OWN': 1, 'RENT': 2, 'OTHER': 3, 'NONE': 4, 'ANY': 5} features_dict = {'loan_amnt': 0, 'int_rate': 1, 'annual_inc': 2, 'delinq_2yrs': 3, 'open_acc': 4, 'dti': 5, 'emp_length': 6, 'funded_amnt': 7, 'tot_cur_bal': 8, 'home_ownership': 9} def get_data(s): """Return features and targets for a specific search term. Arguments: s (str) -- string to search for in loan "title" field Returns: (list of lists) -- [list of feature tuples, list of targets] (features) -- [(sample1 features), (sample2 features),...] (target) -- [sample1 target, sample2 target,...] 
""" data = sql_query('SELECT ' + 'loan_amnt,int_rate,annual_inc,' + 'loan_status,title,delinq_2yrs,' + 'open_acc,dti,emp_length,' + 'funded_amnt,tot_cur_bal,home_ownership ' + 'FROM loan ' + "WHERE application_type='INDIVIDUAL';") features_list = [] target_list = [] n = 0 n0 = 0 n1 = 0 for d in data: test0 = isinstance(d[0], float) test1 = isinstance(d[1], str) test2 = isinstance(d[2], float) test3 = isinstance(d[3], str) test4 = isinstance(d[4], str) test5 = isinstance(d[5], float) test6 = isinstance(d[6], float) test7 = isinstance(d[7], float) test8 = isinstance(d[8], str) test9 = isinstance(d[9], float) test10 = isinstance(d[10], float) if test0 and test1 and test2 and test3 and test4 and test5 and test6 and test7 and test8 and test9 and test10: try: d1_float = float(d[1].replace('%', '')) except: continue try: e = emp_length_dict[d[8]] except: continue try: h = home_ownership_dict[d[11]] except: continue if s.lower() in d[4].lower(): if d[3] == 'Fully Paid' or d[3] == 'Current': target = 0 n += 1 n0 += 1 elif 'Late' in d[3] or d[3] == 'Charged Off': target = 1 n += 1 n1 += 1 else: continue features = (d[0], float(d[1].replace('%', '')), d[2], d[5], d[6], d[7], emp_length_dict[d[8]], d[9], d[10], home_ownership_dict[d[11]]) features_list.append(features) target_list.append(target) else: pass result = [features_list, target_list] return result def create_scatter_plot(x0_data, y0_data, x1_data, y1_data, pt, pa, x_label, y_label, axis_type): ax = plt.gca() ax.set_axis_bgcolor('#BBBBBB') ax.set_axisbelow(True) plt.axis(pa) plt.xticks(fontsize=16) plt.yticks(fontsize=16) if axis_type == 'semilogx': plt.semilogx(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogx(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'semilogy': plt.semilogy(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogy(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'loglog': plt.loglog(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.loglog(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') plt.clf() def plot_two_fields(data, s, f1, f2, pa, x_label, y_label, axis_type): x0_list = [] y0_list = [] x1_list = [] y1_list = [] features_list = data[0] target_list = data[1] for i in range(len(features_list)): x = features_list[i][features_dict[f1]] y = features_list[i][features_dict[f2]] if target_list[i] == 0: x0_list.append(x) y0_list.append(y) elif target_list[i] == 1: x1_list.append(x) y1_list.append(y) else: pass medical_data = get_data('medical') plot_two_fields(medical_data, 'medical', 'loan_amnt', 'funded_amnt', [0.0, 35000.0, 0.0, 35000.0], 'loan amount', 'funded amount', 'standard')
code
320604/cell_31
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import tree import matplotlib.pyplot as plt import numpy as np import sqlite3 import matplotlib.pyplot as plt import numpy as np import sqlite3 from sklearn import tree def sql_query(s): """Return results for a SQL query. Arguments: s (str) -- SQL query string Returns: (list) -- SQL query results """ conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() c.execute(s) result = c.fetchall() conn.close() return result def print_details(): """Print database details including table names and the number of rows. """ table_names = sql_query('SELECT name FROM sqlite_master ' + "WHERE type='table' " + 'ORDER BY name;')[0][0] num_rows = sql_query('SELECT COUNT(*) FROM loan;')[0][0] def print_column_names(): """Print the column names in the 'loan' table. Note that the "index" column name is specific to Python and is not part of the original SQLite database. """ conn = sqlite3.connect('../input/database.sqlite') conn.row_factory = sqlite3.Row c = conn.cursor() c.execute('SELECT * FROM loan LIMIT 2;') r = c.fetchone() i = 1 for k in r.keys(): i += 1 conn.close() emp_length_dict = {'n/a': 0, '< 1 year': 0, '1 year': 1, '2 years': 2, '3 years': 3, '4 years': 4, '5 years': 5, '6 years': 6, '7 years': 7, '8 years': 8, '9 years': 9, '10+ years': 10} home_ownership_dict = {'MORTGAGE': 0, 'OWN': 1, 'RENT': 2, 'OTHER': 3, 'NONE': 4, 'ANY': 5} features_dict = {'loan_amnt': 0, 'int_rate': 1, 'annual_inc': 2, 'delinq_2yrs': 3, 'open_acc': 4, 'dti': 5, 'emp_length': 6, 'funded_amnt': 7, 'tot_cur_bal': 8, 'home_ownership': 9} def get_data(s): """Return features and targets for a specific search term. Arguments: s (str) -- string to search for in loan "title" field Returns: (list of lists) -- [list of feature tuples, list of targets] (features) -- [(sample1 features), (sample2 features),...] (target) -- [sample1 target, sample2 target,...] 
""" data = sql_query('SELECT ' + 'loan_amnt,int_rate,annual_inc,' + 'loan_status,title,delinq_2yrs,' + 'open_acc,dti,emp_length,' + 'funded_amnt,tot_cur_bal,home_ownership ' + 'FROM loan ' + "WHERE application_type='INDIVIDUAL';") features_list = [] target_list = [] n = 0 n0 = 0 n1 = 0 for d in data: test0 = isinstance(d[0], float) test1 = isinstance(d[1], str) test2 = isinstance(d[2], float) test3 = isinstance(d[3], str) test4 = isinstance(d[4], str) test5 = isinstance(d[5], float) test6 = isinstance(d[6], float) test7 = isinstance(d[7], float) test8 = isinstance(d[8], str) test9 = isinstance(d[9], float) test10 = isinstance(d[10], float) if test0 and test1 and test2 and test3 and test4 and test5 and test6 and test7 and test8 and test9 and test10: try: d1_float = float(d[1].replace('%', '')) except: continue try: e = emp_length_dict[d[8]] except: continue try: h = home_ownership_dict[d[11]] except: continue if s.lower() in d[4].lower(): if d[3] == 'Fully Paid' or d[3] == 'Current': target = 0 n += 1 n0 += 1 elif 'Late' in d[3] or d[3] == 'Charged Off': target = 1 n += 1 n1 += 1 else: continue features = (d[0], float(d[1].replace('%', '')), d[2], d[5], d[6], d[7], emp_length_dict[d[8]], d[9], d[10], home_ownership_dict[d[11]]) features_list.append(features) target_list.append(target) else: pass result = [features_list, target_list] return result def create_scatter_plot(x0_data, y0_data, x1_data, y1_data, pt, pa, x_label, y_label, axis_type): ax = plt.gca() ax.set_axis_bgcolor('#BBBBBB') ax.set_axisbelow(True) plt.axis(pa) plt.xticks(fontsize=16) plt.yticks(fontsize=16) if axis_type == 'semilogx': plt.semilogx(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogx(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'semilogy': plt.semilogy(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogy(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'loglog': plt.loglog(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.loglog(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') plt.clf() def plot_two_fields(data, s, f1, f2, pa, x_label, y_label, axis_type): x0_list = [] y0_list = [] x1_list = [] y1_list = [] features_list = data[0] target_list = data[1] for i in range(len(features_list)): x = features_list[i][features_dict[f1]] y = features_list[i][features_dict[f2]] if target_list[i] == 0: x0_list.append(x) y0_list.append(y) elif target_list[i] == 1: x1_list.append(x) y1_list.append(y) else: pass medical_data = get_data('medical') def create_classifier(f, t, nt): """Create classifier for predicting loan status. Print accuracy. Arguments: f (list of tuples) -- [(sample 1 features), (sample 2 features),...] t (list) -- [sample 1 target, sample 2 target,...] 
nt (int) -- number of samples to use in training set """ training_set_features = [] training_set_target = [] testing_set_features = [] testing_set_target = [] for i in np.arange(0, nt, 1): training_set_features.append(f[i]) training_set_target.append(t[i]) for i in np.arange(nt, len(f), 1): testing_set_features.append(f[i]) testing_set_target.append(t[i]) clf = tree.DecisionTreeClassifier() clf = clf.fit(training_set_features, training_set_target) n = 0 n_correct = 0 n0 = 0 n0_correct = 0 n1 = 0 n1_correct = 0 for i in range(len(testing_set_features)): t = testing_set_target[i] p = clf.predict(np.asarray(testing_set_features[i]).reshape(1, -1)) if t == 0: if t == p[0]: equal = 'yes' n_correct += 1 n0_correct += 1 else: equal = 'no' n += 1 n0 += 1 elif t == 1: if t == p[0]: equal = 'yes' n_correct += 1 n1_correct += 1 else: equal = 'no' n += 1 n1 += 1 else: pass n_accuracy = 100.0 * n_correct / n n0_accuracy = 100.0 * n0_correct / n0 n1_accuracy = 100.0 * n1_correct / n1 create_classifier(medical_data[0], medical_data[1], 2000)
code
320604/cell_24
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import sqlite3 import matplotlib.pyplot as plt import numpy as np import sqlite3 from sklearn import tree def sql_query(s): """Return results for a SQL query. Arguments: s (str) -- SQL query string Returns: (list) -- SQL query results """ conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() c.execute(s) result = c.fetchall() conn.close() return result def print_details(): """Print database details including table names and the number of rows. """ table_names = sql_query('SELECT name FROM sqlite_master ' + "WHERE type='table' " + 'ORDER BY name;')[0][0] num_rows = sql_query('SELECT COUNT(*) FROM loan;')[0][0] def print_column_names(): """Print the column names in the 'loan' table. Note that the "index" column name is specific to Python and is not part of the original SQLite database. """ conn = sqlite3.connect('../input/database.sqlite') conn.row_factory = sqlite3.Row c = conn.cursor() c.execute('SELECT * FROM loan LIMIT 2;') r = c.fetchone() i = 1 for k in r.keys(): i += 1 conn.close() emp_length_dict = {'n/a': 0, '< 1 year': 0, '1 year': 1, '2 years': 2, '3 years': 3, '4 years': 4, '5 years': 5, '6 years': 6, '7 years': 7, '8 years': 8, '9 years': 9, '10+ years': 10} home_ownership_dict = {'MORTGAGE': 0, 'OWN': 1, 'RENT': 2, 'OTHER': 3, 'NONE': 4, 'ANY': 5} features_dict = {'loan_amnt': 0, 'int_rate': 1, 'annual_inc': 2, 'delinq_2yrs': 3, 'open_acc': 4, 'dti': 5, 'emp_length': 6, 'funded_amnt': 7, 'tot_cur_bal': 8, 'home_ownership': 9} def get_data(s): """Return features and targets for a specific search term. Arguments: s (str) -- string to search for in loan "title" field Returns: (list of lists) -- [list of feature tuples, list of targets] (features) -- [(sample1 features), (sample2 features),...] (target) -- [sample1 target, sample2 target,...] 
""" data = sql_query('SELECT ' + 'loan_amnt,int_rate,annual_inc,' + 'loan_status,title,delinq_2yrs,' + 'open_acc,dti,emp_length,' + 'funded_amnt,tot_cur_bal,home_ownership ' + 'FROM loan ' + "WHERE application_type='INDIVIDUAL';") features_list = [] target_list = [] n = 0 n0 = 0 n1 = 0 for d in data: test0 = isinstance(d[0], float) test1 = isinstance(d[1], str) test2 = isinstance(d[2], float) test3 = isinstance(d[3], str) test4 = isinstance(d[4], str) test5 = isinstance(d[5], float) test6 = isinstance(d[6], float) test7 = isinstance(d[7], float) test8 = isinstance(d[8], str) test9 = isinstance(d[9], float) test10 = isinstance(d[10], float) if test0 and test1 and test2 and test3 and test4 and test5 and test6 and test7 and test8 and test9 and test10: try: d1_float = float(d[1].replace('%', '')) except: continue try: e = emp_length_dict[d[8]] except: continue try: h = home_ownership_dict[d[11]] except: continue if s.lower() in d[4].lower(): if d[3] == 'Fully Paid' or d[3] == 'Current': target = 0 n += 1 n0 += 1 elif 'Late' in d[3] or d[3] == 'Charged Off': target = 1 n += 1 n1 += 1 else: continue features = (d[0], float(d[1].replace('%', '')), d[2], d[5], d[6], d[7], emp_length_dict[d[8]], d[9], d[10], home_ownership_dict[d[11]]) features_list.append(features) target_list.append(target) else: pass result = [features_list, target_list] return result def create_scatter_plot(x0_data, y0_data, x1_data, y1_data, pt, pa, x_label, y_label, axis_type): ax = plt.gca() ax.set_axis_bgcolor('#BBBBBB') ax.set_axisbelow(True) plt.axis(pa) plt.xticks(fontsize=16) plt.yticks(fontsize=16) if axis_type == 'semilogx': plt.semilogx(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogx(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'semilogy': plt.semilogy(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogy(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'loglog': plt.loglog(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.loglog(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') plt.clf() def plot_two_fields(data, s, f1, f2, pa, x_label, y_label, axis_type): x0_list = [] y0_list = [] x1_list = [] y1_list = [] features_list = data[0] target_list = data[1] for i in range(len(features_list)): x = features_list[i][features_dict[f1]] y = features_list[i][features_dict[f2]] if target_list[i] == 0: x0_list.append(x) y0_list.append(y) elif target_list[i] == 1: x1_list.append(x) y1_list.append(y) else: pass debt_data = get_data('debt') plot_two_fields(debt_data, 'debt', 'loan_amnt', 'funded_amnt', [0.0, 35000.0, 0.0, 35000.0], 'loan amount', 'funded amount', 'standard')
code
320604/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import sqlite3 import matplotlib.pyplot as plt import numpy as np import sqlite3 from sklearn import tree def sql_query(s): """Return results for a SQL query. Arguments: s (str) -- SQL query string Returns: (list) -- SQL query results """ conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() c.execute(s) result = c.fetchall() conn.close() return result def print_details(): """Print database details including table names and the number of rows. """ table_names = sql_query('SELECT name FROM sqlite_master ' + "WHERE type='table' " + 'ORDER BY name;')[0][0] num_rows = sql_query('SELECT COUNT(*) FROM loan;')[0][0] def print_column_names(): """Print the column names in the 'loan' table. Note that the "index" column name is specific to Python and is not part of the original SQLite database. """ conn = sqlite3.connect('../input/database.sqlite') conn.row_factory = sqlite3.Row c = conn.cursor() c.execute('SELECT * FROM loan LIMIT 2;') r = c.fetchone() i = 1 for k in r.keys(): i += 1 conn.close() emp_length_dict = {'n/a': 0, '< 1 year': 0, '1 year': 1, '2 years': 2, '3 years': 3, '4 years': 4, '5 years': 5, '6 years': 6, '7 years': 7, '8 years': 8, '9 years': 9, '10+ years': 10} home_ownership_dict = {'MORTGAGE': 0, 'OWN': 1, 'RENT': 2, 'OTHER': 3, 'NONE': 4, 'ANY': 5} features_dict = {'loan_amnt': 0, 'int_rate': 1, 'annual_inc': 2, 'delinq_2yrs': 3, 'open_acc': 4, 'dti': 5, 'emp_length': 6, 'funded_amnt': 7, 'tot_cur_bal': 8, 'home_ownership': 9} def get_data(s): """Return features and targets for a specific search term. Arguments: s (str) -- string to search for in loan "title" field Returns: (list of lists) -- [list of feature tuples, list of targets] (features) -- [(sample1 features), (sample2 features),...] (target) -- [sample1 target, sample2 target,...] 
""" data = sql_query('SELECT ' + 'loan_amnt,int_rate,annual_inc,' + 'loan_status,title,delinq_2yrs,' + 'open_acc,dti,emp_length,' + 'funded_amnt,tot_cur_bal,home_ownership ' + 'FROM loan ' + "WHERE application_type='INDIVIDUAL';") features_list = [] target_list = [] n = 0 n0 = 0 n1 = 0 for d in data: test0 = isinstance(d[0], float) test1 = isinstance(d[1], str) test2 = isinstance(d[2], float) test3 = isinstance(d[3], str) test4 = isinstance(d[4], str) test5 = isinstance(d[5], float) test6 = isinstance(d[6], float) test7 = isinstance(d[7], float) test8 = isinstance(d[8], str) test9 = isinstance(d[9], float) test10 = isinstance(d[10], float) if test0 and test1 and test2 and test3 and test4 and test5 and test6 and test7 and test8 and test9 and test10: try: d1_float = float(d[1].replace('%', '')) except: continue try: e = emp_length_dict[d[8]] except: continue try: h = home_ownership_dict[d[11]] except: continue if s.lower() in d[4].lower(): if d[3] == 'Fully Paid' or d[3] == 'Current': target = 0 n += 1 n0 += 1 elif 'Late' in d[3] or d[3] == 'Charged Off': target = 1 n += 1 n1 += 1 else: continue features = (d[0], float(d[1].replace('%', '')), d[2], d[5], d[6], d[7], emp_length_dict[d[8]], d[9], d[10], home_ownership_dict[d[11]]) features_list.append(features) target_list.append(target) else: pass result = [features_list, target_list] return result def create_scatter_plot(x0_data, y0_data, x1_data, y1_data, pt, pa, x_label, y_label, axis_type): ax = plt.gca() ax.set_axis_bgcolor('#BBBBBB') ax.set_axisbelow(True) plt.axis(pa) plt.xticks(fontsize=16) plt.yticks(fontsize=16) if axis_type == 'semilogx': plt.semilogx(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogx(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'semilogy': plt.semilogy(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogy(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'loglog': plt.loglog(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.loglog(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') plt.clf() def plot_two_fields(data, s, f1, f2, pa, x_label, y_label, axis_type): x0_list = [] y0_list = [] x1_list = [] y1_list = [] features_list = data[0] target_list = data[1] for i in range(len(features_list)): x = features_list[i][features_dict[f1]] y = features_list[i][features_dict[f2]] if target_list[i] == 0: x0_list.append(x) y0_list.append(y) elif target_list[i] == 1: x1_list.append(x) y1_list.append(y) else: pass medical_data = get_data('medical') plot_two_fields(medical_data, 'medical', 'loan_amnt', 'int_rate', [100.0, 100000.0, 5.0, 30.0], 'loan amount', 'interest rate', 'semilogx')
code
320604/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import sqlite3 import matplotlib.pyplot as plt import numpy as np import sqlite3 from sklearn import tree def sql_query(s): """Return results for a SQL query. Arguments: s (str) -- SQL query string Returns: (list) -- SQL query results """ conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() c.execute(s) result = c.fetchall() conn.close() return result def print_details(): """Print database details including table names and the number of rows. """ table_names = sql_query('SELECT name FROM sqlite_master ' + "WHERE type='table' " + 'ORDER BY name;')[0][0] num_rows = sql_query('SELECT COUNT(*) FROM loan;')[0][0] def print_column_names(): """Print the column names in the 'loan' table. Note that the "index" column name is specific to Python and is not part of the original SQLite database. """ conn = sqlite3.connect('../input/database.sqlite') conn.row_factory = sqlite3.Row c = conn.cursor() c.execute('SELECT * FROM loan LIMIT 2;') r = c.fetchone() i = 1 for k in r.keys(): i += 1 conn.close() emp_length_dict = {'n/a': 0, '< 1 year': 0, '1 year': 1, '2 years': 2, '3 years': 3, '4 years': 4, '5 years': 5, '6 years': 6, '7 years': 7, '8 years': 8, '9 years': 9, '10+ years': 10} home_ownership_dict = {'MORTGAGE': 0, 'OWN': 1, 'RENT': 2, 'OTHER': 3, 'NONE': 4, 'ANY': 5} features_dict = {'loan_amnt': 0, 'int_rate': 1, 'annual_inc': 2, 'delinq_2yrs': 3, 'open_acc': 4, 'dti': 5, 'emp_length': 6, 'funded_amnt': 7, 'tot_cur_bal': 8, 'home_ownership': 9} def get_data(s): """Return features and targets for a specific search term. Arguments: s (str) -- string to search for in loan "title" field Returns: (list of lists) -- [list of feature tuples, list of targets] (features) -- [(sample1 features), (sample2 features),...] (target) -- [sample1 target, sample2 target,...] 
""" data = sql_query('SELECT ' + 'loan_amnt,int_rate,annual_inc,' + 'loan_status,title,delinq_2yrs,' + 'open_acc,dti,emp_length,' + 'funded_amnt,tot_cur_bal,home_ownership ' + 'FROM loan ' + "WHERE application_type='INDIVIDUAL';") features_list = [] target_list = [] n = 0 n0 = 0 n1 = 0 for d in data: test0 = isinstance(d[0], float) test1 = isinstance(d[1], str) test2 = isinstance(d[2], float) test3 = isinstance(d[3], str) test4 = isinstance(d[4], str) test5 = isinstance(d[5], float) test6 = isinstance(d[6], float) test7 = isinstance(d[7], float) test8 = isinstance(d[8], str) test9 = isinstance(d[9], float) test10 = isinstance(d[10], float) if test0 and test1 and test2 and test3 and test4 and test5 and test6 and test7 and test8 and test9 and test10: try: d1_float = float(d[1].replace('%', '')) except: continue try: e = emp_length_dict[d[8]] except: continue try: h = home_ownership_dict[d[11]] except: continue if s.lower() in d[4].lower(): if d[3] == 'Fully Paid' or d[3] == 'Current': target = 0 n += 1 n0 += 1 elif 'Late' in d[3] or d[3] == 'Charged Off': target = 1 n += 1 n1 += 1 else: continue features = (d[0], float(d[1].replace('%', '')), d[2], d[5], d[6], d[7], emp_length_dict[d[8]], d[9], d[10], home_ownership_dict[d[11]]) features_list.append(features) target_list.append(target) else: pass result = [features_list, target_list] return result def create_scatter_plot(x0_data, y0_data, x1_data, y1_data, pt, pa, x_label, y_label, axis_type): ax = plt.gca() ax.set_axis_bgcolor('#BBBBBB') ax.set_axisbelow(True) plt.axis(pa) plt.xticks(fontsize=16) plt.yticks(fontsize=16) if axis_type == 'semilogx': plt.semilogx(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogx(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'semilogy': plt.semilogy(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogy(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'loglog': plt.loglog(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.loglog(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') plt.clf() def plot_two_fields(data, s, f1, f2, pa, x_label, y_label, axis_type): x0_list = [] y0_list = [] x1_list = [] y1_list = [] features_list = data[0] target_list = data[1] for i in range(len(features_list)): x = features_list[i][features_dict[f1]] y = features_list[i][features_dict[f2]] if target_list[i] == 0: x0_list.append(x) y0_list.append(y) elif target_list[i] == 1: x1_list.append(x) y1_list.append(y) else: pass debt_data = get_data('debt') plot_two_fields(debt_data, 'debt', 'annual_inc', 'int_rate', [1000.0, 10000000.0, 5.0, 30.0], 'annual income', 'interest rate', 'semilogx')
code
320604/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import sqlite3 import matplotlib.pyplot as plt import numpy as np import sqlite3 from sklearn import tree def sql_query(s): """Return results for a SQL query. Arguments: s (str) -- SQL query string Returns: (list) -- SQL query results """ conn = sqlite3.connect('../input/database.sqlite') c = conn.cursor() c.execute(s) result = c.fetchall() conn.close() return result def print_details(): """Print database details including table names and the number of rows. """ table_names = sql_query('SELECT name FROM sqlite_master ' + "WHERE type='table' " + 'ORDER BY name;')[0][0] num_rows = sql_query('SELECT COUNT(*) FROM loan;')[0][0] def print_column_names(): """Print the column names in the 'loan' table. Note that the "index" column name is specific to Python and is not part of the original SQLite database. """ conn = sqlite3.connect('../input/database.sqlite') conn.row_factory = sqlite3.Row c = conn.cursor() c.execute('SELECT * FROM loan LIMIT 2;') r = c.fetchone() i = 1 for k in r.keys(): i += 1 conn.close() emp_length_dict = {'n/a': 0, '< 1 year': 0, '1 year': 1, '2 years': 2, '3 years': 3, '4 years': 4, '5 years': 5, '6 years': 6, '7 years': 7, '8 years': 8, '9 years': 9, '10+ years': 10} home_ownership_dict = {'MORTGAGE': 0, 'OWN': 1, 'RENT': 2, 'OTHER': 3, 'NONE': 4, 'ANY': 5} features_dict = {'loan_amnt': 0, 'int_rate': 1, 'annual_inc': 2, 'delinq_2yrs': 3, 'open_acc': 4, 'dti': 5, 'emp_length': 6, 'funded_amnt': 7, 'tot_cur_bal': 8, 'home_ownership': 9} def get_data(s): """Return features and targets for a specific search term. Arguments: s (str) -- string to search for in loan "title" field Returns: (list of lists) -- [list of feature tuples, list of targets] (features) -- [(sample1 features), (sample2 features),...] (target) -- [sample1 target, sample2 target,...] 
""" data = sql_query('SELECT ' + 'loan_amnt,int_rate,annual_inc,' + 'loan_status,title,delinq_2yrs,' + 'open_acc,dti,emp_length,' + 'funded_amnt,tot_cur_bal,home_ownership ' + 'FROM loan ' + "WHERE application_type='INDIVIDUAL';") features_list = [] target_list = [] n = 0 n0 = 0 n1 = 0 for d in data: test0 = isinstance(d[0], float) test1 = isinstance(d[1], str) test2 = isinstance(d[2], float) test3 = isinstance(d[3], str) test4 = isinstance(d[4], str) test5 = isinstance(d[5], float) test6 = isinstance(d[6], float) test7 = isinstance(d[7], float) test8 = isinstance(d[8], str) test9 = isinstance(d[9], float) test10 = isinstance(d[10], float) if test0 and test1 and test2 and test3 and test4 and test5 and test6 and test7 and test8 and test9 and test10: try: d1_float = float(d[1].replace('%', '')) except: continue try: e = emp_length_dict[d[8]] except: continue try: h = home_ownership_dict[d[11]] except: continue if s.lower() in d[4].lower(): if d[3] == 'Fully Paid' or d[3] == 'Current': target = 0 n += 1 n0 += 1 elif 'Late' in d[3] or d[3] == 'Charged Off': target = 1 n += 1 n1 += 1 else: continue features = (d[0], float(d[1].replace('%', '')), d[2], d[5], d[6], d[7], emp_length_dict[d[8]], d[9], d[10], home_ownership_dict[d[11]]) features_list.append(features) target_list.append(target) else: pass result = [features_list, target_list] return result def create_scatter_plot(x0_data, y0_data, x1_data, y1_data, pt, pa, x_label, y_label, axis_type): ax = plt.gca() ax.set_axis_bgcolor('#BBBBBB') ax.set_axisbelow(True) plt.axis(pa) plt.xticks(fontsize=16) plt.yticks(fontsize=16) if axis_type == 'semilogx': plt.semilogx(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogx(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'semilogy': plt.semilogy(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.semilogy(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') elif axis_type == 'loglog': plt.loglog(x0_data, y0_data, label='0: "Fully Paid" or "Current"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='b') plt.loglog(x1_data, y1_data, label='1: "Late" or "Charged Off"', linestyle='None', marker='.', markersize=8, alpha=0.5, color='r') plt.clf() def plot_two_fields(data, s, f1, f2, pa, x_label, y_label, axis_type): x0_list = [] y0_list = [] x1_list = [] y1_list = [] features_list = data[0] target_list = data[1] for i in range(len(features_list)): x = features_list[i][features_dict[f1]] y = features_list[i][features_dict[f2]] if target_list[i] == 0: x0_list.append(x) y0_list.append(y) elif target_list[i] == 1: x1_list.append(x) y1_list.append(y) else: pass cc_data = get_data('credit card') plot_two_fields(cc_data, 'credit card', 'loan_amnt', 'funded_amnt', [0.0, 35000.0, 0.0, 35000.0], 'loan amount', 'funded amount', 'standard')
code
1003319/cell_9
[ "image_output_1.png" ]
import os # for doing directory operations import dicom import os import pandas as pd data_dir = '../input/sample_images/' patients = os.listdir(data_dir) patients file_list = os.listdir('../input/') file_list len(patients)
code
1003319/cell_2
[ "text_plain_output_1.png" ]
import os # for doing directory operations import dicom import os import pandas as pd data_dir = '../input/sample_images/' patients = os.listdir(data_dir) patients file_list = os.listdir('../input/') file_list
code
1003319/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output """ So I want to test some transformations and changes to the modelling data: ** for each of the below get it running on 5% then validate on 20% modelling data) (1) Try Different combinations of "resizing data" (2) Try "Resampling approach" in pre-processing (3) Try all of the transformations below on pixel array and MXNET transformed features reciprocal t = 1 / x log base 10 t = log_10 x 10 to the power x = 10^t log base e t = log_e x = ln x e to the power x = exp(t) log base 2 t = log_2 x 2 to the power x = 2^t cube root t = x^(1/3) cube x = t^3 square root t = x^(1/2) square x = t^2 (data.dat$Y)^(1/9) Takes the ninth root of Y abs(data.dat$Y) Finds the absolute value of Y ** Try all known trigonometric functions ** sin(data.dat$Y) Calculates the sine of Y asin(data.dat$Y) Calculates the inverse sine (arcsine) of Y (4) Try Masking all images then try all transformation in (3) (5) Try dropping all columns with no useful information """ import numpy as np import pandas as pd from subprocess import check_output print(check_output(['ls', '../input']).decode('utf8'))
code
1003319/cell_8
[ "text_plain_output_1.png" ]
import dicom  # for reading dicom files
import os  # for doing directory operations
import pandas as pd

data_dir = '../input/sample_images/'
patients = os.listdir(data_dir)
patients
file_list = os.listdir('../input/')
file_list
for patient in patients[:1]:
    path = data_dir + patient
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))
for patient in patients[:3]:
    path = data_dir + patient
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))
    print(slices[0].pixel_array.shape, len(slices))
code
1003319/cell_3
[ "text_plain_output_1.png" ]
import dicom  # for reading dicom files
import os  # for doing directory operations
import pandas as pd

data_dir = '../input/sample_images/'
patients = os.listdir(data_dir)
patients
file_list = os.listdir('../input/')
file_list
for patient in patients[:1]:
    path = data_dir + patient
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))
    print(len(slices))
    print(slices[0])
code
1003319/cell_10
[ "text_plain_output_1.png" ]
import dicom  # for reading dicom files
import matplotlib.pyplot as plt
import os  # for doing directory operations
import pandas as pd

data_dir = '../input/sample_images/'
patients = os.listdir(data_dir)
patients
file_list = os.listdir('../input/')
file_list
for patient in patients[:1]:
    path = data_dir + patient
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))
for patient in patients[:3]:
    path = data_dir + patient
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))
for patient in patients[:1]:
    path = data_dir + patient
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    slices.sort(key=lambda x: int(x.ImagePositionPatient[2]))
    plt.imshow(slices[0].pixel_array)
    plt.show()
code
128043747/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
from pygpt4all.models.gpt4all import GPT4All

model = GPT4All('ggml-gpt4all-l13b-snoozy.bin', n_ctx=2048)
code
128043747/cell_1
[ "text_plain_output_1.png" ]
! pip install pygpt4all
! wget http://gpt4all.io/models/ggml-gpt4all-l13b-snoozy.bin
code
128043747/cell_7
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_4.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from bs4 import BeautifulSoup
from pygpt4all.models.gpt4all import GPT4All
import requests

model = GPT4All('ggml-gpt4all-l13b-snoozy.bin', n_ctx=2048)

def gpt4all(prompt):
    # Stream up to 500 generated tokens straight to stdout.
    model.generate(prompt, n_predict=500, new_text_callback=lambda x: print(x, end=''))

repo = '0xk1h0/ChatGPT_DAN'
url = f'https://raw.githubusercontent.com/{repo}/main/README.md'
bsoup = BeautifulSoup(requests.get(url).content.decode('utf8'), 'html.parser')
dans = {}
for li in bsoup.find_all('li'):
    details = li.get_text('\n').split('\n')
    details = [p for p in details if p]
    name, dan = (details[0], '\n'.join(details[1:]))
    dans[name] = dan
dan_context = dans['The Jailbreak Prompt']
model_to_tune = 'togethercomputer/GPT-NeoXT-Chat-Base-20B'
prompt = (f'Using this prompt as training data, "{dan_context}"\n\n'
          f'Question: Can you generate Python code to fine-tune using the {model_to_tune} '
          f'model with the Huggingface transformers library?\n\nAnswer:')
gpt4all(prompt)
code
128043747/cell_5
[ "text_plain_output_1.png" ]
from bs4 import BeautifulSoup
import requests

repo = '0xk1h0/ChatGPT_DAN'
url = f'https://raw.githubusercontent.com/{repo}/main/README.md'
bsoup = BeautifulSoup(requests.get(url).content.decode('utf8'), 'html.parser')
dans = {}
for li in bsoup.find_all('li'):
    details = li.get_text('\n').split('\n')
    details = [p for p in details if p]
    name, dan = (details[0], '\n'.join(details[1:]))
    dans[name] = dan
dan_context = dans['The Jailbreak Prompt']
dan_context
code
128045992/cell_13
[ "text_plain_output_1.png" ]
from tensorflow.keras import layers
import cv2
import numpy as np
import os
import tensorflow as tf

subdir = ['angry', 'notAngry']
target = {'angry': 0, 'notAngry': 1}
dataset = '../input/fer2013new2class/FER2013NEW2CLASS/train/'
X = []
y = []
for emotions in subdir:
    for img_names in os.listdir(dataset + '/' + emotions):
        load_images = cv2.imread(dataset + '/' + emotions + '/' + img_names)
        X.append(load_images)
        y.append(target[emotions])
X, y = (np.array(X), np.array(y))
(X.shape, y.shape)
y = tf.keras.utils.to_categorical(y, num_classes=2)
y.shape

class SEBlock(layers.Layer):
    """Squeeze-and-Excitation block: channel-wise feature recalibration."""

    def __init__(self, channels, reduction=16):
        super(SEBlock, self).__init__()
        self.avg_pool = layers.GlobalAveragePooling2D()
        self.fc1 = layers.Dense(channels // reduction, activation='relu')
        self.fc2 = layers.Dense(channels, activation='sigmoid')

    def call(self, input_tensor):
        c = input_tensor.shape[-1]
        y = self.avg_pool(input_tensor)   # squeeze: (batch, channels)
        y = self.fc1(y)
        y = self.fc2(y)
        # Use -1 for the batch dimension: input_tensor.shape[0] is None for
        # symbolic tensors and would make tf.reshape fail.
        y = tf.reshape(y, [-1, 1, 1, c])
        return input_tensor * y           # excite: rescale each channel

def create_model():
    model = tf.keras.Sequential([
        layers.Conv2D(32, (3, 3), activation='relu', input_shape=(48, 48, 3)),
        SEBlock(32),
        layers.BatchNormalization(),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Conv2D(64, (3, 3), activation='relu'),
        SEBlock(64),
        layers.BatchNormalization(),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Flatten(),
        layers.Dense(128, activation='relu'),
        layers.Dense(2, activation='sigmoid'),
    ])
    return model

model = create_model()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
code
128045992/cell_4
[ "text_plain_output_1.png" ]
import cv2
import numpy as np
import os

subdir = ['angry', 'notAngry']
target = {'angry': 0, 'notAngry': 1}
dataset = '../input/fer2013new2class/FER2013NEW2CLASS/train/'
X = []
y = []
for emotions in subdir:
    for img_names in os.listdir(dataset + '/' + emotions):
        load_images = cv2.imread(dataset + '/' + emotions + '/' + img_names)
        X.append(load_images)
        y.append(target[emotions])
X, y = (np.array(X), np.array(y))
(X.shape, y.shape)
code
128045992/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
# NOTE: assumes a prior train_test_split cell (not captured in this dump)
# defined X_train, X_test, y_train and y_test; as captured, this cell errored.
(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
code
128045992/cell_3
[ "text_plain_output_1.png" ]
import cv2
import os

subdir = ['angry', 'notAngry']
target = {'angry': 0, 'notAngry': 1}
dataset = '../input/fer2013new2class/FER2013NEW2CLASS/train/'
X = []
y = []
for emotions in subdir:
    for img_names in os.listdir(dataset + '/' + emotions):
        load_images = cv2.imread(dataset + '/' + emotions + '/' + img_names)
        X.append(load_images)
        y.append(target[emotions])
print('X:', len(X), 'y:', len(y))
code
128045992/cell_5
[ "text_plain_output_1.png" ]
import cv2
import numpy as np
import os
import tensorflow as tf

subdir = ['angry', 'notAngry']
target = {'angry': 0, 'notAngry': 1}
dataset = '../input/fer2013new2class/FER2013NEW2CLASS/train/'
X = []
y = []
for emotions in subdir:
    for img_names in os.listdir(dataset + '/' + emotions):
        load_images = cv2.imread(dataset + '/' + emotions + '/' + img_names)
        X.append(load_images)
        y.append(target[emotions])
X, y = (np.array(X), np.array(y))
(X.shape, y.shape)
y = tf.keras.utils.to_categorical(y, num_classes=2)
y.shape
code
16142841/cell_1
[ "text_plain_output_1.png" ]
import os

print(os.listdir('../input'))
code
16142841/cell_7
[ "text_plain_output_1.png" ]
print('End')
code
16142841/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

train_df = pd.read_csv('../input/train_data.csv')
test_df = pd.read_csv('../input/test_data.csv')
code
122244636/cell_4
[ "text_plain_output_1.png" ]
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
import cv2 as cv
import numpy as np
import os
import re

X = []
Y = []
input_shape = (96, 96, 3)
path_to_subset = '../input/apparel-images-dataset/'
for folder in os.listdir(path_to_subset):
    for image in os.listdir(os.path.join(path_to_subset, folder)):
        path_to_image = os.path.join(path_to_subset, folder, image)
        image = cv.imread(path_to_image)
        image = cv.resize(image, (input_shape[1], input_shape[0]))
        label = re.findall('\\w+\\_\\w+', path_to_image)[0].split('_')
        X.append(image)
        Y.append(label)
X = np.array(X) / 255.0
Y = np.array(Y)
mlb = MultiLabelBinarizer()
Y = mlb.fit_transform(Y)
x, test_x, y, test_y = train_test_split(X, Y, test_size=0.1, stratify=Y, shuffle=True, random_state=1)
train_x, val_x, train_y, val_y = train_test_split(x, y, test_size=0.2, stratify=y, shuffle=True, random_state=1)
print(x.shape, test_x.shape, y.shape, test_y.shape)
print(train_x.shape, val_x.shape, train_y.shape, val_y.shape)
datagen = ImageDataGenerator(rotation_range=45, width_shift_range=0.1, height_shift_range=0.1,
                             zoom_range=0.2, horizontal_flip=True, validation_split=0.2)
code
122244636/cell_6
[ "image_output_1.png" ]
from keras.layers import BatchNormalization, Activation, Dropout
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from keras.models import Sequential
from keras.utils import plot_model
from sklearn.preprocessing import MultiLabelBinarizer
import cv2 as cv
import numpy as np
import os
import re

X = []
Y = []
input_shape = (96, 96, 3)
path_to_subset = '../input/apparel-images-dataset/'
for folder in os.listdir(path_to_subset):
    for image in os.listdir(os.path.join(path_to_subset, folder)):
        path_to_image = os.path.join(path_to_subset, folder, image)
        image = cv.imread(path_to_image)
        image = cv.resize(image, (input_shape[1], input_shape[0]))
        label = re.findall('\\w+\\_\\w+', path_to_image)[0].split('_')
        X.append(image)
        Y.append(label)
X = np.array(X) / 255.0
Y = np.array(Y)
mlb = MultiLabelBinarizer()
Y = mlb.fit_transform(Y)

model = Sequential()
model.add(Conv2D(32, 3, padding='same', input_shape=input_shape, kernel_initializer='he_normal', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(3))
model.add(Dropout(0.25))
model.add(Conv2D(64, 3, padding='same', kernel_initializer='he_normal', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, 3, padding='same', kernel_initializer='he_normal', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D())
model.add(Dropout(0.25))
model.add(Conv2D(128, 3, padding='same', kernel_initializer='he_normal', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(128, 2, padding='same', kernel_initializer='he_normal', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D())
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1024, activation='relu', kernel_initializer='he_normal'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.summary()
model.add(Dense(len(mlb.classes_), activation='sigmoid'))
plot_model(model)
code
122244636/cell_7
[ "text_plain_output_100.png", "text_plain_output_334.png", "text_plain_output_673.png", "text_plain_output_445.png", "text_plain_output_640.png", "text_plain_output_201.png", "text_plain_output_586.png", "text_plain_output_261.png", "text_plain_output_565.png", "text_plain_output_522.png", "text_plain_output_84.png", "text_plain_output_624.png", "text_plain_output_521.png", "text_plain_output_322.png", "text_plain_output_205.png", "text_plain_output_693.png", "text_plain_output_511.png", "text_plain_output_608.png", "text_plain_output_271.png", "text_plain_output_56.png", "text_plain_output_475.png", "text_plain_output_158.png", "text_plain_output_455.png", "text_plain_output_223.png", "text_plain_output_218.png", "text_plain_output_264.png", "text_plain_output_282.png", "text_plain_output_579.png", "text_plain_output_629.png", "text_plain_output_396.png", "text_plain_output_287.png", "text_plain_output_232.png", "text_plain_output_181.png", "text_plain_output_137.png", "text_plain_output_139.png", "text_plain_output_362.png", "text_plain_output_35.png", "text_plain_output_501.png", "text_plain_output_593.png", "text_plain_output_258.png", "text_plain_output_685.png", "text_plain_output_452.png", "text_plain_output_130.png", "text_plain_output_598.png", "text_plain_output_490.png", "text_plain_output_449.png", "text_plain_output_462.png", "text_plain_output_117.png", "text_plain_output_286.png", "text_plain_output_367.png", "text_plain_output_262.png", "text_plain_output_278.png", "text_plain_output_588.png", "text_plain_output_395.png", "text_plain_output_617.png", "text_plain_output_254.png", "text_plain_output_307.png", "text_plain_output_570.png", "text_plain_output_674.png", "text_plain_output_98.png", "text_plain_output_399.png", "text_plain_output_671.png", "text_plain_output_236.png", "text_plain_output_195.png", "text_plain_output_678.png", "text_plain_output_688.png", "text_plain_output_471.png", "text_plain_output_219.png", "text_plain_output_614.png", "text_plain_output_420.png", "text_plain_output_514.png", "text_plain_output_485.png", "text_plain_output_237.png", "text_plain_output_43.png", "text_plain_output_284.png", "text_plain_output_187.png", "text_plain_output_309.png", "text_plain_output_576.png", "text_plain_output_78.png", "text_plain_output_143.png", "text_plain_output_106.png", "text_plain_output_37.png", "text_plain_output_138.png", "text_plain_output_670.png", "text_plain_output_544.png", "text_plain_output_192.png", "text_plain_output_426.png", "text_plain_output_184.png", "text_plain_output_477.png", "text_plain_output_274.png", "text_plain_output_172.png", "text_plain_output_664.png", "text_plain_output_627.png", "text_plain_output_613.png", "text_plain_output_332.png", "text_plain_output_147.png", "text_plain_output_443.png", "text_plain_output_327.png", "text_plain_output_684.png", "text_plain_output_256.png", "text_plain_output_90.png", "text_plain_output_79.png", "text_plain_output_331.png", "text_plain_output_5.png", "text_plain_output_642.png", "text_plain_output_550.png", "text_plain_output_75.png", "text_plain_output_48.png", "text_plain_output_388.png", "text_plain_output_422.png", "text_plain_output_116.png", "text_plain_output_128.png", "text_plain_output_30.png", "text_plain_output_167.png", "text_plain_output_213.png", "text_plain_output_73.png", "text_plain_output_126.png", "text_plain_output_676.png", "text_plain_output_687.png", "text_plain_output_492.png", "text_plain_output_321.png", "text_plain_output_272.png", "text_plain_output_115.png", 
"text_plain_output_474.png", "text_plain_output_407.png", "text_plain_output_482.png", "text_plain_output_316.png", "text_plain_output_634.png", "text_plain_output_656.png", "text_plain_output_355.png", "text_plain_output_15.png", "text_plain_output_390.png", "text_plain_output_133.png", "text_plain_output_651.png", "text_plain_output_437.png", "text_plain_output_198.png", "text_plain_output_387.png", "text_plain_output_555.png", "text_plain_output_548.png", "text_plain_output_178.png", "text_plain_output_226.png", "text_plain_output_154.png", "text_plain_output_234.png", "text_plain_output_375.png", "text_plain_output_404.png", "text_plain_output_114.png", "text_plain_output_659.png", "text_plain_output_515.png", "text_plain_output_157.png", "text_plain_output_494.png", "text_plain_output_317.png", "text_plain_output_251.png", "text_plain_output_470.png", "text_plain_output_496.png", "text_plain_output_423.png", "text_plain_output_70.png", "text_plain_output_9.png", "text_plain_output_484.png", "text_plain_output_44.png", "text_plain_output_633.png", "text_plain_output_325.png", "text_plain_output_203.png", "text_plain_output_505.png", "text_plain_output_603.png", "text_plain_output_655.png", "text_plain_output_119.png", "text_plain_output_546.png", "text_plain_output_540.png", "text_plain_output_373.png", "text_plain_output_504.png", "text_plain_output_86.png", "text_plain_output_244.png", "text_plain_output_118.png", "text_plain_output_551.png", "text_plain_output_583.png", "text_plain_output_131.png", "text_plain_output_40.png", "text_plain_output_343.png", "text_plain_output_123.png", "text_plain_output_74.png", "text_plain_output_190.png", "text_plain_output_302.png", "text_plain_output_604.png", "text_plain_output_31.png", "text_plain_output_340.png", "text_plain_output_379.png", "text_plain_output_281.png", "text_plain_output_639.png", "text_plain_output_20.png", "text_plain_output_557.png", "text_plain_output_273.png", "text_plain_output_263.png", "text_plain_output_102.png", "text_plain_output_229.png", "text_plain_output_111.png", "text_plain_output_686.png", "text_plain_output_669.png", "text_plain_output_414.png", "text_plain_output_461.png", "text_plain_output_510.png", "text_plain_output_222.png", "text_plain_output_589.png", "text_plain_output_101.png", "text_plain_output_530.png", "text_plain_output_169.png", "text_plain_output_531.png", "text_plain_output_144.png", "text_plain_output_161.png", "text_plain_output_489.png", "text_plain_output_305.png", "text_plain_output_275.png", "text_plain_output_301.png", "text_plain_output_132.png", "text_plain_output_60.png", "text_plain_output_691.png", "text_plain_output_467.png", "text_plain_output_502.png", "text_plain_output_221.png", "text_plain_output_596.png", "text_plain_output_564.png", "text_plain_output_552.png", "text_plain_output_654.png", "text_plain_output_330.png", "text_plain_output_155.png", "text_plain_output_638.png", "text_plain_output_434.png", "text_plain_output_68.png", "text_plain_output_4.png", "text_plain_output_65.png", "text_plain_output_618.png", "text_plain_output_64.png", "text_plain_output_419.png", "text_plain_output_215.png", "text_plain_output_532.png", "text_plain_output_189.png", "text_plain_output_415.png", "text_plain_output_637.png", "text_plain_output_13.png", "text_plain_output_200.png", "text_plain_output_666.png", "text_plain_output_107.png", "text_plain_output_567.png", "text_plain_output_628.png", "text_plain_output_398.png", "text_plain_output_312.png", "text_plain_output_248.png", 
"text_plain_output_695.png", "text_plain_output_318.png", "text_plain_output_417.png", "text_plain_output_690.png", "text_plain_output_52.png", "text_plain_output_545.png", "text_plain_output_393.png", "text_plain_output_572.png", "text_plain_output_594.png", "text_plain_output_66.png", "text_plain_output_446.png", "text_plain_output_243.png", "text_plain_output_611.png", "text_plain_output_45.png", "text_plain_output_380.png", "text_plain_output_599.png", "text_plain_output_692.png", "text_plain_output_442.png", "text_plain_output_665.png", "text_plain_output_300.png", "text_plain_output_660.png", "text_plain_output_257.png", "text_plain_output_405.png", "text_plain_output_353.png", "text_plain_output_476.png", "text_plain_output_277.png", "text_plain_output_457.png", "text_plain_output_361.png", "text_plain_output_171.png", "text_plain_output_518.png", "text_plain_output_561.png", "text_plain_output_431.png", "text_plain_output_14.png", "text_plain_output_159.png", "text_plain_output_32.png", "text_plain_output_516.png", "text_plain_output_304.png", "text_plain_output_88.png", "text_plain_output_240.png", "text_plain_output_29.png", "text_plain_output_359.png", "text_plain_output_529.png", "text_plain_output_347.png", "text_plain_output_140.png", "text_plain_output_606.png", "text_plain_output_376.png", "text_plain_output_280.png", "text_plain_output_129.png", "text_plain_output_349.png", "text_plain_output_242.png", "text_plain_output_483.png", "text_plain_output_460.png", "text_plain_output_363.png", "text_plain_output_289.png", "text_plain_output_255.png", "text_plain_output_160.png", "text_plain_output_58.png", "text_plain_output_680.png", "text_plain_output_622.png", "text_plain_output_329.png", "text_plain_output_49.png", "text_plain_output_63.png", "text_plain_output_260.png", "text_plain_output_294.png", "text_plain_output_27.png", "text_plain_output_392.png", "text_plain_output_320.png", "text_plain_output_177.png", "text_plain_output_607.png", "text_plain_output_386.png", "text_plain_output_438.png", "text_plain_output_76.png", "text_plain_output_681.png", "text_plain_output_333.png", "text_plain_output_108.png", "text_plain_output_581.png", "text_plain_output_54.png", "text_plain_output_142.png", "text_plain_output_10.png", "text_plain_output_269.png", "text_plain_output_276.png", "text_plain_output_6.png", "text_plain_output_326.png", "text_plain_output_503.png", "text_plain_output_578.png", "text_plain_output_153.png", "text_plain_output_170.png", "text_plain_output_92.png", "text_plain_output_658.png", "text_plain_output_57.png", "text_plain_output_120.png", "text_plain_output_469.png", "text_plain_output_24.png", "text_plain_output_357.png", "text_plain_output_21.png", "text_plain_output_344.png", "text_plain_output_104.png", "text_plain_output_270.png", "text_plain_output_47.png", "text_plain_output_623.png", "text_plain_output_466.png", "text_plain_output_568.png", "text_plain_output_121.png", "text_plain_output_25.png", "text_plain_output_134.png", "text_plain_output_523.png", "text_plain_output_401.png", "text_plain_output_77.png", "text_plain_output_421.png", "text_plain_output_288.png", "text_plain_output_535.png", "text_plain_output_527.png", "text_plain_output_488.png", "text_plain_output_18.png", "text_plain_output_183.png", "text_plain_output_266.png", "text_plain_output_149.png", "text_plain_output_208.png", "text_plain_output_50.png", "text_plain_output_36.png", "text_plain_output_646.png", "text_plain_output_383.png", "text_plain_output_207.png", 
"text_plain_output_391.png", "text_plain_output_413.png", "text_plain_output_96.png", "text_plain_output_663.png", "text_plain_output_87.png", "text_plain_output_217.png", "text_plain_output_418.png", "text_plain_output_657.png", "text_plain_output_427.png", "text_plain_output_180.png", "text_plain_output_556.png", "text_plain_output_141.png", "text_plain_output_210.png", "text_plain_output_112.png", "text_plain_output_152.png", "text_plain_output_225.png", "text_plain_output_191.png", "text_plain_output_609.png", "text_plain_output_259.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_447.png", "text_plain_output_290.png", "text_plain_output_506.png", "text_plain_output_283.png", "text_plain_output_495.png", "text_plain_output_247.png", "text_plain_output_113.png", "text_plain_output_371.png", "text_plain_output_479.png", "text_plain_output_324.png", "text_plain_output_22.png", "text_plain_output_188.png", "text_plain_output_366.png", "text_plain_output_328.png", "text_plain_output_81.png", "text_plain_output_69.png", "text_plain_output_368.png", "text_plain_output_667.png", "text_plain_output_372.png", "text_plain_output_175.png", "text_plain_output_165.png", "text_plain_output_542.png", "text_plain_output_146.png", "text_plain_output_145.png", "text_plain_output_125.png", "text_plain_output_454.png", "text_plain_output_487.png", "text_plain_output_595.png", "text_plain_output_643.png", "text_plain_output_338.png", "text_plain_output_575.png", "text_plain_output_197.png", "text_plain_output_512.png", "text_plain_output_382.png", "text_plain_output_315.png", "text_plain_output_429.png", "text_plain_output_38.png", "text_plain_output_517.png", "text_plain_output_682.png", "text_plain_output_433.png", "text_plain_output_7.png", "text_plain_output_528.png", "text_plain_output_648.png", "text_plain_output_214.png", "text_plain_output_166.png", "text_plain_output_358.png", "text_plain_output_513.png", "text_plain_output_314.png", "text_plain_output_592.png", "text_plain_output_410.png", "text_plain_output_432.png", "text_plain_output_645.png", "text_plain_output_411.png", "text_plain_output_91.png", "text_plain_output_308.png", "text_plain_output_245.png", "text_plain_output_16.png", "text_plain_output_497.png", "text_plain_output_174.png", "text_plain_output_212.png", "text_plain_output_652.png", "text_plain_output_644.png", "text_plain_output_230.png", "text_plain_output_265.png", "text_plain_output_430.png", "text_plain_output_630.png", "text_plain_output_435.png", "text_plain_output_689.png", "text_plain_output_378.png", "text_plain_output_59.png", "text_plain_output_580.png", "text_plain_output_409.png", "text_plain_output_206.png", "text_plain_output_103.png", "text_plain_output_71.png", "text_plain_output_539.png", "text_plain_output_8.png", "text_plain_output_122.png", "text_plain_output_384.png", "text_plain_output_498.png", "text_plain_output_211.png", "text_plain_output_662.png", "text_plain_output_182.png", "text_plain_output_26.png", "text_plain_output_601.png", "text_plain_output_554.png", "text_plain_output_536.png", "text_plain_output_620.png", "text_plain_output_406.png", "text_plain_output_310.png", "text_plain_output_456.png", "text_plain_output_541.png", "text_plain_output_558.png", "text_plain_output_668.png", "text_plain_output_220.png", "text_plain_output_653.png", "text_plain_output_543.png", "text_plain_output_451.png", "text_plain_output_109.png", "text_plain_output_459.png", "text_plain_output_238.png", "text_plain_output_520.png", 
"text_plain_output_616.png", "text_plain_output_615.png", "text_plain_output_41.png", "text_plain_output_34.png", "text_plain_output_612.png", "text_plain_output_253.png", "text_plain_output_346.png", "text_plain_output_291.png", "text_plain_output_168.png", "text_plain_output_394.png", "text_plain_output_204.png", "text_plain_output_241.png", "text_plain_output_231.png", "text_plain_output_533.png", "text_plain_output_345.png", "text_plain_output_649.png", "text_plain_output_350.png", "text_plain_output_209.png", "text_plain_output_185.png", "text_plain_output_85.png", "text_plain_output_636.png", "text_plain_output_42.png", "text_plain_output_110.png", "text_plain_output_605.png", "text_plain_output_549.png", "text_plain_output_67.png", "text_plain_output_508.png", "text_plain_output_573.png", "text_plain_output_468.png", "text_plain_output_370.png", "text_plain_output_297.png", "text_plain_output_53.png", "text_plain_output_313.png", "text_plain_output_224.png", "text_plain_output_635.png", "text_plain_output_193.png", "text_plain_output_441.png", "text_plain_output_403.png", "text_plain_output_23.png", "text_plain_output_610.png", "text_plain_output_173.png", "text_plain_output_683.png", "text_plain_output_235.png", "text_plain_output_151.png", "text_plain_output_89.png", "text_plain_output_299.png", "text_plain_output_632.png", "text_plain_output_51.png", "text_plain_output_677.png", "text_plain_output_626.png", "text_plain_output_450.png", "text_plain_output_252.png", "text_plain_output_296.png", "text_plain_output_525.png", "text_plain_output_672.png", "text_plain_output_28.png", "text_plain_output_72.png", "text_plain_output_99.png", "text_plain_output_381.png", "text_plain_output_571.png", "text_plain_output_163.png", "text_plain_output_179.png", "text_plain_output_537.png", "text_plain_output_162.png", "text_plain_output_136.png", "text_plain_output_602.png", "text_plain_output_246.png", "text_plain_output_2.png", "text_plain_output_569.png", "text_plain_output_239.png", "text_plain_output_127.png", "text_plain_output_559.png", "text_plain_output_311.png", "text_plain_output_500.png", "text_plain_output_295.png", "text_plain_output_279.png", "text_plain_output_507.png", "text_plain_output_590.png", "text_plain_output_509.png", "text_plain_output_337.png", "text_plain_output_562.png", "text_plain_output_499.png", "text_plain_output_196.png", "text_plain_output_342.png", "text_plain_output_563.png", "text_plain_output_97.png", "text_plain_output_227.png", "text_plain_output_453.png", "text_plain_output_33.png", "text_plain_output_650.png", "text_plain_output_150.png", "application_vnd.jupyter.stderr_output_1.png", "text_plain_output_631.png", "text_plain_output_39.png", "text_plain_output_176.png", "text_plain_output_584.png", "text_plain_output_335.png", "text_plain_output_186.png", "text_plain_output_233.png", "text_plain_output_228.png", "text_plain_output_473.png", "text_plain_output_385.png", "text_plain_output_478.png", "text_plain_output_55.png", "text_plain_output_412.png", "text_plain_output_293.png", "text_plain_output_268.png", "text_plain_output_436.png", "text_plain_output_199.png", "text_plain_output_354.png", "text_plain_output_463.png", "text_plain_output_360.png", "text_plain_output_319.png", "text_plain_output_82.png", "text_plain_output_356.png", "text_plain_output_202.png", "text_plain_output_93.png", "text_plain_output_336.png", "text_plain_output_19.png", "text_plain_output_439.png", "text_plain_output_341.png", "text_plain_output_105.png", 
"text_plain_output_465.png", "text_plain_output_80.png", "text_plain_output_491.png", "text_plain_output_679.png", "text_plain_output_641.png", "text_plain_output_94.png", "text_plain_output_164.png", "text_plain_output_249.png", "text_plain_output_534.png", "text_plain_output_444.png", "text_plain_output_619.png", "text_plain_output_216.png", "text_plain_output_124.png", "text_plain_output_17.png", "text_plain_output_148.png", "text_plain_output_323.png", "text_plain_output_694.png", "text_plain_output_402.png", "text_plain_output_424.png", "text_plain_output_486.png", "text_plain_output_597.png", "text_plain_output_250.png", "text_plain_output_11.png", "text_plain_output_481.png", "text_plain_output_560.png", "text_plain_output_526.png", "text_plain_output_400.png", "text_plain_output_524.png", "text_plain_output_538.png", "text_plain_output_12.png", "text_plain_output_267.png", "text_plain_output_553.png", "text_plain_output_408.png", "text_plain_output_425.png", "text_plain_output_591.png", "text_plain_output_428.png", "text_plain_output_416.png", "text_plain_output_625.png", "text_plain_output_194.png", "text_plain_output_577.png", "text_plain_output_519.png", "text_plain_output_62.png", "text_plain_output_480.png", "text_plain_output_303.png", "text_plain_output_621.png", "text_plain_output_377.png", "text_plain_output_440.png", "text_plain_output_95.png", "text_plain_output_339.png", "text_plain_output_458.png", "text_plain_output_464.png", "text_plain_output_156.png", "text_plain_output_547.png", "text_plain_output_298.png", "text_plain_output_369.png", "text_plain_output_348.png", "text_plain_output_587.png", "text_plain_output_448.png", "text_plain_output_364.png", "text_plain_output_365.png", "text_plain_output_61.png", "text_plain_output_585.png", "text_plain_output_352.png", "text_plain_output_83.png", "text_plain_output_374.png", "text_plain_output_647.png", "text_plain_output_472.png", "text_plain_output_566.png", "text_plain_output_397.png", "text_plain_output_600.png", "text_plain_output_661.png", "text_plain_output_292.png", "text_plain_output_351.png", "text_plain_output_135.png", "text_plain_output_285.png", "text_plain_output_574.png", "text_plain_output_582.png", "text_plain_output_306.png", "text_plain_output_675.png", "text_plain_output_493.png", "text_plain_output_46.png" ]
from keras.callbacks import ModelCheckpoint
from keras.layers import BatchNormalization, Activation, Dropout
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
import cv2 as cv
import numpy as np
import os
import re

X = []
Y = []
input_shape = (96, 96, 3)
path_to_subset = '../input/apparel-images-dataset/'
for folder in os.listdir(path_to_subset):
    for image in os.listdir(os.path.join(path_to_subset, folder)):
        path_to_image = os.path.join(path_to_subset, folder, image)
        image = cv.imread(path_to_image)
        image = cv.resize(image, (input_shape[1], input_shape[0]))
        label = re.findall('\\w+\\_\\w+', path_to_image)[0].split('_')
        X.append(image)
        Y.append(label)
X = np.array(X) / 255.0
Y = np.array(Y)
mlb = MultiLabelBinarizer()
Y = mlb.fit_transform(Y)
x, test_x, y, test_y = train_test_split(X, Y, test_size=0.1, stratify=Y, shuffle=True, random_state=1)
train_x, val_x, train_y, val_y = train_test_split(x, y, test_size=0.2, stratify=y, shuffle=True, random_state=1)
datagen = ImageDataGenerator(rotation_range=45, width_shift_range=0.1, height_shift_range=0.1,
                             zoom_range=0.2, horizontal_flip=True, validation_split=0.2)

model = Sequential()
model.add(Conv2D(32, 3, padding='same', input_shape=input_shape, kernel_initializer='he_normal', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(3))
model.add(Dropout(0.25))
model.add(Conv2D(64, 3, padding='same', kernel_initializer='he_normal', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, 3, padding='same', kernel_initializer='he_normal', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D())
model.add(Dropout(0.25))
model.add(Conv2D(128, 3, padding='same', kernel_initializer='he_normal', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(128, 2, padding='same', kernel_initializer='he_normal', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D())
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1024, activation='relu', kernel_initializer='he_normal'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.summary()
model.add(Dense(len(mlb.classes_), activation='sigmoid'))

checkpoint = ModelCheckpoint('../working/best_model.hdf5', save_best_only=True, monitor='val_loss', verbose=1)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit_generator(datagen.flow(train_x, train_y, batch_size=64),
                              validation_data=(val_x, val_y), epochs=100, verbose=1,
                              callbacks=[checkpoint])
code
122244636/cell_3
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MultiLabelBinarizer
import cv2 as cv
import numpy as np
import os
import re

X = []
Y = []
input_shape = (96, 96, 3)
path_to_subset = '../input/apparel-images-dataset/'
for folder in os.listdir(path_to_subset):
    for image in os.listdir(os.path.join(path_to_subset, folder)):
        path_to_image = os.path.join(path_to_subset, folder, image)
        image = cv.imread(path_to_image)
        image = cv.resize(image, (input_shape[1], input_shape[0]))
        label = re.findall('\\w+\\_\\w+', path_to_image)[0].split('_')
        X.append(image)
        Y.append(label)
X = np.array(X) / 255.0
Y = np.array(Y)
mlb = MultiLabelBinarizer()
Y = mlb.fit_transform(Y)
print(mlb.classes_)
print(Y[0])
code
122244636/cell_5
[ "text_plain_output_56.png", "text_plain_output_35.png", "text_plain_output_43.png", "text_plain_output_37.png", "text_plain_output_5.png", "text_plain_output_48.png", "text_plain_output_30.png", "text_plain_output_15.png", "text_plain_output_9.png", "text_plain_output_44.png", "text_plain_output_40.png", "text_plain_output_31.png", "text_plain_output_20.png", "text_plain_output_4.png", "text_plain_output_13.png", "text_plain_output_52.png", "text_plain_output_45.png", "text_plain_output_14.png", "text_plain_output_32.png", "text_plain_output_29.png", "text_plain_output_58.png", "text_plain_output_49.png", "text_plain_output_27.png", "text_plain_output_54.png", "text_plain_output_10.png", "text_plain_output_6.png", "text_plain_output_57.png", "text_plain_output_24.png", "text_plain_output_21.png", "text_plain_output_47.png", "text_plain_output_25.png", "text_plain_output_18.png", "text_plain_output_50.png", "text_plain_output_36.png", "text_plain_output_3.png", "text_plain_output_22.png", "text_plain_output_38.png", "text_plain_output_7.png", "text_plain_output_16.png", "text_plain_output_8.png", "text_plain_output_26.png", "text_plain_output_41.png", "text_plain_output_34.png", "text_plain_output_42.png", "text_plain_output_53.png", "text_plain_output_23.png", "text_plain_output_51.png", "text_plain_output_28.png", "text_plain_output_2.png", "text_plain_output_1.png", "text_plain_output_33.png", "text_plain_output_39.png", "text_plain_output_55.png", "text_plain_output_19.png", "text_plain_output_17.png", "text_plain_output_11.png", "text_plain_output_12.png", "text_plain_output_46.png" ]
from keras.layers import BatchNormalization, Activation, Dropout
from keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from keras.models import Sequential
from sklearn.preprocessing import MultiLabelBinarizer
import cv2 as cv
import numpy as np
import os
import re

X = []
Y = []
input_shape = (96, 96, 3)
path_to_subset = '../input/apparel-images-dataset/'
for folder in os.listdir(path_to_subset):
    for image in os.listdir(os.path.join(path_to_subset, folder)):
        path_to_image = os.path.join(path_to_subset, folder, image)
        image = cv.imread(path_to_image)
        image = cv.resize(image, (input_shape[1], input_shape[0]))
        label = re.findall('\\w+\\_\\w+', path_to_image)[0].split('_')
        X.append(image)
        Y.append(label)
X = np.array(X) / 255.0
Y = np.array(Y)
mlb = MultiLabelBinarizer()
Y = mlb.fit_transform(Y)

model = Sequential()
model.add(Conv2D(32, 3, padding='same', input_shape=input_shape, kernel_initializer='he_normal', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(3))
model.add(Dropout(0.25))
model.add(Conv2D(64, 3, padding='same', kernel_initializer='he_normal', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(64, 3, padding='same', kernel_initializer='he_normal', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D())
model.add(Dropout(0.25))
model.add(Conv2D(128, 3, padding='same', kernel_initializer='he_normal', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(128, 2, padding='same', kernel_initializer='he_normal', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D())
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(1024, activation='relu', kernel_initializer='he_normal'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.summary()
model.add(Dense(len(mlb.classes_), activation='sigmoid'))
code
49116351/cell_21
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

subscribers = pd.read_csv('../input/daim-hakathon/SUBSCRIBERS_80.csv')
subscribers['subscribed'] = 1
dataset = pd.read_csv('../input/daim-hakathon/COOKIES_80.csv')
dataset1 = pd.merge(dataset, subscribers)
dataset1.count()
dataset2 = dataset1.drop(['subscribed'], axis=1)
dataset = pd.merge(dataset, dataset2, indicator=True, how='outer').query('_merge=="left_only"').drop('_merge', axis=1)
dataset['subscribed'] = 0
data = pd.concat([dataset1, dataset])
data.to_csv('COOKIES_80v2.csv', index=False)
indexes = data[data['numero_page'] == 0].index
data.drop(indexes, inplace=True)
data.isnull().sum()
data['chapitre'].fillna('home_page', inplace=True)
data.isnull().sum()
data['region'].fillna('GR463', inplace=True)
data['pays'].fillna('GC1', inplace=True)
data.isnull().sum()
data = data.drop(['date_heure_visite', 'chapitre', 'page', 'identifiant'], axis=1)
data = pd.get_dummies(data, columns=['rubrique', 'device', 'browser', 'os', 'source'])
data.shape
code
49116351/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

subscribers = pd.read_csv('../input/daim-hakathon/SUBSCRIBERS_80.csv')
subscribers['subscribed'] = 1
dataset = pd.read_csv('../input/daim-hakathon/COOKIES_80.csv')
dataset1 = pd.merge(dataset, subscribers)
dataset1.count()
dataset2 = dataset1.drop(['subscribed'], axis=1)
dataset = pd.merge(dataset, dataset2, indicator=True, how='outer').query('_merge=="left_only"').drop('_merge', axis=1)
dataset['subscribed'] = 0
data = pd.concat([dataset1, dataset])
data.to_csv('COOKIES_80v2.csv', index=False)
indexes = data[data['numero_page'] == 0].index
data.drop(indexes, inplace=True)
data.isnull().sum()
data['chapitre'].fillna('home_page', inplace=True)
data.isnull().sum()
code
49116351/cell_2
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

subscribers = pd.read_csv('../input/daim-hakathon/SUBSCRIBERS_80.csv')
subscribers['subscribed'] = 1
subscribers.head()
code
49116351/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

subscribers = pd.read_csv('../input/daim-hakathon/SUBSCRIBERS_80.csv')
subscribers['subscribed'] = 1
dataset = pd.read_csv('../input/daim-hakathon/COOKIES_80.csv')
dataset1 = pd.merge(dataset, subscribers)
dataset1.count()
dataset2 = dataset1.drop(['subscribed'], axis=1)
dataset = pd.merge(dataset, dataset2, indicator=True, how='outer').query('_merge=="left_only"').drop('_merge', axis=1)
dataset['subscribed'] = 0
data = pd.concat([dataset1, dataset])
data.to_csv('COOKIES_80v2.csv', index=False)
indexes = data[data['numero_page'] == 0].index
data.drop(indexes, inplace=True)
data.isnull().sum()
code
49116351/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

subscribers = pd.read_csv('../input/daim-hakathon/SUBSCRIBERS_80.csv')
subscribers['subscribed'] = 1
dataset = pd.read_csv('../input/daim-hakathon/COOKIES_80.csv')
dataset1 = pd.merge(dataset, subscribers)
dataset1.count()
dataset2 = dataset1.drop(['subscribed'], axis=1)
dataset = pd.merge(dataset, dataset2, indicator=True, how='outer').query('_merge=="left_only"').drop('_merge', axis=1)
dataset['subscribed'] = 0
data = pd.concat([dataset1, dataset])
data.to_csv('COOKIES_80v2.csv', index=False)
indexes = data[data['numero_page'] == 0].index
data.drop(indexes, inplace=True)
data.isnull().sum()
data['chapitre'].fillna('home_page', inplace=True)
data.isnull().sum()
data['region'].fillna('GR463', inplace=True)
data['pays'].fillna('GC1', inplace=True)
data.isnull().sum()
data = data.drop(['date_heure_visite', 'chapitre', 'page', 'identifiant'], axis=1)
data.head()
code
49116351/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
49116351/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

subscribers = pd.read_csv('../input/daim-hakathon/SUBSCRIBERS_80.csv')
subscribers['subscribed'] = 1
dataset = pd.read_csv('../input/daim-hakathon/COOKIES_80.csv')
dataset1 = pd.merge(dataset, subscribers)
dataset1.count()
dataset2 = dataset1.drop(['subscribed'], axis=1)
dataset = pd.merge(dataset, dataset2, indicator=True, how='outer').query('_merge=="left_only"').drop('_merge', axis=1)
dataset['subscribed'] = 0
data = pd.concat([dataset1, dataset])
data.to_csv('COOKIES_80v2.csv', index=False)
indexes = data[data['numero_page'] == 0].index
data.drop(indexes, inplace=True)
data.isnull().sum()
data['chapitre'].fillna('home_page', inplace=True)
data.isnull().sum()
data['region'].fillna('GR463', inplace=True)
data['pays'].fillna('GC1', inplace=True)
data.isnull().sum()
data = data.drop(['date_heure_visite', 'chapitre', 'page', 'identifiant'], axis=1)
data.head()
code
49116351/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

subscribers = pd.read_csv('../input/daim-hakathon/SUBSCRIBERS_80.csv')
subscribers['subscribed'] = 1
dataset = pd.read_csv('../input/daim-hakathon/COOKIES_80.csv')
dataset1 = pd.merge(dataset, subscribers)
dataset1.count()
dataset2 = dataset1.drop(['subscribed'], axis=1)
dataset = pd.merge(dataset, dataset2, indicator=True, how='outer').query('_merge=="left_only"').drop('_merge', axis=1)
dataset['subscribed'] = 0
data = pd.concat([dataset1, dataset])
data['subscribed'].value_counts()
code
49116351/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

subscribers = pd.read_csv('../input/daim-hakathon/SUBSCRIBERS_80.csv')
subscribers['subscribed'] = 1
dataset = pd.read_csv('../input/daim-hakathon/COOKIES_80.csv')
dataset1 = pd.merge(dataset, subscribers)
dataset1.count()
dataset2 = dataset1.drop(['subscribed'], axis=1)
dataset = pd.merge(dataset, dataset2, indicator=True, how='outer').query('_merge=="left_only"').drop('_merge', axis=1)
dataset['subscribed'] = 0
data = pd.concat([dataset1, dataset])
data.to_csv('COOKIES_80v2.csv', index=False)
indexes = data[data['numero_page'] == 0].index
data.drop(indexes, inplace=True)
data.isnull().sum()
data['chapitre'].fillna('home_page', inplace=True)
data.isnull().sum()
data['region'].fillna('GR463', inplace=True)
data['pays'].fillna('GC1', inplace=True)
data.isnull().sum()
data['browser'] = data['browser'].str.split(' ').str[0]
data['browser'].value_counts()
code
49116351/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

subscribers = pd.read_csv('../input/daim-hakathon/SUBSCRIBERS_80.csv')
subscribers['subscribed'] = 1
dataset = pd.read_csv('../input/daim-hakathon/COOKIES_80.csv')
dataset1 = pd.merge(dataset, subscribers)
dataset1.count()
dataset2 = dataset1.drop(['subscribed'], axis=1)
dataset = pd.merge(dataset, dataset2, indicator=True, how='outer').query('_merge=="left_only"').drop('_merge', axis=1)
dataset['subscribed'] = 0
data = pd.concat([dataset1, dataset])
data.to_csv('COOKIES_80v2.csv', index=False)
indexes = data[data['numero_page'] == 0].index
data.drop(indexes, inplace=True)
data.isnull().sum()
data['chapitre'].fillna('home_page', inplace=True)
data.isnull().sum()
data['region'].fillna('GR463', inplace=True)
data['pays'].fillna('GC1', inplace=True)
data.isnull().sum()
data['os'] = data['os'].str.split(' ').str[0]
data['os'].value_counts()
code
49116351/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

subscribers = pd.read_csv('../input/daim-hakathon/SUBSCRIBERS_80.csv')
subscribers['subscribed'] = 1
dataset = pd.read_csv('../input/daim-hakathon/COOKIES_80.csv')
dataset1 = pd.merge(dataset, subscribers)
dataset1.count()
dataset2 = dataset1.drop(['subscribed'], axis=1)
dataset = pd.merge(dataset, dataset2, indicator=True, how='outer').query('_merge=="left_only"').drop('_merge', axis=1)
dataset['subscribed'] = 0
data = pd.concat([dataset1, dataset])
data.to_csv('COOKIES_80v2.csv', index=False)
indexes = data[data['numero_page'] == 0].index
data.drop(indexes, inplace=True)
data.isnull().sum()
data['chapitre'].fillna('home_page', inplace=True)
data.isnull().sum()
data['region'].fillna('GR463', inplace=True)
data['pays'].fillna('GC1', inplace=True)
data.isnull().sum()
data.head(5)
code
49116351/cell_14
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

subscribers = pd.read_csv('../input/daim-hakathon/SUBSCRIBERS_80.csv')
subscribers['subscribed'] = 1
dataset = pd.read_csv('../input/daim-hakathon/COOKIES_80.csv')
dataset1 = pd.merge(dataset, subscribers)
dataset1.count()
dataset2 = dataset1.drop(['subscribed'], axis=1)
dataset = pd.merge(dataset, dataset2, indicator=True, how='outer').query('_merge=="left_only"').drop('_merge', axis=1)
dataset['subscribed'] = 0
data = pd.concat([dataset1, dataset])
data.to_csv('COOKIES_80v2.csv', index=False)
indexes = data[data['numero_page'] == 0].index
data.drop(indexes, inplace=True)
data.isnull().sum()
data['chapitre'].fillna('home_page', inplace=True)
data.isnull().sum()
data['region'].fillna('GR463', inplace=True)
data['pays'].fillna('GC1', inplace=True)
data.isnull().sum()
code
49116351/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

subscribers = pd.read_csv('../input/daim-hakathon/SUBSCRIBERS_80.csv')
subscribers['subscribed'] = 1
dataset = pd.read_csv('../input/daim-hakathon/COOKIES_80.csv')
dataset1 = pd.merge(dataset, subscribers)
dataset1.count()
dataset2 = dataset1.drop(['subscribed'], axis=1)
dataset = pd.merge(dataset, dataset2, indicator=True, how='outer').query('_merge=="left_only"').drop('_merge', axis=1)
dataset['subscribed'] = 0
data = pd.concat([dataset1, dataset])
data.to_csv('COOKIES_80v2.csv', index=False)
data.head()
code
49116351/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

subscribers = pd.read_csv('../input/daim-hakathon/SUBSCRIBERS_80.csv')
subscribers['subscribed'] = 1
dataset = pd.read_csv('../input/daim-hakathon/COOKIES_80.csv')
dataset1 = pd.merge(dataset, subscribers)
dataset1.count()
code
325654/cell_4
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import csv as csv
import matplotlib.pyplot as plt
import seaborn as sns
from time import time

train = pd.read_csv('../input/train.csv', dtype={'Age': np.float64})
test = pd.read_csv('../input/test.csv', dtype={'Age': np.float64})
data = np.array(train)
test_data = np.array(test)
# astype(float): the np.float alias was removed in recent NumPy releases
number_passengers = np.size(data[0:, 1].astype(float))
number_survived = np.sum(data[0:, 1].astype(float))
proportion_survivors = number_survived / number_passengers
women_only_stats = data[0:, 4] == 'female'
men_only_stats = data[0:, 4] != 'female'
women_onboard = data[women_only_stats, 1].astype(float)
men_onboard = data[men_only_stats, 1].astype(float)
proportion_women_survived = np.sum(women_onboard) / np.size(women_onboard)
proportion_men_survived = np.sum(men_onboard) / np.size(men_onboard)
print('Proportion of women who survived is %s' % proportion_women_survived)
print('Proportion of men who survived is %s' % proportion_men_survived)
code
129019305/cell_13
[ "text_plain_output_1.png" ]
from matplotlib.colors import ListedColormap
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as mtp
import numpy as np

sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)
classifier = GaussianNB()
classifier.fit(x_train, y_train)
y_pred = classifier.predict(x_test)
x_set, y_set = (x_train, y_train)
X1, X2 = np.meshgrid(np.arange(start=x_set[:, 0].min() - 1, stop=x_set[:, 0].max() + 1, step=0.01),
                     np.arange(start=x_set[:, 1].min() - 1, stop=x_set[:, 1].max() + 1, step=0.01))
mtp.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha=0.75, cmap=ListedColormap(('purple', 'green')))
mtp.xlim(X1.min(), X1.max())
mtp.ylim(X2.min(), X2.max())
x_set, y_set = (x_test, y_test)
X1, X2 = np.meshgrid(np.arange(start=x_set[:, 0].min() - 1, stop=x_set[:, 0].max() + 1, step=0.01),
                     np.arange(start=x_set[:, 1].min() - 1, stop=x_set[:, 1].max() + 1, step=0.01))
mtp.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha=0.75, cmap=ListedColormap(('purple', 'green')))
mtp.xlim(X1.min(), X1.max())
mtp.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    mtp.scatter(x_set[y_set == j, 0], x_set[y_set == j, 1], c=ListedColormap(('purple', 'green'))(i), label=j)
mtp.title('Naive Bayes (test set)')
mtp.xlabel('Age')
mtp.ylabel('Estimated Salary')
mtp.legend()
mtp.show()
code
129019305/cell_11
[ "text_html_output_1.png" ]
from sklearn.metrics import confusion_matrix
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler

sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)
classifier = GaussianNB()
classifier.fit(x_train, y_train)
y_pred = classifier.predict(x_test)
cm = confusion_matrix(y_test, y_pred)
cm
code
129019305/cell_8
[ "image_output_1.png" ]
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler

sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)
classifier = GaussianNB()
classifier.fit(x_train, y_train)
code
129019305/cell_10
[ "text_html_output_1.png" ]
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler
import pandas as pd

dataset = pd.read_csv('/kaggle/input/user-data/User_Data.csv')
x = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)
classifier = GaussianNB()
classifier.fit(x_train, y_train)
y_pred = classifier.predict(x_test)
pd.DataFrame(y_pred, y_test).head(20)
code
129019305/cell_12
[ "text_html_output_1.png" ]
from matplotlib.colors import ListedColormap
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as mtp
import numpy as np

sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)
classifier = GaussianNB()
classifier.fit(x_train, y_train)
y_pred = classifier.predict(x_test)
x_set, y_set = (x_train, y_train)
X1, X2 = np.meshgrid(np.arange(start=x_set[:, 0].min() - 1, stop=x_set[:, 0].max() + 1, step=0.01),
                     np.arange(start=x_set[:, 1].min() - 1, stop=x_set[:, 1].max() + 1, step=0.01))
mtp.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha=0.75, cmap=ListedColormap(('purple', 'green')))
mtp.xlim(X1.min(), X1.max())
mtp.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    mtp.scatter(x_set[y_set == j, 0], x_set[y_set == j, 1], c=ListedColormap(('purple', 'green'))(i), label=j)
mtp.title('Naive Bayes (Training set)')
mtp.xlabel('Age')
mtp.ylabel('Estimated Salary')
mtp.legend()
mtp.show()
code
129019305/cell_5
[ "image_output_1.png" ]
import pandas as pd

dataset = pd.read_csv('/kaggle/input/user-data/User_Data.csv')
x = dataset.iloc[:, [2, 3]].values
y = dataset.iloc[:, 4].values
dataset.head()
code
320410/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

Y_train = pd.read_csv('../input/genderclassmodel.csv')
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
_train = train.copy()
Y_train = _train['Survived'].copy()
_train.drop(['Name', 'Ticket', 'Cabin', 'Survived'], axis=1, inplace=True)
_train.fillna(-1, inplace=True)  # numeric sentinel keeps numeric columns numeric
_train.replace('male', 1, inplace=True)
_train.replace('female', 2, inplace=True)
_train.replace('C', 1, inplace=True)
_train.replace('Q', 2, inplace=True)
_train.replace('S', 3, inplace=True)
_test = test.copy()
_test.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
_test.fillna(-1, inplace=True)
_test.replace('male', 1, inplace=True)
_test.replace('female', 2, inplace=True)
_test.replace('C', 1, inplace=True)
_test.replace('Q', 2, inplace=True)
_test.replace('S', 3, inplace=True)
passenger_survived = train[train.Survived == 1]['PassengerId'].copy()
passenger_died = train[train.Survived == 0]['PassengerId'].copy()
_train_survived = _train[_train.PassengerId.isin(passenger_survived)].drop('PassengerId', axis=1)
_train_survived.plot(kind='bar')
code
320410/cell_2
[ "text_plain_output_1.png" ]
from subprocess import check_output
from IPython.display import display
from sklearn.ensemble import RandomForestClassifier
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import sklearn

print(check_output(['ls', '../input']).decode('utf8'))
code
320410/cell_7
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

Y_train = pd.read_csv('../input/genderclassmodel.csv')
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
_train = train.copy()
Y_train = _train['Survived'].copy()
_train.drop(['Name', 'Ticket', 'Cabin', 'Survived'], axis=1, inplace=True)
_train.fillna(-1, inplace=True)  # numeric sentinel keeps numeric columns numeric
_train.replace('male', 1, inplace=True)
_train.replace('female', 2, inplace=True)
_train.replace('C', 1, inplace=True)
_train.replace('Q', 2, inplace=True)
_train.replace('S', 3, inplace=True)
_test = test.copy()
_test.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
_test.fillna(-1, inplace=True)
_test.replace('male', 1, inplace=True)
_test.replace('female', 2, inplace=True)
_test.replace('C', 1, inplace=True)
_test.replace('Q', 2, inplace=True)
_test.replace('S', 3, inplace=True)
passenger_survived = train[train.Survived == 1]['PassengerId'].copy()
passenger_died = train[train.Survived == 0]['PassengerId'].copy()
_train_survived = _train[_train.PassengerId.isin(passenger_survived)].drop('PassengerId', axis=1)
model = RandomForestClassifier()
model.fit(_train, Y_train)
code
320410/cell_8
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

Y_train = pd.read_csv('../input/genderclassmodel.csv')
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
_train = train.copy()
Y_train = _train['Survived'].copy()
_train.drop(['Name', 'Ticket', 'Cabin', 'Survived'], axis=1, inplace=True)
_train.fillna('-1', inplace=True)
_train.replace('male', 1, inplace=True)
_train.replace('female', 2, inplace=True)
_train.replace('C', 1, inplace=True)
_train.replace('Q', 2, inplace=True)
_train.replace('S', 3, inplace=True)
_test = test.copy()
_test.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)
_test.fillna('-1', inplace=True)
_test.replace('male', 1, inplace=True)
_test.replace('female', 2, inplace=True)
_test.replace('C', 1, inplace=True)
_test.replace('Q', 2, inplace=True)
_test.replace('S', 3, inplace=True)
passenger_survived = train[train.Survived == 1]['PassengerId'].copy()
passenger_died = train[train.Survived == 0]['PassengerId'].copy()
_train_survived = _train[_train.PassengerId.isin(passenger_survived)].drop('PassengerId', axis=1)
model = RandomForestClassifier()
model.fit(_train, Y_train)
predicted = test[['PassengerId']].copy()
predicted['Survived'] = model.predict(_test)
print(predicted.to_csv(index=False))
code
320410/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

Y_train = pd.read_csv('../input/genderclassmodel.csv')
test = pd.read_csv('../input/test.csv')
train = pd.read_csv('../input/train.csv')
print('test set has %s rows.' % test.shape[0])
print('train set has %s rows.' % train.shape[0])
code
88105099/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data_path = '../input/nuclio10-dsc-1121/sales_train_merged.csv'
df = pd.read_csv(data_path, index_col=0)
df.head()
code
105197919/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

choco_data = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
choco_data2 = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
len(choco_data)
choco_data.sample(5)
nullValues = choco_data.isnull().sum()
nullValues[0:12]
choco_data = choco_data.drop(['num_ingredients', 'ingredients'], axis=1)
nullValues = choco_data.isnull().sum()
choco_data4 = choco_data
one_hot = pd.get_dummies(choco_data4, columns=['company_location'], drop_first=False)
one_hot.head()
code
105197919/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

choco_data = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
choco_data2 = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
len(choco_data)
choco_data.sample(5)
code
105197919/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

choco_data = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
choco_data2 = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
len(choco_data)
choco_data.sample(5)
nullValues = choco_data.isnull().sum()
nullValues[0:12]
choco_data = choco_data.drop(['num_ingredients', 'ingredients'], axis=1)
nullValues = choco_data.isnull().sum()
choco_data4 = choco_data
choco_data4['company_location'].value_counts()
code
105197919/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

choco_data = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
choco_data2 = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
len(choco_data)
choco_data.sample(5)
nullValues = choco_data.isnull().sum()
nullValues[0:12]
choco_data = choco_data.drop(['num_ingredients', 'ingredients'], axis=1)
nullValues = choco_data.isnull().sum()
choco_data2 = choco_data2.dropna()
nullValues = choco_data2.isnull().sum()
print(nullValues)
code
105197919/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105197919/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

choco_data = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
choco_data2 = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
len(choco_data)
choco_data.sample(5)
nullValues = choco_data.isnull().sum()
nullValues[0:12]
choco_data = choco_data.drop(['num_ingredients', 'ingredients'], axis=1)
nullValues = choco_data.isnull().sum()
print(nullValues)
code
105197919/cell_16
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

choco_data = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
choco_data2 = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
len(choco_data)
choco_data.sample(5)
nullValues = choco_data.isnull().sum()
nullValues[0:12]
choco_data = choco_data.drop(['num_ingredients', 'ingredients'], axis=1)
nullValues = choco_data.isnull().sum()
label_encoder = preprocessing.LabelEncoder()
choco_data3 = choco_data
choco_data3['company_location'] = label_encoder.fit_transform(choco_data3['company_location'])
choco_data3['company_location'].unique()
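# --- Added sketch (assumption): recover the label <-> integer mapping so the
# encoded column stays interpretable.
mapping = dict(zip(label_encoder.classes_,
                   label_encoder.transform(label_encoder.classes_)))
print(mapping)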
code
105197919/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

choco_data = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
choco_data2 = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
len(choco_data)
code
105197919/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

choco_data = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
choco_data2 = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
len(choco_data)
choco_data.sample(5)
nullValues = choco_data.isnull().sum()
nullValues[0:12]
choco_data = choco_data.drop(['num_ingredients', 'ingredients'], axis=1)
nullValues = choco_data.isnull().sum()
choco_data3 = choco_data
choco_data3.head()
code
105197919/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

choco_data = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
choco_data2 = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
len(choco_data)
choco_data.sample(5)
nullValues = choco_data.isnull().sum()
nullValues[0:12]
choco_data = choco_data.drop(['num_ingredients', 'ingredients'], axis=1)
nullValues = choco_data.isnull().sum()
choco_data4 = choco_data
one_hot = pd.get_dummies(choco_data4, columns=['company_location'], drop_first=False)
choco_data_custom = ['brown', 'toblerone', 'perk', 'mars', 'mars', 'chocho', 'london', 'perk', 'brown']
print(pd.get_dummies(choco_data_custom, drop_first=True))
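# --- Added note (assumption about intent): drop_first=True drops one dummy
# column per category to avoid perfect multicollinearity (the "dummy variable
# trap") in linear models; tree-based models generally work fine without it.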
code
105197919/cell_14
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

choco_data = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
choco_data2 = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
len(choco_data)
choco_data.sample(5)
nullValues = choco_data.isnull().sum()
nullValues[0:12]
choco_data = choco_data.drop(['num_ingredients', 'ingredients'], axis=1)
nullValues = choco_data.isnull().sum()
for columns in choco_data.columns:
    print(columns, len(choco_data[columns].unique()))
code
105197919/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

choco_data = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
choco_data2 = pd.read_csv('/kaggle/input/chocolate-bar-ratings/chocolate_bars.csv')
len(choco_data)
choco_data.sample(5)
nullValues = choco_data.isnull().sum()
nullValues[0:12]
code
18116618/cell_21
[ "image_output_1.png" ]
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
housing = df.copy()
corr_matrix = housing.corr()
attributes = corr_matrix['SalePrice'].sort_values(ascending=False)
attributes
scatter_matrix(housing[attributes[:6].index], figsize=(20, 20))
code
18116618/cell_34
[ "text_plain_output_1.png" ]
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
housing = df.copy()
corr_matrix = housing.corr()
attributes = corr_matrix['SalePrice'].sort_values(ascending=False)
attributes
top10 = attributes[1:11]
attributes[:11]
housing = df.drop('SalePrice', axis=1)
housing_labels = df['SalePrice'].copy()
top10 = list(top10.index)
housing = housing[top10]
full_pipeline = Pipeline([('imputer', SimpleImputer(strategy='median')),
                          ('std_scaler', StandardScaler())])
housing_prepared = full_pipeline.fit_transform(housing)
housing_prepared
pd.DataFrame(housing_prepared, columns=housing.columns, index=housing.index).head()
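# --- Added note (assumption): when scoring new data (e.g. a held-out set,
# here a hypothetical new_df), reuse the already-fitted pipeline with
# transform(), not fit_transform(), so the training medians and scaling
# statistics are applied unchanged:
# new_prepared = full_pipeline.transform(new_df[top10])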
code
18116618/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
housing = df.copy()
corr_matrix = housing.corr()
housing.plot(kind='scatter', x='GrLivArea', y='SalePrice', alpha=0.1)
code
18116618/cell_30
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
housing = df.copy()
corr_matrix = housing.corr()
attributes = corr_matrix['SalePrice'].sort_values(ascending=False)
attributes
top10 = attributes[1:11]
attributes[:11]
housing = df.drop('SalePrice', axis=1)
housing_labels = df['SalePrice'].copy()
top10 = list(top10.index)
housing = housing[top10]
housing.describe()
code
18116618/cell_44
[ "text_plain_output_1.png" ]
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
housing = df.copy()
corr_matrix = housing.corr()
attributes = corr_matrix['SalePrice'].sort_values(ascending=False)
attributes
top10 = attributes[1:11]
attributes[:11]
housing = df.drop('SalePrice', axis=1)
housing_labels = df['SalePrice'].copy()
top10 = list(top10.index)
housing = housing[top10]
full_pipeline = Pipeline([('imputer', SimpleImputer(strategy='median')),
                          ('std_scaler', StandardScaler())])
housing_prepared = full_pipeline.fit_transform(housing)
housing_prepared
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)
some_data = housing.iloc[:5]
some_labels = housing_labels.iloc[:5]
some_data_prepared = full_pipeline.transform(some_data)
housing_predictions = lin_reg.predict(housing_prepared)
lin_mse = mean_squared_error(housing_labels, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
lin_rmse
tree_reg = DecisionTreeRegressor()
tree_reg.fit(housing_prepared, housing_labels)
housing_predictions = tree_reg.predict(housing_prepared)
tree_mse = mean_squared_error(housing_labels, housing_predictions)
tree_rmse = np.sqrt(tree_mse)
tree_rmse
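# --- Added sketch (assumption): a training-set RMSE near zero for the tree
# usually signals overfitting; cross-validation gives a more honest estimate.
from sklearn.model_selection import cross_val_score
scores = cross_val_score(tree_reg, housing_prepared, housing_labels,
                         scoring='neg_mean_squared_error', cv=10)
tree_rmse_cv = np.sqrt(-scores)
print(tree_rmse_cv.mean(), tree_rmse_cv.std())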
code
18116618/cell_6
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
df.info()
code
18116618/cell_39
[ "text_html_output_1.png" ]
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
housing = df.copy()
corr_matrix = housing.corr()
attributes = corr_matrix['SalePrice'].sort_values(ascending=False)
attributes
top10 = attributes[1:11]
attributes[:11]
housing = df.drop('SalePrice', axis=1)
housing_labels = df['SalePrice'].copy()
top10 = list(top10.index)
housing = housing[top10]
full_pipeline = Pipeline([('imputer', SimpleImputer(strategy='median')),
                          ('std_scaler', StandardScaler())])
housing_prepared = full_pipeline.fit_transform(housing)
housing_prepared
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)
some_data = housing.iloc[:5]
some_labels = housing_labels.iloc[:5]
some_data_prepared = full_pipeline.transform(some_data)
print('Predictions: ', lin_reg.predict(some_data_prepared))
print('Labels: ', list(some_labels))
code
18116618/cell_26
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
housing = df.copy()
corr_matrix = housing.corr()
housing = df.drop('SalePrice', axis=1)
housing_labels = df['SalePrice'].copy()
housing.head()
code
18116618/cell_41
[ "text_html_output_1.png" ]
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
housing = df.copy()
corr_matrix = housing.corr()
attributes = corr_matrix['SalePrice'].sort_values(ascending=False)
attributes
top10 = attributes[1:11]
attributes[:11]
housing = df.drop('SalePrice', axis=1)
housing_labels = df['SalePrice'].copy()
top10 = list(top10.index)
housing = housing[top10]
full_pipeline = Pipeline([('imputer', SimpleImputer(strategy='median')),
                          ('std_scaler', StandardScaler())])
housing_prepared = full_pipeline.fit_transform(housing)
housing_prepared
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)
some_data = housing.iloc[:5]
some_labels = housing_labels.iloc[:5]
some_data_prepared = full_pipeline.transform(some_data)
housing_predictions = lin_reg.predict(housing_prepared)
lin_mse = mean_squared_error(housing_labels, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
lin_rmse
code
18116618/cell_19
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
housing = df.copy()
corr_matrix = housing.corr()
attributes = corr_matrix['SalePrice'].sort_values(ascending=False)
attributes
top10 = attributes[1:11]
attributes[:11]
code
18116618/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
housing = df.copy()
corr_matrix = housing.corr()
attributes = corr_matrix['SalePrice'].sort_values(ascending=False)
attributes
code
18116618/cell_32
[ "text_html_output_1.png" ]
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
housing = df.copy()
corr_matrix = housing.corr()
attributes = corr_matrix['SalePrice'].sort_values(ascending=False)
attributes
top10 = attributes[1:11]
attributes[:11]
housing = df.drop('SalePrice', axis=1)
housing_labels = df['SalePrice'].copy()
top10 = list(top10.index)
housing = housing[top10]
full_pipeline = Pipeline([('imputer', SimpleImputer(strategy='median')),
                          ('std_scaler', StandardScaler())])
housing_prepared = full_pipeline.fit_transform(housing)
housing_prepared
code
18116618/cell_8
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
df.describe()
code
18116618/cell_35
[ "text_html_output_1.png" ]
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
housing = df.copy()
corr_matrix = housing.corr()
attributes = corr_matrix['SalePrice'].sort_values(ascending=False)
attributes
top10 = attributes[1:11]
attributes[:11]
housing = df.drop('SalePrice', axis=1)
housing_labels = df['SalePrice'].copy()
top10 = list(top10.index)
housing = housing[top10]
full_pipeline = Pipeline([('imputer', SimpleImputer(strategy='median')),
                          ('std_scaler', StandardScaler())])
housing_prepared = full_pipeline.fit_transform(housing)
housing_prepared
housing.head()
code
18116618/cell_43
[ "text_plain_output_1.png" ]
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
housing = df.copy()
corr_matrix = housing.corr()
attributes = corr_matrix['SalePrice'].sort_values(ascending=False)
attributes
top10 = attributes[1:11]
attributes[:11]
housing = df.drop('SalePrice', axis=1)
housing_labels = df['SalePrice'].copy()
top10 = list(top10.index)
housing = housing[top10]
full_pipeline = Pipeline([('imputer', SimpleImputer(strategy='median')),
                          ('std_scaler', StandardScaler())])
housing_prepared = full_pipeline.fit_transform(housing)
housing_prepared
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)
some_data = housing.iloc[:5]
some_labels = housing_labels.iloc[:5]
some_data_prepared = full_pipeline.transform(some_data)
tree_reg = DecisionTreeRegressor()
tree_reg.fit(housing_prepared, housing_labels)
code
18116618/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
df.hist(bins=50, figsize=(30, 20))
plt.show()
code
18116618/cell_27
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
housing = df.copy()
housing = df.drop('SalePrice', axis=1)
housing_labels = df['SalePrice'].copy()
housing_labels.head()
code
18116618/cell_37
[ "text_plain_output_1.png" ]
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
housing = df.copy()
corr_matrix = housing.corr()
attributes = corr_matrix['SalePrice'].sort_values(ascending=False)
attributes
top10 = attributes[1:11]
attributes[:11]
housing = df.drop('SalePrice', axis=1)
housing_labels = df['SalePrice'].copy()
top10 = list(top10.index)
housing = housing[top10]
full_pipeline = Pipeline([('imputer', SimpleImputer(strategy='median')),
                          ('std_scaler', StandardScaler())])
housing_prepared = full_pipeline.fit_transform(housing)
housing_prepared
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)
code
18116618/cell_5
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
df.head()
code
34141752/cell_9
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
title_list = ['Mrs', 'Mr', 'Master', 'Miss', 'Major', 'Rev', 'Dr', 'Ms', 'Mlle', 'Col',
              'Capt', 'Mme', 'Countess', 'Don', 'Jonkheer', 'Sir']

def substrings_in_string(big_string, substrings):
    # Return the first substring found in big_string, else NaN.
    for substring in substrings:
        if re.search(substring, big_string):
            return substring
    return np.nan

train['Title'] = train['Name'].map(lambda x: substrings_in_string(x, title_list))
test['Title'] = test['Name'].map(lambda x: substrings_in_string(x, title_list))

def replace_titles(x):
    # Collapse rare titles into Mr/Mrs/Miss/Master.
    title = x['Title']
    if title in ['Don', 'Major', 'Capt', 'Jonkheer', 'Rev', 'Col', 'Sir']:
        return 'Mr'
    elif title in ['Countess', 'Mme']:
        return 'Mrs'
    elif title in ['Mlle', 'Ms']:
        return 'Miss'
    elif title == 'Dr':
        if x['Sex'] == 'male':  # Sex values are lowercase in this dataset
            return 'Mr'
        else:
            return 'Mrs'
    else:
        return title

train['Title'] = train.apply(replace_titles, axis=1)
test['Title'] = test.apply(replace_titles, axis=1)
cabin_list = ['A', 'B', 'C', 'D', 'E', 'F', 'T', 'G', 'Unknown']
train = train.fillna(value={'Cabin': 'Unknown'})
test = test.fillna(value={'Cabin': 'Unknown'})
train['Cabin'] = train['Cabin'].map(lambda x: substrings_in_string(x, cabin_list))
test['Cabin'] = test['Cabin'].map(lambda x: substrings_in_string(x, cabin_list))
sns.distplot(train[train['Sex'] == 'female']['Age'], color='pink', bins=15)
sns.distplot(train[train['Sex'] == 'male']['Age'], color='blue', bins=15, hist_kws={'alpha': 0.2})
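# --- Added note (assumption): seaborn.distplot is deprecated since seaborn 0.11;
# an equivalent with the current API would look like:
# sns.histplot(train.loc[train['Sex'] == 'female', 'Age'].dropna(), color='pink', bins=15, kde=True)
# sns.histplot(train.loc[train['Sex'] == 'male', 'Age'].dropna(), color='blue', bins=15, kde=True)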
code
34141752/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.info()
train.describe()
code
34141752/cell_11
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
title_list = ['Mrs', 'Mr', 'Master', 'Miss', 'Major', 'Rev', 'Dr', 'Ms', 'Mlle', 'Col',
              'Capt', 'Mme', 'Countess', 'Don', 'Jonkheer', 'Sir']

def substrings_in_string(big_string, substrings):
    # Return the first substring found in big_string, else NaN.
    for substring in substrings:
        if re.search(substring, big_string):
            return substring
    return np.nan

train['Title'] = train['Name'].map(lambda x: substrings_in_string(x, title_list))
test['Title'] = test['Name'].map(lambda x: substrings_in_string(x, title_list))

def replace_titles(x):
    # Collapse rare titles into Mr/Mrs/Miss/Master.
    title = x['Title']
    if title in ['Don', 'Major', 'Capt', 'Jonkheer', 'Rev', 'Col', 'Sir']:
        return 'Mr'
    elif title in ['Countess', 'Mme']:
        return 'Mrs'
    elif title in ['Mlle', 'Ms']:
        return 'Miss'
    elif title == 'Dr':
        if x['Sex'] == 'male':  # Sex values are lowercase in this dataset
            return 'Mr'
        else:
            return 'Mrs'
    else:
        return title

train['Title'] = train.apply(replace_titles, axis=1)
test['Title'] = test.apply(replace_titles, axis=1)
cabin_list = ['A', 'B', 'C', 'D', 'E', 'F', 'T', 'G', 'Unknown']
train = train.fillna(value={'Cabin': 'Unknown'})
test = test.fillna(value={'Cabin': 'Unknown'})
train['Cabin'] = train['Cabin'].map(lambda x: substrings_in_string(x, cabin_list))
test['Cabin'] = test['Cabin'].map(lambda x: substrings_in_string(x, cabin_list))
corre = train.corr()
features = ['Pclass', 'Age', 'SibSp', 'Parch', 'Fare', 'Sex_bin', 'Family_Size']
train[features].info()
y_train = train['Survived']
x_train = train[features]
lr = LogisticRegression()
lr.fit(x_train, y_train)
print('The Score for Titanic is {:.3f}'.format(lr.score(x_train, y_train)))
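# --- Added sketch (assumption): the training-set score above is optimistic;
# k-fold cross-validation gives a less biased accuracy estimate.
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(lr, x_train, y_train, cv=5)
print('CV accuracy: {:.3f} +/- {:.3f}'.format(cv_scores.mean(), cv_scores.std()))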
code
34141752/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34141752/cell_8
[ "image_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import re

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
title_list = ['Mrs', 'Mr', 'Master', 'Miss', 'Major', 'Rev', 'Dr', 'Ms', 'Mlle', 'Col',
              'Capt', 'Mme', 'Countess', 'Don', 'Jonkheer', 'Sir']

def substrings_in_string(big_string, substrings):
    # Return the first substring found in big_string, else NaN.
    for substring in substrings:
        if re.search(substring, big_string):
            return substring
    return np.nan

train['Title'] = train['Name'].map(lambda x: substrings_in_string(x, title_list))
test['Title'] = test['Name'].map(lambda x: substrings_in_string(x, title_list))

def replace_titles(x):
    # Collapse rare titles into Mr/Mrs/Miss/Master.
    title = x['Title']
    if title in ['Don', 'Major', 'Capt', 'Jonkheer', 'Rev', 'Col', 'Sir']:
        return 'Mr'
    elif title in ['Countess', 'Mme']:
        return 'Mrs'
    elif title in ['Mlle', 'Ms']:
        return 'Miss'
    elif title == 'Dr':
        if x['Sex'] == 'male':  # Sex values are lowercase in this dataset
            return 'Mr'
        else:
            return 'Mrs'
    else:
        return title

train['Title'] = train.apply(replace_titles, axis=1)
test['Title'] = test.apply(replace_titles, axis=1)
cabin_list = ['A', 'B', 'C', 'D', 'E', 'F', 'T', 'G', 'Unknown']
train = train.fillna(value={'Cabin': 'Unknown'})
test = test.fillna(value={'Cabin': 'Unknown'})
train['Cabin'] = train['Cabin'].map(lambda x: substrings_in_string(x, cabin_list))
test['Cabin'] = test['Cabin'].map(lambda x: substrings_in_string(x, cabin_list))
train['Family_Size'] = train['SibSp'] + train['Parch']
test['Family_Size'] = test['SibSp'] + test['Parch']
train['Age'] = train['Age'].fillna(np.mean(train['Age']))
print(train)
code
34141752/cell_3
[ "text_plain_output_1.png" ]
train.head(5)
code
34141752/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import re
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
title_list = ['Mrs', 'Mr', 'Master', 'Miss', 'Major', 'Rev', 'Dr', 'Ms', 'Mlle', 'Col',
              'Capt', 'Mme', 'Countess', 'Don', 'Jonkheer', 'Sir']

def substrings_in_string(big_string, substrings):
    # Return the first substring found in big_string, else NaN.
    for substring in substrings:
        if re.search(substring, big_string):
            return substring
    return np.nan

train['Title'] = train['Name'].map(lambda x: substrings_in_string(x, title_list))
test['Title'] = test['Name'].map(lambda x: substrings_in_string(x, title_list))

def replace_titles(x):
    # Collapse rare titles into Mr/Mrs/Miss/Master.
    title = x['Title']
    if title in ['Don', 'Major', 'Capt', 'Jonkheer', 'Rev', 'Col', 'Sir']:
        return 'Mr'
    elif title in ['Countess', 'Mme']:
        return 'Mrs'
    elif title in ['Mlle', 'Ms']:
        return 'Miss'
    elif title == 'Dr':
        if x['Sex'] == 'male':  # Sex values are lowercase in this dataset
            return 'Mr'
        else:
            return 'Mrs'
    else:
        return title

train['Title'] = train.apply(replace_titles, axis=1)
test['Title'] = test.apply(replace_titles, axis=1)
cabin_list = ['A', 'B', 'C', 'D', 'E', 'F', 'T', 'G', 'Unknown']
train = train.fillna(value={'Cabin': 'Unknown'})
test = test.fillna(value={'Cabin': 'Unknown'})
train['Cabin'] = train['Cabin'].map(lambda x: substrings_in_string(x, cabin_list))
test['Cabin'] = test['Cabin'].map(lambda x: substrings_in_string(x, cabin_list))
corre = train.corr()
sns.heatmap(corre, annot=True)
plt.show()
code
105200129/cell_21
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/chocolate-bar-ratings/chocolate_bars.csv')
data.isnull().sum()
data.isnull().sum()
En = LabelEncoder()
Enco = En.fit_transform(data['bean_origin'])  # integer-encode the bean_origin column
data.drop('bean_origin', axis=1, inplace=True)
data['bean_origin'] = Enco
data.isnull().sum()
code