path: stringlengths (13 to 17)
screenshot_names: sequencelengths (1 to 873)
code: stringlengths (0 to 40.4k)
cell_type: stringclasses (1 value)
73082264/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/broadband-customers-base-churn-analysis/bbs_cust_base_scfy_20200210.csv')
df = data.copy()
df
df.drop('Unnamed: 19', axis=1, inplace=True)
df.churn.replace('N', '0', inplace=True)
df.churn.replace('Y', '1', inplace=True)
df.current_mth_churn.replace('N', '0', inplace=True)
df.current_mth_churn.replace('Y', '1', inplace=True)
df.head()
code
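A more compact equivalent of the Y/N replacement above, as a hedged sketch (same column names, assuming the columns only contain 'Y'/'N'):

df['churn'] = df['churn'].map({'N': 0, 'Y': 1})  # replaces the two replace() calls plus the later astype(int)
df['current_mth_churn'] = df['current_mth_churn'].map({'N': 0, 'Y': 1})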
73082264/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/broadband-customers-base-churn-analysis/bbs_cust_base_scfy_20200210.csv')
df = data.copy()
df
df.drop('Unnamed: 19', axis=1, inplace=True)
df.churn.replace('N', '0', inplace=True)
df.churn.replace('Y', '1', inplace=True)
df.current_mth_churn.replace('N', '0', inplace=True)
df.current_mth_churn.replace('Y', '1', inplace=True)
df.churn = df.churn.astype(int)
df.current_mth_churn = df.current_mth_churn.astype(int)  # fixed: the original cast df.churn into this column
df.describe().T

def summary(df):
    # Per-column dtype, count, unique and null overview.
    Types = df.dtypes
    Counts = df.apply(lambda x: x.count())
    Uniques = df.apply(lambda x: x.unique().shape[0])
    Nulls = df.apply(lambda x: x.isnull().sum())
    cols = ['Types', 'Counts', 'Uniques', 'Nulls']
    report = pd.concat([Types, Counts, Uniques, Nulls], axis=1, sort=True)  # renamed from `str`, which shadowed the builtin
    report.columns = cols
    return report  # return added so the summary(df) call below displays something (the original returned None)

summary(df)
col = df.columns.to_list()
catcol = [_ for _ in col if df[_].nunique() < 30]
termination_reasion_code = df.term_reas_code.unique()
termination_reasion_code_description = df.term_reas_desc.unique()
termination_reasion = dict(zip(termination_reasion_code, termination_reasion_code_description))
termination_reasion
code
73082264/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/broadband-customers-base-churn-analysis/bbs_cust_base_scfy_20200210.csv')
df = data.copy()
df
code
73082264/cell_17
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/broadband-customers-base-churn-analysis/bbs_cust_base_scfy_20200210.csv')
df = data.copy()
df
df.drop('Unnamed: 19', axis=1, inplace=True)
df.churn.replace('N', '0', inplace=True)
df.churn.replace('Y', '1', inplace=True)
df.current_mth_churn.replace('N', '0', inplace=True)
df.current_mth_churn.replace('Y', '1', inplace=True)
df.churn = df.churn.astype(int)
df.current_mth_churn = df.current_mth_churn.astype(int)  # fixed: the original cast df.churn into this column
df.describe().T

def summary(df):
    Types = df.dtypes
    Counts = df.apply(lambda x: x.count())
    Uniques = df.apply(lambda x: x.unique().shape[0])
    Nulls = df.apply(lambda x: x.isnull().sum())
    cols = ['Types', 'Counts', 'Uniques', 'Nulls']
    report = pd.concat([Types, Counts, Uniques, Nulls], axis=1, sort=True)  # renamed from `str`, which shadowed the builtin
    report.columns = cols
    return report

summary(df)
col = df.columns.to_list()
catcol = [_ for _ in col if df[_].nunique() < 30]
termination_reasion_code = df.term_reas_code.unique()
termination_reasion_code_description = df.term_reas_desc.unique()
termination_reasion = dict(zip(termination_reasion_code, termination_reasion_code_description))
termination_reasion
df.drop(columns=['bill_cycl', 'serv_type', 'serv_code', 'term_reas_desc'], inplace=True)
df.head()
code
73082264/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/broadband-customers-base-churn-analysis/bbs_cust_base_scfy_20200210.csv')
df = data.copy()
df
df.drop('Unnamed: 19', axis=1, inplace=True)
df.churn.replace('N', '0', inplace=True)
df.churn.replace('Y', '1', inplace=True)
df.current_mth_churn.replace('N', '0', inplace=True)
df.current_mth_churn.replace('Y', '1', inplace=True)
df.churn = df.churn.astype(int)
df.current_mth_churn = df.current_mth_churn.astype(int)  # fixed: the original cast df.churn into this column
df.describe().T

def summary(df):
    Types = df.dtypes
    Counts = df.apply(lambda x: x.count())
    Uniques = df.apply(lambda x: x.unique().shape[0])
    Nulls = df.apply(lambda x: x.isnull().sum())
    cols = ['Types', 'Counts', 'Uniques', 'Nulls']
    report = pd.concat([Types, Counts, Uniques, Nulls], axis=1, sort=True)  # renamed from `str`, which shadowed the builtin
    report.columns = cols
    return report

summary(df)
col = df.columns.to_list()
catcol = [_ for _ in col if df[_].nunique() < 30]
for _ in catcol:
    print('{} has {} unique value/s - {}\n'.format(_, df[_].nunique(), df[_].unique()))
code
73082264/cell_12
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/broadband-customers-base-churn-analysis/bbs_cust_base_scfy_20200210.csv')
df = data.copy()
df
df.drop('Unnamed: 19', axis=1, inplace=True)
df.churn.replace('N', '0', inplace=True)
df.churn.replace('Y', '1', inplace=True)
df.current_mth_churn.replace('N', '0', inplace=True)
df.current_mth_churn.replace('Y', '1', inplace=True)
df.churn = df.churn.astype(int)
df.current_mth_churn = df.current_mth_churn.astype(int)  # fixed: the original cast df.churn into this column
df.describe().T

def summary(df):
    Types = df.dtypes
    Counts = df.apply(lambda x: x.count())
    Uniques = df.apply(lambda x: x.unique().shape[0])
    Nulls = df.apply(lambda x: x.isnull().sum())
    cols = ['Types', 'Counts', 'Uniques', 'Nulls']
    report = pd.concat([Types, Counts, Uniques, Nulls], axis=1, sort=True)  # renamed from `str`, which shadowed the builtin
    report.columns = cols
    display(report.sort_values(by='Nulls', ascending=False))
    print('__________Data Types__________\n')
    print(report.Types.value_counts())

summary(df)
code
73082264/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/broadband-customers-base-churn-analysis/bbs_cust_base_scfy_20200210.csv')
df = data.copy()
df
df.drop('Unnamed: 19', axis=1, inplace=True)
print(df.shape)
print(df.ndim)
print(df.size)
code
122260629/cell_9
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_absolute_percentage_error
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/no-ground/fake2no_train.csv')
x = np.array(data[['theta', 'phi', 'power_fakeDB']]).reshape(len(data), 3)
y = np.array(data[['theta', 'phi', 'power_noDB']]).reshape(len(data), 3)
poly_reg = Pipeline([('poly', PolynomialFeatures(degree=9)), ('std_scale', StandardScaler()), ('lin_reg', LinearRegression())])
poly_reg.fit(x, y)
data_1 = pd.read_csv('/kaggle/input/no-ground/fake2no_test.csv')
x_1 = np.array(data_1[['theta', 'phi', 'power_fakeDB']]).reshape(len(data_1), 3)
y_1 = np.array(data_1[['theta', 'phi', 'power_noDB']]).reshape(len(data_1), 3)
y_predict = poly_reg.predict(x_1)
MAE = mean_absolute_error(y_1[:, 2], y_predict[:, 2])
MAPE = mean_absolute_percentage_error(y_1[:, 2], y_predict[:, 2])
y_predict_csv = pd.DataFrame(y_predict)
y_predict_csv.to_csv('y_predict.csv')
y_predict_csv.loc[122:152]
y_predict_csv.loc[1692:1722]
code
122260629/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
122260629/cell_7
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_absolute_percentage_error
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/no-ground/fake2no_train.csv')
x = np.array(data[['theta', 'phi', 'power_fakeDB']]).reshape(len(data), 3)
y = np.array(data[['theta', 'phi', 'power_noDB']]).reshape(len(data), 3)
poly_reg = Pipeline([('poly', PolynomialFeatures(degree=9)), ('std_scale', StandardScaler()), ('lin_reg', LinearRegression())])
poly_reg.fit(x, y)
data_1 = pd.read_csv('/kaggle/input/no-ground/fake2no_test.csv')
x_1 = np.array(data_1[['theta', 'phi', 'power_fakeDB']]).reshape(len(data_1), 3)
y_1 = np.array(data_1[['theta', 'phi', 'power_noDB']]).reshape(len(data_1), 3)
y_predict = poly_reg.predict(x_1)
MAE = mean_absolute_error(y_1[:, 2], y_predict[:, 2])
MAPE = mean_absolute_percentage_error(y_1[:, 2], y_predict[:, 2])
print(MAE)
print(MAPE)
code
122260629/cell_8
[ "text_html_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_absolute_percentage_error
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/no-ground/fake2no_train.csv')
x = np.array(data[['theta', 'phi', 'power_fakeDB']]).reshape(len(data), 3)
y = np.array(data[['theta', 'phi', 'power_noDB']]).reshape(len(data), 3)
poly_reg = Pipeline([('poly', PolynomialFeatures(degree=9)), ('std_scale', StandardScaler()), ('lin_reg', LinearRegression())])
poly_reg.fit(x, y)
data_1 = pd.read_csv('/kaggle/input/no-ground/fake2no_test.csv')
x_1 = np.array(data_1[['theta', 'phi', 'power_fakeDB']]).reshape(len(data_1), 3)
y_1 = np.array(data_1[['theta', 'phi', 'power_noDB']]).reshape(len(data_1), 3)
y_predict = poly_reg.predict(x_1)
MAE = mean_absolute_error(y_1[:, 2], y_predict[:, 2])
MAPE = mean_absolute_percentage_error(y_1[:, 2], y_predict[:, 2])
y_predict_csv = pd.DataFrame(y_predict)
y_predict_csv.to_csv('y_predict.csv')
y_predict_csv.loc[122:152]
code
122260629/cell_5
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/no-ground/fake2no_train.csv')
x = np.array(data[['theta', 'phi', 'power_fakeDB']]).reshape(len(data), 3)
y = np.array(data[['theta', 'phi', 'power_noDB']]).reshape(len(data), 3)
poly_reg = Pipeline([('poly', PolynomialFeatures(degree=9)), ('std_scale', StandardScaler()), ('lin_reg', LinearRegression())])
poly_reg.fit(x, y)
code
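Worth noting for the degree-9 pipeline above: on 3 inputs the polynomial expansion produces C(3+9, 9) = 220 terms, which the scaler and regression then have to handle. A minimal sketch to confirm the count (assuming only scikit-learn and NumPy are available):

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

poly = PolynomialFeatures(degree=9)
poly.fit(np.zeros((1, 3)))       # 3 input features, as in the pipeline above
print(poly.n_output_features_)   # 220 terms, including the bias column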
1003966/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# `data` originally comes from cell_2; reloaded here so the cell runs standalone.
data = pd.read_csv('../input/database.csv', na_values=['NA'], dtype='unicode')

## Rate of crimes solved
solved = pd.DataFrame(data, columns=['Crime Solved'])
resolution = solved.stack().value_counts()
ax = resolution.plot(kind='pie', title='Crimes solved between 1980 & 2014 (in %)', startangle=10, autopct='%.2f')
ax.set_ylabel('')

# Gender of victims
sex = pd.DataFrame(data, columns=['Victim Sex'])
count_sex = sex.stack().value_counts()
ax = count_sex.plot(kind='pie', title='Sex of the victims', startangle=10, autopct='%.2f')
ax.set_ylabel('')

# Race of victims
race = pd.DataFrame(data, columns=['Victim Race'])
count_race = race.stack().value_counts()
ax = count_race.plot(kind='pie', title='Race of the victims', startangle=10, autopct='%.2f', explode=(0, 0, 0.7, 1, 1.3))
ax.set_ylabel('')

data['Victim Age'] = data['Victim Age'].astype('int')
mask = data['Victim Age'] < 21
young_victims = pd.DataFrame(data.loc[mask], columns=['Year'])
count_years = young_victims.stack().value_counts()
homicides_young = count_years.sort_index(axis=0, ascending=False)
mask2 = data['Victim Age'] > 21
adult_victims = pd.DataFrame(data.loc[mask2], columns=['Year'])
count_years = adult_victims.stack().value_counts()
homicides_adult = count_years.sort_index(axis=0, ascending=False)

## Comparison between victims by age // ToDo adjust plot
homicides_adult.to_frame()
homicides_young.to_frame()
homicides = pd.DataFrame({'Adult': homicides_adult, 'Young': homicides_young})
homicides.sort_index(inplace=True)
pos = list(range(len(homicides['Adult'])))
width = 0.25
# Plotting the bars
fig, ax = plt.subplots(figsize=(25, 15))
# Adult bars at position pos, width `width`, alpha 0.5
plt.bar(pos, homicides['Adult'], width, alpha=0.5, color='#EE3224', label=homicides.index[0])
# Young bars shifted right by one bar width
plt.bar([p + width for p in pos], homicides['Young'], width, alpha=0.5, color='#F78F1E', label=homicides.index[1])
# Labels, title and ticks
ax.set_ylabel('Adult / Young')
ax.set_title('Comparison between victims by age')
ax.set_xticks([p + 1.5 * width for p in pos])
ax.set_xticklabels(homicides.index)
# Axis limits, legend and grid
plt.xlim(min(pos) - width, max(pos) + width * 4)
plt.ylim([0, max(homicides['Adult'] + homicides['Young'])])
plt.legend(['Adult', 'Young'], loc='upper left')
plt.grid()
plt.show()

# Sex of the perpetrators
perpetrator_sex = pd.DataFrame(data, columns=['Perpetrator Sex'])
count_perpetrator_sex = perpetrator_sex.stack().value_counts()
ax = count_perpetrator_sex.plot(kind='pie', title='Sex of the perpetrators', startangle=10, autopct='%.2f')
ax.set_ylabel('')

# Crime types
crime_types = pd.DataFrame(data, columns=['Crime Type'])
count_types = crime_types.stack().value_counts()
count_crime_types = count_types.sort_index(axis=0, ascending=False)
ax = count_crime_types.plot(kind='pie', title='Crime Types', startangle=25, autopct='%.2f')
ax.set_ylabel('')
code
1003966/cell_4
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# `data` originally comes from cell_2; reloaded here so the cell runs standalone.
data = pd.read_csv('../input/database.csv', na_values=['NA'], dtype='unicode')

## Rate of crimes solved
solved = pd.DataFrame(data, columns=['Crime Solved'])
resolution = solved.stack().value_counts()
ax = resolution.plot(kind='pie', title='Crimes solved between 1980 & 2014 (in %)', startangle=10, autopct='%.2f')
ax.set_ylabel('')

sex = pd.DataFrame(data, columns=['Victim Sex'])
count_sex = sex.stack().value_counts()
ax = count_sex.plot(kind='pie', title='Sex of the victims', startangle=10, autopct='%.2f')
ax.set_ylabel('')
code
1003966/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# `data` originally comes from cell_2; reloaded here so the cell runs standalone.
data = pd.read_csv('../input/database.csv', na_values=['NA'], dtype='unicode')

## Rate of crimes solved
solved = pd.DataFrame(data, columns=['Crime Solved'])
resolution = solved.stack().value_counts()
ax = resolution.plot(kind='pie', title='Crimes solved between 1980 & 2014 (in %)', startangle=10, autopct='%.2f')
ax.set_ylabel('')

# Gender of victims
sex = pd.DataFrame(data, columns=['Victim Sex'])
count_sex = sex.stack().value_counts()
ax = count_sex.plot(kind='pie', title='Sex of the victims', startangle=10, autopct='%.2f')
ax.set_ylabel('')

# Race of victims
race = pd.DataFrame(data, columns=['Victim Race'])
count_race = race.stack().value_counts()
ax = count_race.plot(kind='pie', title='Race of the victims', startangle=10, autopct='%.2f', explode=(0, 0, 0.7, 1, 1.3))
ax.set_ylabel('')

data['Victim Age'] = data['Victim Age'].astype('int')
mask = data['Victim Age'] < 21
young_victims = pd.DataFrame(data.loc[mask], columns=['Year'])
count_years = young_victims.stack().value_counts()
homicides_young = count_years.sort_index(axis=0, ascending=False)
mask2 = data['Victim Age'] > 21
adult_victims = pd.DataFrame(data.loc[mask2], columns=['Year'])
count_years = adult_victims.stack().value_counts()
homicides_adult = count_years.sort_index(axis=0, ascending=False)
print(homicides_young.plot(kind='barh', fontsize=10, width=0.5, figsize=(12, 10), title='Victims under 21 years old'))
code
1003966/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt

plt.style.use('fivethirtyeight')
data = pd.read_csv('../input/database.csv', na_values=['NA'], dtype='unicode')
years = pd.DataFrame(data, columns=['Year'])
count_years = years.stack().value_counts()
homicides = count_years.sort_index(axis=0, ascending=False)
homicides.plot(kind='barh', fontsize=10, width=0.5, figsize=(12, 10), title='Homicides in the USA between 1980 and 2014')
code
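The DataFrame + stack + value_counts pattern used throughout this notebook can be collapsed to a single Series call; a hedged sketch on the same 'Year' column:

homicides = data['Year'].value_counts().sort_index(ascending=False)  # equivalent to the stack()/value_counts() chain above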
1003966/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# `data` originally comes from cell_2; reloaded here so the cell runs standalone.
data = pd.read_csv('../input/database.csv', na_values=['NA'], dtype='unicode')

## Rate of crimes solved
solved = pd.DataFrame(data, columns=['Crime Solved'])
resolution = solved.stack().value_counts()
ax = resolution.plot(kind='pie', title='Crimes solved between 1980 & 2014 (in %)', startangle=10, autopct='%.2f')
ax.set_ylabel('')

# Gender of victims
sex = pd.DataFrame(data, columns=['Victim Sex'])
count_sex = sex.stack().value_counts()
ax = count_sex.plot(kind='pie', title='Sex of the victims', startangle=10, autopct='%.2f')
ax.set_ylabel('')

# Race of victims
race = pd.DataFrame(data, columns=['Victim Race'])
count_race = race.stack().value_counts()
ax = count_race.plot(kind='pie', title='Race of the victims', startangle=10, autopct='%.2f', explode=(0, 0, 0.7, 1, 1.3))
ax.set_ylabel('')

data['Victim Age'] = data['Victim Age'].astype('int')
mask = data['Victim Age'] < 21
young_victims = pd.DataFrame(data.loc[mask], columns=['Year'])
count_years = young_victims.stack().value_counts()
homicides_young = count_years.sort_index(axis=0, ascending=False)
mask2 = data['Victim Age'] > 21
adult_victims = pd.DataFrame(data.loc[mask2], columns=['Year'])
count_years = adult_victims.stack().value_counts()
homicides_adult = count_years.sort_index(axis=0, ascending=False)
homicides_adult.to_frame()
homicides_young.to_frame()
homicides = pd.DataFrame({'Adult': homicides_adult, 'Young': homicides_young})
homicides.sort_index(inplace=True)
pos = list(range(len(homicides['Adult'])))
width = 0.25
fig, ax = plt.subplots(figsize=(25, 15))
plt.bar(pos, homicides['Adult'], width, alpha=0.5, color='#EE3224', label=homicides.index[0])
plt.bar([p + width for p in pos], homicides['Young'], width, alpha=0.5, color='#F78F1E', label=homicides.index[1])
ax.set_ylabel('Adult / Young')
ax.set_title('Comparison between victims by age')
ax.set_xticks([p + 1.5 * width for p in pos])
ax.set_xticklabels(homicides.index)
plt.xlim(min(pos) - width, max(pos) + width * 4)
plt.ylim([0, max(homicides['Adult'] + homicides['Young'])])
plt.legend(['Adult', 'Young'], loc='upper left')
plt.grid()
plt.show()
code
1003966/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# `data` originally comes from cell_2; reloaded here so the cell runs standalone.
data = pd.read_csv('../input/database.csv', na_values=['NA'], dtype='unicode')

## Rate of crimes solved
solved = pd.DataFrame(data, columns=['Crime Solved'])
resolution = solved.stack().value_counts()
ax = resolution.plot(kind='pie', title='Crimes solved between 1980 & 2014 (in %)', startangle=10, autopct='%.2f')
ax.set_ylabel('')

# Gender of victims
sex = pd.DataFrame(data, columns=['Victim Sex'])
count_sex = sex.stack().value_counts()
ax = count_sex.plot(kind='pie', title='Sex of the victims', startangle=10, autopct='%.2f')
ax.set_ylabel('')

# Race of victims
race = pd.DataFrame(data, columns=['Victim Race'])
count_race = race.stack().value_counts()
ax = count_race.plot(kind='pie', title='Race of the victims', startangle=10, autopct='%.2f', explode=(0, 0, 0.7, 1, 1.3))
ax.set_ylabel('')

data['Victim Age'] = data['Victim Age'].astype('int')
mask = data['Victim Age'] < 21
young_victims = pd.DataFrame(data.loc[mask], columns=['Year'])
count_years = young_victims.stack().value_counts()
homicides_young = count_years.sort_index(axis=0, ascending=False)
mask2 = data['Victim Age'] > 21
adult_victims = pd.DataFrame(data.loc[mask2], columns=['Year'])
count_years = adult_victims.stack().value_counts()
homicides_adult = count_years.sort_index(axis=0, ascending=False)

## Comparison between victims by age // ToDo adjust plot
homicides_adult.to_frame()
homicides_young.to_frame()
homicides = pd.DataFrame({'Adult': homicides_adult, 'Young': homicides_young})
homicides.sort_index(inplace=True)
pos = list(range(len(homicides['Adult'])))
width = 0.25
# Plotting the bars
fig, ax = plt.subplots(figsize=(25, 15))
# Adult bars at position pos, width `width`, alpha 0.5
plt.bar(pos, homicides['Adult'], width, alpha=0.5, color='#EE3224', label=homicides.index[0])
# Young bars shifted right by one bar width
plt.bar([p + width for p in pos], homicides['Young'], width, alpha=0.5, color='#F78F1E', label=homicides.index[1])
# Labels, title and ticks
ax.set_ylabel('Adult / Young')
ax.set_title('Comparison between victims by age')
ax.set_xticks([p + 1.5 * width for p in pos])
ax.set_xticklabels(homicides.index)
# Axis limits, legend and grid
plt.xlim(min(pos) - width, max(pos) + width * 4)
plt.ylim([0, max(homicides['Adult'] + homicides['Young'])])
plt.legend(['Adult', 'Young'], loc='upper left')
plt.grid()
plt.show()

perpetrator_sex = pd.DataFrame(data, columns=['Perpetrator Sex'])
count_perpetrator_sex = perpetrator_sex.stack().value_counts()
ax = count_perpetrator_sex.plot(kind='pie', title='Sex of the perpetrators', startangle=10, autopct='%.2f')
ax.set_ylabel('')
code
1003966/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# `data` originally comes from cell_2; reloaded here so the cell runs standalone.
data = pd.read_csv('../input/database.csv', na_values=['NA'], dtype='unicode')

solved = pd.DataFrame(data, columns=['Crime Solved'])
resolution = solved.stack().value_counts()
ax = resolution.plot(kind='pie', title='Crimes solved between 1980 & 2014 (in %)', startangle=10, autopct='%.2f')
ax.set_ylabel('')
code
1003966/cell_10
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# `data` originally comes from cell_2; reloaded here so the cell runs standalone.
data = pd.read_csv('../input/database.csv', na_values=['NA'], dtype='unicode')

## Rate of crimes solved
solved = pd.DataFrame(data, columns=['Crime Solved'])
resolution = solved.stack().value_counts()
ax = resolution.plot(kind='pie', title='Crimes solved between 1980 & 2014 (in %)', startangle=10, autopct='%.2f')
ax.set_ylabel('')

# Gender of victims
sex = pd.DataFrame(data, columns=['Victim Sex'])
count_sex = sex.stack().value_counts()
ax = count_sex.plot(kind='pie', title='Sex of the victims', startangle=10, autopct='%.2f')
ax.set_ylabel('')

# Race of victims
race = pd.DataFrame(data, columns=['Victim Race'])
count_race = race.stack().value_counts()
ax = count_race.plot(kind='pie', title='Race of the victims', startangle=10, autopct='%.2f', explode=(0, 0, 0.7, 1, 1.3))
ax.set_ylabel('')

data['Victim Age'] = data['Victim Age'].astype('int')
mask = data['Victim Age'] < 21
young_victims = pd.DataFrame(data.loc[mask], columns=['Year'])
count_years = young_victims.stack().value_counts()
homicides_young = count_years.sort_index(axis=0, ascending=False)
mask2 = data['Victim Age'] > 21
adult_victims = pd.DataFrame(data.loc[mask2], columns=['Year'])
count_years = adult_victims.stack().value_counts()
homicides_adult = count_years.sort_index(axis=0, ascending=False)

## Comparison between victims by age // ToDo adjust plot
homicides_adult.to_frame()
homicides_young.to_frame()
homicides = pd.DataFrame({'Adult': homicides_adult, 'Young': homicides_young})
homicides.sort_index(inplace=True)
pos = list(range(len(homicides['Adult'])))
width = 0.25
# Plotting the bars
fig, ax = plt.subplots(figsize=(25, 15))
# Adult bars at position pos, width `width`, alpha 0.5
plt.bar(pos, homicides['Adult'], width, alpha=0.5, color='#EE3224', label=homicides.index[0])
# Young bars shifted right by one bar width
plt.bar([p + width for p in pos], homicides['Young'], width, alpha=0.5, color='#F78F1E', label=homicides.index[1])
# Labels, title and ticks
ax.set_ylabel('Adult / Young')
ax.set_title('Comparison between victims by age')
ax.set_xticks([p + 1.5 * width for p in pos])
ax.set_xticklabels(homicides.index)
# Axis limits, legend and grid
plt.xlim(min(pos) - width, max(pos) + width * 4)
plt.ylim([0, max(homicides['Adult'] + homicides['Young'])])
plt.legend(['Adult', 'Young'], loc='upper left')
plt.grid()
plt.show()

# Sex of the perpetrators
perpetrator_sex = pd.DataFrame(data, columns=['Perpetrator Sex'])
count_perpetrator_sex = perpetrator_sex.stack().value_counts()
ax = count_perpetrator_sex.plot(kind='pie', title='Sex of the perpetrators', startangle=10, autopct='%.2f')
ax.set_ylabel('')

# Crime types
crime_types = pd.DataFrame(data, columns=['Crime Type'])
count_types = crime_types.stack().value_counts()
count_crime_types = count_types.sort_index(axis=0, ascending=False)
# Plot the total of homicides
ax = count_crime_types.plot(kind='pie', title='Crime Types', startangle=25, autopct='%.2f')
ax.set_ylabel('')

state = pd.DataFrame(data, columns=['State'])
count_states = state.stack().value_counts()
states = count_states.sort_index(axis=0, ascending=False)
print(states.plot(kind='barh', fontsize=10, width=0.5, figsize=(12, 10), title='Homicides in the USA by State between 1980 and 2014'))
code
1003966/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
from matplotlib.collections import PatchCollection
from matplotlib.colors import Normalize
from matplotlib.patches import Polygon
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import matplotlib.cm
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# `data` originally comes from cell_2; reloaded here so the cell runs standalone.
data = pd.read_csv('../input/database.csv', na_values=['NA'], dtype='unicode')

## Rate of crimes solved
solved = pd.DataFrame(data, columns=['Crime Solved'])
resolution = solved.stack().value_counts()
ax = resolution.plot(kind='pie', title='Crimes solved between 1980 & 2014 (in %)', startangle=10, autopct='%.2f')
ax.set_ylabel('')

# Gender of victims
sex = pd.DataFrame(data, columns=['Victim Sex'])
count_sex = sex.stack().value_counts()
ax = count_sex.plot(kind='pie', title='Sex of the victims', startangle=10, autopct='%.2f')
ax.set_ylabel('')

# Race of victims
race = pd.DataFrame(data, columns=['Victim Race'])
count_race = race.stack().value_counts()
ax = count_race.plot(kind='pie', title='Race of the victims', startangle=10, autopct='%.2f', explode=(0, 0, 0.7, 1, 1.3))
ax.set_ylabel('')

data['Victim Age'] = data['Victim Age'].astype('int')
mask = data['Victim Age'] < 21
young_victims = pd.DataFrame(data.loc[mask], columns=['Year'])
count_years = young_victims.stack().value_counts()
homicides_young = count_years.sort_index(axis=0, ascending=False)
mask2 = data['Victim Age'] > 21
adult_victims = pd.DataFrame(data.loc[mask2], columns=['Year'])
count_years = adult_victims.stack().value_counts()
homicides_adult = count_years.sort_index(axis=0, ascending=False)

## Comparison between victims by age // ToDo adjust plot
homicides_adult.to_frame()
homicides_young.to_frame()
homicides = pd.DataFrame({'Adult': homicides_adult, 'Young': homicides_young})
homicides.sort_index(inplace=True)
pos = list(range(len(homicides['Adult'])))
width = 0.25
# Plotting the bars
fig, ax = plt.subplots(figsize=(25, 15))
# Adult bars at position pos, width `width`, alpha 0.5
plt.bar(pos, homicides['Adult'], width, alpha=0.5, color='#EE3224', label=homicides.index[0])
# Young bars shifted right by one bar width
plt.bar([p + width for p in pos], homicides['Young'], width, alpha=0.5, color='#F78F1E', label=homicides.index[1])
# Labels, title and ticks
ax.set_ylabel('Adult / Young')
ax.set_title('Comparison between victims by age')
ax.set_xticks([p + 1.5 * width for p in pos])
ax.set_xticklabels(homicides.index)
# Axis limits, legend and grid
plt.xlim(min(pos) - width, max(pos) + width * 4)
plt.ylim([0, max(homicides['Adult'] + homicides['Young'])])
plt.legend(['Adult', 'Young'], loc='upper left')
plt.grid()
plt.show()

# Sex of the perpetrators
perpetrator_sex = pd.DataFrame(data, columns=['Perpetrator Sex'])
count_perpetrator_sex = perpetrator_sex.stack().value_counts()
ax = count_perpetrator_sex.plot(kind='pie', title='Sex of the perpetrators', startangle=10, autopct='%.2f')
ax.set_ylabel('')

# Crime types
crime_types = pd.DataFrame(data, columns=['Crime Type'])
count_types = crime_types.stack().value_counts()
count_crime_types = count_types.sort_index(axis=0, ascending=False)
# Plot the total of homicides
ax = count_crime_types.plot(kind='pie', title='Crime Types', startangle=25, autopct='%.2f')
ax.set_ylabel('')

state = pd.DataFrame(data, columns=['State'])
count_states = state.stack().value_counts()
states = count_states.sort_index(axis=0, ascending=False)

# Choropleth of homicides by state
states_eeuu = pd.DataFrame({'homicides': states, 'state': states.index})
states_name = states_eeuu.index
fig, ax = plt.subplots(figsize=(20, 10))
m = Basemap(resolution='h', projection='lcc', lat_1=33, lat_2=45, lon_0=-95, llcrnrlon=-119, llcrnrlat=22, urcrnrlon=-64, urcrnrlat=49)
m.readshapefile('../input/st99_d00', 'states')
m.drawmapboundary(fill_color='#46bcec')
m.fillcontinents(color='#f2f2f2', lake_color='#46bcec')
m.drawcoastlines()
geo = pd.DataFrame({'shapes': [Polygon(np.array(shape), True) for shape in m.states], 'state': [state['NAME'] for state in m.states_info]})
geo = geo.merge(states_eeuu, on='state', how='left')
cmap = plt.get_cmap('Oranges')
pc = PatchCollection(geo.shapes, zorder=2)
norm = Normalize()
pc.set_facecolor(cmap(norm(geo['homicides'].fillna(0).values)))
ax.add_collection(pc)
mapper = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)
mapper.set_array(geo['homicides'])
plt.colorbar(mapper, shrink=0.4)
plt.title('Geographic homicide distribution')
code
1003966/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

# `data` originally comes from cell_2; reloaded here so the cell runs standalone.
data = pd.read_csv('../input/database.csv', na_values=['NA'], dtype='unicode')

## Rate of crimes solved
solved = pd.DataFrame(data, columns=['Crime Solved'])
resolution = solved.stack().value_counts()
ax = resolution.plot(kind='pie', title='Crimes solved between 1980 & 2014 (in %)', startangle=10, autopct='%.2f')
ax.set_ylabel('')

# Gender of victims
sex = pd.DataFrame(data, columns=['Victim Sex'])
count_sex = sex.stack().value_counts()
ax = count_sex.plot(kind='pie', title='Sex of the victims', startangle=10, autopct='%.2f')
ax.set_ylabel('')

race = pd.DataFrame(data, columns=['Victim Race'])
count_race = race.stack().value_counts()
ax = count_race.plot(kind='pie', title='Race of the victims', startangle=10, autopct='%.2f', explode=(0, 0, 0.7, 1, 1.3))
ax.set_ylabel('')
code
90106983/cell_42
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt  # added: used below but missing from the original imports
import seaborn as sns

data = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/ford.csv')
DuplicatedDataSum = data.duplicated().sum()
DuplicatedData = data.duplicated()
data = data.drop_duplicates()
DuplicatedDataSum = data.duplicated().sum()
corr = data.corr()
corr

## Draw the relationship between the data
# Generate a mask for the upper triangle
mask = np.triu(np.ones_like(corr, dtype=bool))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(230, 20, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5})

# Note: np.corrcoef needs numeric input; 'model'/'transmission' are string
# columns in ford.csv and are presumably label-encoded in a cell not captured here.
model = np.corrcoef(data['model'], data['price'])
model
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5), sharex=True)
sns.kdeplot(data=model, ax=ax1)
sns.heatmap(model, annot=True, ax=ax2)
year = np.corrcoef(data['year'], data['price'])
year
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5), sharex=True)
sns.kdeplot(data=year, ax=ax1)
sns.heatmap(year, annot=True, ax=ax2)
transmission = np.corrcoef(data['transmission'], data['price'])
transmission
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5), sharex=True)
sns.kdeplot(data=transmission, ax=ax1)
sns.heatmap(transmission, annot=True, ax=ax2)
code
90106983/cell_13
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/ford.csv')
DuplicatedDataSum = data.duplicated().sum()
DuplicatedData = data.duplicated()
print(DuplicatedData)
code
90106983/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/ford.csv')
data.info()
code
90106983/cell_34
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt  # added: used below but missing from the original imports
import seaborn as sns

data = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/ford.csv')
DuplicatedDataSum = data.duplicated().sum()
DuplicatedData = data.duplicated()
data = data.drop_duplicates()
DuplicatedDataSum = data.duplicated().sum()
corr = data.corr()
corr

## Draw the relationship between the data
# Generate a mask for the upper triangle
mask = np.triu(np.ones_like(corr, dtype=bool))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(230, 20, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5})

# Note: np.corrcoef needs numeric input; 'model' is a string column in
# ford.csv and is presumably label-encoded in a cell not captured here.
model = np.corrcoef(data['model'], data['price'])
model
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5), sharex=True)
sns.kdeplot(data=model, ax=ax1)
sns.heatmap(model, annot=True, ax=ax2)
code
90106983/cell_44
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt  # added: used below but missing from the original imports
import seaborn as sns

data = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/ford.csv')
DuplicatedDataSum = data.duplicated().sum()
DuplicatedData = data.duplicated()
data = data.drop_duplicates()
DuplicatedDataSum = data.duplicated().sum()
corr = data.corr()
corr

## Draw the relationship between the data
# Generate a mask for the upper triangle
mask = np.triu(np.ones_like(corr, dtype=bool))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(230, 20, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5})

# Note: np.corrcoef needs numeric input; 'model'/'transmission' are string
# columns in ford.csv and are presumably label-encoded in a cell not captured here.
model = np.corrcoef(data['model'], data['price'])
model
year = np.corrcoef(data['year'], data['price'])
year
transmission = np.corrcoef(data['transmission'], data['price'])
transmission
mileage = np.corrcoef(data['mileage'], data['price'])
mileage
code
90106983/cell_20
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt  # added: used below but missing from the original imports
import seaborn as sns

data = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/ford.csv')
DuplicatedDataSum = data.duplicated().sum()
DuplicatedData = data.duplicated()
data = data.drop_duplicates()
DuplicatedDataSum = data.duplicated().sum()
corr = data.corr()
corr

## Draw the relationship between the data
# Generate a mask for the upper triangle
mask = np.triu(np.ones_like(corr, dtype=bool))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(230, 20, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5})

plt.figure(figsize=(15, 10))
sns.heatmap(corr, annot=True)
code
90106983/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/ford.csv')
print(data.shape)
data.head()
code
90106983/cell_40
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt  # added: used below but missing from the original imports
import seaborn as sns

data = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/ford.csv')
DuplicatedDataSum = data.duplicated().sum()
DuplicatedData = data.duplicated()
data = data.drop_duplicates()
DuplicatedDataSum = data.duplicated().sum()
corr = data.corr()
corr

## Draw the relationship between the data
# Generate a mask for the upper triangle
mask = np.triu(np.ones_like(corr, dtype=bool))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(230, 20, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5})

# Note: np.corrcoef needs numeric input; 'model'/'transmission' are string
# columns in ford.csv and are presumably label-encoded in a cell not captured here.
model = np.corrcoef(data['model'], data['price'])
model
year = np.corrcoef(data['year'], data['price'])
year
transmission = np.corrcoef(data['transmission'], data['price'])
transmission
code
90106983/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/ford.csv')
DuplicatedDataSum = data.duplicated().sum()
DuplicatedData = data.duplicated()
data = data.drop_duplicates()
DuplicatedDataSum = data.duplicated().sum()
corr = data.corr()
corr
data.head()
code
90106983/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/ford.csv')
print(data.isnull().sum())
code
90106983/cell_19
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt  # added: used below but missing from the original imports
import seaborn as sns

data = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/ford.csv')
DuplicatedDataSum = data.duplicated().sum()
DuplicatedData = data.duplicated()
data = data.drop_duplicates()
DuplicatedDataSum = data.duplicated().sum()
corr = data.corr()
corr
mask = np.triu(np.ones_like(corr, dtype=bool))
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.diverging_palette(230, 20, as_cmap=True)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=0.3, center=0, square=True, linewidths=0.5, cbar_kws={'shrink': 0.5})
code
90106983/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90106983/cell_32
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt  # added: used below but missing from the original imports
import seaborn as sns

data = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/ford.csv')
DuplicatedDataSum = data.duplicated().sum()
DuplicatedData = data.duplicated()
data = data.drop_duplicates()
DuplicatedDataSum = data.duplicated().sum()
corr = data.corr()
corr

## Draw the relationship between the data
# Generate a mask for the upper triangle
mask = np.triu(np.ones_like(corr, dtype=bool))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(230, 20, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5})

# Note: np.corrcoef needs numeric input; 'model' is a string column in
# ford.csv and is presumably label-encoded in a cell not captured here.
model = np.corrcoef(data['model'], data['price'])
model
code
90106983/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/ford.csv')
print(data.columns.values)
code
90106983/cell_38
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt  # added: used below but missing from the original imports
import seaborn as sns

data = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/ford.csv')
DuplicatedDataSum = data.duplicated().sum()
DuplicatedData = data.duplicated()
data = data.drop_duplicates()
DuplicatedDataSum = data.duplicated().sum()
corr = data.corr()
corr

## Draw the relationship between the data
# Generate a mask for the upper triangle
mask = np.triu(np.ones_like(corr, dtype=bool))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(230, 20, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5})

# Note: np.corrcoef needs numeric input; 'model' is a string column in
# ford.csv and is presumably label-encoded in a cell not captured here.
model = np.corrcoef(data['model'], data['price'])
model
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5), sharex=True)
sns.kdeplot(data=model, ax=ax1)
sns.heatmap(model, annot=True, ax=ax2)
year = np.corrcoef(data['year'], data['price'])
year
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5), sharex=True)
sns.kdeplot(data=year, ax=ax1)
sns.heatmap(year, annot=True, ax=ax2)
code
90106983/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/ford.csv')
DuplicatedDataSum = data.duplicated().sum()
DuplicatedData = data.duplicated()
data = data.drop_duplicates()
DuplicatedDataSum = data.duplicated().sum()
corr = data.corr()
corr
code
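Since DataFrame.corr and np.corrcoef only handle numeric data, the string columns of ford.csv would need encoding before the correlation cells in this notebook can run on them. A hedged sketch using pandas' factorize (the column list and the encoding choice are assumptions, not the notebook's own preprocessing):

import pandas as pd

df = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/ford.csv')
for col in ['model', 'transmission', 'fuelType']:  # string columns (assumed)
    df[col] = pd.factorize(df[col])[0]             # integer codes per category
print(df[['model', 'price']].corr())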
90106983/cell_24
[ "text_html_output_1.png" ]
from pandas_profiling import ProfileReport
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/ford.csv')
DuplicatedDataSum = data.duplicated().sum()
DuplicatedData = data.duplicated()
data = data.drop_duplicates()
DuplicatedDataSum = data.duplicated().sum()
corr = data.corr()
corr
profile = ProfileReport(data, title='Pandas profiling report', html={'style': {'full_width': True}})
profile.to_notebook_iframe()
code
90106983/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/ford.csv')
DuplicatedDataSum = data.duplicated().sum()
DuplicatedData = data.duplicated()
data = data.drop_duplicates()
DuplicatedDataSum = data.duplicated().sum()
print('Sum of the duplicates in the data:', DuplicatedDataSum)
code
90106983/cell_10
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/ford.csv')
data.describe()
code
90106983/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/ford.csv')
DuplicatedDataSum = data.duplicated().sum()
print('Sum of the duplicates in the data:', DuplicatedDataSum)
code
90106983/cell_36
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt  # added: used below but missing from the original imports
import seaborn as sns

data = pd.read_csv('../input/used-car-dataset-ford-and-mercedes/ford.csv')
DuplicatedDataSum = data.duplicated().sum()
DuplicatedData = data.duplicated()
data = data.drop_duplicates()
DuplicatedDataSum = data.duplicated().sum()
corr = data.corr()
corr

## Draw the relationship between the data
# Generate a mask for the upper triangle
mask = np.triu(np.ones_like(corr, dtype=bool))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(230, 20, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5})

# Note: np.corrcoef needs numeric input; 'model' is a string column in
# ford.csv and is presumably label-encoded in a cell not captured here.
model = np.corrcoef(data['model'], data['price'])
model
year = np.corrcoef(data['year'], data['price'])
year
code
32067324/cell_2
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72085259/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
submission_df = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
train.head()
code
72085259/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
submission_df = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
data = pd.concat([train.assign(ind='train'), test.assign(ind='test')], ignore_index=True)
data['Age'] = data.groupby(['Sex', 'Pclass'])['Age'].apply(lambda x: x.fillna(x.median()))
data['Age'] = data['Age'].astype(int)
data.loc[data['Age'] <= 15, 'Age'] = 0
data.loc[(data['Age'] > 15) & (data['Age'] <= 30), 'Age'] = 1
data.loc[(data['Age'] > 30) & (data['Age'] <= 45), 'Age'] = 2
data.loc[(data['Age'] > 45) & (data['Age'] <= 60), 'Age'] = 3
data.loc[data['Age'] > 60, 'Age'] = 4
data.describe(include='all')
code
72085259/cell_7
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
submission_df = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
print('train:\n', train.isnull().sum())
print()
print('test:\n', test.isnull().sum())
code
72085259/cell_28
[ "text_plain_output_1.png", "image_output_1.png" ]
# `train_data` / `test_data` come from a cell not captured in this snapshot;
# a hedged sketch of the presumed split follows this row.
test_data = test_data.drop(['ind'], axis=1)
train_data = train_data.drop(['ind'], axis=1)
train_data.head()
code
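A hedged sketch of where train_data/test_data presumably come from: splitting the combined frame on the 'ind' marker added via pd.concat in cell_20 (the actual split cell was not captured):

train_data = data[data['ind'] == 'train'].copy()
test_data = data[data['ind'] == 'test'].copy()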
72085259/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
submission_df = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
data = pd.concat([train.assign(ind='train'), test.assign(ind='test')], ignore_index=True)
# 'Deck' is presumably derived from 'Cabin' in a cell not captured in this snapshot.
sns.barplot(x=data['Deck'], y=data['Survived'])
code
72085259/cell_38
[ "text_html_output_1.png" ]
from lightgbm import LGBMClassifier
from sklearn import metrics, svm
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
submission_df = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
data = pd.concat([train.assign(ind='train'), test.assign(ind='test')], ignore_index=True)
data['Age'] = data.groupby(['Sex', 'Pclass'])['Age'].apply(lambda x: x.fillna(x.median()))
data['Age'] = data['Age'].astype(int)
data.loc[data['Age'] <= 15, 'Age'] = 0
data.loc[(data['Age'] > 15) & (data['Age'] <= 30), 'Age'] = 1
data.loc[(data['Age'] > 30) & (data['Age'] <= 45), 'Age'] = 2
data.loc[(data['Age'] > 45) & (data['Age'] <= 60), 'Age'] = 3
data.loc[data['Age'] > 60, 'Age'] = 4
data['Fare_bins'] = pd.cut(data['Fare'], bins=[0.0, 7.895, 14.45, 31.275, 512.329], labels=[0, 1, 2, 3])
data['Fare_bins'] = data['Fare_bins'].fillna(0)
data['Fare_bins'] = data['Fare_bins'].astype(int)
# `train_data`/`test_data` and the X_train/X_val split used below come from
# cells not captured in this snapshot; see the split sketches after this row.
test_data = test_data.drop(['ind'], axis=1)
train_data = train_data.drop(['ind'], axis=1)
X = train_data.drop(['Survived'], axis=1)
y = train_data['Survived']
traindf_X = pd.get_dummies(X, columns=['Sex', 'Embarked', 'Fare_bins', 'Deck'], prefix=['Sex', 'Embarked', 'Fare_type', 'Deck'])
testdf = pd.get_dummies(test_data, columns=['Sex', 'Embarked', 'Fare_bins', 'Deck'], prefix=['Sex', 'Embarked', 'Fare_type', 'Deck']).drop(['Survived'], axis=1)

def get_scores(y_preds, y):
    return {'Accuracy': metrics.accuracy_score(y_preds, y),
            'Precision': metrics.precision_score(y_preds, y),
            'Recall': metrics.recall_score(y_preds, y),
            'F1': metrics.f1_score(y_preds, y),
            'ROC_AUC': metrics.roc_auc_score(y_preds, y)}

def train_model(model):
    model_ = model
    model_.fit(X_train, y_train)
    y_pred = model_.predict(X_val)
    return get_scores(y_pred, y_val)

model_list = [DecisionTreeClassifier(random_state=42), RandomForestClassifier(random_state=42),
              XGBClassifier(random_state=42), LGBMClassifier(random_state=42, is_unbalance=True),
              LogisticRegression(random_state=42), svm.SVC(random_state=42),
              AdaBoostClassifier(random_state=42), KNeighborsClassifier(), GaussianNB()]
model_names = ['Decision Tree', 'Random Forest', 'XGB Classifier', 'LGBM Classifier',
               'Logistic Regression', 'SVC', 'AdaBoost ', 'KNN', 'GaussianNB']
scores = pd.DataFrame(columns=['Name', 'Accuracy', 'Precision', 'Recall', 'F1', 'ROC_AUC'])
for i in range(len(model_list)):
    score = train_model(model_list[i])
    scores.loc[i] = [model_names[i]] + list(score.values())

figure, axis = plt.subplots(2, 3)
figure.set_figheight(15)
figure.set_figwidth(30)
for i in range(2):
    for j in range(3):
        axis[i, j].set_xlim([0.5, 0.9])
axis[0, 0].barh(scores['Name'], scores['Accuracy'], height=0.5)
axis[0, 0].set_title('Accuracy Score')
axis[0, 1].barh(scores['Name'], scores['Precision'], height=0.5)
axis[0, 1].set_title('Precision')
axis[1, 0].barh(scores['Name'], scores['Recall'], height=0.5)
axis[1, 0].set_title('Recall')
axis[1, 2].barh(scores['Name'], scores['F1'], height=0.5)
axis[1, 2].set_title('F1')
axis[0, 2].barh(scores['Name'], scores['ROC_AUC'], height=0.5)
axis[0, 2].set_title('ROC_AUC')
axis[1, 1].set_visible(False)
plt.show()
code
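The X_train/X_val split used by train_model above is also missing from the snapshot; a minimal sketch of a standard holdout split, assuming traindf_X and y from the cell:

from sklearn.model_selection import train_test_split

X_train, X_val, y_train, y_val = train_test_split(traindf_X, y, test_size=0.2, random_state=42, stratify=y)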
72085259/cell_24
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
submission_df = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
data = pd.concat([train.assign(ind='train'), test.assign(ind='test')], ignore_index=True)
data['Age'] = data.groupby(['Sex', 'Pclass'])['Age'].apply(lambda x: x.fillna(x.median()))
data['Age'] = data['Age'].astype(int)
data.loc[data['Age'] <= 15, 'Age'] = 0
data.loc[(data['Age'] > 15) & (data['Age'] <= 30), 'Age'] = 1
data.loc[(data['Age'] > 30) & (data['Age'] <= 45), 'Age'] = 2
data.loc[(data['Age'] > 45) & (data['Age'] <= 60), 'Age'] = 3
data.loc[data['Age'] > 60, 'Age'] = 4
data.head()
code
72085259/cell_14
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
submission_df = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
data = pd.concat([train.assign(ind='train'), test.assign(ind='test')], ignore_index=True)
# 'Deck' is presumably derived from 'Cabin' in a cell not captured in this snapshot.
sns.barplot(x=data['Deck'], y=data['Survived'])
code
72085259/cell_12
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
submission_df = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
data = pd.concat([train.assign(ind='train'), test.assign(ind='test')], ignore_index=True)
print(train.Cabin.unique())
code
72085259/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
submission_df = pd.read_csv('/kaggle/input/titanic/gender_submission.csv')
test.head()
code
128002897/cell_4
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from numba import njit, prange
from tqdm.notebook import tnrange
import numpy as np

tracedata = np.load('/kaggle/input/sca-simple-xor-cipher-dataset/2023.04.08-14.10.20_0traces.npy')
textindata = np.load('/kaggle/input/sca-simple-xor-cipher-dataset/2023.04.08-14.10.20_0textin.npy')
textoutdata = np.load('/kaggle/input/sca-simple-xor-cipher-dataset/2023.04.08-14.10.20_0textin.npy')  # note: loads the textin file again, likely a copy-paste slip (unused below)
key = np.load('/kaggle/input/sca-simple-xor-cipher-dataset/2023.04.08-14.10.20_0knownkey.npy', allow_pickle=True)

def mean(X):
    return np.sum(X, axis=0) / len(X)

def std_dev(X, X_bar):
    return np.sqrt(np.sum((X - X_bar) ** 2, axis=0))

def cov(X, X_bar, Y, Y_bar):
    return np.sum((X - X_bar) * (Y - Y_bar), axis=0)

def xor_internal(inputdata, key):
    return inputdata ^ key

HW = [bin(n).count('1') for n in range(0, 256)]  # Hamming-weight lookup table
t_bar = np.sum(tracedata[:1000], axis=0) / len(tracedata[:1000])
o_t = np.sqrt(np.sum((tracedata[:1000] - t_bar) ** 2, axis=0))
cparefs = [0] * 16
bestguess = [0] * 16
for bnum in tnrange(0, 16):
    maxcpa = [0] * 256
    for kguess in prange(0, 256):  # prange degrades to plain range outside an @njit function
        # Hypothetical leakage: Hamming weight of textin byte XOR key guess.
        hws = np.array([[HW[xor_internal(textin[bnum], kguess)] for textin in textindata]]).transpose()
        hws_bar = mean(hws)
        o_hws = std_dev(hws, hws_bar)
        correlation = cov(tracedata[:1000], t_bar, hws, hws_bar)
        cpaoutput = correlation / (o_t * o_hws)
        maxcpa[kguess] = max(abs(cpaoutput))
    bestguess[bnum] = np.argmax(maxcpa)
    cparefs[bnum] = max(maxcpa)
print('Best Key Guess: ', end='')
for b in bestguess:
    print('%02x ' % b, end='')
print('\n', cparefs)
code
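The per-guess loop above can be vectorized over all 256 candidates for one byte position; a hedged sketch reusing HW, textindata, tracedata, t_bar and o_t from the cell (bnum fixed to one byte index, traces truncated to the same 1000 used above):

import numpy as np

bnum = 0
# Hypothetical leakage matrix: HW(textin[bnum] XOR guess) for every guess, shape (256, 1000)
hyp = np.array([[HW[int(t[bnum]) ^ k] for t in textindata[:1000]] for k in range(256)], dtype=float)
hyp -= hyp.mean(axis=1, keepdims=True)             # center each guess row
T = tracedata[:1000] - t_bar                       # centered traces, shape (1000, samples)
corr = hyp @ T / (np.linalg.norm(hyp, axis=1)[:, None] * o_t)  # Pearson r, shape (256, samples)
print('best guess for byte %d: %02x' % (bnum, np.argmax(np.max(np.abs(corr), axis=1))))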
128002897/cell_1
[ "text_plain_output_1.png" ]
!pip install numpy !pip install numba !pip install matplotlib
code
128002897/cell_3
[ "image_output_1.png" ]
import binascii
import matplotlib.pylab as plt
import numpy as np  # added: np was used below without an import
# `tracedata` is loaded from the .npy files shown in cell_4.

plt.plot(np.mean(tracedata, axis=0), 'r', label='mean trace')  # label added so legend() has an entry
plt.legend()
plt.show()
code
2032344/cell_4
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

def computeCost(x, y, theta):
    h = x.dot(theta)
    cost = sum(pow(h - y, 2)) / (2 * m)
    return cost

def gradientDescent(x, y, theta, alpha, iterations):
    computed_theta = theta
    for i in range(0, iterations):
        h = x.dot(computed_theta)
        computed_theta[0] = computed_theta[0] - alpha * (1 / m) * sum(h - y)
        computed_theta[1] = computed_theta[1] - alpha * (1 / m) * sum((h - y) * x[:, 1])  # fixed: originally referenced the global X instead of the parameter x
    return computed_theta

data = pd.read_csv('../input/ex1data1.txt', header=None)
X = data.iloc[:, 0].values
y = data.iloc[:, 1].values
m = y.size
plt.scatter(X, y, marker='x')
plt.xlabel('Population of City in 10,000s')
plt.ylabel('Profit in $10,000s')
plt.show()
X = np.concatenate((np.ones((m, 1), dtype=int), X.reshape(m, 1)), axis=1)  # np.int was removed in modern NumPy; plain int is equivalent
print(X.size)
print('Testing the cost function with theta = [0 ; 0]')
J = computeCost(X, y, np.array([0, 0]))
print('Expected cost value (approx): 32.07')
print('Actual cost value: {}\n'.format(J))
print('Testing the cost function with theta = [-1 ; 2]')
J = computeCost(X, y, np.array([-1, 2]))
print('Expected cost value (approx): 54.24')
print('Actual cost value: {}\n'.format(J))
theta = np.zeros(2)
alpha = 0.01
iterations = 1500
print('Running Gradient Descent')
theta = gradientDescent(X, y, theta, alpha, iterations)
print('Expected theta value (approx): [-3.6303, 1.1664]')
print('Actual theta value: {}\n'.format(theta))
plt.scatter(X[:, 1], y, marker='x', label='Training data')
plt.plot(X[:, 1], X.dot(theta), color='r', label='Linear regression')
plt.xlabel('Population of City in 10,000s')
plt.ylabel('Profit in $10,000s')
plt.legend()
plt.show()
predict1 = np.array([1, 3.5]).dot(theta)
print('For population of 35,000 we predict a profit of {}'.format(predict1 * 10000))
predict2 = np.array([1, 7]).dot(theta)
print('For population of 70,000 we predict a profit of {}'.format(predict2 * 10000))
code
106195648/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from yellowbrick.cluster import kelbow_visualizer  # kelbow_visualizer was used without an import

df = pd.read_csv('../input/tabular-playground-series-jul-2022/data.csv')
df
km = KMeans(init='random', random_state=0)
y_km = km.fit_predict(df)
# sweep k over 2..14 and draw the elbow curve
km = KMeans(max_iter=300, n_init=10)
elbow_k = kelbow_visualizer(km, df, k=(2, 15))
code
106195648/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.preprocessing import StandardScaler

df = pd.read_csv('../input/tabular-playground-series-jul-2022/data.csv')
df
code
106195648/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
106195648/cell_7
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from yellowbrick.cluster import kelbow_visualizer  # kelbow_visualizer was used without an import

df = pd.read_csv('../input/tabular-playground-series-jul-2022/data.csv')
df
km = KMeans(init='random', random_state=0)
y_km = km.fit_predict(df)
km = KMeans(max_iter=300, n_init=10)
elbow_k = kelbow_visualizer(km, df, k=(2, 15))
kmeans = KMeans(n_clusters=5, n_init=10, max_iter=300)
label_pred = kmeans.fit_predict(df)
print('The best number of cluster is: ', elbow_k.elbow_value_)
code
106195648/cell_8
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from yellowbrick.cluster import kelbow_visualizer, silhouette_visualizer  # both were used without imports

df = pd.read_csv('../input/tabular-playground-series-jul-2022/data.csv')
df
km = KMeans(init='random', random_state=0)
y_km = km.fit_predict(df)
km = KMeans(max_iter=300, n_init=10)
elbow_k = kelbow_visualizer(km, df, k=(2, 15))
kmeans = KMeans(n_clusters=5, n_init=10, max_iter=300)
label_pred = kmeans.fit_predict(df)
silhouette = silhouette_visualizer(kmeans, df)
print('The Average Silhouette score is: ', silhouette.silhouette_score_)
code
106195648/cell_3
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.preprocessing import StandardScaler

df = pd.read_csv('../input/tabular-playground-series-jul-2022/data.csv')
df
df_st = StandardScaler()
df_st.fit(df)
df_ = df_st.transform(df)
df_
code
106195648/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from yellowbrick.cluster import kelbow_visualizer  # kelbow_visualizer was used without an import

df = pd.read_csv('../input/tabular-playground-series-jul-2022/data.csv')
df
km = KMeans(init='random', random_state=0)
y_km = km.fit_predict(df)
km = KMeans(max_iter=300, n_init=10)
elbow_k = kelbow_visualizer(km, df, k=(2, 15))
kmeans = KMeans(n_clusters=5, n_init=10, max_iter=300)
label_pred = kmeans.fit_predict(df)
df_submit = pd.read_csv('../input/tabular-playground-series-jul-2022/sample_submission.csv')
df_submit['Predicted'] = label_pred
df_submit
code
90148683/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_data = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
submission = pd.read_csv('/kaggle/input/nlp-getting-started/sample_submission.csv')
test_data.head()
code
90148683/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_data = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
submission = pd.read_csv('/kaggle/input/nlp-getting-started/sample_submission.csv')
train_data = train_data.drop(['location'], axis=1)
test_data = test_data.drop(['location'], axis=1)
train_data.shape
code
90148683/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90148683/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_data = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
submission = pd.read_csv('/kaggle/input/nlp-getting-started/sample_submission.csv')
submission.head()
code
90148683/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_data = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
submission = pd.read_csv('/kaggle/input/nlp-getting-started/sample_submission.csv')
print('Number of missing data for column keyword: ', train_data['keyword'].isna().sum())
print('Number of missing data for column location: ', train_data['location'].isna().sum())
print('Number of missing data for column text: ', train_data['text'].isna().sum())
print('Number of missing data for column target: ', train_data['target'].isna().sum())
code
90148683/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_data = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
submission = pd.read_csv('/kaggle/input/nlp-getting-started/sample_submission.csv')
train_data = train_data.drop(['location'], axis=1)
test_data = test_data.drop(['location'], axis=1)
test_data.shape
code
90148683/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_data = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test_data = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
submission = pd.read_csv('/kaggle/input/nlp-getting-started/sample_submission.csv')
train_data.head()
code
122249638/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf

path = '/kaggle/input/fruit-images-for-object-detection/train_zip/train'
fileslst = os.listdir(path)
imgs = []
for fle in fileslst:
    if fle.endswith('.jpg'):
        imgs.append(fle)
img = cv2.imread('/kaggle/input/fruit-images-for-object-detection/train_zip/train/' + imgs[1])
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
imgedg = []
imgedg0 = cv2.Canny(img[:, :, 0], 120, 200)
imgedg1 = cv2.Canny(img[:, :, 1], 120, 200)
imgedg2 = cv2.Canny(img[:, :, 2], 120, 200)
# build the training set: 28x28 RGB images with one-hot fruit labels
imgar = []
y = pd.DataFrame(columns=['apl', 'ban', 'orn', 'mix'])
for i in range(len(imgs)):
    img = cv2.imread('/kaggle/input/fruit-images-for-object-detection/train_zip/train/' + imgs[i])
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (28, 28))
    imgar.append(np.array(img))
    if imgs[i][0] == 'a':
        y.loc[i, 'apl'] = 1
    elif imgs[i][0] == 'b':
        y.loc[i, 'ban'] = 1
    elif imgs[i][0] == 'o':
        y.loc[i, 'orn'] = 1
    else:
        y.loc[i, 'mix'] = 1
imgarr = np.array([imgar])
imgarr = np.reshape(imgarr, (240, 1, 28, 28, 3))
y = y.replace(np.nan, 0)
y = np.array([y])
y = np.reshape(y, (240, 4))
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
imgedg = []
imgedg0 = cv2.Canny(img[:, :, 0], 120, 200)
imgedg1 = cv2.Canny(img[:, :, 1], 120, 200)
imgedg2 = cv2.Canny(img[:, :, 2], 120, 200)
modl = tf.keras.Sequential()
modl.add(tf.keras.layers.InputLayer(input_shape=(1, 28, 28, 3)))
modl.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu'))
modl.add(tf.keras.layers.Flatten())
modl.add(tf.keras.layers.Dense(units=64, activation='relu'))
modl.add(tf.keras.layers.Dense(32, activation='relu'))
modl.add(tf.keras.layers.Dense(4, activation='softmax'))
modl.summary()
optm = tf.keras.optimizers.Adam(learning_rate=0.001)
modl.compile(loss='categorical_crossentropy', optimizer=optm, metrics=['accuracy'])
modl.fit(imgarr, y, epochs=50)
print(np.shape(imgarr))
yprd = modl.predict(np.array([imgarr[0]]))
print(yprd)
img = cv2.imread('/kaggle/input/fruit-images-for-object-detection/test_zip/test/' + 'banana_80.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (28, 28))
xnw = np.array([[img]])  # shape (1, 1, 28, 28, 3) to match the model's (1, 28, 28, 3) input
yprd = modl.predict(xnw)
print(yprd)
code
122249638/cell_4
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import os
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

path = '/kaggle/input/fruit-images-for-object-detection/train_zip/train'
fileslst = os.listdir(path)
imgs = []
for fle in fileslst:
    if fle.endswith('.jpg'):
        imgs.append(fle)
img = cv2.imread('/kaggle/input/fruit-images-for-object-detection/train_zip/train/' + imgs[1])
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
imgedg = []
imgedg0 = cv2.Canny(img[:, :, 0], 120, 200)
imgedg1 = cv2.Canny(img[:, :, 1], 120, 200)
imgedg2 = cv2.Canny(img[:, :, 2], 120, 200)
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
122249638/cell_6
[ "text_plain_output_1.png" ]
import os
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

path = '/kaggle/input/fruit-images-for-object-detection/train_zip/train'
fileslst = os.listdir(path)
imgs = []
for fle in fileslst:
    if fle.endswith('.jpg'):
        imgs.append(fle)
img = cv2.imread('/kaggle/input/fruit-images-for-object-detection/train_zip/train/' + imgs[1])
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
imgedg = []
imgedg0 = cv2.Canny(img[:, :, 0], 120, 200)
imgedg1 = cv2.Canny(img[:, :, 1], 120, 200)
imgedg2 = cv2.Canny(img[:, :, 2], 120, 200)
imgar = []
y = pd.DataFrame(columns=['apl', 'ban', 'orn', 'mix'])
print(fileslst[:20])
for i in range(len(imgs)):
    img = cv2.imread('/kaggle/input/fruit-images-for-object-detection/train_zip/train/' + imgs[i])
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (28, 28))
    imgar.append(np.array(img))
    if imgs[i][0] == 'a':
        y.loc[i, 'apl'] = 1
    elif imgs[i][0] == 'b':
        y.loc[i, 'ban'] = 1
    elif imgs[i][0] == 'o':
        y.loc[i, 'orn'] = 1
    else:
        y.loc[i, 'mix'] = 1
imgarr = np.array([imgar])
imgarr = np.reshape(imgarr, (240, 1, 28, 28, 3))
print(np.shape(imgarr))
y = y.replace(np.nan, 0)
y = np.array([y])
y = np.reshape(y, (240, 4))
print(y[0])
plt.imshow(img)
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
plt.subplot(231)
plt.imshow(img[:, :, 0])
plt.subplot(232)
plt.imshow(img[:, :, 1])
plt.subplot(233)
plt.imshow(img[:, :, 2])
imgedg = []
imgedg0 = cv2.Canny(img[:, :, 0], 120, 200)
imgedg1 = cv2.Canny(img[:, :, 1], 120, 200)
imgedg2 = cv2.Canny(img[:, :, 2], 120, 200)
plt.subplot(234)
plt.imshow(imgedg0)
plt.subplot(235)
plt.imshow(imgedg1)
plt.subplot(236)
plt.imshow(imgedg2)
code
122249638/cell_2
[ "text_plain_output_1.png" ]
import os
import cv2
import matplotlib.pyplot as plt

path = '/kaggle/input/fruit-images-for-object-detection/train_zip/train'
fileslst = os.listdir(path)
imgs = []
for fle in fileslst:
    if fle.endswith('.jpg'):
        imgs.append(fle)
img = cv2.imread('/kaggle/input/fruit-images-for-object-detection/train_zip/train/' + imgs[1])
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img)
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
plt.subplot(231)
plt.imshow(img[:, :, 0])
plt.subplot(232)
plt.imshow(img[:, :, 1])
plt.subplot(233)
plt.imshow(img[:, :, 2])
imgedg = []
imgedg0 = cv2.Canny(img[:, :, 0], 120, 200)
imgedg1 = cv2.Canny(img[:, :, 1], 120, 200)
imgedg2 = cv2.Canny(img[:, :, 2], 120, 200)
plt.subplot(234)
plt.imshow(imgedg0)
plt.subplot(235)
plt.imshow(imgedg1)
plt.subplot(236)
plt.imshow(imgedg2)
code
122249638/cell_7
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import tensorflow as tf

modl = tf.keras.Sequential()
modl.add(tf.keras.layers.InputLayer(input_shape=(1, 28, 28, 3)))
modl.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu'))
modl.add(tf.keras.layers.Flatten())
modl.add(tf.keras.layers.Dense(units=64, activation='relu'))
modl.add(tf.keras.layers.Dense(32, activation='relu'))
modl.add(tf.keras.layers.Dense(4, activation='softmax'))
modl.summary()
optm = tf.keras.optimizers.Adam(learning_rate=0.001)
modl.compile(loss='categorical_crossentropy', optimizer=optm, metrics=['accuracy'])
code
122249638/cell_8
[ "text_plain_output_1.png" ]
import os
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf

path = '/kaggle/input/fruit-images-for-object-detection/train_zip/train'
fileslst = os.listdir(path)
imgs = []
for fle in fileslst:
    if fle.endswith('.jpg'):
        imgs.append(fle)
img = cv2.imread('/kaggle/input/fruit-images-for-object-detection/train_zip/train/' + imgs[1])
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
imgedg = []
imgedg0 = cv2.Canny(img[:, :, 0], 120, 200)
imgedg1 = cv2.Canny(img[:, :, 1], 120, 200)
imgedg2 = cv2.Canny(img[:, :, 2], 120, 200)
imgar = []
y = pd.DataFrame(columns=['apl', 'ban', 'orn', 'mix'])
for i in range(len(imgs)):
    img = cv2.imread('/kaggle/input/fruit-images-for-object-detection/train_zip/train/' + imgs[i])
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (28, 28))
    imgar.append(np.array(img))
    if imgs[i][0] == 'a':
        y.loc[i, 'apl'] = 1
    elif imgs[i][0] == 'b':
        y.loc[i, 'ban'] = 1
    elif imgs[i][0] == 'o':
        y.loc[i, 'orn'] = 1
    else:
        y.loc[i, 'mix'] = 1
imgarr = np.array([imgar])
imgarr = np.reshape(imgarr, (240, 1, 28, 28, 3))
y = y.replace(np.nan, 0)
y = np.array([y])
y = np.reshape(y, (240, 4))
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
imgedg = []
imgedg0 = cv2.Canny(img[:, :, 0], 120, 200)
imgedg1 = cv2.Canny(img[:, :, 1], 120, 200)
imgedg2 = cv2.Canny(img[:, :, 2], 120, 200)
modl = tf.keras.Sequential()
modl.add(tf.keras.layers.InputLayer(input_shape=(1, 28, 28, 3)))
modl.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu'))
modl.add(tf.keras.layers.Flatten())
modl.add(tf.keras.layers.Dense(units=64, activation='relu'))
modl.add(tf.keras.layers.Dense(32, activation='relu'))
modl.add(tf.keras.layers.Dense(4, activation='softmax'))
modl.summary()
optm = tf.keras.optimizers.Adam(learning_rate=0.001)
modl.compile(loss='categorical_crossentropy', optimizer=optm, metrics=['accuracy'])
modl.fit(imgarr, y, epochs=50)
code
122249638/cell_10
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf

path = '/kaggle/input/fruit-images-for-object-detection/train_zip/train'
fileslst = os.listdir(path)
imgs = []
for fle in fileslst:
    if fle.endswith('.jpg'):
        imgs.append(fle)
img = cv2.imread('/kaggle/input/fruit-images-for-object-detection/train_zip/train/' + imgs[1])
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
imgedg = []
imgedg0 = cv2.Canny(img[:, :, 0], 120, 200)
imgedg1 = cv2.Canny(img[:, :, 1], 120, 200)
imgedg2 = cv2.Canny(img[:, :, 2], 120, 200)
imgar = []
y = pd.DataFrame(columns=['apl', 'ban', 'orn', 'mix'])
for i in range(len(imgs)):
    img = cv2.imread('/kaggle/input/fruit-images-for-object-detection/train_zip/train/' + imgs[i])
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (28, 28))
    imgar.append(np.array(img))
    if imgs[i][0] == 'a':
        y.loc[i, 'apl'] = 1
    elif imgs[i][0] == 'b':
        y.loc[i, 'ban'] = 1
    elif imgs[i][0] == 'o':
        y.loc[i, 'orn'] = 1
    else:
        y.loc[i, 'mix'] = 1
imgarr = np.array([imgar])
imgarr = np.reshape(imgarr, (240, 1, 28, 28, 3))
y = y.replace(np.nan, 0)
y = np.array([y])
y = np.reshape(y, (240, 4))
img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
imgedg = []
imgedg0 = cv2.Canny(img[:, :, 0], 120, 200)
imgedg1 = cv2.Canny(img[:, :, 1], 120, 200)
imgedg2 = cv2.Canny(img[:, :, 2], 120, 200)
modl = tf.keras.Sequential()
modl.add(tf.keras.layers.InputLayer(input_shape=(1, 28, 28, 3)))
modl.add(tf.keras.layers.Conv2D(32, (3, 3), activation='relu'))
modl.add(tf.keras.layers.Flatten())
modl.add(tf.keras.layers.Dense(units=64, activation='relu'))
modl.add(tf.keras.layers.Dense(32, activation='relu'))
modl.add(tf.keras.layers.Dense(4, activation='softmax'))
modl.summary()
optm = tf.keras.optimizers.Adam(learning_rate=0.001)
modl.compile(loss='categorical_crossentropy', optimizer=optm, metrics=['accuracy'])
modl.fit(imgarr, y, epochs=50)
yprd = modl.predict(np.array([imgarr[0]]))
img = cv2.imread('/kaggle/input/fruit-images-for-object-detection/test_zip/test/' + 'banana_80.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (28, 28))
xnw = np.array([[img]])  # shape (1, 1, 28, 28, 3) to match the model's (1, 28, 28, 3) input
yprd = modl.predict(xnw)
print(yprd)
code
105184129/cell_13
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df1 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df1.isna().sum()
# take x labels from each series' own index so labels stay aligned with the bar heights
counts = df1['track_id'].value_counts()
ax = sns.barplot(x=list(counts.index), y=list(counts))
for i in ax.containers:
    ax.bar_label(i)
per_track = df1.groupby(['track_id'])['program_number'].nunique()
ax = sns.barplot(x=list(per_track.index), y=list(per_track))
for i in ax.containers:
    ax.bar_label(i)
per_track = df1.groupby(['track_id'])['race_number'].nunique()
ax = sns.barplot(x=list(per_track.index), y=list(per_track))
for i in ax.containers:
    ax.bar_label(i)
code
105184129/cell_9
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df1 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df1.isna().sum()
# take x labels from value_counts' own index so labels stay aligned with the counts
counts = df1['track_id'].value_counts()
ax = sns.barplot(x=list(counts.index), y=list(counts))
for i in ax.containers:
    ax.bar_label(i)
code
105184129/cell_4
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df1 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df1.isna().sum()
code
105184129/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df1 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df1.isna().sum()
# take x labels from each series' own index so labels stay aligned with the bar heights
counts = df1['track_id'].value_counts()
ax = sns.barplot(x=list(counts.index), y=list(counts))
for i in ax.containers:
    ax.bar_label(i)
per_track = df1.groupby(['track_id'])['program_number'].nunique()
ax = sns.barplot(x=list(per_track.index), y=list(per_track))
for i in ax.containers:
    ax.bar_label(i)
code
105184129/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
import seaborn as sns
code
105184129/cell_7
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df1 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df1.isna().sum()
df1['track_id'].unique()
code
105184129/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df1 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df1.isna().sum()
# take x labels from each series' own index so labels stay aligned with the bar heights
counts = df1['track_id'].value_counts()
ax = sns.barplot(x=list(counts.index), y=list(counts))
for i in ax.containers:
    ax.bar_label(i)
per_track = df1.groupby(['track_id'])['program_number'].nunique()
ax = sns.barplot(x=list(per_track.index), y=list(per_track))
for i in ax.containers:
    ax.bar_label(i)
per_track = df1.groupby(['track_id'])['race_number'].nunique()
ax = sns.barplot(x=list(per_track.index), y=list(per_track))
for i in ax.containers:
    ax.bar_label(i)
per_track = df1.groupby(['track_id'])['race_date'].nunique()
ax = sns.barplot(x=list(per_track.index), y=list(per_track))
for i in ax.containers:
    ax.bar_label(i)
code
105184129/cell_17
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df1 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df1.isna().sum()
# take x labels from each series' own index so labels stay aligned with the bar heights
counts = df1['track_id'].value_counts()
ax = sns.barplot(x=list(counts.index), y=list(counts))
for i in ax.containers:
    ax.bar_label(i)
per_track = df1.groupby(['track_id'])['program_number'].nunique()
ax = sns.barplot(x=list(per_track.index), y=list(per_track))
for i in ax.containers:
    ax.bar_label(i)
per_track = df1.groupby(['track_id'])['race_number'].nunique()
ax = sns.barplot(x=list(per_track.index), y=list(per_track))
for i in ax.containers:
    ax.bar_label(i)
per_track = df1.groupby(['track_id'])['race_date'].nunique()
ax = sns.barplot(x=list(per_track.index), y=list(per_track))
for i in ax.containers:
    ax.bar_label(i)
per_track = df1.groupby(['track_id'])['jockey'].nunique()
ax = sns.barplot(x=list(per_track.index), y=list(per_track))
for i in ax.containers:
    ax.bar_label(i)
code
105184129/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df1 = pd.read_csv('/kaggle/input/big-data-derby-2022/nyra_start_table.csv')
df1.isna().sum()
df1.head()
code
1007792/cell_4
[ "image_output_1.png" ]
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
from pylab import rcParams

np.random.seed(0)
n_samples = 500
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
datasets = [[[0, 0]], [[0, 0]], [[0, 0]], [[0, 0]]]
mu = 0.3
centers = [[(0, 0.45), (0.9, 0.5), (0.45, 0.9), (0.45, 0)],
           [(0, 0.2), (0.8, 0), (0.2, 1), (1, 0.8)],
           [(0, 0), (0.9, 0.9), (0, 0.9), (0.9, 0)],
           [(0, 0), (0.9, 0), (0.45, 0.779), (0.45, 0.259)]]
for i, c in enumerate(centers):
    for x, y in c:
        num1 = np.random.normal(x, mu, n_samples)
        num2 = np.random.normal(y, mu, n_samples)
        nums = np.vstack((num1, num2)).T
        datasets[i] = np.vstack((datasets[i], nums))
datasets = list(zip(datasets, ['a', 'b', 'c', 'd']))
plot_num = 1
for (X, lbl), c in zip(datasets, centers):
    center_colors = colors[:len(centers)]
    plt.xlim(-1, 2)
    plt.ylim(-1, 2)
    plt.xticks(())
    plt.yticks(())
    plt.text(0.01, 0.01, lbl, transform=plt.gca().transAxes, size=15, horizontalalignment='left')
    plot_num += 1
plot_num = 1
for i_dataset, dataset in enumerate(datasets):
    X, lbl = dataset
    X = StandardScaler().fit_transform(X)
    two_means = cluster.MiniBatchKMeans(n_clusters=4)
    algorithm = two_means
    name = 'MiniBatchKMeans'
    t0 = time.time()
    algorithm.fit(X)
    t1 = time.time()
    if hasattr(algorithm, 'labels_'):
        y_pred = algorithm.labels_.astype(int)
    else:
        y_pred = algorithm.predict(X)
    if hasattr(algorithm, 'cluster_centers_'):
        centers = algorithm.cluster_centers_
        center_colors = colors[:len(centers)]
    plt.xlim(-2, 2)
    plt.ylim(-2, 2)
    plt.xticks(())
    plt.yticks(())
    plt.text(0.99, 0.01, ('%.2fs' % (t1 - t0)).lstrip('0'), transform=plt.gca().transAxes, size=15, horizontalalignment='right')
    plt.text(0.01, 0.01, lbl, transform=plt.gca().transAxes, size=15, horizontalalignment='left')
    plot_num += 1
plot_num = 1
for i_dataset, dataset in enumerate(datasets):
    X, lbl = dataset
    X = StandardScaler().fit_transform(X)
    bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
    ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
    algorithm = ms
    name = 'MeanShift'
    t0 = time.time()
    algorithm.fit(X)
    t1 = time.time()
    if hasattr(algorithm, 'labels_'):
        y_pred = algorithm.labels_.astype(int)
    else:
        y_pred = algorithm.predict(X)
    plt.subplot(2, 2, plot_num)
    if i_dataset == 0:
        plt.title(name, size=18)
    plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
    if hasattr(algorithm, 'cluster_centers_'):
        centers = algorithm.cluster_centers_
        center_colors = colors[:len(centers)]
        plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
    plt.xlim(-2, 2)
    plt.ylim(-2, 2)
    plt.xticks(())
    plt.yticks(())
    plt.text(0.99, 0.01, ('%.2fs' % (t1 - t0)).lstrip('0'), transform=plt.gca().transAxes, size=15, horizontalalignment='right')
    plt.text(0.01, 0.01, lbl, transform=plt.gca().transAxes, size=15, horizontalalignment='left')
    plot_num += 1
plt.show()
code
1007792/cell_2
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
from pylab import rcParams

np.random.seed(0)
n_samples = 500
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
datasets = [[[0, 0]], [[0, 0]], [[0, 0]], [[0, 0]]]
mu = 0.3
centers = [[(0, 0.45), (0.9, 0.5), (0.45, 0.9), (0.45, 0)],
           [(0, 0.2), (0.8, 0), (0.2, 1), (1, 0.8)],
           [(0, 0), (0.9, 0.9), (0, 0.9), (0.9, 0)],
           [(0, 0), (0.9, 0), (0.45, 0.779), (0.45, 0.259)]]
for i, c in enumerate(centers):
    for x, y in c:
        num1 = np.random.normal(x, mu, n_samples)
        num2 = np.random.normal(y, mu, n_samples)
        nums = np.vstack((num1, num2)).T
        datasets[i] = np.vstack((datasets[i], nums))
datasets = list(zip(datasets, ['a', 'b', 'c', 'd']))
plot_num = 1
for (X, lbl), c in zip(datasets, centers):
    plt.subplot(2, 2, plot_num)
    plt.scatter(X[:, 0], X[:, 1], s=10)
    center_colors = colors[:len(centers)]
    plt.scatter(list(zip(*c))[0], list(zip(*c))[1], s=100, c=center_colors)
    plt.xlim(-1, 2)
    plt.ylim(-1, 2)
    plt.xticks(())
    plt.yticks(())
    plt.text(0.01, 0.01, lbl, transform=plt.gca().transAxes, size=15, horizontalalignment='left')
    plot_num += 1
plt.show()
code
1007792/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
from pylab import rcParams

np.random.seed(0)
n_samples = 500
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
# assumption: the clustering algorithms compared in the cells below
clustering_names = ['MiniBatchKMeans', 'MeanShift']
plt.figure(figsize=((len(clustering_names) * 2 + 3) * 2, 9.5 * 2))
plt.subplots_adjust(left=0.02, right=0.98, bottom=0.001, top=0.96, wspace=0.05, hspace=0.01)
code
1007792/cell_3
[ "image_output_1.png" ]
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
from pylab import rcParams

np.random.seed(0)
n_samples = 500
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
datasets = [[[0, 0]], [[0, 0]], [[0, 0]], [[0, 0]]]
mu = 0.3
centers = [[(0, 0.45), (0.9, 0.5), (0.45, 0.9), (0.45, 0)],
           [(0, 0.2), (0.8, 0), (0.2, 1), (1, 0.8)],
           [(0, 0), (0.9, 0.9), (0, 0.9), (0.9, 0)],
           [(0, 0), (0.9, 0), (0.45, 0.779), (0.45, 0.259)]]
for i, c in enumerate(centers):
    for x, y in c:
        num1 = np.random.normal(x, mu, n_samples)
        num2 = np.random.normal(y, mu, n_samples)
        nums = np.vstack((num1, num2)).T
        datasets[i] = np.vstack((datasets[i], nums))
datasets = list(zip(datasets, ['a', 'b', 'c', 'd']))
plot_num = 1
for (X, lbl), c in zip(datasets, centers):
    center_colors = colors[:len(centers)]
    plt.xlim(-1, 2)
    plt.ylim(-1, 2)
    plt.xticks(())
    plt.yticks(())
    plt.text(0.01, 0.01, lbl, transform=plt.gca().transAxes, size=15, horizontalalignment='left')
    plot_num += 1
plot_num = 1
for i_dataset, dataset in enumerate(datasets):
    X, lbl = dataset
    X = StandardScaler().fit_transform(X)
    two_means = cluster.MiniBatchKMeans(n_clusters=4)
    algorithm = two_means
    name = 'MiniBatchKMeans'
    t0 = time.time()
    algorithm.fit(X)
    t1 = time.time()
    if hasattr(algorithm, 'labels_'):
        y_pred = algorithm.labels_.astype(int)
    else:
        y_pred = algorithm.predict(X)
    plt.subplot(2, 2, plot_num)
    if i_dataset == 0:
        plt.title(name, size=18)
    plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
    if hasattr(algorithm, 'cluster_centers_'):
        centers = algorithm.cluster_centers_
        center_colors = colors[:len(centers)]
        plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
    plt.xlim(-2, 2)
    plt.ylim(-2, 2)
    plt.xticks(())
    plt.yticks(())
    plt.text(0.99, 0.01, ('%.2fs' % (t1 - t0)).lstrip('0'), transform=plt.gca().transAxes, size=15, horizontalalignment='right')
    plt.text(0.01, 0.01, lbl, transform=plt.gca().transAxes, size=15, horizontalalignment='left')
    plot_num += 1
plt.show()
code
129018650/cell_2
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import time
import concurrent.futures

import chess
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models, optimizers, losses
from tensorflow.keras.layers import LeakyReLU
from IPython.display import clear_output

def create_chess_model():
    inputs = layers.Input(shape=(12, 8, 8))
    x = layers.Conv2D(32, (3, 3), padding='same', activation='relu')(inputs)
    x = layers.Conv2D(64, (3, 3), padding='same', activation='relu')(x)
    x = layers.Conv2D(8, (3, 3), padding='same', activation='relu')(x)
    x = layers.Attention(use_scale=True)([x, inputs])
    x = layers.Flatten()(x)
    x = layers.Dense(256, activation='relu')(x)
    x = layers.Dense(256, activation='relu')(x)
    outputs = layers.Dense(1)(x)
    model = models.Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=optimizers.Adam(learning_rate=0.001), loss=losses.MeanSquaredError())
    return model

def board_to_tensor(board):
    # 12 planes: one per piece type and colour
    piece_map = board.piece_map()
    tensor = np.zeros((12, 8, 8), dtype=np.float32)
    for pos, piece in piece_map.items():
        x, y = (pos % 8, pos // 8)
        piece_index = piece.piece_type - 1 + (6 if piece.color == chess.BLACK else 0)
        tensor[piece_index, x, y] = 1
    return tensor[np.newaxis, :, :, :]

def evaluate(board, model):
    tensor = board_to_tensor(board)
    # the black model is trained on negated targets, so flip its sign
    if model is chess_model_black:
        evaluations = -model(tensor).numpy().item()
    else:
        evaluations = model(tensor).numpy().item()
    return evaluations

def select_move(board, model, temperature=0.2):
    legal_moves = list(board.legal_moves)
    scores = []
    for move in legal_moves:
        board.push(move)
        scores.append(evaluate(board, model))
        board.pop()
    # softmax over move scores; temperature controls exploration
    probs = np.exp(np.array(scores) / temperature)
    probs /= probs.sum()
    move_index = np.random.choice(len(legal_moves), p=probs)
    return legal_moves[move_index]

def play_single_game(model_white, model_black, game_id):
    board = chess.Board()
    game_moves = []
    game_values = []
    while not board.is_game_over():
        model = model_white if board.turn == chess.WHITE else model_black
        move = select_move(board, model)
        value = evaluate(board, model)
        game_moves.append(move)
        game_values.append(value)
        board.push(move)
        yield (game_id, board)
    result = board.result()
    target_values = np.zeros(len(game_values))
    if result == '1-0':
        target_values[-1] = 100
    elif result == '0-1':
        target_values[-1] = -100
    elif result == '1/2-1/2':
        target_values[-1] = 0
    # propagate the final outcome back through the game, alternating sign per ply
    for i in range(len(game_values) - 2, -1, -1):
        target_values[i] = -target_values[i + 1]
    train_data = []
    temp_board = chess.Board()
    for move in game_moves:
        temp_board.push(move)
        train_data.append(board_to_tensor(temp_board))
    train_data = np.vstack(train_data)
    yield (game_id, train_data, target_values)

def train_self_play(model_white, model_black, num_games=2, num_epochs=10000, num_workers=2):
    scores = {'white_wins': 0, 'black_wins': 0, 'draws': 0}
    for epoch in range(num_epochs):
        executor = concurrent.futures.ThreadPoolExecutor(max_workers=num_workers)
        game_progress = [play_single_game(model_white, model_black, game_id) for game_id in range(num_games)]
        finished_games = 0
        while finished_games < num_games:
            clear_output(wait=True)
            total_games = scores['white_wins'] + scores['black_wins'] + scores['draws']
            white_win_rate = 0
            black_win_rate = 0
            if scores['white_wins'] > 0:
                white_win_rate = scores['white_wins'] / total_games * 100
            if scores['black_wins'] > 0:
                black_win_rate = scores['black_wins'] / total_games * 100
            print(f'Epoch {epoch + 1}/{num_epochs}')
            print(f'Finished games: {finished_games}/{num_games}')
            print('Scores after epoch {}: {}'.format(epoch + 1, scores))
            print(f'White win rate: {white_win_rate:.2f}%')
            print(f'Black win rate: {black_win_rate:.2f}%')
            print()
            displayed_game = False
            for game_id, game_gen in enumerate(game_progress):
                if game_gen is None:
                    continue
                try:
                    game_state = next(game_gen)
                    if len(game_state) == 3:
                        game_id, train_data, target_values = game_state
                        # even plies are white's moves, odd plies are black's
                        model_white.fit(train_data[::2], target_values[::2], batch_size=len(target_values[::2]), verbose=0)
                        model_black.fit(train_data[1::2], target_values[1::2], batch_size=len(target_values[1::2]), verbose=0)
                        last_target_value = target_values[-1]
                        if last_target_value == 100:
                            scores['white_wins'] += 1
                        elif last_target_value == -100:
                            scores['black_wins'] += 1
                        else:
                            scores['draws'] += 1
                        game_progress[game_id] = None
                        finished_games += 1
                    elif game_id < 3 and (not displayed_game):
                        game_id, board = game_state
                        print(f'Game {game_id}:')
                        print(board)
                        print()
                        displayed_game = True
                except StopIteration:
                    game_progress[game_id] = None
        executor.shutdown(wait=True)

if __name__ == '__main__':
    chess_model_white = create_chess_model()
    chess_model_black = create_chess_model()
    train_self_play(chess_model_white, chess_model_black)
code
129018650/cell_1
[ "text_plain_output_1.png" ]
!pip install chess
code
73067804/cell_6
[ "text_plain_output_5.png", "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_9.png", "application_vnd.jupyter.stderr_output_4.png", "application_vnd.jupyter.stderr_output_6.png", "application_vnd.jupyter.stderr_output_8.png", "application_vnd.jupyter.stderr_output_10.png", "text_plain_output_3.png", "text_plain_output_7.png", "text_plain_output_1.png", "text_plain_output_11.png" ]
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import OrdinalEncoder
from xgboost import XGBRegressor
import pandas as pd

train = pd.read_csv('../input/train10fold/train-folds (1).csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
submission_data = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
features = [col for col in train.columns if col not in ('id', 'target', 'kfold')]
object_cols = [col for col in features if 'cat' in col]
ordinal_encoder = OrdinalEncoder()
xtest = test[features].copy()
final_preds = []
for fold in range(5):
    xtrain = train[train.kfold != fold].reset_index(drop=True)
    xvalid = train[train.kfold == fold].reset_index(drop=True)
    ytrain = xtrain.target
    yvalid = xvalid.target
    xtrain = xtrain[features].copy()
    xvalid = xvalid[features].copy()
    # fit the encoder on the training folds only, then apply the same mapping to valid and test
    xtrain[object_cols] = ordinal_encoder.fit_transform(xtrain[object_cols])
    xvalid[object_cols] = ordinal_encoder.transform(xvalid[object_cols])
    xtest_fold = xtest.copy()
    xtest_fold[object_cols] = ordinal_encoder.transform(xtest_fold[object_cols])
    best_params = {'learning_rate': 0.07853392035787837, 'colsample_bytree': 0.170759104940733,
                   'max_depth': 3, 'reg_lambda': 1.7549293092194938e-05, 'reg_alpha': 14.68267919457715,
                   'subsample': 0.8031450486786944, 'alpha': 30}
    model = XGBRegressor(objective='reg:squarederror', n_estimators=5000, random_state=0, **best_params)
    model.fit(xtrain, ytrain, early_stopping_rounds=300, eval_set=[(xvalid, yvalid)], verbose=1000)
    preds_valid = model.predict(xvalid)
    test_preds = model.predict(xtest_fold)
    final_preds.append(test_preds)
    print(fold, mean_squared_error(yvalid, preds_valid, squared=False))
code
73067804/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/train10fold/train-folds (1).csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
submission_data = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.head()
code
73067804/cell_5
[ "text_plain_output_1.png" ]
"""def run(trial): #optimize in one fold fold = 0 xtrain = train[train.kfold != fold].reset_index(drop=True) xvalid = train[train.kfold == fold].reset_index(drop=True) ytrain = xtrain.target yvalid = xvalid.target xtrain = xtrain[features] xvalid = xvalid[features] xtrain[object_cols]= ordinal_encoder.fit_transform(xtrain[object_cols]) xvalid[object_cols] = ordinal_encoder.fit_transform(xvalid[object_cols]) learning_rate = trial.suggest_float("learning_rate", 1e-2, 0.8, log=True) colsample_bytree = trial.suggest_float('colsample_bytree', 0.1, 0.6) max_depth = trial.suggest_int('max_depth', 1, 9) subsample = trial.suggest_float('subsample', 0.1, 0.6) reg_lambda = trial.suggest_float('reg_lambda', 1e-5, 100.0) reg_alpha = trial.suggest_float('reg_alpha', 1e-5, 100.0) alpha = trial.suggest_int('alpha', 0, 100) model = XGBRegressor(random_state = 0, alpha=alpha, n_estimators=200, tree_method='gpu_hist', gpu_id=0, predictor='gpu_predictor', learning_rate = learning_rate, colsample_bytree = colsample_bytree, max_depth = max_depth, subsample = subsample, reg_lambda = reg_lambda, reg_alpha = reg_alpha) model.fit(xtrain, ytrain) preds_valid = model.predict(xvalid) rmse = mean_squared_error(yvalid, preds_valid, squared=False) return rmse study = optuna.create_study(direction="minimize") study.optimize(run, n_trials=5000) #study.best_params final_preds = [] for fold in range(5): xtrain = train[train.kfold != fold].reset_index(drop=True) xvalid = train[train.kfold == fold].reset_index(drop=True) ytrain = xtrain.target yvalid = xvalid.target xtrain = xtrain[features] xvalid = xvalid[features] xtrain[object_cols]= ordinal_encoder.fit_transform(xtrain[object_cols]) xvalid[object_cols] = ordinal_encoder.fit_transform(xvalid[object_cols]) best_params = {'learning_rate': 0.34090767065203226, 'colsample_bytree': 0.12289350813119115, 'max_depth': 7, 'reg_lambda': 5.830490094721956, 'reg_alpha': 49.68136144185203, 'alpha': 30 } model = XGBRegressor(objective='reg:squarederror', n_estimators=200, random_state=0, **best_params #tree_method='gpu_hist', #gpu_id=0, #predictor='gpu_predictor' ) model.fit(xtrain, ytrain) preds_valid = model.predict(xvalid) test_preds = model.predict(xtest) final_preds.append(test_preds) print(fold, mean_squared_error(yvalid, preds_valid, squared=False)) """
code
17099787/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor, GradientBoostingRegressor, RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from xgboost import XGBRegressor

def Model_Comparision_Train_Test(AllModels, x_train, y_train, x_test, y_test):
    # fit each model and report test metrics with train metrics in parentheses
    return_df = pd.DataFrame(columns=['Model', 'MSE', 'RMSE', 'MAE'])
    for myModel in AllModels:
        myModel.fit(x_train, y_train)
        y_pred_train = myModel.predict(x_train)
        mse_train, rmse_train, mae_train = extract_metrics_from_predicted(y_train, y_pred_train)
        y_pred_test = myModel.predict(x_test)
        mse_test, rmse_test, mae_test = extract_metrics_from_predicted(y_test, y_pred_test)
        summary = pd.DataFrame([[type(myModel).__name__,
                                 ''.join([str(round(mse_test, 3)), '(', str(round(mse_train, 3)), ')']),
                                 ''.join([str(round(rmse_test, 3)), '(', str(round(rmse_train, 3)), ')']),
                                 ''.join([str(round(mae_test, 3)), '(', str(round(mae_train, 3)), ')'])]],
                               columns=['Model', 'MSE', 'RMSE', 'MAE'])
        return_df = pd.concat([return_df, summary], axis=0)
    return_df.set_index('Model', inplace=True)
    return return_df

def extract_metrics_from_predicted(y_true, y_pred):
    mse = mean_squared_error(y_true, y_pred)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(y_true, y_pred)
    return (mse, rmse, mae)

lasso = Lasso()
ridge = Ridge()
KNNR = KNeighborsRegressor()
RFR = RandomForestRegressor(bootstrap=True, max_depth=80, max_features=3, min_samples_leaf=3,
                            min_samples_split=8, n_estimators=500)
XgbR = XGBRegressor(colsample_bytree=0.9, learning_rate=0.4, n_estimators=500, reg_alpha=0.4)
Model_Comparision_Train_Test([KNNR, RFR, XgbR, lasso, ridge], X_train, np.ravel(y_train), X_test, np.ravel(y_test))
code
17099787/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import os

print(os.listdir('../input'))
code
17099787/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor, GradientBoostingRegressor, RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from xgboost import XGBRegressor

def Model_Comparision_Train_Test(AllModels, x_train, y_train, x_test, y_test):
    # fit each model and report test metrics with train metrics in parentheses
    return_df = pd.DataFrame(columns=['Model', 'MSE', 'RMSE', 'MAE'])
    for myModel in AllModels:
        myModel.fit(x_train, y_train)
        y_pred_train = myModel.predict(x_train)
        mse_train, rmse_train, mae_train = extract_metrics_from_predicted(y_train, y_pred_train)
        y_pred_test = myModel.predict(x_test)
        mse_test, rmse_test, mae_test = extract_metrics_from_predicted(y_test, y_pred_test)
        summary = pd.DataFrame([[type(myModel).__name__,
                                 ''.join([str(round(mse_test, 3)), '(', str(round(mse_train, 3)), ')']),
                                 ''.join([str(round(rmse_test, 3)), '(', str(round(rmse_train, 3)), ')']),
                                 ''.join([str(round(mae_test, 3)), '(', str(round(mae_train, 3)), ')'])]],
                               columns=['Model', 'MSE', 'RMSE', 'MAE'])
        return_df = pd.concat([return_df, summary], axis=0)
    return_df.set_index('Model', inplace=True)
    return return_df

def extract_metrics_from_predicted(y_true, y_pred):
    mse = mean_squared_error(y_true, y_pred)
    rmse = np.sqrt(mse)
    mae = mean_absolute_error(y_true, y_pred)
    return (mse, rmse, mae)

lasso = Lasso()
ridge = Ridge()
KNNR = KNeighborsRegressor()
RFR = RandomForestRegressor(bootstrap=True, max_depth=80, max_features=3, min_samples_leaf=3,
                            min_samples_split=8, n_estimators=500)
XgbR = XGBRegressor(colsample_bytree=0.9, learning_rate=0.4, n_estimators=500, reg_alpha=0.4)
Model_Comparision_Train_Test([KNNR, RFR, XgbR, lasso, ridge], X_train, np.ravel(y_train), X_test, np.ravel(y_test))

def mape(y_true, y_pred):
    y_true, y_pred = (np.array(y_true), np.array(y_pred))
    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100

# assumption: score the fitted XGBoost model's test predictions (preds_test was referenced but never defined)
preds_test = XgbR.predict(X_test)
print('mape:', mape(y_test, preds_test))
code