path: string (lengths 13 to 17)
screenshot_names: sequence (lengths 1 to 873)
code: string (lengths 0 to 40.4k)
cell_type: string (1 distinct value)
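Each record below pairs a notebook cell path with the names of its rendered output screenshots, the cell's source code, and its cell type. A minimal sketch of iterating such records with the Hugging Face datasets library, assuming the dump is published as a dataset; the repository id 'user/kaggle-notebook-cells' is a hypothetical placeholder, not taken from this dump:

from datasets import load_dataset

# Hypothetical dataset id; substitute the actual repository name.
ds = load_dataset('user/kaggle-notebook-cells', split='train')
for record in ds.select(range(3)):
    print(record['path'])              # e.g. '18108547/cell_29'
    print(record['screenshot_names'])  # rendered outputs for the cell
    print(record['cell_type'])         # always 'code' in this dump
    print(record['code'][:80])         # first characters of the cell source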
18108547/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/data.csv')
list(df.columns)

def clean_d(string):
    last_char = string[-1]
    if last_char == '0':
        return 0
    string = string[1:-1]
    num = float(string)
    if last_char == 'K':
        num = num * 1000
    elif last_char == 'M':
        num = num * 1000000
    return num

df['Wage_Num'] = df.apply(lambda row: clean_d(row['Wage']), axis=1)
df['Value_Num'] = df.apply(lambda row: clean_d(row['Value']), axis=1)
df.shape
df.isna().sum()
df = df.dropna(axis=0, subset=['Preferred Foot'])
df.isna().sum()
sns.set()
df['Growth_Left'] = df['Potential'] - df['Overall']
sns.lineplot(x='Growth_Left', y='Value_Num', data=df)
code
18108547/cell_39
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from learntools.core import *

df = pd.read_csv('../input/data.csv')
list(df.columns)

def clean_d(string):
    last_char = string[-1]
    if last_char == '0':
        return 0
    string = string[1:-1]
    num = float(string)
    if last_char == 'K':
        num = num * 1000
    elif last_char == 'M':
        num = num * 1000000
    return num

df['Wage_Num'] = df.apply(lambda row: clean_d(row['Wage']), axis=1)
df['Value_Num'] = df.apply(lambda row: clean_d(row['Value']), axis=1)
df.shape
df.isna().sum()
df = df.dropna(axis=0, subset=['Preferred Foot'])
df.isna().sum()
y = df.Overall
features = ['Age', 'Value_Num', 'Wage_Num', 'Potential']
X = df[features]
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
ml_model = DecisionTreeRegressor(random_state=1)
ml_model.fit(train_X, train_y)
val_predictions = ml_model.predict(val_X)
val_mae = mean_absolute_error(val_predictions, val_y)
rf_model = RandomForestRegressor(random_state=1)
rf_model.fit(train_X, train_y)
rf_val_predictions = rf_model.predict(val_X)
rf_val_mae = mean_absolute_error(rf_val_predictions, val_y)
print('Validation MAE for Random Forest Model: {:,.0f}'.format(rf_val_mae))
print(train_X)
print(train_y)
print(val_X)
print(val_predictions)
print(val_y)
code
18108547/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/data.csv')
list(df.columns)

def clean_d(string):
    last_char = string[-1]
    if last_char == '0':
        return 0
    string = string[1:-1]
    num = float(string)
    if last_char == 'K':
        num = num * 1000
    elif last_char == 'M':
        num = num * 1000000
    return num

df['Wage_Num'] = df.apply(lambda row: clean_d(row['Wage']), axis=1)
df['Value_Num'] = df.apply(lambda row: clean_d(row['Value']), axis=1)
df.shape
df.isna().sum()
df = df.dropna(axis=0, subset=['Preferred Foot'])
df.isna().sum()
sns.set()
df['Growth_Left'] = df['Potential'] - df['Overall']
sns.lineplot(x='Growth_Left', y='Wage_Num', data=df)
code
18108547/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/data.csv')
list(df.columns)

def clean_d(string):
    last_char = string[-1]
    if last_char == '0':
        return 0
    string = string[1:-1]
    num = float(string)
    if last_char == 'K':
        num = num * 1000
    elif last_char == 'M':
        num = num * 1000000
    return num

df['Wage_Num'] = df.apply(lambda row: clean_d(row['Wage']), axis=1)
df.head()
code
18108547/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

print(os.listdir('../input'))
code
18108547/cell_7
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/data.csv')
list(df.columns)
print(type(df['Age'][0]))
print(type(df['Nationality'][0]))
print(type(df['Overall'][0]))
print(type(df['Potential'][0]))
print(type(df['Value'][0]))
print(type(df['Wage'][0]))
code
18108547/cell_18
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/data.csv')
list(df.columns)

def clean_d(string):
    last_char = string[-1]
    if last_char == '0':
        return 0
    string = string[1:-1]
    num = float(string)
    if last_char == 'K':
        num = num * 1000
    elif last_char == 'M':
        num = num * 1000000
    return num

df['Wage_Num'] = df.apply(lambda row: clean_d(row['Wage']), axis=1)
df['Value_Num'] = df.apply(lambda row: clean_d(row['Value']), axis=1)
df.shape
df.isna().sum()
df = df.dropna(axis=0, subset=['Preferred Foot'])
df.isna().sum()
code
18108547/cell_32
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/data.csv')
list(df.columns)

def clean_d(string):
    last_char = string[-1]
    if last_char == '0':
        return 0
    string = string[1:-1]
    num = float(string)
    if last_char == 'K':
        num = num * 1000
    elif last_char == 'M':
        num = num * 1000000
    return num

df['Wage_Num'] = df.apply(lambda row: clean_d(row['Wage']), axis=1)
df['Value_Num'] = df.apply(lambda row: clean_d(row['Value']), axis=1)
df.shape
df.isna().sum()
df = df.dropna(axis=0, subset=['Preferred Foot'])
df.isna().sum()
top_100 = df[:100]
top_100.shape
code
18108547/cell_28
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/data.csv')
list(df.columns)

def clean_d(string):
    last_char = string[-1]
    if last_char == '0':
        return 0
    string = string[1:-1]
    num = float(string)
    if last_char == 'K':
        num = num * 1000
    elif last_char == 'M':
        num = num * 1000000
    return num

df['Wage_Num'] = df.apply(lambda row: clean_d(row['Wage']), axis=1)
df['Value_Num'] = df.apply(lambda row: clean_d(row['Value']), axis=1)
df.shape
df.isna().sum()
df = df.dropna(axis=0, subset=['Preferred Foot'])
df.isna().sum()
sns.set()
df['Growth_Left'] = df['Potential'] - df['Overall']
sns.lineplot(x='Age', y='Value_Num', data=df)
code
18108547/cell_16
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/data.csv')
list(df.columns)

def clean_d(string):
    last_char = string[-1]
    if last_char == '0':
        return 0
    string = string[1:-1]
    num = float(string)
    if last_char == 'K':
        num = num * 1000
    elif last_char == 'M':
        num = num * 1000000
    return num

df['Wage_Num'] = df.apply(lambda row: clean_d(row['Wage']), axis=1)
df['Value_Num'] = df.apply(lambda row: clean_d(row['Value']), axis=1)
df.shape
df.isna().sum()
code
18108547/cell_38
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from learntools.core import *

df = pd.read_csv('../input/data.csv')
list(df.columns)

def clean_d(string):
    last_char = string[-1]
    if last_char == '0':
        return 0
    string = string[1:-1]
    num = float(string)
    if last_char == 'K':
        num = num * 1000
    elif last_char == 'M':
        num = num * 1000000
    return num

df['Wage_Num'] = df.apply(lambda row: clean_d(row['Wage']), axis=1)
df['Value_Num'] = df.apply(lambda row: clean_d(row['Value']), axis=1)
df.shape
df.isna().sum()
df = df.dropna(axis=0, subset=['Preferred Foot'])
df.isna().sum()
y = df.Overall
features = ['Age', 'Value_Num', 'Wage_Num', 'Potential']
X = df[features]
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
ml_model = DecisionTreeRegressor(random_state=1)
ml_model.fit(train_X, train_y)
val_predictions = ml_model.predict(val_X)
val_mae = mean_absolute_error(val_predictions, val_y)
print('Validation MAE when using a Decision Tree: {:,.0f}'.format(val_mae))
print(train_X)
print(train_y)
print(val_X)
print(val_predictions)
print(val_y)
code
18108547/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/data.csv')
df.head()
code
18108547/cell_35
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/data.csv')
list(df.columns)

def clean_d(string):
    last_char = string[-1]
    if last_char == '0':
        return 0
    string = string[1:-1]
    num = float(string)
    if last_char == 'K':
        num = num * 1000
    elif last_char == 'M':
        num = num * 1000000
    return num

df['Wage_Num'] = df.apply(lambda row: clean_d(row['Wage']), axis=1)
df['Value_Num'] = df.apply(lambda row: clean_d(row['Value']), axis=1)
df.shape
df.isna().sum()
df = df.dropna(axis=0, subset=['Preferred Foot'])
df.isna().sum()
top_100 = df[:100]
top_100.shape
club_100_plots = top_100['Club'].value_counts()
club_100_plots.plot(kind='bar')
code
18108547/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/data.csv')
list(df.columns)

def clean_d(string):
    last_char = string[-1]
    if last_char == '0':
        return 0
    string = string[1:-1]
    num = float(string)
    if last_char == 'K':
        num = num * 1000
    elif last_char == 'M':
        num = num * 1000000
    return num

df['Wage_Num'] = df.apply(lambda row: clean_d(row['Wage']), axis=1)
df['Value_Num'] = df.apply(lambda row: clean_d(row['Value']), axis=1)
df.shape
df.isna().sum()
df = df.dropna(axis=0, subset=['Preferred Foot'])
df.isna().sum()
sns.set()
sns.lineplot(x='Overall', y='Value_Num', data=df)
code
18108547/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/data.csv')
list(df.columns)

def clean_d(string):
    last_char = string[-1]
    if last_char == '0':
        return 0
    string = string[1:-1]
    num = float(string)
    if last_char == 'K':
        num = num * 1000
    elif last_char == 'M':
        num = num * 1000000
    return num

df['Wage_Num'] = df.apply(lambda row: clean_d(row['Wage']), axis=1)
df['Value_Num'] = df.apply(lambda row: clean_d(row['Value']), axis=1)
df.shape
code
18108547/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/data.csv')
list(df.columns)

def clean_d(string):
    last_char = string[-1]
    if last_char == '0':
        return 0
    string = string[1:-1]
    num = float(string)
    if last_char == 'K':
        num = num * 1000
    elif last_char == 'M':
        num = num * 1000000
    return num

df['Wage_Num'] = df.apply(lambda row: clean_d(row['Wage']), axis=1)
df['Value_Num'] = df.apply(lambda row: clean_d(row['Value']), axis=1)
df.shape
df.isna().sum()
df = df.dropna(axis=0, subset=['Preferred Foot'])
df.isna().sum()
foot_plots = df['Preferred Foot'].value_counts()
foot_plots.plot(kind='bar')
code
18108547/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/data.csv')
list(df.columns)

def clean_d(string):
    last_char = string[-1]
    if last_char == '0':
        return 0
    string = string[1:-1]
    num = float(string)
    if last_char == 'K':
        num = num * 1000
    elif last_char == 'M':
        num = num * 1000000
    return num

df['Wage_Num'] = df.apply(lambda row: clean_d(row['Wage']), axis=1)
df['Value_Num'] = df.apply(lambda row: clean_d(row['Value']), axis=1)
df.shape
df.isna().sum()
df = df.dropna(axis=0, subset=['Preferred Foot'])
df.isna().sum()
sns.set()
df['Growth_Left'] = df['Potential'] - df['Overall']
sns.lineplot(x='Age', y='Wage_Num', data=df)
code
18108547/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/data.csv')
list(df.columns)
code
18108547/cell_36
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/data.csv')
list(df.columns)

def clean_d(string):
    last_char = string[-1]
    if last_char == '0':
        return 0
    string = string[1:-1]
    num = float(string)
    if last_char == 'K':
        num = num * 1000
    elif last_char == 'M':
        num = num * 1000000
    return num

df['Wage_Num'] = df.apply(lambda row: clean_d(row['Wage']), axis=1)
df['Value_Num'] = df.apply(lambda row: clean_d(row['Value']), axis=1)
df.shape
df.isna().sum()
df = df.dropna(axis=0, subset=['Preferred Foot'])
df.isna().sum()
sns.set()
df['Growth_Left'] = df['Potential'] - df['Overall']
sns.pairplot(df, vars=['Age', 'Overall', 'Wage_Num', 'Value_Num', 'Potential', 'Growth_Left'])
code
2000084/cell_4
[ "text_html_output_1.png" ]
import pandas as pd

result = pd.read_csv('../input/catboost1223/catboost1223.csv')
result.head()
code
2000084/cell_2
[ "text_plain_output_1.png" ]
!pwd
code
32065345/cell_9
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/hmeq-data/hmeq.csv')
df.shape
df.sample(30)
df.describe().T
df['REASON'].astype('category').cat.categories
code
32065345/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/hmeq-data/hmeq.csv')
df.shape
code
32065345/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/hmeq-data/hmeq.csv')
df.shape
df.sample(30)
code
32065345/cell_2
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32065345/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/hmeq-data/hmeq.csv')
df.shape
df.sample(30)
df.describe().T
df['REASON'].astype('category').cat.codes
code
32065345/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/hmeq-data/hmeq.csv')
df.shape
df.sample(30)
df.describe().T
code
32065345/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn

df = pd.read_csv('../input/hmeq-data/hmeq.csv')
df.shape
df.sample(30)
df.describe().T
df.dropna(thresh=8, inplace=True)
df.REASON.fillna('DebtCon', inplace=True)
df.JOB.fillna('Other', inplace=True)
for col in df.columns:
    if df[col].dtype == 'object':
        df[col] = df[col].astype('category').cat.codes
plt.figure(figsize=(18, 18))
corrMatrix = df[df['DEBTINC'].notnull()].corr()
sn.heatmap(corrMatrix, annot=True)
plt.show()
code
32065345/cell_8
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/hmeq-data/hmeq.csv')
df.shape
df.sample(30)
df.describe().T
print('Rows : ', df.shape[0])
print('Columns : ', df.shape[1])
print('\nFeatures : \n', df.columns.tolist())
print('\nMissing values : ', df.isnull().sum().values.sum())
print('\nUnique values : \n', df.nunique())
print('\nPorcentagem Missing: \n', df.isna().mean().round(4) * 100)
code
32065345/cell_15
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/hmeq-data/hmeq.csv')
df.shape
df.sample(30)
df.describe().T
df.dropna(thresh=8, inplace=True)
df.REASON.fillna('DebtCon', inplace=True)
df.JOB.fillna('Other', inplace=True)
print('INFO : ', df.info())
print('Rows : ', df.shape[0])
print('Columns : ', df.shape[1])
print('\nFeatures : \n', df.columns.tolist())
print('\nMissing values : ', df.isnull().sum().values.sum())
print('\nUnique values : \n', df.nunique())
print('\nPorcentagem Missing: \n', df.isna().mean().round(4) * 100)
code
32065345/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/hmeq-data/hmeq.csv')
df.info()
code
32065345/cell_17
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/hmeq-data/hmeq.csv')
df.shape
df.sample(30)
df.describe().T
df.dropna(thresh=8, inplace=True)
df.REASON.fillna('DebtCon', inplace=True)
df.JOB.fillna('Other', inplace=True)
for col in df.columns:
    if df[col].dtype == 'object':
        df[col] = df[col].astype('category').cat.codes
print('INFO : ', df.info())
print('Rows : ', df.shape[0])
print('Columns : ', df.shape[1])
print('\nFeatures : \n', df.columns.tolist())
print('\nMissing values : ', df.isnull().sum().values.sum())
print('\nUnique values : \n', df.nunique())
print('\nTotal Missing: \n', df.isna().sum())
print('\nPorcentagem Missing: \n', df.isna().mean().round(4) * 100)
code
32065345/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/hmeq-data/hmeq.csv')
df.shape
df.sample(30)
df.describe().T
df['JOB'].astype('category').cat.categories
code
32065345/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/hmeq-data/hmeq.csv')
df.shape
df.sample(30)
df.describe().T
df['JOB'].astype('category').cat.codes
code
32065345/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/hmeq-data/hmeq.csv')
df.shape
df.head()
code
88096283/cell_21
[ "text_plain_output_1.png" ]
dt = create_model('dt')
code
88096283/cell_20
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/ml-olympiad-tensorflow-malaysia-user-group/train.csv')
test_df = pd.read_csv('../input/ml-olympiad-tensorflow-malaysia-user-group/test.csv')
df.dtypes
pltdf = df.copy()

def detect_NaNs(df_temp):
    return df.isnull().values.sum()

detect_NaNs(df)
df.iloc[:, :-1].columns

def replace_Nans(df_temp, columns, replacement):
    for col in columns:
        df[col] = df[col].fillna(replacement)
    return df_temp

df = replace_Nans(df, df.iloc[:, :-1].columns, -5)
test_df = replace_Nans(test_df, df.iloc[:, :-1].columns, -5)

def reduce_mem_usage(df, verbose=True):
    numerics = ['int8', 'int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    start_mem = df.memory_usage().sum() / 1024 ** 2
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type in numerics:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                if c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024 ** 2
    if verbose:
        print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
    return df

test_df = reduce_mem_usage(test_df)
df = reduce_mem_usage(df)
df.dtypes
from pycaret.classification import *
setup(data=df.copy(), target='Class', silent=True, normalize=True, session_id=42, create_clusters=False, remove_perfect_collinearity=False, polynomial_features=False, fix_imbalance=False, fold=10)
display()
code
88096283/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/ml-olympiad-tensorflow-malaysia-user-group/train.csv')
test_df = pd.read_csv('../input/ml-olympiad-tensorflow-malaysia-user-group/test.csv')
df.dtypes
code
88096283/cell_19
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
!pip install pycaret
code
88096283/cell_17
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/ml-olympiad-tensorflow-malaysia-user-group/train.csv')
test_df = pd.read_csv('../input/ml-olympiad-tensorflow-malaysia-user-group/test.csv')
df.dtypes
pltdf = df.copy()

def detect_NaNs(df_temp):
    return df.isnull().values.sum()

detect_NaNs(df)
df.iloc[:, :-1].columns

def replace_Nans(df_temp, columns, replacement):
    for col in columns:
        df[col] = df[col].fillna(replacement)
    return df_temp

df = replace_Nans(df, df.iloc[:, :-1].columns, -5)
test_df = replace_Nans(test_df, df.iloc[:, :-1].columns, -5)

def reduce_mem_usage(df, verbose=True):
    numerics = ['int8', 'int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    start_mem = df.memory_usage().sum() / 1024 ** 2
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type in numerics:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                df[col] = df[col].astype(np.float32)
            else:
                df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024 ** 2
    if verbose:
        print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(end_mem, 100 * (start_mem - end_mem) / start_mem))
    return df

test_df = reduce_mem_usage(test_df)
df = reduce_mem_usage(df)
df.dtypes
code
88096283/cell_24
[ "text_html_output_1.png" ]
model = dt
plot_model(dt, plot='confusion_matrix')
code
88096283/cell_22
[ "text_html_output_1.png" ]
dt = create_model('dt')
dt = tune_model(dt, optimize='Precision')
code
88096283/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/ml-olympiad-tensorflow-malaysia-user-group/train.csv')
test_df = pd.read_csv('../input/ml-olympiad-tensorflow-malaysia-user-group/test.csv')
df.dtypes
pltdf = df.copy()

def detect_NaNs(df_temp):
    print('NaNs in data: ', df.isnull().values.sum())
    return df.isnull().values.sum()

detect_NaNs(df)
code
88096283/cell_12
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/ml-olympiad-tensorflow-malaysia-user-group/train.csv')
test_df = pd.read_csv('../input/ml-olympiad-tensorflow-malaysia-user-group/test.csv')
df.dtypes
pltdf = df.copy()

def detect_NaNs(df_temp):
    return df.isnull().values.sum()

detect_NaNs(df)
df.iloc[:, :-1].columns
code
88096283/cell_5
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/ml-olympiad-tensorflow-malaysia-user-group/train.csv')
test_df = pd.read_csv('../input/ml-olympiad-tensorflow-malaysia-user-group/test.csv')
df.dtypes
sns.countplot(df['Class'])
print(len(df.loc[df['Class'] == 0]) / len(df) * 100, '%')
code
33105697/cell_4
[ "text_plain_output_1.png" ]
import glob  # needed below; missing from the original cell
import os
import time  # needed for the timing calls; missing from the original cell

import cv2  # used by process_image; missing from the original cell
from joblib import Parallel, delayed

img_size = 32

def process_image(img_file):
    # Load each frame as grayscale and downscale to img_size x img_size
    img = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE)
    img = cv2.resize(img, (img_size, img_size))
    return img

start = time.time()
X_data = []
Y_data = []
for j in range(10):
    print('Load folder c{}'.format(j))
    path = os.path.join('../input/state-farm-distracted-driver-detection/imgs/train', 'c' + str(j), '*.jpg')
    files = glob.glob(path)
    X_data.extend(Parallel(n_jobs=2)(delayed(process_image)(im_file) for im_file in files))
    Y_data.extend([j] * len(files))
end = time.time() - start
print('Time: %.2f seconds' % end)
code
33105697/cell_6
[ "text_plain_output_1.png" ]
import glob  # needed below; missing from the original cell
import os
import time  # needed for the timing calls; missing from the original cell

import cv2  # used by process_image; missing from the original cell
import matplotlib.pyplot as plt
import numpy as np
from joblib import Parallel, delayed

img_size = 32

def process_image(img_file):
    img = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE)
    img = cv2.resize(img, (img_size, img_size))
    return img

start = time.time()
X_data = []
Y_data = []
for j in range(10):
    path = os.path.join('../input/state-farm-distracted-driver-detection/imgs/train', 'c' + str(j), '*.jpg')
    files = glob.glob(path)
    X_data.extend(Parallel(n_jobs=2)(delayed(process_image)(im_file) for im_file in files))
    Y_data.extend([j] * len(files))
end = time.time() - start

X_data = np.array(X_data)
Y_data = np.array(Y_data)
# Shuffle images and labels with one shared permutation so the pairs stay
# aligned; the original shuffled X_data and Y_data independently, which
# scrambles the labels.
perm = np.random.permutation(len(X_data))
X_data = X_data[perm]
Y_data = Y_data[perm]
X_data = X_data[0:1000]
Y_data = Y_data[0:1000]
plt.imshow(X_data[0], cmap='gray')
plt.show()
print(Y_data[0])
code
33105697/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
import glob  # needed below; missing from the original cell
import os
import time  # needed for the timing calls; missing from the original cell

import cv2  # used by process_image; missing from the original cell
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
from joblib import Parallel, delayed
from torch.utils.data import Dataset, DataLoader

img_size = 32

def process_image(img_file):
    img = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE)
    img = cv2.resize(img, (img_size, img_size))
    return img

start = time.time()
X_data = []
Y_data = []
for j in range(10):
    path = os.path.join('../input/state-farm-distracted-driver-detection/imgs/train', 'c' + str(j), '*.jpg')
    files = glob.glob(path)
    X_data.extend(Parallel(n_jobs=2)(delayed(process_image)(im_file) for im_file in files))
    Y_data.extend([j] * len(files))
end = time.time() - start

X_data = np.array(X_data)
Y_data = np.array(Y_data)
# Shuffle with one shared permutation so image/label pairs stay aligned;
# independent shuffles (as in the original) scramble the labels.
perm = np.random.permutation(len(X_data))
X_data = X_data[perm]
Y_data = Y_data[perm]
X_data = X_data[0:1000]
Y_data = Y_data[0:1000]
X_data = torch.Tensor(X_data)
X_data = X_data.flatten(start_dim=1)
X_data = X_data.numpy()
Y_data = np.reshape(Y_data, (-1, 1))

class Drivers_dataset(Dataset):
    def __init__(self, df):
        rows = df.shape[0]
        self.imgnp = df.iloc[:rows, 0:img_size * img_size].values
        self.labels = df.iloc[:rows, img_size * img_size].values
        self.rows = rows

    def __len__(self):
        return self.rows

    def __getitem__(self, idx):
        image = torch.tensor(self.imgnp[idx], dtype=torch.float) / 255
        image = image.view(1, img_size, img_size)
        label = self.labels[idx]
        return (image, label)

# X_train/X_test/Y_train/Y_test come from a train/test split cell that is not
# included in this dump.
trainset = np.append(X_train, np.reshape(Y_train, (-1, 1)), axis=1)
testset = np.append(X_test, np.reshape(Y_test, (-1, 1)), axis=1)
testset = pd.DataFrame(data=testset)
trainset = pd.DataFrame(data=trainset)
trainset = Drivers_dataset(trainset)
testset = Drivers_dataset(testset)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)
testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=True, num_workers=2)
train_data_iter = iter(trainloader)
test_data_iter = iter(testloader)
dataiter = iter(trainloader)
images, labels = next(dataiter)  # dataiter.next() was removed in recent PyTorch
(images.size(), labels.size())
for data in trainloader:
    inputs, labels = data
    print(inputs.shape)
    print(labels.shape)
    print(labels.data)
    break
code
33105697/cell_14
[ "text_plain_output_1.png" ]
import glob  # needed below; missing from the original cell
import os
import time  # needed for the timing calls; missing from the original cell

import cv2  # used by process_image; missing from the original cell
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
from joblib import Parallel, delayed
from torch.utils.data import Dataset, DataLoader

img_size = 32

def process_image(img_file):
    img = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE)
    img = cv2.resize(img, (img_size, img_size))
    return img

start = time.time()
X_data = []
Y_data = []
for j in range(10):
    path = os.path.join('../input/state-farm-distracted-driver-detection/imgs/train', 'c' + str(j), '*.jpg')
    files = glob.glob(path)
    X_data.extend(Parallel(n_jobs=2)(delayed(process_image)(im_file) for im_file in files))
    Y_data.extend([j] * len(files))
end = time.time() - start

X_data = np.array(X_data)
Y_data = np.array(Y_data)
# Shuffle with one shared permutation so image/label pairs stay aligned;
# independent shuffles (as in the original) scramble the labels.
perm = np.random.permutation(len(X_data))
X_data = X_data[perm]
Y_data = Y_data[perm]
X_data = X_data[0:1000]
Y_data = Y_data[0:1000]
X_data = torch.Tensor(X_data)
X_data = X_data.flatten(start_dim=1)
X_data = X_data.numpy()
Y_data = np.reshape(Y_data, (-1, 1))

class Drivers_dataset(Dataset):
    def __init__(self, df):
        rows = df.shape[0]
        self.imgnp = df.iloc[:rows, 0:img_size * img_size].values
        self.labels = df.iloc[:rows, img_size * img_size].values
        self.rows = rows

    def __len__(self):
        return self.rows

    def __getitem__(self, idx):
        image = torch.tensor(self.imgnp[idx], dtype=torch.float) / 255
        image = image.view(1, img_size, img_size)
        label = self.labels[idx]
        return (image, label)

# X_train/X_test/Y_train/Y_test come from a train/test split cell that is not
# included in this dump.
trainset = np.append(X_train, np.reshape(Y_train, (-1, 1)), axis=1)
testset = np.append(X_test, np.reshape(Y_test, (-1, 1)), axis=1)
testset = pd.DataFrame(data=testset)
trainset = pd.DataFrame(data=trainset)
trainset = Drivers_dataset(trainset)
testset = Drivers_dataset(testset)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)
testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=True, num_workers=2)
train_data_iter = iter(trainloader)
test_data_iter = iter(testloader)
dataiter = iter(trainloader)
images, labels = next(dataiter)  # dataiter.next() was removed in recent PyTorch
(images.size(), labels.size())
code
106193702/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.drop('Id', axis=1, inplace=True)
df.info()
code
106193702/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
code
106193702/cell_57
[ "text_plain_output_1.png", "image_output_1.png" ]
(X_train.shape, X_test.shape)
X_train.dtypes
X_train.isnull().sum()
code
106193702/cell_56
[ "text_plain_output_1.png" ]
(X_train.shape, X_test.shape)
X_train.dtypes
code
106193702/cell_30
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.drop('Id', axis=1, inplace=True)
df.dtypes
df.isnull().sum()
df.isna().sum()
df['Bare_Nuclei'].isna().sum()
code
106193702/cell_44
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.drop('Id', axis=1, inplace=True)
df.dtypes
df.isnull().sum()
df.isna().sum()
plt.rcParams['figure.figsize'] = (30, 25)
correlation = df.corr()
correlation['Class'].sort_values(ascending=False)
code
106193702/cell_20
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.drop('Id', axis=1, inplace=True)
df.dtypes
code
106193702/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.head()
code
106193702/cell_74
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.drop('Id', axis=1, inplace=True)
df['Bare_Nuclei'] = pd.to_numeric(df['Bare_Nuclei'], errors='coerce')
(X_train.shape, X_test.shape)
X_train.dtypes
X_train.isnull().sum()
X_test.isnull().sum()
for df1 in [X_train, X_test]:
    for col in X_train.columns:
        col_median = X_train[col].median()
        df1[col].fillna(col_median, inplace=True)
X_train.isnull().sum()
X_test.isnull().sum()
cols = X_train.columns
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
X_train = pd.DataFrame(X_train, columns=[cols])
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)
code
106193702/cell_76
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.drop('Id', axis=1, inplace=True)
df['Bare_Nuclei'] = pd.to_numeric(df['Bare_Nuclei'], errors='coerce')
(X_train.shape, X_test.shape)
X_train.dtypes
X_train.isnull().sum()
X_test.isnull().sum()
for df1 in [X_train, X_test]:
    for col in X_train.columns:
        col_median = X_train[col].median()
        df1[col].fillna(col_median, inplace=True)
X_train.isnull().sum()
X_test.isnull().sum()
cols = X_train.columns
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
X_train = pd.DataFrame(X_train, columns=[cols])
X_test = pd.DataFrame(X_test, columns=[cols])
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
y_pred
code
106193702/cell_29
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.drop('Id', axis=1, inplace=True)
df.dtypes
df.isnull().sum()
df.isna().sum()
df['Bare_Nuclei'].unique()
code
106193702/cell_39
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.drop('Id', axis=1, inplace=True)
df.dtypes
df.isnull().sum()
df.isna().sum()
plt.rcParams['figure.figsize'] = (30, 25)
df.plot(kind='hist', bins=10, subplots=True, layout=(5, 2), sharex=False, sharey=False)
code
106193702/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.drop('Id', axis=1, inplace=True)
df.dtypes
df.isnull().sum()
df.isna().sum()
code
106193702/cell_65
[ "text_plain_output_1.png" ]
(X_train.shape, X_test.shape)
X_test.isnull().sum()
X_test.isnull().sum()
X_test.head()
code
106193702/cell_48
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.drop('Id', axis=1, inplace=True)
df.dtypes
df.isnull().sum()
df.isna().sum()
plt.rcParams['figure.figsize'] = (30, 25)
correlation = df.corr()
a = sns.heatmap(correlation, square=True, annot=True, fmt='.2f', linecolor='white')
a.set_xticklabels(a.get_xticklabels(), rotation=90)
a.set_yticklabels(a.get_yticklabels(), rotation=30)
code
106193702/cell_61
[ "text_plain_output_1.png" ]
(X_train.shape, X_test.shape)
X_train.dtypes
X_train.isnull().sum()
X_test.isnull().sum()
for df1 in [X_train, X_test]:
    for col in X_train.columns:
        col_median = X_train[col].median()
        df1[col].fillna(col_median, inplace=True)
X_train.isnull().sum()
code
106193702/cell_54
[ "text_html_output_1.png" ]
(X_train.shape, X_test.shape)
code
106193702/cell_72
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.preprocessing import StandardScaler

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.drop('Id', axis=1, inplace=True)
df['Bare_Nuclei'] = pd.to_numeric(df['Bare_Nuclei'], errors='coerce')
(X_train.shape, X_test.shape)
X_train.dtypes
X_train.isnull().sum()
X_test.isnull().sum()
for df1 in [X_train, X_test]:
    for col in X_train.columns:
        col_median = X_train[col].median()
        df1[col].fillna(col_median, inplace=True)
X_train.isnull().sum()
X_test.isnull().sum()
cols = X_train.columns
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
X_train = pd.DataFrame(X_train, columns=[cols])
X_train.head()
code
106193702/cell_64
[ "text_plain_output_1.png" ]
(X_train.shape, X_test.shape)
X_train.dtypes
X_train.isnull().sum()
X_test.isnull().sum()
for df1 in [X_train, X_test]:
    for col in X_train.columns:
        col_median = X_train[col].median()
        df1[col].fillna(col_median, inplace=True)
X_train.isnull().sum()
X_train.head()
code
106193702/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
106193702/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
code
106193702/cell_32
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.drop('Id', axis=1, inplace=True)
df.dtypes
df.isnull().sum()
df.isna().sum()
# np.float was removed in NumPy 1.24+; the builtin float behaves the same here
df['Class'].value_counts() / float(len(df))
code
106193702/cell_62
[ "text_plain_output_1.png" ]
(X_train.shape, X_test.shape)
X_test.isnull().sum()
X_test.isnull().sum()
code
106193702/cell_59
[ "text_plain_output_1.png" ]
(X_train.shape, X_test.shape)
X_train.dtypes
X_train.isnull().sum()
for col in X_train.columns:
    if X_train[col].isnull().mean() > 0:
        print(col, round(X_train[col].isnull().mean(), 4))
code
106193702/cell_58
[ "text_plain_output_1.png" ]
(X_train.shape, X_test.shape)
X_test.isnull().sum()
code
106193702/cell_28
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.drop('Id', axis=1, inplace=True)
df.dtypes
df.isnull().sum()
df.isna().sum()
df['Bare_Nuclei'].value_counts()
code
106193702/cell_78
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.drop('Id', axis=1, inplace=True)
df['Bare_Nuclei'] = pd.to_numeric(df['Bare_Nuclei'], errors='coerce')
(X_train.shape, X_test.shape)
X_train.dtypes
X_train.isnull().sum()
X_test.isnull().sum()
for df1 in [X_train, X_test]:
    for col in X_train.columns:
        col_median = X_train[col].median()
        df1[col].fillna(col_median, inplace=True)
X_train.isnull().sum()
X_test.isnull().sum()
cols = X_train.columns
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
X_train = pd.DataFrame(X_train, columns=[cols])
X_test = pd.DataFrame(X_test, columns=[cols])
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
y_pred
knn.predict_proba(X_test)[:, 0]
knn.predict_proba(X_test)[:, 1]
code
106193702/cell_80
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.drop('Id', axis=1, inplace=True)
df['Bare_Nuclei'] = pd.to_numeric(df['Bare_Nuclei'], errors='coerce')
(X_train.shape, X_test.shape)
X_train.dtypes
X_train.isnull().sum()
X_test.isnull().sum()
for df1 in [X_train, X_test]:
    for col in X_train.columns:
        col_median = X_train[col].median()
        df1[col].fillna(col_median, inplace=True)
X_train.isnull().sum()
X_test.isnull().sum()
cols = X_train.columns
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
X_train = pd.DataFrame(X_train, columns=[cols])
X_test = pd.DataFrame(X_test, columns=[cols])
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
y_pred
print('Model accuracy score: {0:0.4f}'.format(accuracy_score(y_test, y_pred)))
code
106193702/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.drop('Id', axis=1, inplace=True)
for var in df.columns:
    print(df[var].value_counts())
code
106193702/cell_35
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.drop('Id', axis=1, inplace=True)
df.dtypes
df.isnull().sum()
df.isna().sum()
print(round(df.describe(), 2))
code
106193702/cell_77
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.drop('Id', axis=1, inplace=True)
df['Bare_Nuclei'] = pd.to_numeric(df['Bare_Nuclei'], errors='coerce')
(X_train.shape, X_test.shape)
X_train.dtypes
X_train.isnull().sum()
X_test.isnull().sum()
for df1 in [X_train, X_test]:
    for col in X_train.columns:
        col_median = X_train[col].median()
        df1[col].fillna(col_median, inplace=True)
X_train.isnull().sum()
X_test.isnull().sum()
cols = X_train.columns
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
X_train = pd.DataFrame(X_train, columns=[cols])
X_test = pd.DataFrame(X_test, columns=[cols])
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
y_pred
knn.predict_proba(X_test)[:, 0]
code
106193702/cell_43
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.drop('Id', axis=1, inplace=True)
df.dtypes
df.isnull().sum()
df.isna().sum()
plt.rcParams['figure.figsize'] = (30, 25)
correlation = df.corr()
correlation
code
106193702/cell_31
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.drop('Id', axis=1, inplace=True)
df.dtypes
df.isnull().sum()
df.isna().sum()
df['Class'].value_counts()
code
106193702/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.drop('Id', axis=1, inplace=True)
df.dtypes
df.isnull().sum()
code
106193702/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/uci-breast-cancer-wisconsin-original/breast-cancer-wisconsin.data.txt')
df.shape
col_names = ['Id', 'Clump_thickness', 'Uniformity_Cell_Size', 'Uniformity_Cell_Shape', 'Marginal_Adhesion', 'Single_Epithelial_Cell_Size', 'Bare_Nuclei', 'Bland_Chromatin', 'Normal_Nucleoli', 'Mitoses', 'Class']
df.columns = col_names
df.columns
df.head()
code
311188/cell_4
[ "text_plain_output_1.png" ]
50 + 100
code
311188/cell_6
[ "text_plain_output_1.png" ]
100 + 200
code
311188/cell_1
[ "text_plain_output_1.png" ]
1 + 1
code
311188/cell_3
[ "text_plain_output_1.png" ]
20 + 30
code
311188/cell_5
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np

t = np.arange(0.0, 2.0, 0.01)
s = np.sin(2 * np.pi * t)
plt.plot(t, s)
plt.xlabel('time (s)')
plt.ylabel('voltage (mV)')
plt.title('About as simple as it gets, folks')
plt.grid(True)
plt.savefig('test.png')
plt.show()
code
128005164/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
num_columns = train.select_dtypes(include=['number']).columns.tolist()
num_columns
cat_columns = train.select_dtypes(exclude=['number']).columns.tolist()
cat_columns
X = train.drop(['Survived', 'PassengerId', 'Ticket', 'Cabin', 'Name'], axis=1)
y = train['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
log_r = LogisticRegression(random_state=0)
log_r.fit(X_train, y_train)
y_pred_lr = log_r.predict(X_test)
accuracy_score(y_test, y_pred_lr)
print(classification_report(y_test, y_pred_lr))
code
128005164/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.head()
code
128005164/cell_23
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
num_columns = train.select_dtypes(include=['number']).columns.tolist()
num_columns
cat_columns = train.select_dtypes(exclude=['number']).columns.tolist()
cat_columns
X = train.drop(['Survived', 'PassengerId', 'Ticket', 'Cabin', 'Name'], axis=1)
y = train['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)
y_pred_dt = dt.predict(X_test)
accuracy_score(y_test, y_pred_dt)
code
128005164/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
num_columns = train.select_dtypes(include=['number']).columns.tolist()
num_columns
cat_columns = train.select_dtypes(exclude=['number']).columns.tolist()
cat_columns
X = train.drop(['Survived', 'PassengerId', 'Ticket', 'Cabin', 'Name'], axis=1)
y = train['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
log_r = LogisticRegression(random_state=0)
log_r.fit(X_train, y_train)
y_pred_lr = log_r.predict(X_test)
accuracy_score(y_test, y_pred_lr)
code
128005164/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
train.describe()
code
128005164/cell_26
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
num_columns = train.select_dtypes(include=['number']).columns.tolist()
num_columns
cat_columns = train.select_dtypes(exclude=['number']).columns.tolist()
cat_columns
X = train.drop(['Survived', 'PassengerId', 'Ticket', 'Cabin', 'Name'], axis=1)
y = train['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
rf_c = RandomForestClassifier(n_estimators=10, criterion='entropy')
rf_c.fit(X_train, y_train)
y_pred_rf_c = rf_c.predict(X_test)
accuracy_score(y_test, y_pred_rf_c)
code
128005164/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
num_columns = train.select_dtypes(include=['number']).columns.tolist()
num_columns
cat_columns = train.select_dtypes(exclude=['number']).columns.tolist()
cat_columns
X = train.drop(['Survived', 'PassengerId', 'Ticket', 'Cabin', 'Name'], axis=1)
y = train['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
log_r = LogisticRegression(random_state=0)
log_r.fit(X_train, y_train)
code
128005164/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128005164/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
pd.concat([train, test], axis=0).isnull().sum()
code
128005164/cell_18
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
num_columns = train.select_dtypes(include=['number']).columns.tolist()
num_columns
cat_columns = train.select_dtypes(exclude=['number']).columns.tolist()
cat_columns
X = train.drop(['Survived', 'PassengerId', 'Ticket', 'Cabin', 'Name'], axis=1)
y = train['Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
print('X_train:', X_train.shape)
print('y_train:', y_train.shape)
print('X_test:', X_test.shape)
print('y_test:', y_test.shape)
code
128005164/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
num_columns = train.select_dtypes(include=['number']).columns.tolist()
num_columns
code