Dataset columns: path (string, 13-17 chars), screenshot_names (sequence, 1-873 items), code (string, 0-40.4k chars), cell_type (string, 1 class: "code")
32063423/cell_23
[ "text_plain_output_1.png" ]
from xgboost import XGBRegressor, plot_importance, plot_tree
import matplotlib.pyplot as plt

model1 = XGBRegressor(n_estimators=1000)
model1.fit(X_train, y_train[:, 0])

def plot_features(booster, figsize):
    fig, ax = plt.subplots(1, 1, figsize=figsize)
    return plot_importance(booster=booster, ax=ax)

plot_features(model1, (10, 14))
code
32063423/cell_20
[ "text_plain_output_1.png" ]
from xgboost import XGBRegressor

model1 = XGBRegressor(n_estimators=1000)
model1.fit(X_train, y_train[:, 0])
code
32063423/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')

# Parse the Date columns first; subtracting the raw strings would raise a TypeError.
train_date_min = pd.to_datetime(df_train['Date']).min()
train_date_max = pd.to_datetime(df_train['Date']).max()
test_date_min = pd.to_datetime(df_test['Date']).min()
test_date_max = pd.to_datetime(df_test['Date']).max()
test_date_min - train_date_max
code
32063423/cell_15
[ "text_plain_output_1.png" ]
from fastai.tabular import add_datepart
from sklearn.preprocessing import OrdinalEncoder
import pandas as pd

df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
df_train['Date'] = pd.to_datetime(df_train['Date'], format='%Y-%m-%d')
df_test['Date'] = pd.to_datetime(df_test['Date'], format='%Y-%m-%d')

def categoricalToInteger(df):
    df.Province_State.fillna('NaN', inplace=True)
    oe = OrdinalEncoder()
    df[['Province_State', 'Country_Region']] = oe.fit_transform(df.loc[:, ['Province_State', 'Country_Region']])
    return df

add_datepart(df_train, 'Date', drop=False)
df_train.drop('Elapsed', axis=1, inplace=True)
df_train = categoricalToInteger(df_train)

def lag_feature(df, lags, col):
    tmp = df[['Dayofyear', 'Country_Region', 'Province_State', col]]
    for i in lags:
        shifted = tmp.copy()
        shifted.columns = ['Dayofyear', 'Country_Region', 'Province_State', col + '_lag_' + str(i)]
        shifted['Dayofyear'] += i
        df = pd.merge(df, shifted, on=['Dayofyear', 'Country_Region', 'Province_State'], how='left')
    return df

df_train = lag_feature(df_train, [1, 2, 3, 6, 11], 'ConfirmedCases')
df_train = lag_feature(df_train, [1, 2, 3, 6, 11], 'Fatalities')
df_train.columns

lags = [1, 2, 3, 6, 11]
features = ['ConfirmedCases', 'Fatalities', 'Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
            'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']
for lag in lags:
    features.append('ConfirmedCases_lag_' + str(lag))
    features.append('Fatalities_lag_' + str(lag))

corr_matrix = df_train[features].corr()
corr_matrix['ConfirmedCases'].sort_values(ascending=False)
corr_matrix['Fatalities'].sort_values(ascending=False)
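# Added note (hedged sketch, not part of the original cell): the lag merge above
# leaves NaNs for the first days of each region's series; one common, assumed
# choice before training tree models is to fill those lag columns with 0.
lag_cols = [c for c in df_train.columns if '_lag_' in c]
df_train[lag_cols] = df_train[lag_cols].fillna(0)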
code
32063423/cell_24
[ "text_plain_output_1.png" ]
from xgboost import XGBRegressor, plot_importance, plot_tree
import matplotlib.pyplot as plt

model2 = XGBRegressor(n_estimators=1000)
model2.fit(X_train, y_train[:, 1])

def plot_features(booster, figsize):
    fig, ax = plt.subplots(1, 1, figsize=figsize)
    return plot_importance(booster=booster, ax=ax)

plot_features(model2, (10, 14))
code
32063423/cell_14
[ "text_plain_output_1.png" ]
from fastai.tabular import add_datepart
from sklearn.preprocessing import OrdinalEncoder
import pandas as pd

df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
df_train['Date'] = pd.to_datetime(df_train['Date'], format='%Y-%m-%d')
df_test['Date'] = pd.to_datetime(df_test['Date'], format='%Y-%m-%d')

def categoricalToInteger(df):
    df.Province_State.fillna('NaN', inplace=True)
    oe = OrdinalEncoder()
    df[['Province_State', 'Country_Region']] = oe.fit_transform(df.loc[:, ['Province_State', 'Country_Region']])
    return df

add_datepart(df_train, 'Date', drop=False)
df_train.drop('Elapsed', axis=1, inplace=True)
df_train = categoricalToInteger(df_train)

def lag_feature(df, lags, col):
    tmp = df[['Dayofyear', 'Country_Region', 'Province_State', col]]
    for i in lags:
        shifted = tmp.copy()
        shifted.columns = ['Dayofyear', 'Country_Region', 'Province_State', col + '_lag_' + str(i)]
        shifted['Dayofyear'] += i
        df = pd.merge(df, shifted, on=['Dayofyear', 'Country_Region', 'Province_State'], how='left')
    return df

df_train = lag_feature(df_train, [1, 2, 3, 6, 11], 'ConfirmedCases')
df_train = lag_feature(df_train, [1, 2, 3, 6, 11], 'Fatalities')
df_train.columns

lags = [1, 2, 3, 6, 11]
features = ['ConfirmedCases', 'Fatalities', 'Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
            'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']
for lag in lags:
    features.append('ConfirmedCases_lag_' + str(lag))
    features.append('Fatalities_lag_' + str(lag))

corr_matrix = df_train[features].corr()
corr_matrix['ConfirmedCases'].sort_values(ascending=False)
code
32063423/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
df_test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')

test_date_min = df_test['Date'].min()
test_date_max = df_test['Date'].max()
print('Minimum date from test set: {}'.format(test_date_min))
print('Maximum date from test set: {}'.format(test_date_max))
code
2029174/cell_4
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import numpy as np
import pandas

dataframe = pandas.read_csv('../input/ecoli.csv', delim_whitespace=True)
dataframe.columns = ['seq_name', 'mcg', 'gvh', 'lip', 'chg', 'aac', 'alm1', 'alm2', 'site']
dataframe = dataframe.drop('seq_name', axis=1)
dataframe.site.replace(('cp', 'im', 'pp', 'imU', 'om', 'omL', 'imL', 'imS'), (1, 0, 0, 0, 0, 0, 0, 0), inplace=True)
dataset = dataframe.values
DataX = np.array(dataset[:, 0:7])
DataY = np.transpose([dataset[:, 7]])
X_train, X_test, Y_train, Y_test = train_test_split(DataX, DataY, test_size=0.2)
print(X_train.shape, Y_train.shape)
print(X_test.shape, Y_test.shape)
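# Added note (hedged sketch, not in the original cell): a stratified split keeps
# the 'cp' vs non-'cp' class ratio stable across train and test; the random_state
# is an arbitrary assumption for reproducibility.
X_train, X_test, Y_train, Y_test = train_test_split(
    DataX, DataY, test_size=0.2, stratify=DataY.ravel(), random_state=0)
print(X_train.shape, Y_train.shape)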
code
2029174/cell_6
[ "text_plain_output_1.png" ]
import numpy as np
import pandas

dataframe = pandas.read_csv('../input/ecoli.csv', delim_whitespace=True)
dataframe.columns = ['seq_name', 'mcg', 'gvh', 'lip', 'chg', 'aac', 'alm1', 'alm2', 'site']
dataframe = dataframe.drop('seq_name', axis=1)
dataframe.site.replace(('cp', 'im', 'pp', 'imU', 'om', 'omL', 'imL', 'imS'), (1, 0, 0, 0, 0, 0, 0, 0), inplace=True)
dataset = dataframe.values
DataX = np.array(dataset[:, 0:7])
DataY = np.transpose([dataset[:, 7]])

def initialize_parameters(n_x, n_h, n_y):  # renamed from the misspelled 'intialize_parameters'
    np.random.seed(4)
    W1 = np.random.randn(n_h, n_x)
    W2 = np.random.randn(n_y, n_h)
    parameters = {'W1': W1, 'W2': W2}
    return parameters

initialize_parameters(5, 4, 3)
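# Added note (hedged sketch): the helper above initializes only weights; a fuller
# variant would also return zero bias vectors b1, b2 (standard practice, and an
# assumption here, not part of the original cell).
def initialize_parameters_with_bias(n_x, n_h, n_y):
    np.random.seed(4)
    return {'W1': np.random.randn(n_h, n_x),
            'b1': np.zeros((n_h, 1)),
            'W2': np.random.randn(n_y, n_h),
            'b2': np.zeros((n_y, 1))}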
code
2029174/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas

dataframe = pandas.read_csv('../input/ecoli.csv', delim_whitespace=True)
dataframe.columns = ['seq_name', 'mcg', 'gvh', 'lip', 'chg', 'aac', 'alm1', 'alm2', 'site']
dataframe = dataframe.drop('seq_name', axis=1)
dataframe.site.replace(('cp', 'im', 'pp', 'imU', 'om', 'omL', 'imL', 'imS'), (1, 0, 0, 0, 0, 0, 0, 0), inplace=True)
dataset = dataframe.values
DataX = np.array(dataset[:, 0:7])
DataY = np.transpose([dataset[:, 7]])

def initialize_parameters(n_x, n_h, n_y):  # renamed from the misspelled 'intialize_parameters'
    np.random.seed(4)
    W1 = np.random.randn(n_h, n_x)
    W2 = np.random.randn(n_y, n_h)
    parameters = {'W1': W1, 'W2': W2}
    return parameters

def initialize_parameters_deep(layer_dims):
    np.random.seed(4)
    L = len(layer_dims)
    parameters = {}
    for l in range(1, L):
        parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1])
    return parameters

parameters = initialize_parameters_deep([5, 4, 3])
print(parameters)
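# Added note (hedged sketch): scaling each weight matrix by sqrt(2 / fan_in)
# ("He" initialization) is a common refinement for ReLU networks; this variant
# is an illustration, not part of the original notebook.
def initialize_parameters_he(layer_dims):
    np.random.seed(4)
    parameters = {}
    for l in range(1, len(layer_dims)):
        parameters['W' + str(l)] = (np.random.randn(layer_dims[l], layer_dims[l - 1])
                                    * np.sqrt(2.0 / layer_dims[l - 1]))
    return parameters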
code
2029174/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
import pandas

dataframe = pandas.read_csv('../input/ecoli.csv', delim_whitespace=True)
dataframe.columns = ['seq_name', 'mcg', 'gvh', 'lip', 'chg', 'aac', 'alm1', 'alm2', 'site']
dataframe = dataframe.drop('seq_name', axis=1)
dataframe.site.replace(('cp', 'im', 'pp', 'imU', 'om', 'omL', 'imL', 'imS'), (1, 0, 0, 0, 0, 0, 0, 0), inplace=True)
dataset = dataframe.values
DataX = np.array(dataset[:, 0:7])
print(DataX.shape)
DataY = np.transpose([dataset[:, 7]])
print(DataY.shape)
code
2029174/cell_10
[ "text_plain_output_1.png" ]
import numpy as np

# linear_activation_forward_test_case is a course helper that is not defined in
# this notebook; a minimal stand-in with assumed shapes so the cell runs:
def linear_activation_forward_test_case():
    np.random.seed(2)
    return np.random.randn(3, 2), np.random.randn(1, 3), np.random.randn(1, 1)

A_prev, W, b = linear_activation_forward_test_case()
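# Added note (hedged sketch): a minimal forward step these test values are
# typically fed into, Z = W . A_prev + b followed by an activation; the function
# name and signature are assumptions, not from the original cell.
def linear_activation_forward(A_prev, W, b, activation):
    Z = np.dot(W, A_prev) + b
    if activation == 'sigmoid':
        A = 1 / (1 + np.exp(-Z))
    else:  # 'relu'
        A = np.maximum(0, Z)
    return A, Z

A, Z = linear_activation_forward(A_prev, W, b, 'sigmoid')
print(A)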
code
17098287/cell_21
[ "text_html_output_1.png" ]
from keras.layers import Dense
from keras.layers import Dropout
from keras.models import Sequential

model = Sequential([
    Dense(units=16, input_dim=29, activation='relu'),
    Dense(units=24, activation='relu'),
    Dropout(0.5),
    Dense(20, activation='relu'),
    Dense(24, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.summary()
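# Added note: the parameter counts in the summary follow (inputs + 1 bias) * units,
# e.g. the first Dense layer has (29 + 1) * 16 = 480 trainable parameters.
print((29 + 1) * 16)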
code
17098287/cell_13
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/creditcard.csv')
data['normalized_amt'] = StandardScaler().fit_transform(data['Amount'].values.reshape(-1, 1))
data = data.drop(['Amount'], axis=1)
data = data.drop(['Time'], axis=1)
x = data.iloc[:, data.columns != 'Class']
y = data.iloc[:, data.columns == 'Class']
y.head()
code
17098287/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/creditcard.csv')
data['normalized_amt'] = StandardScaler().fit_transform(data['Amount'].values.reshape(-1, 1))
data = data.drop(['Amount'], axis=1)
data.head()
code
17098287/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/creditcard.csv')
data['Amount'].values
code
17098287/cell_30
[ "text_plain_output_1.png" ]
from keras.layers import Dense
from keras.layers import Dropout
from keras.models import Sequential
from sklearn.metrics import confusion_matrix
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/creditcard.csv')

# x_train/x_test/y_train/y_test come from a train_test_split cell that is not
# included in this dump; they are assumed to exist in the kernel's state.
np.array(x_train)
x_train = np.array(x_train)
x_test = np.array(x_test)
y_train = np.array(y_train)
y_test = np.array(y_test)

model = Sequential([
    Dense(units=16, input_dim=29, activation='relu'),
    Dense(units=24, activation='relu'),
    Dropout(0.5),
    Dense(20, activation='relu'),
    Dense(24, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.summary()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=15, epochs=5)
score = model.evaluate(x_test, y_test)

y_pred = model.predict(x_test)
y_pred.shape
y_test.shape
y_test = pd.DataFrame(y_test)
cnf_matrix = confusion_matrix(y_test, y_pred.round())
print(cnf_matrix)
code
17098287/cell_33
[ "text_plain_output_1.png" ]
from keras.layers import Dense
from keras.layers import Dropout
from keras.models import Sequential
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/creditcard.csv')
data['normalized_amt'] = StandardScaler().fit_transform(data['Amount'].values.reshape(-1, 1))
data = data.drop(['Amount'], axis=1)
data = data.drop(['Time'], axis=1)
x = data.iloc[:, data.columns != 'Class']
y = data.iloc[:, data.columns == 'Class']

np.array(x_train)
x_train = np.array(x_train)
x_test = np.array(x_test)
y_train = np.array(y_train)
y_test = np.array(y_test)

model = Sequential([
    Dense(units=16, input_dim=29, activation='relu'),
    Dense(units=24, activation='relu'),
    Dropout(0.5),
    Dense(20, activation='relu'),
    Dense(24, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.summary()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=15, epochs=5)
score = model.evaluate(x_test, y_test)

def plot_confusion_matrix(cm, classes, normalize=False, title='None', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    # imshow/title were missing in the dump; plt.colorbar() needs a mappable,
    # so they are restored here (assumed from the standard sklearn recipe).
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.0
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            plt.text(j, i, format(cm[i, j], fmt), ha='center', va='center',
                     color='white' if cm[i, j] > thresh else 'black')
    plt.tight_layout()

y_pred = model.predict(x_test)
y_pred.shape
y_test.shape
y_test = pd.DataFrame(y_test)
y_pred1 = model.predict(x)
y_actual = pd.DataFrame(y)
cnf_matrix1 = confusion_matrix(y_actual, y_pred1.round())
plot_confusion_matrix(cnf_matrix1, classes=[0, 1])
code
17098287/cell_20
[ "text_html_output_1.png" ]
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
code
17098287/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/creditcard.csv')
data['Amount'].values.shape[-1]
code
17098287/cell_29
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/creditcard.csv')

np.array(x_train)
x_train = np.array(x_train)
x_test = np.array(x_test)
y_train = np.array(y_train)
y_test = np.array(y_test)

y_test.shape
y_test = pd.DataFrame(y_test)
y_test.head()
code
17098287/cell_2
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/creditcard.csv')
data.head(10)
code
17098287/cell_19
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra

np.array(x_train)
x_train = np.array(x_train)
x_test = np.array(x_test)
y_train = np.array(y_train)
y_test = np.array(y_test)
x_train
code
17098287/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

print(os.listdir('../input'))

from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import accuracy_score
code
17098287/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/creditcard.csv')
data['Amount'].values.reshape(-1, 1).shape
code
17098287/cell_28
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra

np.array(x_train)
x_train = np.array(x_train)
x_test = np.array(x_test)
y_train = np.array(y_train)
y_test = np.array(y_test)
y_test.shape
code
17098287/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/creditcard.csv')
data['normalized_amt'] = StandardScaler().fit_transform(data['Amount'].values.reshape(-1, 1))
data.head()
code
17098287/cell_15
[ "text_plain_output_1.png" ]
for i in [x_train, x_test, y_train, y_test]:
    print(i.shape)
code
17098287/cell_16
[ "text_plain_output_1.png" ]
x_train.head()
code
17098287/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/creditcard.csv')
data['Amount']
code
17098287/cell_17
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra

np.array(x_train)
code
17098287/cell_31
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from keras.layers import Dense
from keras.layers import Dropout
from keras.models import Sequential
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/creditcard.csv')

np.array(x_train)
x_train = np.array(x_train)
x_test = np.array(x_test)
y_train = np.array(y_train)
y_test = np.array(y_test)

model = Sequential([
    Dense(units=16, input_dim=29, activation='relu'),
    Dense(units=24, activation='relu'),
    Dropout(0.5),
    Dense(20, activation='relu'),
    Dense(24, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.summary()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=15, epochs=5)
score = model.evaluate(x_test, y_test)

def plot_confusion_matrix(cm, classes, normalize=False, title='None', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    # imshow/title were missing in the dump; plt.colorbar() needs a mappable,
    # so they are restored here (assumed from the standard sklearn recipe).
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.0
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            plt.text(j, i, format(cm[i, j], fmt), ha='center', va='center',
                     color='white' if cm[i, j] > thresh else 'black')
    plt.tight_layout()

y_pred = model.predict(x_test)
y_pred.shape
y_test.shape
y_test = pd.DataFrame(y_test)
cnf_matrix = confusion_matrix(y_test, y_pred.round())
plot_confusion_matrix(cnf_matrix, classes=[0, 1])
code
17098287/cell_24
[ "text_plain_output_1.png" ]
from keras.layers import Dense
from keras.layers import Dropout
from keras.models import Sequential
import numpy as np  # linear algebra

np.array(x_train)
x_train = np.array(x_train)
x_test = np.array(x_test)
y_train = np.array(y_train)
y_test = np.array(y_test)

model = Sequential([
    Dense(units=16, input_dim=29, activation='relu'),
    Dense(units=24, activation='relu'),
    Dropout(0.5),
    Dense(20, activation='relu'),
    Dense(24, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.summary()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=15, epochs=5)
score = model.evaluate(x_test, y_test)
print(score)
code
17098287/cell_22
[ "text_html_output_1.png" ]
from keras.layers import Dense
from keras.layers import Dropout
from keras.models import Sequential
import numpy as np  # linear algebra

np.array(x_train)
x_train = np.array(x_train)
x_test = np.array(x_test)
y_train = np.array(y_train)
y_test = np.array(y_test)

model = Sequential([
    Dense(units=16, input_dim=29, activation='relu'),
    Dense(units=24, activation='relu'),
    Dropout(0.5),
    Dense(20, activation='relu'),
    Dense(24, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.summary()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=15, epochs=5)
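# Added note (hedged sketch): the creditcard data is highly imbalanced, so a
# class-weighted fit is a common alternative; the weights below are illustrative
# assumptions, not values from the original notebook.
weights = {0: 1.0, 1: 100.0}
model.fit(x_train, y_train, batch_size=15, epochs=5, class_weight=weights)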
code
17098287/cell_10
[ "text_html_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/creditcard.csv')
data['normalized_amt'] = StandardScaler().fit_transform(data['Amount'].values.reshape(-1, 1))
data = data.drop(['Amount'], axis=1)
data = data.drop(['Time'], axis=1)
data.head()
code
17098287/cell_27
[ "text_html_output_1.png" ]
from keras.layers import Dense
from keras.layers import Dropout
from keras.models import Sequential
import numpy as np  # linear algebra

np.array(x_train)
x_train = np.array(x_train)
x_test = np.array(x_test)
y_train = np.array(y_train)
y_test = np.array(y_test)

model = Sequential([
    Dense(units=16, input_dim=29, activation='relu'),
    Dense(units=24, activation='relu'),
    Dropout(0.5),
    Dense(20, activation='relu'),
    Dense(24, activation='relu'),
    Dense(1, activation='sigmoid'),
])
model.summary()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=15, epochs=5)
score = model.evaluate(x_test, y_test)

y_pred = model.predict(x_test)
y_pred.shape
code
17098287/cell_12
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/creditcard.csv')
data['normalized_amt'] = StandardScaler().fit_transform(data['Amount'].values.reshape(-1, 1))
data = data.drop(['Amount'], axis=1)
data = data.drop(['Time'], axis=1)
x = data.iloc[:, data.columns != 'Class']
y = data.iloc[:, data.columns == 'Class']
x.head()
code
17098287/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/creditcard.csv')
data['Amount'].values.shape
code
17101142/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
x_train.head()
code
17101142/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/creditcard.csv')
data['normalized_amt'] = StandardScaler().fit_transform(data['Amount'].values.reshape(-1, 1))
data = data.drop(['Amount'], axis=1)
data = data.drop(['Time'], axis=1)
data.head()
code
17101142/cell_20
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/creditcard.csv')
data['normalized_amt'] = StandardScaler().fit_transform(data['Amount'].values.reshape(-1, 1))
data = data.drop(['Amount'], axis=1)
data = data.drop(['Time'], axis=1)
x = data.iloc[:, data.columns != 'Class']
y = data.iloc[:, data.columns == 'Class']

# x_train/x_test/y_train/y_test come from a train_test_split cell not included
# in this dump; they are assumed to exist in the kernel's state.
y_train.values
y_train.values.ravel()
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(x_train, y_train.values.ravel())
y_pred = random_forest.predict(x_test)
random_forest.score(x_test, y_test)

def plot_confusion_matrix(cm, classes, normalize=False, title='None', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    # imshow/title were missing in the dump; plt.colorbar() needs a mappable,
    # so they are restored here (assumed from the standard sklearn recipe).
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.0
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            plt.text(j, i, format(cm[i, j], fmt), ha='center', va='center',
                     color='white' if cm[i, j] > thresh else 'black')
    plt.tight_layout()

cnf_matrix = confusion_matrix(y_test, y_pred)
y_pred1 = random_forest.predict(x)
y_pred1
y_pred1.round()
cnf_matrix = confusion_matrix(y, y_pred1)
plot_confusion_matrix(cnf_matrix, classes=[0, 1])
code
17101142/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/creditcard.csv')
data.head(10)
code
17101142/cell_11
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier

y_train.values
y_train.values.ravel()
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(x_train, y_train.values.ravel())
y_pred = random_forest.predict(x_test)
random_forest.score(x_test, y_test)
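# Added note (hedged): on heavily imbalanced data like this, accuracy alone is
# misleading; per-class precision and recall are more informative.
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred, digits=4))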
code
17101142/cell_19
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/creditcard.csv')
data['normalized_amt'] = StandardScaler().fit_transform(data['Amount'].values.reshape(-1, 1))
data = data.drop(['Amount'], axis=1)
data = data.drop(['Time'], axis=1)
x = data.iloc[:, data.columns != 'Class']
y = data.iloc[:, data.columns == 'Class']

y_train.values
y_train.values.ravel()
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(x_train, y_train.values.ravel())
y_pred = random_forest.predict(x_test)
random_forest.score(x_test, y_test)
y_pred1 = random_forest.predict(x)
y_pred1
y_pred1.round()
code
17101142/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier

print(os.listdir('../input'))
code
17101142/cell_7
[ "text_plain_output_1.png" ]
y_train.values
code
17101142/cell_18
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/creditcard.csv')
data['normalized_amt'] = StandardScaler().fit_transform(data['Amount'].values.reshape(-1, 1))
data = data.drop(['Amount'], axis=1)
data = data.drop(['Time'], axis=1)
x = data.iloc[:, data.columns != 'Class']
y = data.iloc[:, data.columns == 'Class']

y_train.values
y_train.values.ravel()
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(x_train, y_train.values.ravel())
y_pred = random_forest.predict(x_test)
random_forest.score(x_test, y_test)
y_pred1 = random_forest.predict(x)
y_pred1
code
17101142/cell_8
[ "text_plain_output_1.png" ]
y_train.values
y_train.values.ravel()
code
17101142/cell_15
[ "text_plain_output_1.png" ]
y_test.head()
code
17101142/cell_16
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix

y_train.values
y_train.values.ravel()
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(x_train, y_train.values.ravel())
y_pred = random_forest.predict(x_test)
random_forest.score(x_test, y_test)
cnf_matrix = confusion_matrix(y_test, y_pred)
print(cnf_matrix)
code
17101142/cell_3
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/creditcard.csv')
data['normalized_amt'] = StandardScaler().fit_transform(data['Amount'].values.reshape(-1, 1))
data = data.drop(['Amount'], axis=1)
data.head()
code
17101142/cell_17
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np  # linear algebra

y_train.values
y_train.values.ravel()
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(x_train, y_train.values.ravel())
y_pred = random_forest.predict(x_test)
random_forest.score(x_test, y_test)

def plot_confusion_matrix(cm, classes, normalize=False, title='None', cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    # imshow/title were missing in the dump; plt.colorbar() needs a mappable,
    # so they are restored here (assumed from the standard sklearn recipe).
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.0
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            plt.text(j, i, format(cm[i, j], fmt), ha='center', va='center',
                     color='white' if cm[i, j] > thresh else 'black')
    plt.tight_layout()

cnf_matrix = confusion_matrix(y_test, y_pred)
plot_confusion_matrix(cnf_matrix, classes=[0, 1])
code
17101142/cell_14
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier

y_train.values
y_train.values.ravel()
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(x_train, y_train.values.ravel())
y_pred = random_forest.predict(x_test)
random_forest.score(x_test, y_test)
y_pred
code
17101142/cell_10
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier

y_train.values
y_train.values.ravel()
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(x_train, y_train.values.ravel())
code
74044330/cell_4
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from random import random
from statsmodels.tsa.ar_model import AR
import matplotlib.pyplot as plt

xdata = range(1, 100)
ydata = [x + 3 * random() for x in xdata]
plt.xlim(0, 100)
plt.ylim(0, 100)
plt.scatter(xdata, ydata, s=10)
plt.show()

model = AR(ydata)
model_fit = model.fit()
code
74044330/cell_6
[ "text_plain_output_1.png" ]
from random import random
from statsmodels.tsa.ar_model import AR
import matplotlib.pyplot as plt

xdata = range(1, 100)
ydata = [x + 3 * random() for x in xdata]
plt.xlim(0, 100)
plt.ylim(0, 100)

model = AR(ydata)
model_fit = model.fit()
yhat = model_fit.predict(start=90, end=110)
print('Predicted value for Auto Regression ', yhat)
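# Added note (hedged): statsmodels' AR class is deprecated in newer releases
# (hence the stderr output recorded for these cells); the replacement is AutoReg.
# The lag order of 5 below is an arbitrary assumption for illustration.
from statsmodels.tsa.ar_model import AutoReg
yhat2 = AutoReg(ydata, lags=5).fit().predict(start=90, end=110)
print('AutoReg prediction ', yhat2)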
code
74044330/cell_8
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from random import random
from statsmodels.tsa.ar_model import AR
from statsmodels.tsa.arima_model import ARMA
import matplotlib.pyplot as plt

xdata = range(1, 100)
ydata = [x + 3 * random() for x in xdata]
plt.xlim(0, 100)
plt.ylim(0, 100)

model = AR(ydata)
model_fit = model.fit()
yhat = model_fit.predict(start=90, end=110)

model = ARMA(ydata, order=(0, 1))
model_fit = model.fit(disp=False)
yhat = model_fit.predict(start=90, end=110)
print('Predicted value for Moving Average 0,1 ', yhat)
code
74044330/cell_10
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from random import random
from statsmodels.tsa.ar_model import AR
from statsmodels.tsa.arima_model import ARMA
import matplotlib.pyplot as plt

xdata = range(1, 100)
ydata = [x + 3 * random() for x in xdata]
plt.xlim(0, 100)
plt.ylim(0, 100)

model = AR(ydata)
model_fit = model.fit()
yhat = model_fit.predict(start=90, end=110)

model = ARMA(ydata, order=(0, 1))
model_fit = model.fit(disp=False)
yhat = model_fit.predict(start=90, end=110)

# NB: this refits the ARMA(0, 1) model from the previous step, so the "2,1"
# label in the printout does not match the fitted order.
model_fit = model.fit(disp=False)
yhat = model_fit.predict(start=90, end=110)
print('Predicted value for Moving Average 2,1 ', yhat)
code
74044330/cell_12
[ "text_plain_output_1.png" ]
from random import random
from statsmodels.tsa.ar_model import AR
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tsa.vector_ar.var_model import VAR
import matplotlib.pyplot as plt

xdata = range(1, 100)
ydata = [x + 3 * random() for x in xdata]
plt.xlim(0, 100)
plt.ylim(0, 100)

model = AR(ydata)
model_fit = model.fit()
yhat = model_fit.predict(start=90, end=110)

model = ARMA(ydata, order=(0, 1))
model_fit = model.fit(disp=False)
yhat = model_fit.predict(start=90, end=110)
model_fit = model.fit(disp=False)
yhat = model_fit.predict(start=90, end=110)

data = []
for i in range(100):
    v1 = i + random()
    v2 = v1 + random()
    row = [v1, v2]
    data.append(row)
print(data)

model = VAR(data)
model_fit = model.fit()
yhat = model_fit.forecast(model_fit.y, steps=1)
print('Predicted value using VAR ', yhat)
code
128011291/cell_13
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

meat = pd.read_csv('/kaggle/input/meat-consumption/meat_consumption.csv')
meat
meat.drop(['frequency', 'indicator'], axis=1, inplace=True)
meat
code
128011291/cell_9
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

meat = pd.read_csv('/kaggle/input/meat-consumption/meat_consumption.csv')
meat
meat['subject'].unique()
code
128011291/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

meat = pd.read_csv('/kaggle/input/meat-consumption/meat_consumption.csv')
meat
meat.info()
code
128011291/cell_23
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt  # needed for the plotting below; missing from the original cell
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

meat = pd.read_csv('/kaggle/input/meat-consumption/meat_consumption.csv')
meat
meat.drop(['frequency', 'indicator'], axis=1, inplace=True)
meat
meat['KG_CAP values'] = meat['value']
conversion_factor = 1000
meat.loc[meat['measure'] == 'THND_TONNE', 'KG_CAP values'] *= conversion_factor
meat

grouped_data = meat.groupby('subject')['KG_CAP values'].agg(['sum', 'mean'])

fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 10))
colors = ['blue', 'green', 'orange', 'red']
ax1.bar(grouped_data.index, grouped_data['sum'], color=colors)
ax1.set_ylabel('Total mean consumed')
ax1.set_xticklabels(grouped_data.index, rotation=45, ha='right')
for i, v in enumerate(grouped_data['sum']):
    ax1.text(i, v, str(v), ha='center', va='bottom')
ax2.bar(grouped_data.index, grouped_data['mean'], color=colors)
ax2.set_ylabel('Mean consumption of meat')
ax2.set_xticklabels(grouped_data.index, rotation=45, ha='right')
for i, v in enumerate(grouped_data['mean']):
    ax2.text(i, v, str(v), ha='center', va='bottom')
plt.xlabel('Type of meat')
plt.suptitle('Total and mean consumption of meat')
plt.tight_layout()
plt.show()
code
128011291/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

meat = pd.read_csv('/kaggle/input/meat-consumption/meat_consumption.csv')
meat
meat.drop(['frequency', 'indicator'], axis=1, inplace=True)
meat
meat['KG_CAP values'] = meat['value']
conversion_factor = 1000
meat.loc[meat['measure'] == 'THND_TONNE', 'KG_CAP values'] *= conversion_factor
meat
code
128011291/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

meat = pd.read_csv('/kaggle/input/meat-consumption/meat_consumption.csv')
meat
meat.describe()
code
128011291/cell_29
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt  # needed for the plotting below; missing from the original cell
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

meat = pd.read_csv('/kaggle/input/meat-consumption/meat_consumption.csv')
meat
meat.drop(['frequency', 'indicator'], axis=1, inplace=True)
meat
meat['KG_CAP values'] = meat['value']
conversion_factor = 1000
meat.loc[meat['measure'] == 'THND_TONNE', 'KG_CAP values'] *= conversion_factor
meat
meat['time'] = pd.to_datetime(meat['time'], format='%Y')
meat

grouped_data = meat.groupby('subject')['KG_CAP values'].agg(['sum', 'mean'])

# Create a figure with two subplots
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 10))

# Plot the sum of KG_CAP values
colors = ['blue', 'green', 'orange', 'red']
ax1.bar(grouped_data.index, grouped_data['sum'], color=colors)
ax1.set_ylabel('Total mean consumed')
ax1.set_xticklabels(grouped_data.index, rotation=45, ha='right')

# Add exact values on top of each bar in the sum plot
for i, v in enumerate(grouped_data['sum']):
    ax1.text(i, v, str(v), ha='center', va='bottom')

# Plot the mean of KG_CAP values
ax2.bar(grouped_data.index, grouped_data['mean'], color=colors)
ax2.set_ylabel('Mean consumption of meat')
ax2.set_xticklabels(grouped_data.index, rotation=45, ha='right')

# Add exact values on top of each bar in the mean plot
for i, v in enumerate(grouped_data['mean']):
    ax2.text(i, v, str(v), ha='center', va='bottom')

# Set x-axis label and title for the entire figure
plt.xlabel('Type of meat')
plt.suptitle('Total and mean consumption of meat')

# Adjust spacing between subplots
plt.tight_layout()

# Display the plot
plt.show()

meat['time'] = pd.to_datetime(meat['time'], format='%Y')
subjects = ['BEEF', 'PIG', 'POULTRY', 'SHEEP']
filtered_data = meat[meat['subject'].isin(subjects)]
time_intervals = [
    (pd.Timestamp('1990-01-01'), pd.Timestamp('1997-01-01')),
    (pd.Timestamp('1997-01-01'), pd.Timestamp('2004-01-01')),
    (pd.Timestamp('2004-01-01'), pd.Timestamp('2010-01-01')),
    (pd.Timestamp('2010-01-01'), pd.Timestamp('2016-01-01')),
    (pd.Timestamp('2016-01-01'), pd.Timestamp('2022-01-01')),
    (pd.Timestamp('2022-01-01'), pd.Timestamp('2029-01-01')),
]

fig, axes = plt.subplots(2, 2, figsize=(12, 8))
axes = axes.flatten()
for i, subject in enumerate(subjects):
    ax = axes[i]
    ax.set_title(subject)
    subject_data = filtered_data[filtered_data['subject'] == subject]
    x_labels = []
    x_ticks = []
    for j, (start_date, end_date) in enumerate(time_intervals):
        interval_data = subject_data[(subject_data['time'] >= start_date) & (subject_data['time'] < end_date)]
        total_count = interval_data['KG_CAP values'].sum()
        ax.bar(j, total_count)
        x_label = f'{start_date.year}-{end_date.year}'
        x_labels.append(x_label)
        x_ticks.append(j)
    ax.set_xticks(x_ticks)
    ax.set_xticklabels(x_labels, rotation='vertical')
    ax.set_ylabel('Total Count of KG_CAP values')
plt.tight_layout()
plt.show()

grouped_data = filtered_data.groupby('location')['KG_CAP values'].mean()
top_locations = grouped_data.nlargest(15)
fig, ax = plt.subplots(figsize=(10, 8))
colors = plt.cm.Set3(range(len(top_locations)))
ax.bar(top_locations.index, top_locations.values, color=colors)
ax.set_xticklabels(top_locations.index, rotation=45, ha='right')
ax.set_xlabel('Location')
ax.set_ylabel('Mean KG_CAP values')
ax.set_title('Top 15 Locations with Highest Mean KG_CAP Values')
plt.tight_layout()
plt.show()
code
128011291/cell_26
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt  # needed for the plotting below; missing from the original cell
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

meat = pd.read_csv('/kaggle/input/meat-consumption/meat_consumption.csv')
meat
meat.drop(['frequency', 'indicator'], axis=1, inplace=True)
meat
meat['KG_CAP values'] = meat['value']
conversion_factor = 1000
meat.loc[meat['measure'] == 'THND_TONNE', 'KG_CAP values'] *= conversion_factor
meat
meat['time'] = pd.to_datetime(meat['time'], format='%Y')
meat

grouped_data = meat.groupby('subject')['KG_CAP values'].agg(['sum', 'mean'])

# Create a figure with two subplots
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 10))

# Plot the sum of KG_CAP values
colors = ['blue', 'green', 'orange', 'red']
ax1.bar(grouped_data.index, grouped_data['sum'], color=colors)
ax1.set_ylabel('Total mean consumed')
ax1.set_xticklabels(grouped_data.index, rotation=45, ha='right')

# Add exact values on top of each bar in the sum plot
for i, v in enumerate(grouped_data['sum']):
    ax1.text(i, v, str(v), ha='center', va='bottom')

# Plot the mean of KG_CAP values
ax2.bar(grouped_data.index, grouped_data['mean'], color=colors)
ax2.set_ylabel('Mean consumption of meat')
ax2.set_xticklabels(grouped_data.index, rotation=45, ha='right')

# Add exact values on top of each bar in the mean plot
for i, v in enumerate(grouped_data['mean']):
    ax2.text(i, v, str(v), ha='center', va='bottom')

# Set x-axis label and title for the entire figure
plt.xlabel('Type of meat')
plt.suptitle('Total and mean consumption of meat')

# Adjust spacing between subplots
plt.tight_layout()

# Display the plot
plt.show()

meat['time'] = pd.to_datetime(meat['time'], format='%Y')
subjects = ['BEEF', 'PIG', 'POULTRY', 'SHEEP']
filtered_data = meat[meat['subject'].isin(subjects)]
time_intervals = [
    (pd.Timestamp('1990-01-01'), pd.Timestamp('1997-01-01')),
    (pd.Timestamp('1997-01-01'), pd.Timestamp('2004-01-01')),
    (pd.Timestamp('2004-01-01'), pd.Timestamp('2010-01-01')),
    (pd.Timestamp('2010-01-01'), pd.Timestamp('2016-01-01')),
    (pd.Timestamp('2016-01-01'), pd.Timestamp('2022-01-01')),
    (pd.Timestamp('2022-01-01'), pd.Timestamp('2029-01-01')),
]

fig, axes = plt.subplots(2, 2, figsize=(12, 8))
axes = axes.flatten()
for i, subject in enumerate(subjects):
    ax = axes[i]
    ax.set_title(subject)
    subject_data = filtered_data[filtered_data['subject'] == subject]
    x_labels = []
    x_ticks = []
    for j, (start_date, end_date) in enumerate(time_intervals):
        interval_data = subject_data[(subject_data['time'] >= start_date) & (subject_data['time'] < end_date)]
        total_count = interval_data['KG_CAP values'].sum()
        ax.bar(j, total_count)
        x_label = f'{start_date.year}-{end_date.year}'
        x_labels.append(x_label)
        x_ticks.append(j)
    ax.set_xticks(x_ticks)
    ax.set_xticklabels(x_labels, rotation='vertical')
    ax.set_ylabel('Total Count of KG_CAP values')
plt.tight_layout()
plt.show()
code
128011291/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

meat = pd.read_csv('/kaggle/input/meat-consumption/meat_consumption.csv')
meat
meat['frequency'].unique()
code
128011291/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

meat = pd.read_csv('/kaggle/input/meat-consumption/meat_consumption.csv')
meat
meat.drop(['frequency', 'indicator'], axis=1, inplace=True)
meat
meat['KG_CAP values'] = meat['value']
conversion_factor = 1000
meat.loc[meat['measure'] == 'THND_TONNE', 'KG_CAP values'] *= conversion_factor
meat
meat.describe()
code
128011291/cell_7
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

meat = pd.read_csv('/kaggle/input/meat-consumption/meat_consumption.csv')
meat
meat['location'].unique()
code
128011291/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

meat = pd.read_csv('/kaggle/input/meat-consumption/meat_consumption.csv')
meat
meat.drop(['frequency', 'indicator'], axis=1, inplace=True)
meat
meat['KG_CAP values'] = meat['value']
conversion_factor = 1000
meat.loc[meat['measure'] == 'THND_TONNE', 'KG_CAP values'] *= conversion_factor
meat
meat.info()
code
128011291/cell_8
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

meat = pd.read_csv('/kaggle/input/meat-consumption/meat_consumption.csv')
meat
meat['indicator'].unique()
code
128011291/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

meat = pd.read_csv('/kaggle/input/meat-consumption/meat_consumption.csv')
meat
meat.drop(['frequency', 'indicator'], axis=1, inplace=True)
meat
meat['KG_CAP values'] = meat['value']
conversion_factor = 1000
meat.loc[meat['measure'] == 'THND_TONNE', 'KG_CAP values'] *= conversion_factor
meat
code
128011291/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

meat = pd.read_csv('/kaggle/input/meat-consumption/meat_consumption.csv')
meat
code
128011291/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

meat = pd.read_csv('/kaggle/input/meat-consumption/meat_consumption.csv')
meat
meat.drop(['frequency', 'indicator'], axis=1, inplace=True)
meat
meat['KG_CAP values'] = meat['value']
conversion_factor = 1000
meat.loc[meat['measure'] == 'THND_TONNE', 'KG_CAP values'] *= conversion_factor
meat
meat['time'] = pd.to_datetime(meat['time'], format='%Y')
meat
code
128011291/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

meat = pd.read_csv('/kaggle/input/meat-consumption/meat_consumption.csv')
meat
meat['measure'].unique()
code
72074669/cell_9
[ "text_plain_output_1.png" ]
import cv2 as cv
import imageio
import matplotlib.pyplot as plt
import matplotlib.pyplot as mpimg  # the notebook also aliases pyplot as mpimg and pl
import matplotlib.pyplot as pl

nezuko = plt.imread('../input/digital-image/tanjiro.png')
img = mpimg.imread('../input/digital-image/tanjiro.png')
img = imageio.imread('../input/digital-image/tanjiro.png')
img = cv.imread('../input/digital-image/tanjiro.png', 0)
imh = cv.cvtColor(img, cv.COLOR_BGR2RGB)
imh
code
72074669/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import matplotlib.pyplot as mpimg

nezuko = plt.imread('../input/digital-image/tanjiro.png')
img = mpimg.imread('../input/digital-image/tanjiro.png')
plt.imshow(img)
code
72074669/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/coviddatasety/phsm-severity-data.csv')
df.head(20)
code
72074669/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
from PIL import Image

tanjiro = Image.open('../input/digital-image/tanjiro.png')
tanjiro
code
72074669/cell_11
[ "text_plain_output_1.png", "image_output_1.png" ]
import cv2 as cv
import imageio
import matplotlib.pyplot as plt
import matplotlib.pyplot as mpimg
import matplotlib.pyplot as pl

nezuko = plt.imread('../input/digital-image/tanjiro.png')
img = mpimg.imread('../input/digital-image/tanjiro.png')
img = imageio.imread('../input/digital-image/tanjiro.png')
img = cv.imread('../input/digital-image/tanjiro.png', 0)
imh = cv.cvtColor(img, cv.COLOR_BGR2RGB)
imh
img = cv.imread('../input/digital-image/tanjiro.png', 1)
imh = cv.cvtColor(img, cv.COLOR_BGR2RGB)
pl.title('DEMONS SLAYER')
pl.subplot(2, 2, 1)
pl.imshow(imh)
pl.subplot(2, 2, 4)
pl.imshow(imh)
code
72074669/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72074669/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import cv2 as cv
import imageio
import matplotlib.pyplot as plt
import matplotlib.pyplot as mpimg
import matplotlib.pyplot as pl

nezuko = plt.imread('../input/digital-image/tanjiro.png')
img = mpimg.imread('../input/digital-image/tanjiro.png')
img = imageio.imread('../input/digital-image/tanjiro.png')
img = cv.imread('../input/digital-image/tanjiro.png', 0)
pl.imshow(img)
code
72074669/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import cv2 as cv
import imageio
import matplotlib.pyplot as plt
import matplotlib.pyplot as mpimg
import matplotlib.pyplot as pl

nezuko = plt.imread('../input/digital-image/tanjiro.png')
img = mpimg.imread('../input/digital-image/tanjiro.png')
img = imageio.imread('../input/digital-image/tanjiro.png')
img = cv.imread('../input/digital-image/tanjiro.png', 0)
imh = cv.cvtColor(img, cv.COLOR_BGR2RGB)
pl.imshow(imh)
code
72074669/cell_3
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt

nezuko = plt.imread('../input/digital-image/tanjiro.png')
plt.imshow(nezuko)
code
72074669/cell_10
[ "image_output_1.png" ]
import cv2 as cv
import imageio
import matplotlib.pyplot as plt
import matplotlib.pyplot as mpimg
import matplotlib.pyplot as pl

nezuko = plt.imread('../input/digital-image/tanjiro.png')
img = mpimg.imread('../input/digital-image/tanjiro.png')
img = imageio.imread('../input/digital-image/tanjiro.png')
img = cv.imread('../input/digital-image/tanjiro.png', 0)
imh = cv.cvtColor(img, cv.COLOR_BGR2RGB)
imh
img = cv.imread('../input/digital-image/tanjiro.png', 1)
imh = cv.cvtColor(img, cv.COLOR_BGR2RGB)
pl.title('DEMONS SLAYER')
pl.imshow(imh)
code
72074669/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
import cv2 as cv
import imageio
import matplotlib.pyplot as plt
import matplotlib.pyplot as mpimg
import matplotlib.pyplot as pl

nezuko = plt.imread('../input/digital-image/tanjiro.png')
img = mpimg.imread('../input/digital-image/tanjiro.png')
img = imageio.imread('../input/digital-image/tanjiro.png')
img = cv.imread('../input/digital-image/tanjiro.png', 0)
imh = cv.cvtColor(img, cv.COLOR_BGR2RGB)
imh
img = cv.imread('../input/digital-image/tanjiro.png', 1)
imh = cv.cvtColor(img, cv.COLOR_BGR2RGB)
img = cv.imread('../input/digital-image/tanjiro.png', 0)
print('Image Dimension:', img.shape)
code
72074669/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import imageio
import matplotlib.pyplot as plt
import matplotlib.pyplot as mpimg

nezuko = plt.imread('../input/digital-image/tanjiro.png')
img = mpimg.imread('../input/digital-image/tanjiro.png')
img = imageio.imread('../input/digital-image/tanjiro.png')
plt.imshow(img)
code
18116987/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/books.csv', error_bad_lines=False)
df.shape
author_count = df['authors'].value_counts()[:10]
highest_rated = df.sort_values('ratings_count', ascending=False).head(10).set_index('title')
plt.figure(figsize=(15, 10))
sns.barplot(highest_rated['ratings_count'], highest_rated.index, palette='deep')
code
18116987/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/books.csv', error_bad_lines=False)
df.shape
df.head(5)
code
18116987/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

print(os.listdir('../input'))
code
18116987/cell_7
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/books.csv', error_bad_lines=False)
df.shape
plt.figure(figsize=(10, 7))
author_count = df['authors'].value_counts()[:10]
sns.barplot(x=author_count, y=author_count.index, palette='rocket')
plt.title('Top 10 authors with most number of books')
plt.xlabel('Number of Books Written')
code
18116987/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
from wordcloud import WordCloud
from subprocess import check_output
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/books.csv', error_bad_lines=False)
df.shape
author_count = df['authors'].value_counts()[:10]
highest_rated = df.sort_values('ratings_count', ascending=False).head(10).set_index('title')
lowest_rated = df.sort_values('ratings_count', ascending=True).head(10).set_index('title')

wordcloud = WordCloud(width=1440, height=1080, relative_scaling=0.5).generate_from_frequencies(df['language_code'].value_counts())
fig = plt.figure(1, figsize=(15, 15))
plt.imshow(wordcloud)
plt.axis('off')
plt.show()

df_cdb = df[df['language_code'] == 'fre']
df_cdb.head(5)
plt.figure(figsize=(15, 10))
locs = df_cdb['authors'].value_counts()[:10]
sns.barplot(x=locs, y=locs.index, palette='Set3')
plt.title('Top 10 Authors with most number of books in French Books')
plt.xlabel('Number of Books')
code
18116987/cell_14
[ "text_html_output_1.png" ]
from subprocess import check_output
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/books.csv', error_bad_lines=False)
df.shape
author_count = df['authors'].value_counts()[:10]
highest_rated = df.sort_values('ratings_count', ascending=False).head(10).set_index('title')
lowest_rated = df.sort_values('ratings_count', ascending=True).head(10).set_index('title')

wordcloud = WordCloud(width=1440, height=1080, relative_scaling=0.5).generate_from_frequencies(df['language_code'].value_counts())
fig = plt.figure(1, figsize=(15, 15))
plt.imshow(wordcloud)
plt.axis('off')
plt.show()
code
18116987/cell_12
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('../input/books.csv', error_bad_lines=False)
df.shape
author_count = df['authors'].value_counts()[:10]
highest_rated = df.sort_values('ratings_count', ascending=False).head(10).set_index('title')
lowest_rated = df.sort_values('ratings_count', ascending=True).head(10).set_index('title')
plt.figure(figsize=(5, 10))
sns.barplot(lowest_rated['ratings_count'].notnull(), lowest_rated.index, palette='Set3')
code
18116987/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('../input/books.csv', error_bad_lines=False)
df.shape
code
18141020/cell_9
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
import pandas as pd

data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
y = data.Price
melb_predictors = data.drop(['Price'], axis=1)
X = melb_predictors.select_dtypes(exclude=['object'])
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)

def score_dataset(X_train, X_valid, y_train, y_valid):
    model = RandomForestRegressor(n_estimators=10, random_state=0)
    model.fit(X_train, y_train)
    preds = model.predict(X_valid)
    return mean_absolute_error(y_valid, preds)

cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)

my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns

X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
for col in cols_with_missing:
    X_train_plus[col + '_was_missing'] = X_train_plus[col].isnull()
    X_valid_plus[col + '_was_missing'] = X_valid_plus[col].isnull()
my_imputer = SimpleImputer()
imputed_X_train_plus = pd.DataFrame(my_imputer.fit_transform(X_train_plus))
imputed_X_valid_plus = pd.DataFrame(my_imputer.transform(X_valid_plus))
imputed_X_train_plus.columns = X_train_plus.columns
imputed_X_valid_plus.columns = X_valid_plus.columns

print('MAE from Approach 3 (An Extension to Imputation):')
print(score_dataset(imputed_X_train_plus, imputed_X_valid_plus, y_train, y_valid))
code
18141020/cell_19
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import pandas as pd

data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
y = data.Price
melb_predictors = data.drop(['Price'], axis=1)
X = melb_predictors.select_dtypes(exclude=['object'])
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error

def score_dataset(X_train, X_valid, y_train, y_valid):
    model = RandomForestRegressor(n_estimators=10, random_state=0)
    model.fit(X_train, y_train)
    preds = model.predict(X_valid)
    return mean_absolute_error(y_valid, preds)

cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
for col in cols_with_missing:
    X_train_plus[col + '_was_missing'] = X_train_plus[col].isnull()
    X_valid_plus[col + '_was_missing'] = X_valid_plus[col].isnull()
my_imputer = SimpleImputer()
imputed_X_train_plus = pd.DataFrame(my_imputer.fit_transform(X_train_plus))
imputed_X_valid_plus = pd.DataFrame(my_imputer.transform(X_valid_plus))
imputed_X_train_plus.columns = X_train_plus.columns
imputed_X_valid_plus.columns = X_valid_plus.columns
missing_val_count_by_column = X_train.isnull().sum()
# The cell then reloads the data and rebuilds the splits, this time keeping
# low-cardinality categorical columns alongside the numeric ones.
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
y = data.Price
X = data.drop(['Price'], axis=1)
X_train_full, X_valid_full, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
X_train_full.drop(cols_with_missing, axis=1, inplace=True)
X_valid_full.drop(cols_with_missing, axis=1, inplace=True)
low_cardinality_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
my_cols = low_cardinality_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
s = X_train.dtypes == 'object'
object_cols = list(s[s].index)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error

def score_dataset(X_train, X_valid, y_train, y_valid):
    model = RandomForestRegressor(n_estimators=100, random_state=0)
    model.fit(X_train, y_train)
    preds = model.predict(X_valid)
    return mean_absolute_error(y_valid, preds)

drop_X_train = X_train.select_dtypes(exclude=['object'])
drop_X_valid = X_valid.select_dtypes(exclude=['object'])
from sklearn.preprocessing import LabelEncoder
label_X_train = X_train.copy()
label_X_valid = X_valid.copy()
label_encoder = LabelEncoder()
# Encode each categorical column with integer labels fit on the training split.
for col in object_cols:
    label_X_train[col] = label_encoder.fit_transform(X_train[col])
    label_X_valid[col] = label_encoder.transform(X_valid[col])
print('MAE from Approach 2 (Label Encoding):')
print(score_dataset(label_X_train, label_X_valid, y_train, y_valid))
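# Hedged aside, not part of the original cell: LabelEncoder.transform raises on
# categories unseen during fit; in recent scikit-learn, OrdinalEncoder offers a
# guarded variant over the same object_cols (names reused from above).
# from sklearn.preprocessing import OrdinalEncoder
# ordinal_encoder = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1)
# label_X_train[object_cols] = ordinal_encoder.fit_transform(X_train[object_cols])
# label_X_valid[object_cols] = ordinal_encoder.transform(X_valid[object_cols])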
code
18141020/cell_7
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error

def score_dataset(X_train, X_valid, y_train, y_valid):
    model = RandomForestRegressor(n_estimators=10, random_state=0)
    model.fit(X_train, y_train)
    preds = model.predict(X_valid)
    return mean_absolute_error(y_valid, preds)

# X_train, X_valid, y_train and y_valid come from earlier cells of the notebook.
cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
print('MAE from Approach 1 (Drop columns with missing values):')
print(score_dataset(reduced_X_train, reduced_X_valid, y_train, y_valid))
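# Hedged aside, not in the original cell: the same column drop can be written
# with dropna on the training split, then reusing its columns for validation.
# reduced_X_train = X_train.dropna(axis=1)
# reduced_X_valid = X_valid[reduced_X_train.columns]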
code
18141020/cell_18
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.impute import SimpleImputer
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
import pandas as pd

data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
y = data.Price
melb_predictors = data.drop(['Price'], axis=1)
X = melb_predictors.select_dtypes(exclude=['object'])
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error

def score_dataset(X_train, X_valid, y_train, y_valid):
    model = RandomForestRegressor(n_estimators=10, random_state=0)
    model.fit(X_train, y_train)
    preds = model.predict(X_valid)
    return mean_absolute_error(y_valid, preds)

cols_with_missing = [col for col in X_train.columns if X_train[col].isnull().any()]
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
from sklearn.impute import SimpleImputer
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
X_train_plus = X_train.copy()
X_valid_plus = X_valid.copy()
for col in cols_with_missing:
    X_train_plus[col + '_was_missing'] = X_train_plus[col].isnull()
    X_valid_plus[col + '_was_missing'] = X_valid_plus[col].isnull()
my_imputer = SimpleImputer()
imputed_X_train_plus = pd.DataFrame(my_imputer.fit_transform(X_train_plus))
imputed_X_valid_plus = pd.DataFrame(my_imputer.transform(X_valid_plus))
imputed_X_train_plus.columns = X_train_plus.columns
imputed_X_valid_plus.columns = X_valid_plus.columns
missing_val_count_by_column = X_train.isnull().sum()
# The cell then reloads the data and rebuilds the splits, this time keeping
# low-cardinality categorical columns alongside the numeric ones.
import pandas as pd
from sklearn.model_selection import train_test_split
data = pd.read_csv('../input/melbourne-housing-snapshot/melb_data.csv')
y = data.Price
X = data.drop(['Price'], axis=1)
X_train_full, X_valid_full, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2, random_state=0)
cols_with_missing = [col for col in X_train_full.columns if X_train_full[col].isnull().any()]
X_train_full.drop(cols_with_missing, axis=1, inplace=True)
X_valid_full.drop(cols_with_missing, axis=1, inplace=True)
low_cardinality_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]
my_cols = low_cardinality_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
s = X_train.dtypes == 'object'
object_cols = list(s[s].index)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error

def score_dataset(X_train, X_valid, y_train, y_valid):
    model = RandomForestRegressor(n_estimators=100, random_state=0)
    model.fit(X_train, y_train)
    preds = model.predict(X_valid)
    return mean_absolute_error(y_valid, preds)

drop_X_train = X_train.select_dtypes(exclude=['object'])
drop_X_valid = X_valid.select_dtypes(exclude=['object'])
print('MAE from Approach 1 (Drop categorical variables):')
print(score_dataset(drop_X_train, drop_X_valid, y_train, y_valid))
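# Hedged sketch, not part of the original cell: one-hot encoding the kept
# low-cardinality columns is the usual next approach; handle_unknown='ignore'
# and sparse=False are the standard settings (spelled sparse_output in
# scikit-learn >= 1.2), with oh_train/oh_valid as hypothetical names.
# from sklearn.preprocessing import OneHotEncoder
# oh_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
# oh_train = pd.DataFrame(oh_encoder.fit_transform(X_train[object_cols]), index=X_train.index)
# oh_valid = pd.DataFrame(oh_encoder.transform(X_valid[object_cols]), index=X_valid.index)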
code