path: string (13 to 17 characters)
screenshot_names: sequence of strings (1 to 873 items)
code: string (0 to 40.4k characters)
cell_type: string (1 distinct value)
74063930/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd income_data = pd.read_csv('../input/adult-income-dataset/adult.csv') income_data.isnull().sum() income_data.isin(['?']).sum() income_data.income = [1 if each == '>50K' else 0 for each in income_data.income] y = income_data.income y dup = income_data.duplicated().any() income_data = income_data.drop_duplicates() income_data.drop(['fnlwgt', 'native-country'], axis=1, inplace=True) income_data = pd.get_dummies(income_data) income_data
code
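The cells from this notebook that follow reference x_train, x_test, y_train and y_test, but no cell in this dump defines them. Below is a minimal sketch of how such a split is typically produced from the dummy-encoded frame above; the split ratio and random_state are assumptions, not the author's settings.

from sklearn.model_selection import train_test_split

# hypothetical reconstruction of the missing split
x = income_data.drop('income', axis=1)   # all dummy-encoded feature columns
y = income_data.income                   # 0/1 target created earlier
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)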
74063930/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd income_data = pd.read_csv('../input/adult-income-dataset/adult.csv') income_data.isnull().sum() income_data.isin(['?']).sum() income_data.income = [1 if each == '>50K' else 0 for each in income_data.income] y = income_data.income y dup = income_data.duplicated().any() print('Are there duplicate values in the data? ', dup)
code
74063930/cell_25
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_squared_error, r2_score from sklearn.neighbors import KNeighborsRegressor import numpy as np import pandas as pd income_data = pd.read_csv('../input/adult-income-dataset/adult.csv') income_data.isnull().sum() income_data.isin(['?']).sum() income_data['workclass'] = income_data['workclass'].replace('?', np.nan) income_data['occupation'] = income_data['occupation'].replace('?', np.nan) income_data['native-country'] = income_data['native-country'].replace('?', np.nan) x_train.shape y_train.shape knn_model = KNeighborsRegressor(n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None) model = knn_model.fit(x_train, y_train) y_pred = model.predict(x_test) np.sqrt(mean_squared_error(y_test, y_pred)) np.sqrt(mean_squared_error(y_train, model.predict(x_train)))
code
74063930/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd income_data = pd.read_csv('../input/adult-income-dataset/adult.csv') income_data.describe()
code
74063930/cell_23
[ "text_html_output_1.png" ]
from sklearn.neighbors import KNeighborsRegressor x_train.shape y_train.shape knn_model = KNeighborsRegressor(n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None) model = knn_model.fit(x_train, y_train) y_pred = model.predict(x_test) y_pred[0:10]
code
74063930/cell_6
[ "text_html_output_1.png" ]
import pandas as pd income_data = pd.read_csv('../input/adult-income-dataset/adult.csv') income_data.isnull().sum() income_data.isin(['?']).sum()
code
74063930/cell_40
[ "text_plain_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier from sklearn.neighbors import KNeighborsRegressor x_train.shape y_train.shape knn = KNeighborsRegressor(n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None) knn = KNeighborsClassifier() knn.fit(x_train, y_train) score_knn = knn.score(x_test, y_test) print('The accuracy of the KNN Model is', score_knn)
code
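Accuracy alone can be misleading on an income dataset where one class dominates. A short optional follow-up, not part of the original notebook, that would give a fuller picture of the same fitted classifier:

from sklearn.metrics import classification_report, confusion_matrix

y_pred_class = knn.predict(x_test)                 # predictions from the classifier fitted above
print(confusion_matrix(y_test, y_pred_class))      # rows = true class, columns = predicted class
print(classification_report(y_test, y_pred_class)) # precision, recall and F1 per class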
74063930/cell_26
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error, r2_score from sklearn.neighbors import KNeighborsRegressor import numpy as np import pandas as pd income_data = pd.read_csv('../input/adult-income-dataset/adult.csv') income_data.isnull().sum() income_data.isin(['?']).sum() income_data['workclass'] = income_data['workclass'].replace('?', np.nan) income_data['occupation'] = income_data['occupation'].replace('?', np.nan) income_data['native-country'] = income_data['native-country'].replace('?', np.nan) x_train.shape y_train.shape knn_model = KNeighborsRegressor(n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None) model = knn_model.fit(x_train, y_train) y_pred = model.predict(x_test) np.sqrt(mean_squared_error(y_test, y_pred)) np.sqrt(mean_squared_error(y_train, model.predict(x_train))) for k in range(7): k = k + 1 k_model = KNeighborsRegressor(n_neighbors=k).fit(x_train, y_train) y_pred = k_model.predict(x_train) fehler = np.sqrt(mean_squared_error(y_train, y_pred)) print('k value = ', k, 'error value = ', fehler)
code
74063930/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd income_data = pd.read_csv('../input/adult-income-dataset/adult.csv') income_data.head()
code
74063930/cell_19
[ "text_plain_output_1.png" ]
y_train.shape
code
74063930/cell_18
[ "text_plain_output_1.png" ]
y_test
code
74063930/cell_32
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error, r2_score from sklearn.model_selection import GridSearchCV from sklearn.neighbors import KNeighborsRegressor import numpy as np import pandas as pd income_data = pd.read_csv('../input/adult-income-dataset/adult.csv') income_data.isnull().sum() income_data.isin(['?']).sum() income_data['workclass'] = income_data['workclass'].replace('?', np.nan) income_data['occupation'] = income_data['occupation'].replace('?', np.nan) income_data['native-country'] = income_data['native-country'].replace('?', np.nan) x_train.shape y_train.shape knn_model = KNeighborsRegressor(n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None) model = knn_model.fit(x_train, y_train) y_pred = model.predict(x_test) np.sqrt(mean_squared_error(y_test, y_pred)) np.sqrt(mean_squared_error(y_train, model.predict(x_train))) for k in range(7): k = k + 1 k_model = KNeighborsRegressor(n_neighbors=k).fit(x_train, y_train) y_pred = k_model.predict(x_train) fehler = np.sqrt(mean_squared_error(y_train, y_pred)) for k in range(7): k = k + 1 k_model = KNeighborsRegressor(n_neighbors=k).fit(x_train, y_train) y_pred = k_model.predict(x_test) fehler = np.sqrt(mean_squared_error(y_test, y_pred)) knn_werte = {'n_neighbors': np.arange(1, 7, 1)} knn = KNeighborsRegressor(n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None) knn_cv_model = GridSearchCV(knn, knn_werte, cv=10) knn_cv_model.fit(x_train, y_train) knn_cv_model.best_params_['n_neighbors']
code
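Assuming scikit-learn's default refit=True, the grid search object above also exposes the refit model directly, so the tuned regressor does not need to be rebuilt by hand. A short usage sketch (x_test and y_test come from the split assumed elsewhere in the notebook):

best_knn = knn_cv_model.best_estimator_                                # KNeighborsRegressor refit on x_train with the best k
print(np.sqrt(mean_squared_error(y_test, best_knn.predict(x_test))))   # test RMSE of the tuned model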
74063930/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd income_data = pd.read_csv('../input/adult-income-dataset/adult.csv') income_data.isnull().sum() income_data.isin(['?']).sum() income_data.income = [1 if each == '>50K' else 0 for each in income_data.income] y = income_data.income y
code
74063930/cell_16
[ "text_html_output_1.png" ]
x_test
code
74063930/cell_38
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error, r2_score from sklearn.model_selection import cross_val_score from sklearn.neighbors import KNeighborsRegressor import numpy as np import pandas as pd income_data = pd.read_csv('../input/adult-income-dataset/adult.csv') income_data.isnull().sum() income_data.isin(['?']).sum() income_data['workclass'] = income_data['workclass'].replace('?', np.nan) income_data['occupation'] = income_data['occupation'].replace('?', np.nan) income_data['native-country'] = income_data['native-country'].replace('?', np.nan) x_train.shape y_train.shape knn_model = KNeighborsRegressor(n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None) model = knn_model.fit(x_train, y_train) y_pred = model.predict(x_test) np.sqrt(mean_squared_error(y_test, y_pred)) np.sqrt(mean_squared_error(y_train, model.predict(x_train))) for k in range(7): k = k + 1 k_model = KNeighborsRegressor(n_neighbors=k).fit(x_train, y_train) y_pred = k_model.predict(x_train) fehler = np.sqrt(mean_squared_error(y_train, y_pred)) for k in range(7): k = k + 1 k_model = KNeighborsRegressor(n_neighbors=k).fit(x_train, y_train) y_pred = k_model.predict(x_test) fehler = np.sqrt(mean_squared_error(y_test, y_pred)) knn_werte = {'n_neighbors': np.arange(1, 7, 1)} model = KNeighborsRegressor(n_neighbors=6) model_knn = model.fit(x_train, y_train) np.sqrt(-1 * cross_val_score(model_knn, x_train, y_train, cv=10, scoring='neg_mean_squared_error')).mean()
code
74063930/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd income_data = pd.read_csv('../input/adult-income-dataset/adult.csv') income_data.info()
code
74063930/cell_17
[ "text_plain_output_1.png" ]
x_train.shape
code
74063930/cell_31
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error, r2_score from sklearn.model_selection import GridSearchCV from sklearn.neighbors import KNeighborsRegressor import numpy as np import pandas as pd income_data = pd.read_csv('../input/adult-income-dataset/adult.csv') income_data.isnull().sum() income_data.isin(['?']).sum() income_data['workclass'] = income_data['workclass'].replace('?', np.nan) income_data['occupation'] = income_data['occupation'].replace('?', np.nan) income_data['native-country'] = income_data['native-country'].replace('?', np.nan) x_train.shape y_train.shape knn_model = KNeighborsRegressor(n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None) model = knn_model.fit(x_train, y_train) y_pred = model.predict(x_test) np.sqrt(mean_squared_error(y_test, y_pred)) np.sqrt(mean_squared_error(y_train, model.predict(x_train))) for k in range(7): k = k + 1 k_model = KNeighborsRegressor(n_neighbors=k).fit(x_train, y_train) y_pred = k_model.predict(x_train) fehler = np.sqrt(mean_squared_error(y_train, y_pred)) for k in range(7): k = k + 1 k_model = KNeighborsRegressor(n_neighbors=k).fit(x_train, y_train) y_pred = k_model.predict(x_test) fehler = np.sqrt(mean_squared_error(y_test, y_pred)) knn_werte = {'n_neighbors': np.arange(1, 7, 1)} knn = KNeighborsRegressor(n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None) knn_cv_model = GridSearchCV(knn, knn_werte, cv=10) knn_cv_model.fit(x_train, y_train)
code
74063930/cell_24
[ "text_html_output_1.png" ]
from sklearn.metrics import mean_squared_error, r2_score from sklearn.neighbors import KNeighborsRegressor import numpy as np import pandas as pd income_data = pd.read_csv('../input/adult-income-dataset/adult.csv') income_data.isnull().sum() income_data.isin(['?']).sum() income_data['workclass'] = income_data['workclass'].replace('?', np.nan) income_data['occupation'] = income_data['occupation'].replace('?', np.nan) income_data['native-country'] = income_data['native-country'].replace('?', np.nan) x_train.shape y_train.shape knn_model = KNeighborsRegressor(n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None) model = knn_model.fit(x_train, y_train) y_pred = model.predict(x_test) np.sqrt(mean_squared_error(y_test, y_pred))
code
74063930/cell_27
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error, r2_score from sklearn.neighbors import KNeighborsRegressor import numpy as np import pandas as pd income_data = pd.read_csv('../input/adult-income-dataset/adult.csv') income_data.isnull().sum() income_data.isin(['?']).sum() income_data['workclass'] = income_data['workclass'].replace('?', np.nan) income_data['occupation'] = income_data['occupation'].replace('?', np.nan) income_data['native-country'] = income_data['native-country'].replace('?', np.nan) x_train.shape y_train.shape knn_model = KNeighborsRegressor(n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None) model = knn_model.fit(x_train, y_train) y_pred = model.predict(x_test) np.sqrt(mean_squared_error(y_test, y_pred)) np.sqrt(mean_squared_error(y_train, model.predict(x_train))) for k in range(7): k = k + 1 k_model = KNeighborsRegressor(n_neighbors=k).fit(x_train, y_train) y_pred = k_model.predict(x_train) fehler = np.sqrt(mean_squared_error(y_train, y_pred)) for k in range(7): k = k + 1 k_model = KNeighborsRegressor(n_neighbors=k).fit(x_train, y_train) y_pred = k_model.predict(x_test) fehler = np.sqrt(mean_squared_error(y_test, y_pred)) print('k value = ', k, 'error value = ', fehler)
code
74063930/cell_37
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error, r2_score from sklearn.neighbors import KNeighborsRegressor import numpy as np import pandas as pd income_data = pd.read_csv('../input/adult-income-dataset/adult.csv') income_data.isnull().sum() income_data.isin(['?']).sum() income_data['workclass'] = income_data['workclass'].replace('?', np.nan) income_data['occupation'] = income_data['occupation'].replace('?', np.nan) income_data['native-country'] = income_data['native-country'].replace('?', np.nan) income_data.income = [1 if each == '>50K' else 0 for each in income_data.income] y = income_data.income y dup = income_data.duplicated().any() income_data = income_data.drop_duplicates() income_data.drop(['fnlwgt', 'native-country'], axis=1, inplace=True) income_data = pd.get_dummies(income_data) income_data x_train.shape y_train.shape knn_model = KNeighborsRegressor(n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None) model = knn_model.fit(x_train, y_train) y_pred = model.predict(x_test) np.sqrt(mean_squared_error(y_test, y_pred)) np.sqrt(mean_squared_error(y_train, model.predict(x_train))) for k in range(7): k = k + 1 k_model = KNeighborsRegressor(n_neighbors=k).fit(x_train, y_train) y_pred = k_model.predict(x_train) fehler = np.sqrt(mean_squared_error(y_train, y_pred)) for k in range(7): k = k + 1 k_model = KNeighborsRegressor(n_neighbors=k).fit(x_train, y_train) y_pred = k_model.predict(x_test) fehler = np.sqrt(mean_squared_error(y_test, y_pred)) model = KNeighborsRegressor(n_neighbors=6) model_knn = model.fit(x_train, y_train) y_pred = model.predict(x_test) v_W = pd.DataFrame({'y_test': y_test, 'vorhergesagten Werte': y_pred}) v_W v_W['Differenz'] = v_W['y_test'] - v_W['vorhergesagten Werte'] v_W
code
74063930/cell_12
[ "text_html_output_1.png" ]
import pandas as pd income_data = pd.read_csv('../input/adult-income-dataset/adult.csv') income_data.isnull().sum() income_data.isin(['?']).sum() income_data.income = [1 if each == '>50K' else 0 for each in income_data.income] y = income_data.income y dup = income_data.duplicated().any() income_data = income_data.drop_duplicates() income_data.drop(['fnlwgt', 'native-country'], axis=1, inplace=True) income_data.head()
code
74063930/cell_5
[ "text_html_output_1.png" ]
import pandas as pd income_data = pd.read_csv('../input/adult-income-dataset/adult.csv') income_data.isnull().sum()
code
74063930/cell_36
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error, r2_score from sklearn.neighbors import KNeighborsRegressor import numpy as np import pandas as pd income_data = pd.read_csv('../input/adult-income-dataset/adult.csv') income_data.isnull().sum() income_data.isin(['?']).sum() income_data['workclass'] = income_data['workclass'].replace('?', np.nan) income_data['occupation'] = income_data['occupation'].replace('?', np.nan) income_data['native-country'] = income_data['native-country'].replace('?', np.nan) income_data.income = [1 if each == '>50K' else 0 for each in income_data.income] y = income_data.income y dup = income_data.duplicated().any() income_data = income_data.drop_duplicates() income_data.drop(['fnlwgt', 'native-country'], axis=1, inplace=True) income_data = pd.get_dummies(income_data) income_data x_train.shape y_train.shape knn_model = KNeighborsRegressor(n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=None) model = knn_model.fit(x_train, y_train) y_pred = model.predict(x_test) np.sqrt(mean_squared_error(y_test, y_pred)) np.sqrt(mean_squared_error(y_train, model.predict(x_train))) for k in range(7): k = k + 1 k_model = KNeighborsRegressor(n_neighbors=k).fit(x_train, y_train) y_pred = k_model.predict(x_train) fehler = np.sqrt(mean_squared_error(y_train, y_pred)) for k in range(7): k = k + 1 k_model = KNeighborsRegressor(n_neighbors=k).fit(x_train, y_train) y_pred = k_model.predict(x_test) fehler = np.sqrt(mean_squared_error(y_test, y_pred)) model = KNeighborsRegressor(n_neighbors=6) model_knn = model.fit(x_train, y_train) y_pred = model.predict(x_test) v_W = pd.DataFrame({'y_test': y_test, 'vorhergesagten Werte': y_pred}) v_W
code
128034546/cell_4
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv') plt.figure(figsize=(20, 15)) sns.heatmap(train.corr(), annot=True, cmap='coolwarm') plt.show()
code
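The stderr screenshot for this cell hints at a warning, possibly from calling corr() on a frame with non-numeric or irrelevant columns; this is an assumption about the cause, not a confirmed fix. A defensive variant that restricts the correlation matrix to numeric columns:

plt.figure(figsize=(20, 15))
sns.heatmap(train.select_dtypes(include='number').corr(), annot=True, cmap='coolwarm')
plt.show()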
128034546/cell_6
[ "text_plain_output_1.png" ]
from scipy.spatial.distance import mahalanobis from scipy.stats import chi2 import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv') features = ['clonesize', 'honeybee', 'bumbles', 'andrena', 'osmia', 'AverageRainingDays', 'fruitset', 'fruitmass', 'seeds'] X = train[features] y = train['yield'] from scipy.spatial.distance import mahalanobis from scipy.stats import chi2 def mahalanobisR(X, meanCol, IC): m = [] for i in range(X.shape[0]): m.append(mahalanobis(X.iloc[i, :], meanCol, IC) ** 2) return m def MD_detectOutliers(x, alpha=0.001): n = x.shape[0] p = x.shape[1] chi = chi2.ppf(1 - alpha, p) print('The Chi2 critical value with', p, 'degrees of freedom is: ', chi) x_minus_mean = x - np.mean(x) S = 1 / (n - 1) * np.dot(np.transpose(x_minus_mean), x_minus_mean) S_inv = np.linalg.inv(S) d = mahalanobisR(x, np.mean(x), S_inv) outliers = [] for i in range(len(d)): if d[i] > chi: outliers.append(i) return {'MD': d, 'Outliers': outliers} outliers = MD_detectOutliers(X, alpha=0.001) print(outliers['Outliers']) X = X.drop(outliers['Outliers']) y = y.drop(outliers['Outliers'])
code
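For context on the cutoff used above: with the nine features selected, MD_detectOutliers flags rows whose squared Mahalanobis distance exceeds the chi-square quantile at 1 - alpha with p = 9 degrees of freedom. A tiny standalone check of that threshold:

from scipy.stats import chi2
print(chi2.ppf(1 - 0.001, 9))   # critical value for alpha=0.001 and 9 features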
128034546/cell_2
[ "image_output_1.png" ]
import pandas as pd train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv') train.head()
code
128034546/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv') features = ['clonesize', 'honeybee', 'bumbles', 'andrena', 'osmia', 'AverageRainingDays', 'fruitset', 'fruitmass', 'seeds'] X = train[features] y = train['yield'] test_features = test[features] test['yield'] = grid_search.predict(test_features) test[['id', 'yield']].to_csv('submission.csv', index=False) display(test[['id', 'yield']])
code
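grid_search is used in the cell above but is not defined in any cell of this dump. Given the GridSearchCV import in cell 1 and the XGBRegressor pipeline built in cell 10, it was presumably a grid search wrapped around that pipeline; the sketch below is an assumption, and the parameter grid, scoring and cv values are invented placeholders rather than the author's settings.

from sklearn.model_selection import GridSearchCV

param_grid = {'xgb__max_depth': [3, 5, 7], 'xgb__learning_rate': [0.01, 0.05]}   # hypothetical grid
grid_search = GridSearchCV(xgb_pipeline, param_grid, scoring='neg_mean_squared_error', cv=5)
grid_search.fit(X_train, y_train)   # X_train/y_train from the split assumed elsewhere in the notebook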
128034546/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split, GridSearchCV from xgboost import XGBRegressor from sklearn.metrics import mean_squared_error, r2_score from sklearn.preprocessing import StandardScaler from sklearn.pipeline import Pipeline
code
128034546/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.spatial.distance import mahalanobis from scipy.stats import chi2 from scipy.stats import shapiro import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv') features = ['clonesize', 'honeybee', 'bumbles', 'andrena', 'osmia', 'AverageRainingDays', 'fruitset', 'fruitmass', 'seeds'] X = train[features] y = train['yield'] from scipy.spatial.distance import mahalanobis from scipy.stats import chi2 def mahalanobisR(X, meanCol, IC): m = [] for i in range(X.shape[0]): m.append(mahalanobis(X.iloc[i, :], meanCol, IC) ** 2) return m def MD_detectOutliers(x, alpha=0.001): n = x.shape[0] p = x.shape[1] chi = chi2.ppf(1 - alpha, p) x_minus_mean = x - np.mean(x) S = 1 / (n - 1) * np.dot(np.transpose(x_minus_mean), x_minus_mean) S_inv = np.linalg.inv(S) d = mahalanobisR(x, np.mean(x), S_inv) outliers = [] for i in range(len(d)): if d[i] > chi: outliers.append(i) return {'MD': d, 'Outliers': outliers} outliers = MD_detectOutliers(X, alpha=0.001) X = X.drop(outliers['Outliers']) y = y.drop(outliers['Outliers']) from scipy.stats import shapiro stat, p = shapiro(y) print('Statistics=%.3f, p=%.3f' % (stat, p)) alpha = 0.05 if p > alpha: print('Sample looks Gaussian (fail to reject H0)') else: print('Sample does not look Gaussian (reject H0)')
code
128034546/cell_3
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv') train.describe(include='all') test.describe(include='all')
code
128034546/cell_10
[ "text_html_output_1.png" ]
from scipy.spatial.distance import mahalanobis from scipy.stats import chi2 from sklearn.metrics import mean_squared_error, r2_score from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from xgboost import XGBRegressor import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv') features = ['clonesize', 'honeybee', 'bumbles', 'andrena', 'osmia', 'AverageRainingDays', 'fruitset', 'fruitmass', 'seeds'] X = train[features] y = train['yield'] from scipy.spatial.distance import mahalanobis from scipy.stats import chi2 def mahalanobisR(X, meanCol, IC): m = [] for i in range(X.shape[0]): m.append(mahalanobis(X.iloc[i, :], meanCol, IC) ** 2) return m def MD_detectOutliers(x, alpha=0.001): n = x.shape[0] p = x.shape[1] chi = chi2.ppf(1 - alpha, p) x_minus_mean = x - np.mean(x) S = 1 / (n - 1) * np.dot(np.transpose(x_minus_mean), x_minus_mean) S_inv = np.linalg.inv(S) d = mahalanobisR(x, np.mean(x), S_inv) outliers = [] for i in range(len(d)): if d[i] > chi: outliers.append(i) return {'MD': d, 'Outliers': outliers} outliers = MD_detectOutliers(X, alpha=0.001) X = X.drop(outliers['Outliers']) y = y.drop(outliers['Outliers']) xgb_pipeline = Pipeline([('scaler', StandardScaler()), ('xgb', XGBRegressor(objective='reg:squarederror', n_jobs=-1, random_state=418, colsample_bytree=0.7, learning_rate=0.01, max_depth=5, n_estimators=1000, subsample=0.7))]) xgb_pipeline.fit(X_train, y_train) y_pred = xgb_pipeline.predict(X_val) rmse = np.sqrt(mean_squared_error(y_val, y_pred)) r2 = r2_score(y_val, y_pred) print('RMSE: ', rmse) print('R2 Score: ', r2)
code
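X_train, X_val, y_train and y_val are not created in any cell shown here. A plausible reconstruction using the train_test_split imported in cell 1 (the split size is a guess, and random_state=418 merely mirrors the seed used in the pipeline above):

from sklearn.model_selection import train_test_split

X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=418)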
73079465/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) raw = pd.read_csv('../input/ratings.csv') raw.drop_duplicates(inplace=True) raw.describe()
code
73079465/cell_20
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import surprise import surprise #Scikit-Learn library for recommender systems. raw = pd.read_csv('../input/ratings.csv') raw.drop_duplicates(inplace=True) raw = raw[['user_id', 'book_id', 'rating']] reader = surprise.Reader(rating_scale=(1, 5)) data = surprise.Dataset.load_from_df(raw, reader) class ProbabilisticMatrixFactorization(surprise.AlgoBase): def __init__(self, learning_rate, num_epochs, num_factors): self.alpha = learning_rate self.num_epochs = num_epochs self.num_factors = num_factors def fit(self, train): P = np.random.normal(0, 0.1, (train.n_users, self.num_factors)) Q = np.random.normal(0, 0.1, (train.n_items, self.num_factors)) for epoch in range(self.num_epochs): for u, i, r_ui in train.all_ratings(): residual = r_ui - np.dot(P[u], Q[i]) temp = P[u, :] P[u, :] += self.alpha * residual * Q[i] Q[i, :] += self.alpha * residual * temp self.P = P self.Q = Q self.trainset = train def estimate(self, u, i): if self.trainset.knows_user(u) and self.trainset.knows_item(i): nanCheck = np.dot(self.P[u], self.Q[i]) if np.isnan(nanCheck): return self.trainset.global_mean else: return np.dot(self.P[u, :], self.Q[i, :]) else: return self.trainset.global_mean Alg1 = ProbabilisticMatrixFactorization(learning_rate=0.05, num_epochs=4, num_factors=10) data1 = data.build_full_trainset() Alg1.fit(data1) Alg1.estimate(raw.user_id.iloc[4], raw.book_id.iloc[4]) gs = surprise.model_selection.GridSearchCV(ProbabilisticMatrixFactorization, param_grid={'learning_rate': [0.005, 0.01], 'num_epochs': [5, 10], 'num_factors': [10, 20]}, measures=['rmse', 'mae'], cv=2) gs.fit(data) print('RMSE: ', gs.best_score['rmse'], 'MAE: ', gs.best_score['mae']) print('RMSE: ', gs.best_params['rmse'], 'MAE: ', gs.best_params['mae']) best_params = gs.best_params['rmse']
code
73079465/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os print(os.listdir('../input')) import surprise
code
73079465/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) raw = pd.read_csv('../input/ratings.csv') raw.describe()
code
73079465/cell_16
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import surprise import surprise #Scikit-Learn library for recommender systems. raw = pd.read_csv('../input/ratings.csv') raw.drop_duplicates(inplace=True) raw = raw[['user_id', 'book_id', 'rating']] reader = surprise.Reader(rating_scale=(1, 5)) data = surprise.Dataset.load_from_df(raw, reader) Alg1 = ProbabilisticMatrixFactorization(learning_rate=0.05, num_epochs=4, num_factors=10) data1 = data.build_full_trainset() Alg1.fit(data1) print(raw.user_id.iloc[4], raw.book_id.iloc[4]) Alg1.estimate(raw.user_id.iloc[4], raw.book_id.iloc[4])
code
73079465/cell_24
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import surprise import surprise #Scikit-Learn library for recommender systems. raw = pd.read_csv('../input/ratings.csv') raw.drop_duplicates(inplace=True) raw = raw[['user_id', 'book_id', 'rating']] reader = surprise.Reader(rating_scale=(1, 5)) data = surprise.Dataset.load_from_df(raw, reader) class ProbabilisticMatrixFactorization(surprise.AlgoBase): def __init__(self, learning_rate, num_epochs, num_factors): self.alpha = learning_rate self.num_epochs = num_epochs self.num_factors = num_factors def fit(self, train): P = np.random.normal(0, 0.1, (train.n_users, self.num_factors)) Q = np.random.normal(0, 0.1, (train.n_items, self.num_factors)) for epoch in range(self.num_epochs): for u, i, r_ui in train.all_ratings(): residual = r_ui - np.dot(P[u], Q[i]) temp = P[u, :] P[u, :] += self.alpha * residual * Q[i] Q[i, :] += self.alpha * residual * temp self.P = P self.Q = Q self.trainset = train def estimate(self, u, i): if self.trainset.knows_user(u) and self.trainset.knows_item(i): nanCheck = np.dot(self.P[u], self.Q[i]) if np.isnan(nanCheck): return self.trainset.global_mean else: return np.dot(self.P[u, :], self.Q[i, :]) else: return self.trainset.global_mean Alg1 = ProbabilisticMatrixFactorization(learning_rate=0.05, num_epochs=4, num_factors=10) data1 = data.build_full_trainset() Alg1.fit(data1) Alg1.estimate(raw.user_id.iloc[4], raw.book_id.iloc[4]) gs = surprise.model_selection.GridSearchCV(ProbabilisticMatrixFactorization, param_grid={'learning_rate': [0.005, 0.01], 'num_epochs': [5, 10], 'num_factors': [10, 20]}, measures=['rmse', 'mae'], cv=2) gs.fit(data) best_params = gs.best_params['rmse'] bestVersion = ProbabilisticMatrixFactorization(learning_rate=best_params['learning_rate'], num_epochs=best_params['num_epochs'], num_factors=best_params['num_factors']) kSplit = surprise.model_selection.KFold(n_splits=10, shuffle=True) for train, test in kSplit.split(data): bestVersion.fit(train) prediction = bestVersion.test(test) surprise.accuracy.rmse(prediction, verbose=True) data.df.isna().sum() raw.isna().sum() bestVersion.estimate(123, 42)
code
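One caution on the final call, assuming the standard surprise API: AlgoBase.estimate expects the trainset's inner ids, while 123 and 42 here look like raw user and book ids. AlgoBase.predict performs the raw-to-inner translation (and falls back to the global mean for unknown ids), so a safer spelling would be:

pred = bestVersion.predict(uid=123, iid=42)
print(pred.est)   # estimated rating on the 1-5 scale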
73079465/cell_22
[ "text_plain_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import surprise import surprise #Scikit-Learn library for recommender systems. raw = pd.read_csv('../input/ratings.csv') raw.drop_duplicates(inplace=True) raw = raw[['user_id', 'book_id', 'rating']] reader = surprise.Reader(rating_scale=(1, 5)) data = surprise.Dataset.load_from_df(raw, reader) class ProbabilisticMatrixFactorization(surprise.AlgoBase): def __init__(self, learning_rate, num_epochs, num_factors): self.alpha = learning_rate self.num_epochs = num_epochs self.num_factors = num_factors def fit(self, train): P = np.random.normal(0, 0.1, (train.n_users, self.num_factors)) Q = np.random.normal(0, 0.1, (train.n_items, self.num_factors)) for epoch in range(self.num_epochs): for u, i, r_ui in train.all_ratings(): residual = r_ui - np.dot(P[u], Q[i]) temp = P[u, :] P[u, :] += self.alpha * residual * Q[i] Q[i, :] += self.alpha * residual * temp self.P = P self.Q = Q self.trainset = train def estimate(self, u, i): if self.trainset.knows_user(u) and self.trainset.knows_item(i): nanCheck = np.dot(self.P[u], self.Q[i]) if np.isnan(nanCheck): return self.trainset.global_mean else: return np.dot(self.P[u, :], self.Q[i, :]) else: return self.trainset.global_mean Alg1 = ProbabilisticMatrixFactorization(learning_rate=0.05, num_epochs=4, num_factors=10) data1 = data.build_full_trainset() Alg1.fit(data1) Alg1.estimate(raw.user_id.iloc[4], raw.book_id.iloc[4]) gs = surprise.model_selection.GridSearchCV(ProbabilisticMatrixFactorization, param_grid={'learning_rate': [0.005, 0.01], 'num_epochs': [5, 10], 'num_factors': [10, 20]}, measures=['rmse', 'mae'], cv=2) gs.fit(data) best_params = gs.best_params['rmse'] bestVersion = ProbabilisticMatrixFactorization(learning_rate=best_params['learning_rate'], num_epochs=best_params['num_epochs'], num_factors=best_params['num_factors']) kSplit = surprise.model_selection.KFold(n_splits=10, shuffle=True) for train, test in kSplit.split(data): bestVersion.fit(train) prediction = bestVersion.test(test) surprise.accuracy.rmse(prediction, verbose=True)
code
73079465/cell_10
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) raw = pd.read_csv('../input/ratings.csv') raw.drop_duplicates(inplace=True) print(f'There are {raw.shape[0]} ratings.') print('Unique users:', len(raw.user_id.unique())) print('Unique books:', len(raw.book_id.unique())) print(f'The average user rates {raw.user_id.value_counts().median()} books.') print(f'Highest rating: {raw.rating.max()}. Lowest: {raw.rating.min()}') raw.head()
code
1002832/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np import pandas as pd df = pd.read_csv('../input/cameras.csv', encoding='utf-8') print('The shape:') print(df.shape) print('\nThe information:') print(df.info()) print('\nNAs:') print(np.sum(df.isnull())) print(np.sum(df.isnull()) / len(df) * 100) print('\nStart and End:') print(df['DATE'][[0, df.shape[0] - 1]]) print('\nDifferent cameras:') print(df['CAMERA ID'].value_counts().head()) print(len(df['CAMERA ID'].value_counts()))
code
50237715/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd train_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', low_memory=False, nrows=10 ** 6, dtype={'row_id': 'int64', 'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'user_answer': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'}) question = pd.read_csv('../input/riiid-test-answer-prediction/questions.csv') lecture = pd.read_csv('../input/riiid-test-answer-prediction/lectures.csv') questions_interactions = train_df.merge(question, left_on='content_id', right_on='question_id', how='left') questions_interactions = questions_interactions[questions_interactions.content_type_id == 0] questions_interactions.rename(columns={'part': 'test_part'}, inplace=True) lectures_interactions = train_df.merge(lecture, left_on='content_id', right_on='lecture_id', how='left') lectures_interactions.rename(columns={'part': 'category'}, inplace=True) lectures_interactions = lectures_interactions[lectures_interactions.content_type_id == 1] (questions_interactions.shape, lectures_interactions.shape) lectures_interactions.isnull().sum() indeces = questions_interactions[questions_interactions.prior_question_had_explanation.isnull()].index values = {'prior_question_elapsed_time': -1, 'prior_question_had_explanation': False} questions_interactions.fillna(value=values, inplace=True) questions_interactions.prior_question_had_explanation = questions_interactions.prior_question_had_explanation.astype('int8') questions_interactions.loc[indeces, 'prior_question_had_explanation'] = -1 questions_interactions.prior_question_had_explanation.value_counts() lectures_interactions.drop(columns=['content_type_id', 'user_answer', 'answered_correctly', 'prior_question_elapsed_time', 'prior_question_had_explanation'], inplace=True) start_mem_usg1 = questions_interactions.memory_usage().sum() / 1024 ** 2 start_mem_usg2 = lectures_interactions.memory_usage().sum() / 1024 ** 2 questions_interactions.dtypes
code
50237715/cell_13
[ "text_html_output_1.png" ]
import pandas as pd train_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', low_memory=False, nrows=10 ** 6, dtype={'row_id': 'int64', 'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'user_answer': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'}) question = pd.read_csv('../input/riiid-test-answer-prediction/questions.csv') lecture = pd.read_csv('../input/riiid-test-answer-prediction/lectures.csv') questions_interactions = train_df.merge(question, left_on='content_id', right_on='question_id', how='left') questions_interactions = questions_interactions[questions_interactions.content_type_id == 0] questions_interactions.rename(columns={'part': 'test_part'}, inplace=True) lectures_interactions = train_df.merge(lecture, left_on='content_id', right_on='lecture_id', how='left') lectures_interactions.rename(columns={'part': 'category'}, inplace=True) lectures_interactions = lectures_interactions[lectures_interactions.content_type_id == 1] (questions_interactions.shape, lectures_interactions.shape) lectures_interactions.isnull().sum() indeces = questions_interactions[questions_interactions.prior_question_had_explanation.isnull()].index print(indeces) values = {'prior_question_elapsed_time': -1, 'prior_question_had_explanation': False} questions_interactions.fillna(value=values, inplace=True) questions_interactions.prior_question_had_explanation = questions_interactions.prior_question_had_explanation.astype('int8') questions_interactions.loc[indeces, 'prior_question_had_explanation'] = -1 questions_interactions.head()
code
50237715/cell_25
[ "text_html_output_1.png" ]
import pandas as pd train_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', low_memory=False, nrows=10 ** 6, dtype={'row_id': 'int64', 'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'user_answer': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'}) question = pd.read_csv('../input/riiid-test-answer-prediction/questions.csv') lecture = pd.read_csv('../input/riiid-test-answer-prediction/lectures.csv') questions_interactions = train_df.merge(question, left_on='content_id', right_on='question_id', how='left') questions_interactions = questions_interactions[questions_interactions.content_type_id == 0] questions_interactions.rename(columns={'part': 'test_part'}, inplace=True) lectures_interactions = train_df.merge(lecture, left_on='content_id', right_on='lecture_id', how='left') lectures_interactions.rename(columns={'part': 'category'}, inplace=True) lectures_interactions = lectures_interactions[lectures_interactions.content_type_id == 1] (questions_interactions.shape, lectures_interactions.shape) lectures_interactions.isnull().sum() indeces = questions_interactions[questions_interactions.prior_question_had_explanation.isnull()].index values = {'prior_question_elapsed_time': -1, 'prior_question_had_explanation': False} questions_interactions.fillna(value=values, inplace=True) questions_interactions.prior_question_had_explanation = questions_interactions.prior_question_had_explanation.astype('int8') questions_interactions.loc[indeces, 'prior_question_had_explanation'] = -1 questions_interactions.prior_question_had_explanation.value_counts() lectures_interactions.drop(columns=['content_type_id', 'user_answer', 'answered_correctly', 'prior_question_elapsed_time', 'prior_question_had_explanation'], inplace=True) start_mem_usg1 = questions_interactions.memory_usage().sum() / 1024 ** 2 start_mem_usg2 = lectures_interactions.memory_usage().sum() / 1024 ** 2 questions_interactions.dtypes questions_interactions.drop(columns=['content_id', 'content_type_id'], inplace=True) questions_interactions.dtypes
code
50237715/cell_4
[ "text_html_output_1.png" ]
import pandas as pd train_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', low_memory=False, nrows=10 ** 6, dtype={'row_id': 'int64', 'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'user_answer': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'}) question = pd.read_csv('../input/riiid-test-answer-prediction/questions.csv') question.head()
code
50237715/cell_33
[ "image_output_1.png" ]
import pandas as pd train_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', low_memory=False, nrows=10 ** 6, dtype={'row_id': 'int64', 'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'user_answer': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'}) question = pd.read_csv('../input/riiid-test-answer-prediction/questions.csv') lecture = pd.read_csv('../input/riiid-test-answer-prediction/lectures.csv') questions_interactions = train_df.merge(question, left_on='content_id', right_on='question_id', how='left') questions_interactions = questions_interactions[questions_interactions.content_type_id == 0] questions_interactions.rename(columns={'part': 'test_part'}, inplace=True) lectures_interactions = train_df.merge(lecture, left_on='content_id', right_on='lecture_id', how='left') lectures_interactions.rename(columns={'part': 'category'}, inplace=True) lectures_interactions = lectures_interactions[lectures_interactions.content_type_id == 1] (questions_interactions.shape, lectures_interactions.shape) lectures_interactions.isnull().sum() indeces = questions_interactions[questions_interactions.prior_question_had_explanation.isnull()].index values = {'prior_question_elapsed_time': -1, 'prior_question_had_explanation': False} questions_interactions.fillna(value=values, inplace=True) questions_interactions.prior_question_had_explanation = questions_interactions.prior_question_had_explanation.astype('int8') questions_interactions.loc[indeces, 'prior_question_had_explanation'] = -1 questions_interactions.prior_question_had_explanation.value_counts() lectures_interactions.drop(columns=['content_type_id', 'user_answer', 'answered_correctly', 'prior_question_elapsed_time', 'prior_question_had_explanation'], inplace=True) start_mem_usg1 = questions_interactions.memory_usage().sum() / 1024 ** 2 start_mem_usg2 = lectures_interactions.memory_usage().sum() / 1024 ** 2 questions_interactions.dtypes questions_interactions.drop(columns=['content_id', 'content_type_id'], inplace=True) questions_interactions.dtypes continous_columns = ['timestamp', 'user_id', 'task_container_id', 'prior_question_elapsed_time', 'question_id', 'bundle_id'] df = questions_interactions.copy() df = df.assign(tags2=df['tags'].str.split(' ')).explode('tags2') df['tags2'] = df['tags2'].astype('int32') df['tags2'].nunique() df['tags2'].hist(grid=False, figsize=(10, 5))
code
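The histogram above treats tag ids as a numeric axis; a small complementary view, not part of the original notebook, that lists the most frequent tags directly:

print(df['tags2'].value_counts().head(10))   # ten most common question tags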
50237715/cell_20
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd train_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', low_memory=False, nrows=10 ** 6, dtype={'row_id': 'int64', 'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'user_answer': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'}) question = pd.read_csv('../input/riiid-test-answer-prediction/questions.csv') lecture = pd.read_csv('../input/riiid-test-answer-prediction/lectures.csv') questions_interactions = train_df.merge(question, left_on='content_id', right_on='question_id', how='left') questions_interactions = questions_interactions[questions_interactions.content_type_id == 0] questions_interactions.rename(columns={'part': 'test_part'}, inplace=True) lectures_interactions = train_df.merge(lecture, left_on='content_id', right_on='lecture_id', how='left') lectures_interactions.rename(columns={'part': 'category'}, inplace=True) lectures_interactions = lectures_interactions[lectures_interactions.content_type_id == 1] (questions_interactions.shape, lectures_interactions.shape) lectures_interactions.isnull().sum() indeces = questions_interactions[questions_interactions.prior_question_had_explanation.isnull()].index values = {'prior_question_elapsed_time': -1, 'prior_question_had_explanation': False} questions_interactions.fillna(value=values, inplace=True) questions_interactions.prior_question_had_explanation = questions_interactions.prior_question_had_explanation.astype('int8') questions_interactions.loc[indeces, 'prior_question_had_explanation'] = -1 questions_interactions.prior_question_had_explanation.value_counts() lectures_interactions.drop(columns=['content_type_id', 'user_answer', 'answered_correctly', 'prior_question_elapsed_time', 'prior_question_had_explanation'], inplace=True) start_mem_usg1 = questions_interactions.memory_usage().sum() / 1024 ** 2 start_mem_usg2 = lectures_interactions.memory_usage().sum() / 1024 ** 2 print('Memory usage of questions_interactions dataframe is :', start_mem_usg1, ' MB') print('Memory usage of lectures_interactions dataframe is :', start_mem_usg2, ' MB')
code
50237715/cell_29
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', low_memory=False, nrows=10 ** 6, dtype={'row_id': 'int64', 'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'user_answer': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'}) question = pd.read_csv('../input/riiid-test-answer-prediction/questions.csv') lecture = pd.read_csv('../input/riiid-test-answer-prediction/lectures.csv') questions_interactions = train_df.merge(question, left_on='content_id', right_on='question_id', how='left') questions_interactions = questions_interactions[questions_interactions.content_type_id == 0] questions_interactions.rename(columns={'part': 'test_part'}, inplace=True) lectures_interactions = train_df.merge(lecture, left_on='content_id', right_on='lecture_id', how='left') lectures_interactions.rename(columns={'part': 'category'}, inplace=True) lectures_interactions = lectures_interactions[lectures_interactions.content_type_id == 1] (questions_interactions.shape, lectures_interactions.shape) lectures_interactions.isnull().sum() indeces = questions_interactions[questions_interactions.prior_question_had_explanation.isnull()].index values = {'prior_question_elapsed_time': -1, 'prior_question_had_explanation': False} questions_interactions.fillna(value=values, inplace=True) questions_interactions.prior_question_had_explanation = questions_interactions.prior_question_had_explanation.astype('int8') questions_interactions.loc[indeces, 'prior_question_had_explanation'] = -1 questions_interactions.prior_question_had_explanation.value_counts() lectures_interactions.drop(columns=['content_type_id', 'user_answer', 'answered_correctly', 'prior_question_elapsed_time', 'prior_question_had_explanation'], inplace=True) start_mem_usg1 = questions_interactions.memory_usage().sum() / 1024 ** 2 start_mem_usg2 = lectures_interactions.memory_usage().sum() / 1024 ** 2 questions_interactions.dtypes questions_interactions.drop(columns=['content_id', 'content_type_id'], inplace=True) questions_interactions.dtypes continous_columns = ['timestamp', 'user_id', 'task_container_id', 'prior_question_elapsed_time', 'question_id', 'bundle_id'] color = sns.color_palette()[0] discrete_columns = ['answered_correctly', 'user_answer', 'correct_answer', 'prior_question_had_explanation', 'test_part'] for col in discrete_columns: plt.figure(figsize=(5, 5)) sns.countplot(data=questions_interactions, x=col, color=color) plt.show()
code
50237715/cell_39
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', low_memory=False, nrows=10 ** 6, dtype={'row_id': 'int64', 'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'user_answer': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'}) question = pd.read_csv('../input/riiid-test-answer-prediction/questions.csv') lecture = pd.read_csv('../input/riiid-test-answer-prediction/lectures.csv') questions_interactions = train_df.merge(question, left_on='content_id', right_on='question_id', how='left') questions_interactions = questions_interactions[questions_interactions.content_type_id == 0] questions_interactions.rename(columns={'part': 'test_part'}, inplace=True) lectures_interactions = train_df.merge(lecture, left_on='content_id', right_on='lecture_id', how='left') lectures_interactions.rename(columns={'part': 'category'}, inplace=True) lectures_interactions = lectures_interactions[lectures_interactions.content_type_id == 1] (questions_interactions.shape, lectures_interactions.shape) lectures_interactions.isnull().sum() indeces = questions_interactions[questions_interactions.prior_question_had_explanation.isnull()].index values = {'prior_question_elapsed_time': -1, 'prior_question_had_explanation': False} questions_interactions.fillna(value=values, inplace=True) questions_interactions.prior_question_had_explanation = questions_interactions.prior_question_had_explanation.astype('int8') questions_interactions.loc[indeces, 'prior_question_had_explanation'] = -1 questions_interactions.prior_question_had_explanation.value_counts() lectures_interactions.drop(columns=['content_type_id', 'user_answer', 'answered_correctly', 'prior_question_elapsed_time', 'prior_question_had_explanation'], inplace=True) start_mem_usg1 = questions_interactions.memory_usage().sum() / 1024 ** 2 start_mem_usg2 = lectures_interactions.memory_usage().sum() / 1024 ** 2 questions_interactions.dtypes questions_interactions.drop(columns=['content_id', 'content_type_id'], inplace=True) questions_interactions.dtypes continous_columns = ['timestamp', 'user_id', 'task_container_id', 'prior_question_elapsed_time', 'question_id', 'bundle_id'] color = sns.color_palette()[0] discrete_columns = ['answered_correctly', 'user_answer', 'correct_answer', 'prior_question_had_explanation', 'test_part'] continous_cols = ['timestamp', 'user_id', 'content_id', 'task_container_id', 'lecture_id', 'tag'] color = sns.color_palette()[0] discrete_columns = ['type_of', 'category'] for col in discrete_columns: plt.figure(figsize=(5, 5)) sns.countplot(data=lectures_interactions, x=col, color=color) plt.show()
code
50237715/cell_2
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import os import warnings import numpy as np import pandas as pd import riiideducation import seaborn as sns import matplotlib.pyplot as plt import gc import os import warnings warnings.filterwarnings('ignore') for dirname, _, filenames in os.walk('/kaggle/input/riiid-test-answer-prediction'): for filename in filenames: print(os.path.join(dirname, filename))
code
50237715/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd train_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', low_memory=False, nrows=10 ** 6, dtype={'row_id': 'int64', 'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'user_answer': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'}) question = pd.read_csv('../input/riiid-test-answer-prediction/questions.csv') lecture = pd.read_csv('../input/riiid-test-answer-prediction/lectures.csv') questions_interactions = train_df.merge(question, left_on='content_id', right_on='question_id', how='left') questions_interactions = questions_interactions[questions_interactions.content_type_id == 0] questions_interactions.rename(columns={'part': 'test_part'}, inplace=True) lectures_interactions = train_df.merge(lecture, left_on='content_id', right_on='lecture_id', how='left') lectures_interactions.rename(columns={'part': 'category'}, inplace=True) lectures_interactions = lectures_interactions[lectures_interactions.content_type_id == 1] (questions_interactions.shape, lectures_interactions.shape) print(questions_interactions.isnull().sum()) lectures_interactions.isnull().sum()
code
50237715/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd train_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', low_memory=False, nrows=10 ** 6, dtype={'row_id': 'int64', 'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'user_answer': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'}) question = pd.read_csv('../input/riiid-test-answer-prediction/questions.csv') lecture = pd.read_csv('../input/riiid-test-answer-prediction/lectures.csv') questions_interactions = train_df.merge(question, left_on='content_id', right_on='question_id', how='left') questions_interactions = questions_interactions[questions_interactions.content_type_id == 0] questions_interactions.rename(columns={'part': 'test_part'}, inplace=True) lectures_interactions = train_df.merge(lecture, left_on='content_id', right_on='lecture_id', how='left') lectures_interactions.rename(columns={'part': 'category'}, inplace=True) lectures_interactions = lectures_interactions[lectures_interactions.content_type_id == 1] (questions_interactions.shape, lectures_interactions.shape) lectures_interactions.isnull().sum() lectures_interactions.drop(columns=['content_type_id', 'user_answer', 'answered_correctly', 'prior_question_elapsed_time', 'prior_question_had_explanation'], inplace=True) lectures_interactions.head()
code
50237715/cell_32
[ "text_plain_output_1.png" ]
import pandas as pd train_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', low_memory=False, nrows=10 ** 6, dtype={'row_id': 'int64', 'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'user_answer': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'}) question = pd.read_csv('../input/riiid-test-answer-prediction/questions.csv') lecture = pd.read_csv('../input/riiid-test-answer-prediction/lectures.csv') questions_interactions = train_df.merge(question, left_on='content_id', right_on='question_id', how='left') questions_interactions = questions_interactions[questions_interactions.content_type_id == 0] questions_interactions.rename(columns={'part': 'test_part'}, inplace=True) lectures_interactions = train_df.merge(lecture, left_on='content_id', right_on='lecture_id', how='left') lectures_interactions.rename(columns={'part': 'category'}, inplace=True) lectures_interactions = lectures_interactions[lectures_interactions.content_type_id == 1] (questions_interactions.shape, lectures_interactions.shape) lectures_interactions.isnull().sum() indeces = questions_interactions[questions_interactions.prior_question_had_explanation.isnull()].index values = {'prior_question_elapsed_time': -1, 'prior_question_had_explanation': False} questions_interactions.fillna(value=values, inplace=True) questions_interactions.prior_question_had_explanation = questions_interactions.prior_question_had_explanation.astype('int8') questions_interactions.loc[indeces, 'prior_question_had_explanation'] = -1 questions_interactions.prior_question_had_explanation.value_counts() lectures_interactions.drop(columns=['content_type_id', 'user_answer', 'answered_correctly', 'prior_question_elapsed_time', 'prior_question_had_explanation'], inplace=True) start_mem_usg1 = questions_interactions.memory_usage().sum() / 1024 ** 2 start_mem_usg2 = lectures_interactions.memory_usage().sum() / 1024 ** 2 questions_interactions.dtypes questions_interactions.drop(columns=['content_id', 'content_type_id'], inplace=True) questions_interactions.dtypes continous_columns = ['timestamp', 'user_id', 'task_container_id', 'prior_question_elapsed_time', 'question_id', 'bundle_id'] df = questions_interactions.copy() df = df.assign(tags2=df['tags'].str.split(' ')).explode('tags2') df['tags2'] = df['tags2'].astype('int32') df['tags2'].nunique()
code
50237715/cell_8
[ "image_output_2.png", "image_output_1.png" ]
import pandas as pd train_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', low_memory=False, nrows=10 ** 6, dtype={'row_id': 'int64', 'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'user_answer': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'}) question = pd.read_csv('../input/riiid-test-answer-prediction/questions.csv') lecture = pd.read_csv('../input/riiid-test-answer-prediction/lectures.csv') questions_interactions = train_df.merge(question, left_on='content_id', right_on='question_id', how='left') questions_interactions = questions_interactions[questions_interactions.content_type_id == 0] questions_interactions.rename(columns={'part': 'test_part'}, inplace=True) lectures_interactions = train_df.merge(lecture, left_on='content_id', right_on='lecture_id', how='left') lectures_interactions.rename(columns={'part': 'category'}, inplace=True) lectures_interactions = lectures_interactions[lectures_interactions.content_type_id == 1] (questions_interactions.shape, lectures_interactions.shape)
code
50237715/cell_16
[ "text_html_output_1.png" ]
import pandas as pd train_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', low_memory=False, nrows=10 ** 6, dtype={'row_id': 'int64', 'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'user_answer': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'}) question = pd.read_csv('../input/riiid-test-answer-prediction/questions.csv') lecture = pd.read_csv('../input/riiid-test-answer-prediction/lectures.csv') questions_interactions = train_df.merge(question, left_on='content_id', right_on='question_id', how='left') questions_interactions = questions_interactions[questions_interactions.content_type_id == 0] questions_interactions.rename(columns={'part': 'test_part'}, inplace=True) lectures_interactions = train_df.merge(lecture, left_on='content_id', right_on='lecture_id', how='left') lectures_interactions.rename(columns={'part': 'category'}, inplace=True) lectures_interactions = lectures_interactions[lectures_interactions.content_type_id == 1] (questions_interactions.shape, lectures_interactions.shape) lectures_interactions.isnull().sum() lectures_interactions.head()
code
50237715/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
train_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', low_memory=False, nrows=10 ** 6, dtype={'row_id': 'int64', 'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'user_answer': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'})
train_df.head()
code
50237715/cell_24
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
train_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', low_memory=False, nrows=10 ** 6, dtype={'row_id': 'int64', 'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'user_answer': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'})
question = pd.read_csv('../input/riiid-test-answer-prediction/questions.csv')
lecture = pd.read_csv('../input/riiid-test-answer-prediction/lectures.csv')
questions_interactions = train_df.merge(question, left_on='content_id', right_on='question_id', how='left')
questions_interactions = questions_interactions[questions_interactions.content_type_id == 0]
questions_interactions.rename(columns={'part': 'test_part'}, inplace=True)
lectures_interactions = train_df.merge(lecture, left_on='content_id', right_on='lecture_id', how='left')
lectures_interactions.rename(columns={'part': 'category'}, inplace=True)
lectures_interactions = lectures_interactions[lectures_interactions.content_type_id == 1]
(questions_interactions.shape, lectures_interactions.shape)
lectures_interactions.isnull().sum()
indeces = questions_interactions[questions_interactions.prior_question_had_explanation.isnull()].index
values = {'prior_question_elapsed_time': -1, 'prior_question_had_explanation': False}
questions_interactions.fillna(value=values, inplace=True)
questions_interactions.prior_question_had_explanation = questions_interactions.prior_question_had_explanation.astype('int8')
questions_interactions.loc[indeces, 'prior_question_had_explanation'] = -1
questions_interactions.prior_question_had_explanation.value_counts()
lectures_interactions.drop(columns=['content_type_id', 'user_answer', 'answered_correctly', 'prior_question_elapsed_time', 'prior_question_had_explanation'], inplace=True)
start_mem_usg1 = questions_interactions.memory_usage().sum() / 1024 ** 2
start_mem_usg2 = lectures_interactions.memory_usage().sum() / 1024 ** 2
questions_interactions.dtypes
questions_interactions.drop(columns=['content_id', 'content_type_id'], inplace=True)

def reduce_mem_usage(props):
    start_mem_usg = props.memory_usage().sum() / 1024**2
    print("Memory usage of properties dataframe is :",start_mem_usg," MB")
    NAlist = []  # Keeps track of columns that have missing values filled in.
    for col in props.columns:
        if props[col].dtype != object:  # Exclude strings
            # make variables for Int, max and min
            IsInt = False
            mx = props[col].max()
            mn = props[col].min()
            # Integer does not support NA, therefore, NA needs to be filled
            if not np.isfinite(props[col]).all():
                NAlist.append(col)
                props[col].fillna(mn-1,inplace=True)
            # test if column can be converted to an integer
            asint = props[col].fillna(0).astype(np.int64)
            result = (props[col] - asint)
            result = result.sum()
            if result > -0.01 and result < 0.01:
                IsInt = True
            # Make Integer/unsigned Integer datatypes
            if IsInt:
                if mn >= 0:
                    if mx < 255:
                        props[col] = props[col].astype(np.uint8)
                    elif mx < 65535:
                        props[col] = props[col].astype(np.uint16)
                    elif mx < 4294967295:
                        props[col] = props[col].astype(np.uint32)
                    else:
                        props[col] = props[col].astype(np.uint64)
                else:
                    if mn > np.iinfo(np.int8).min and mx < np.iinfo(np.int8).max:
                        props[col] = props[col].astype(np.int8)
                    elif mn > np.iinfo(np.int16).min and mx < np.iinfo(np.int16).max:
                        props[col] = props[col].astype(np.int16)
                    elif mn > np.iinfo(np.int32).min and mx < np.iinfo(np.int32).max:
                        props[col] = props[col].astype(np.int32)
                    elif mn > np.iinfo(np.int64).min and mx < np.iinfo(np.int64).max:
                        props[col] = props[col].astype(np.int64)
            # Make float datatypes 32 bit
            else:
                props[col] = props[col].astype(np.float32)
            # Print new column type
            # print("dtype after: ",props[col].dtype)
            # print("******************************")
    # Print final result
    print("___MEMORY USAGE AFTER COMPLETION:___")
    mem_usg = props.memory_usage().sum() / 1024**2
    print("Memory usage is: ",mem_usg," MB")
    print("This is ",100*mem_usg/start_mem_usg,"% of the initial size")
    return props, NAlist

questions_interactions, _ = reduce_mem_usage(questions_interactions)
code
50237715/cell_14
[ "text_html_output_1.png" ]
import pandas as pd train_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', low_memory=False, nrows=10 ** 6, dtype={'row_id': 'int64', 'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'user_answer': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'}) question = pd.read_csv('../input/riiid-test-answer-prediction/questions.csv') lecture = pd.read_csv('../input/riiid-test-answer-prediction/lectures.csv') questions_interactions = train_df.merge(question, left_on='content_id', right_on='question_id', how='left') questions_interactions = questions_interactions[questions_interactions.content_type_id == 0] questions_interactions.rename(columns={'part': 'test_part'}, inplace=True) lectures_interactions = train_df.merge(lecture, left_on='content_id', right_on='lecture_id', how='left') lectures_interactions.rename(columns={'part': 'category'}, inplace=True) lectures_interactions = lectures_interactions[lectures_interactions.content_type_id == 1] (questions_interactions.shape, lectures_interactions.shape) lectures_interactions.isnull().sum() indeces = questions_interactions[questions_interactions.prior_question_had_explanation.isnull()].index values = {'prior_question_elapsed_time': -1, 'prior_question_had_explanation': False} questions_interactions.fillna(value=values, inplace=True) questions_interactions.prior_question_had_explanation = questions_interactions.prior_question_had_explanation.astype('int8') questions_interactions.loc[indeces, 'prior_question_had_explanation'] = -1 questions_interactions.prior_question_had_explanation.value_counts()
code
50237715/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd train_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', low_memory=False, nrows=10 ** 6, dtype={'row_id': 'int64', 'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'user_answer': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'}) question = pd.read_csv('../input/riiid-test-answer-prediction/questions.csv') lecture = pd.read_csv('../input/riiid-test-answer-prediction/lectures.csv') questions_interactions = train_df.merge(question, left_on='content_id', right_on='question_id', how='left') questions_interactions = questions_interactions[questions_interactions.content_type_id == 0] questions_interactions.rename(columns={'part': 'test_part'}, inplace=True) lectures_interactions = train_df.merge(lecture, left_on='content_id', right_on='lecture_id', how='left') lectures_interactions.rename(columns={'part': 'category'}, inplace=True) lectures_interactions = lectures_interactions[lectures_interactions.content_type_id == 1] (questions_interactions.shape, lectures_interactions.shape) lectures_interactions.isnull().sum() indeces = questions_interactions[questions_interactions.prior_question_had_explanation.isnull()].index values = {'prior_question_elapsed_time': -1, 'prior_question_had_explanation': False} questions_interactions.fillna(value=values, inplace=True) questions_interactions.prior_question_had_explanation = questions_interactions.prior_question_had_explanation.astype('int8') questions_interactions.loc[indeces, 'prior_question_had_explanation'] = -1 questions_interactions.prior_question_had_explanation.value_counts() lectures_interactions.drop(columns=['content_type_id', 'user_answer', 'answered_correctly', 'prior_question_elapsed_time', 'prior_question_had_explanation'], inplace=True) start_mem_usg1 = questions_interactions.memory_usage().sum() / 1024 ** 2 start_mem_usg2 = lectures_interactions.memory_usage().sum() / 1024 ** 2 questions_interactions.dtypes questions_interactions.drop(columns=['content_id', 'content_type_id'], inplace=True) questions_interactions.dtypes continous_columns = ['timestamp', 'user_id', 'task_container_id', 'prior_question_elapsed_time', 'question_id', 'bundle_id'] questions_interactions.hist(column=continous_columns, grid=False, figsize=(20, 15))
code
50237715/cell_37
[ "text_plain_output_1.png" ]
import pandas as pd train_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', low_memory=False, nrows=10 ** 6, dtype={'row_id': 'int64', 'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'user_answer': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'}) question = pd.read_csv('../input/riiid-test-answer-prediction/questions.csv') lecture = pd.read_csv('../input/riiid-test-answer-prediction/lectures.csv') questions_interactions = train_df.merge(question, left_on='content_id', right_on='question_id', how='left') questions_interactions = questions_interactions[questions_interactions.content_type_id == 0] questions_interactions.rename(columns={'part': 'test_part'}, inplace=True) lectures_interactions = train_df.merge(lecture, left_on='content_id', right_on='lecture_id', how='left') lectures_interactions.rename(columns={'part': 'category'}, inplace=True) lectures_interactions = lectures_interactions[lectures_interactions.content_type_id == 1] (questions_interactions.shape, lectures_interactions.shape) lectures_interactions.isnull().sum() indeces = questions_interactions[questions_interactions.prior_question_had_explanation.isnull()].index values = {'prior_question_elapsed_time': -1, 'prior_question_had_explanation': False} questions_interactions.fillna(value=values, inplace=True) questions_interactions.prior_question_had_explanation = questions_interactions.prior_question_had_explanation.astype('int8') questions_interactions.loc[indeces, 'prior_question_had_explanation'] = -1 questions_interactions.prior_question_had_explanation.value_counts() lectures_interactions.drop(columns=['content_type_id', 'user_answer', 'answered_correctly', 'prior_question_elapsed_time', 'prior_question_had_explanation'], inplace=True) start_mem_usg1 = questions_interactions.memory_usage().sum() / 1024 ** 2 start_mem_usg2 = lectures_interactions.memory_usage().sum() / 1024 ** 2 continous_cols = ['timestamp', 'user_id', 'content_id', 'task_container_id', 'lecture_id', 'tag'] lectures_interactions.hist(column=continous_cols, grid=False, figsize=(20, 15))
code
50237715/cell_5
[ "image_output_1.png" ]
import pandas as pd
train_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', low_memory=False, nrows=10 ** 6, dtype={'row_id': 'int64', 'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'user_answer': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'})
question = pd.read_csv('../input/riiid-test-answer-prediction/questions.csv')
lecture = pd.read_csv('../input/riiid-test-answer-prediction/lectures.csv')
lecture.head()
code
50237715/cell_36
[ "image_output_5.png", "image_output_4.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd train_df = pd.read_csv('/kaggle/input/riiid-test-answer-prediction/train.csv', low_memory=False, nrows=10 ** 6, dtype={'row_id': 'int64', 'timestamp': 'int64', 'user_id': 'int32', 'content_id': 'int16', 'content_type_id': 'int8', 'task_container_id': 'int16', 'user_answer': 'int8', 'answered_correctly': 'int8', 'prior_question_elapsed_time': 'float32', 'prior_question_had_explanation': 'boolean'}) question = pd.read_csv('../input/riiid-test-answer-prediction/questions.csv') lecture = pd.read_csv('../input/riiid-test-answer-prediction/lectures.csv') questions_interactions = train_df.merge(question, left_on='content_id', right_on='question_id', how='left') questions_interactions = questions_interactions[questions_interactions.content_type_id == 0] questions_interactions.rename(columns={'part': 'test_part'}, inplace=True) lectures_interactions = train_df.merge(lecture, left_on='content_id', right_on='lecture_id', how='left') lectures_interactions.rename(columns={'part': 'category'}, inplace=True) lectures_interactions = lectures_interactions[lectures_interactions.content_type_id == 1] (questions_interactions.shape, lectures_interactions.shape) lectures_interactions.isnull().sum() indeces = questions_interactions[questions_interactions.prior_question_had_explanation.isnull()].index values = {'prior_question_elapsed_time': -1, 'prior_question_had_explanation': False} questions_interactions.fillna(value=values, inplace=True) questions_interactions.prior_question_had_explanation = questions_interactions.prior_question_had_explanation.astype('int8') questions_interactions.loc[indeces, 'prior_question_had_explanation'] = -1 questions_interactions.prior_question_had_explanation.value_counts() lectures_interactions.drop(columns=['content_type_id', 'user_answer', 'answered_correctly', 'prior_question_elapsed_time', 'prior_question_had_explanation'], inplace=True) start_mem_usg1 = questions_interactions.memory_usage().sum() / 1024 ** 2 start_mem_usg2 = lectures_interactions.memory_usage().sum() / 1024 ** 2 lectures_interactions.head()
code
74060518/cell_7
[ "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import glob
import os
INPUT_PATH = '../input/cityscapes-image-pairs/cityscapes_data'
train_files = glob.glob(os.path.join(INPUT_PATH + '/train', '*jpg'))
val_files = glob.glob(os.path.join(INPUT_PATH + '/val', '*jpg'))
print('Total train images:', len(train_files))
print('Total validation images:', len(val_files))
code
74060518/cell_18
[ "text_plain_output_1.png" ]
from sklearn.cluster import KMeans
import cv2
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
INPUT_PATH = '../input/cityscapes-image-pairs/cityscapes_data'
BATCH_SIZE = 4
INPUT_IMG_SIZE = 256
OUTPUT_CLASSES = 10
LEARINING_RATE = 0.001
train_files = glob.glob(os.path.join(INPUT_PATH + '/train', '*jpg'))
val_files = glob.glob(os.path.join(INPUT_PATH + '/val', '*jpg'))
fig, axes = plt.subplots(1,2, figsize = (20,5))
for i, ax in enumerate(axes.ravel()):
    img = cv2.imread(train_files[i])
    ax.imshow(img)
    #ax.axis('off')
plt.show()
for i in train_files[:2]:
    org_img = cv2.imread(i)
    img = org_img[:, 0:256, :]
    msk = org_img[:, 256:, :]
kmeans_data = []
for i in train_files[:50]:
    org_img = cv2.imread(i)
    msk = org_img[:, INPUT_IMG_SIZE:, :]
    kmeans_data.append(msk)
kmeans_data = np.array(kmeans_data)
kmeans_data = kmeans_data.reshape(-1, 3)
encoder = KMeans(n_clusters=OUTPUT_CLASSES)
encoder.fit(kmeans_data)
colors = {0: [255, 0, 0], 1: [0, 255, 0], 2: [0, 0, 255], 3: [255, 69, 0], 4: [255, 0, 255], 5: [210, 105, 30], 6: [192, 255, 62], 7: [127, 255, 0], 8: [0, 238, 238], 9: [72, 118, 255]}
for i in train_files[:3]:
    org_img = cv2.imread(i)
    msk = org_img[:, 256:, :]
    test = msk.reshape(-1, 3)
    pred = encoder.predict(test)
    enc_pred = pred.reshape(INPUT_IMG_SIZE, INPUT_IMG_SIZE)
    pred = np.array([colors[p] for p in pred]).reshape(256, 256, 3)

def plot_grid_imgs(img_files_list, grid_size=(1, 3), figure_size=(15, 5), show_axis=True):
    fig, axes = plt.subplots(grid_size[0], grid_size[1], figsize=figure_size)
    for i, ax in enumerate(axes.ravel()):
        img = cv2.imread(img_files_list[i])
        ax.imshow(img)
        if not show_axis:
            ax.axis('off')
    plt.show()

plot_grid_imgs(train_files, (1, 3))
code
74060518/cell_8
[ "image_output_1.png" ]
import cv2
import glob
import matplotlib.pyplot as plt
import os
INPUT_PATH = '../input/cityscapes-image-pairs/cityscapes_data'
train_files = glob.glob(os.path.join(INPUT_PATH + '/train', '*jpg'))
val_files = glob.glob(os.path.join(INPUT_PATH + '/val', '*jpg'))
fig, axes = plt.subplots(1, 2, figsize=(20, 5))
for i, ax in enumerate(axes.ravel()):
    img = cv2.imread(train_files[i])
    ax.imshow(img)
plt.show()
code
74060518/cell_15
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
from sklearn.cluster import KMeans
import cv2
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
INPUT_PATH = '../input/cityscapes-image-pairs/cityscapes_data'
BATCH_SIZE = 4
INPUT_IMG_SIZE = 256
OUTPUT_CLASSES = 10
LEARINING_RATE = 0.001
train_files = glob.glob(os.path.join(INPUT_PATH + '/train', '*jpg'))
val_files = glob.glob(os.path.join(INPUT_PATH + '/val', '*jpg'))
fig, axes = plt.subplots(1,2, figsize = (20,5))
for i, ax in enumerate(axes.ravel()):
    img = cv2.imread(train_files[i])
    ax.imshow(img)
    #ax.axis('off')
plt.show()
for i in train_files[:2]:
    org_img = cv2.imread(i)
    img = org_img[:, 0:256, :]
    msk = org_img[:, 256:, :]
kmeans_data = []
for i in train_files[:50]:
    org_img = cv2.imread(i)
    msk = org_img[:, INPUT_IMG_SIZE:, :]
    kmeans_data.append(msk)
kmeans_data = np.array(kmeans_data)
kmeans_data = kmeans_data.reshape(-1, 3)
encoder = KMeans(n_clusters=OUTPUT_CLASSES)
encoder.fit(kmeans_data)
code
74060518/cell_17
[ "text_plain_output_1.png" ]
from sklearn.cluster import KMeans
import cv2
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
INPUT_PATH = '../input/cityscapes-image-pairs/cityscapes_data'
BATCH_SIZE = 4
INPUT_IMG_SIZE = 256
OUTPUT_CLASSES = 10
LEARINING_RATE = 0.001
train_files = glob.glob(os.path.join(INPUT_PATH + '/train', '*jpg'))
val_files = glob.glob(os.path.join(INPUT_PATH + '/val', '*jpg'))
fig, axes = plt.subplots(1,2, figsize = (20,5))
for i, ax in enumerate(axes.ravel()):
    img = cv2.imread(train_files[i])
    ax.imshow(img)
    #ax.axis('off')
plt.show()
for i in train_files[:2]:
    org_img = cv2.imread(i)
    img = org_img[:, 0:256, :]
    msk = org_img[:, 256:, :]
kmeans_data = []
for i in train_files[:50]:
    org_img = cv2.imread(i)
    msk = org_img[:, INPUT_IMG_SIZE:, :]
    kmeans_data.append(msk)
kmeans_data = np.array(kmeans_data)
kmeans_data = kmeans_data.reshape(-1, 3)
encoder = KMeans(n_clusters=OUTPUT_CLASSES)
encoder.fit(kmeans_data)
colors = {0: [255, 0, 0], 1: [0, 255, 0], 2: [0, 0, 255], 3: [255, 69, 0], 4: [255, 0, 255], 5: [210, 105, 30], 6: [192, 255, 62], 7: [127, 255, 0], 8: [0, 238, 238], 9: [72, 118, 255]}
for i in train_files[:3]:
    org_img = cv2.imread(i)
    msk = org_img[:, 256:, :]
    test = msk.reshape(-1, 3)
    pred = encoder.predict(test)
    enc_pred = pred.reshape(INPUT_IMG_SIZE, INPUT_IMG_SIZE)
    pred = np.array([colors[p] for p in pred]).reshape(256, 256, 3)
    print('No of classes in encoded mask:', np.unique(enc_pred))
    plt.figure(figsize=(15, 10))
    plt.subplot(1, 2, 1)
    plt.imshow(msk)
    plt.title('Original mask (RGB)')
    plt.subplot(1, 2, 2)
    plt.imshow(pred)
    plt.title('Encoded mask')
    plt.show()
code
74060518/cell_14
[ "image_output_1.png" ]
import cv2
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
INPUT_PATH = '../input/cityscapes-image-pairs/cityscapes_data'
BATCH_SIZE = 4
INPUT_IMG_SIZE = 256
OUTPUT_CLASSES = 10
LEARINING_RATE = 0.001
train_files = glob.glob(os.path.join(INPUT_PATH + '/train', '*jpg'))
val_files = glob.glob(os.path.join(INPUT_PATH + '/val', '*jpg'))
fig, axes = plt.subplots(1,2, figsize = (20,5))
for i, ax in enumerate(axes.ravel()):
    img = cv2.imread(train_files[i])
    ax.imshow(img)
    #ax.axis('off')
plt.show()
for i in train_files[:2]:
    org_img = cv2.imread(i)
    img = org_img[:, 0:256, :]
    msk = org_img[:, 256:, :]
kmeans_data = []
for i in train_files[:50]:
    org_img = cv2.imread(i)
    msk = org_img[:, INPUT_IMG_SIZE:, :]
    kmeans_data.append(msk)
kmeans_data = np.array(kmeans_data)
kmeans_data = kmeans_data.reshape(-1, 3)
print(kmeans_data.shape)
code
74060518/cell_10
[ "text_plain_output_1.png" ]
import cv2
import glob
import matplotlib.pyplot as plt
import os
INPUT_PATH = '../input/cityscapes-image-pairs/cityscapes_data'
train_files = glob.glob(os.path.join(INPUT_PATH + '/train', '*jpg'))
val_files = glob.glob(os.path.join(INPUT_PATH + '/val', '*jpg'))
fig, axes = plt.subplots(1,2, figsize = (20,5))
for i, ax in enumerate(axes.ravel()):
    img = cv2.imread(train_files[i])
    ax.imshow(img)
    #ax.axis('off')
plt.show()
for i in train_files[:2]:
    org_img = cv2.imread(i)
    img = org_img[:, 0:256, :]
    msk = org_img[:, 256:, :]
    print('-' * 40)
    print('Original image shape:', img.shape)
    print('Mask image shape:', msk.shape)
    print('-' * 40)
    plt.figure(figsize=(12, 8))
    plt.subplot(1, 2, 1)
    plt.imshow(img)
    plt.title('Original image')
    plt.subplot(1, 2, 2)
    plt.imshow(msk)
    plt.title('Mask (RGB)')
    plt.show()
code
50242460/cell_4
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/credit-card-customers/BankChurners.csv', dtype={'Income_Category': 'str'}).iloc[:, :-2]
y = df['Attrition_Flag']
df.drop(['Attrition_Flag'], axis=1, inplace=True)
df.head()
code
50242460/cell_6
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3), dpi=100)
axes = plt.gca()
axes.set_ylim([0.5, 0.9])
plt.text(x=0.0, y=0.75, s='16%', fontsize=60, color='#ae012e', fontweight='medium')
plt.text(x=0.0, y=0.63, s='of total data points have \ncustomers who will attrit', fontsize=20, color='gray')
plt.axis('off')
code
73077959/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from tensorflow.keras.models import load_model
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random
import seaborn as sns
import tensorflow as tf
import numpy as np
import pandas as pd
import os
model = load_model('../input/efficientnetv2transfer/model-18-fine_after.h5')

def set_seed(seed=200):
    tf.random.set_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)

set_seed(0)
x_test = np.load('../input/pneumonia-xray-datagen-np/x_test.npy')
y_test = np.load('../input/pneumonia-xray-datagen-np/y_test.npy')
test_evalu = model.evaluate(x_test, y_test)

def check(labels, predictions):
    cmx = tf.math.confusion_matrix(labels, predictions, num_classes=None, weights=None, dtype=tf.dtypes.int32, name=None)
    target_names = ['Normal', 'Pneumonia']
    df_cmx = pd.DataFrame(np.array(cmx), index=target_names, columns=target_names)

Y_pred = model.predict(x_test)
y_pred = Y_pred.reshape(Y_pred.shape[0]).tolist()
cut = 0.5
for i in range(len(y_pred)):
    if y_pred[i] >= cut:
        y_pred[i] = 1
    else:
        y_pred[i] = 0
check(y_test, y_pred)
code
73077959/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
73077959/cell_7
[ "text_plain_output_1.png" ]
from tensorflow.keras.models import load_model
import numpy as np
import numpy as np # linear algebra
import os
import os
import random
import tensorflow as tf
import numpy as np
import pandas as pd
import os
model = load_model('../input/efficientnetv2transfer/model-18-fine_after.h5')

def set_seed(seed=200):
    tf.random.set_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)

set_seed(0)
x_test = np.load('../input/pneumonia-xray-datagen-np/x_test.npy')
y_test = np.load('../input/pneumonia-xray-datagen-np/y_test.npy')
test_evalu = model.evaluate(x_test, y_test)
code
73077959/cell_3
[ "text_plain_output_1.png" ]
!pip install -U git+https://github.com/GdoongMathew/EfficientNetV2 --no-deps
code
1009415/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
1009415/cell_7
[ "text_plain_output_1.png" ]
import glob
import glob
train_image_filenames = sorted(glob.glob('../input/train.7z'))
print('Found images:')
print(train_image_filenames)
code
1009415/cell_5
[ "text_plain_output_1.png" ]
from subprocess import check_output
from subprocess import check_output
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
33109223/cell_21
[ "text_plain_output_1.png" ]
from scipy import stats
from scipy.stats import norm
data_normal = norm.rvs(size=10000, loc=0, scale=1)
stats.kstest(data_normal, 'norm')
code
33109223/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import gamma from scipy.stats import norm from scipy.stats import uniform import seaborn as sns n = 10000 start = 10 width = 20 data_uniform = uniform.rvs(size=n, loc=start, scale=width) ax = sns.distplot(data_uniform, bins=100, kde=True, color='skyblue', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Uniform Distribution ', ylabel='Frequency') data_normal = norm.rvs(size=10000, loc=0, scale=1) ax = sns.distplot(data_normal, bins=100, kde=True, color='skyblue', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Normal Distribution', ylabel='Frequency') data_gamma = gamma.rvs(a=5, size=10000) ax = sns.distplot(data_gamma, kde=True, bins=100, color='skyblue', hist_kws={'linewidth': 15, 'alpha': 1}) ax.set(xlabel='Gamma Distribution', ylabel='Frequency')
code
33109223/cell_54
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import bernoulli from scipy.stats import binom from scipy.stats import expon from scipy.stats import gamma from scipy.stats import norm from scipy.stats import poisson from scipy.stats import uniform import seaborn as sns n = 10000 start = 10 width = 20 data_uniform = uniform.rvs(size=n, loc=start, scale=width) ax = sns.distplot(data_uniform, bins=100, kde=True, color='skyblue', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Uniform Distribution ', ylabel='Frequency') data_normal = norm.rvs(size=10000, loc=0, scale=1) ax = sns.distplot(data_normal, bins=100, kde=True, color='skyblue', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Normal Distribution', ylabel='Frequency') data_gamma = gamma.rvs(a=5, size=10000) ax = sns.distplot(data_gamma, kde=True, bins=100, color='skyblue', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Gamma Distribution', ylabel='Frequency') data_expon = expon.rvs(scale=1, loc=0, size=1000) ax = sns.distplot(data_expon, kde=True, bins=100, color='skyblue', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Exponential Distribution', ylabel='Frequency') data_poisson = poisson.rvs(mu=3, size=10000) ax = sns.distplot(data_poisson, bins=30, kde=True, color='skyblue', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Poisson Distribution', ylabel='Frequency') data_binom = binom.rvs(n=10, p=0.8, size=10000) ax = sns.distplot(data_binom, kde=False, color='skyblue', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Binomial Distribution', ylabel='Frequency') data_bern = bernoulli.rvs(size=10000, p=0.6) ax = sns.distplot(data_bern, kde=False, color='skyblue', hist_kws={'linewidth': 15, 'alpha': 1}) ax.set(xlabel='Bernoulli Distribution', ylabel='Frequency')
code
33109223/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import norm from scipy.stats import uniform import seaborn as sns n = 10000 start = 10 width = 20 data_uniform = uniform.rvs(size=n, loc=start, scale=width) ax = sns.distplot(data_uniform, bins=100, kde=True, color='skyblue', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Uniform Distribution ', ylabel='Frequency') data_normal = norm.rvs(size=10000, loc=0, scale=1) ax = sns.distplot(data_normal, bins=100, kde=True, color='skyblue', hist_kws={'linewidth': 15, 'alpha': 1}) ax.set(xlabel='Normal Distribution', ylabel='Frequency')
code
33109223/cell_49
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import binom from scipy.stats import expon from scipy.stats import gamma from scipy.stats import norm from scipy.stats import poisson from scipy.stats import uniform import seaborn as sns n = 10000 start = 10 width = 20 data_uniform = uniform.rvs(size=n, loc=start, scale=width) ax = sns.distplot(data_uniform, bins=100, kde=True, color='skyblue', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Uniform Distribution ', ylabel='Frequency') data_normal = norm.rvs(size=10000, loc=0, scale=1) ax = sns.distplot(data_normal, bins=100, kde=True, color='skyblue', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Normal Distribution', ylabel='Frequency') data_gamma = gamma.rvs(a=5, size=10000) ax = sns.distplot(data_gamma, kde=True, bins=100, color='skyblue', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Gamma Distribution', ylabel='Frequency') data_expon = expon.rvs(scale=1, loc=0, size=1000) ax = sns.distplot(data_expon, kde=True, bins=100, color='skyblue', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Exponential Distribution', ylabel='Frequency') data_poisson = poisson.rvs(mu=3, size=10000) ax = sns.distplot(data_poisson, bins=30, kde=True, color='skyblue', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Poisson Distribution', ylabel='Frequency') data_binom = binom.rvs(n=10, p=0.8, size=10000) ax = sns.distplot(data_binom, kde=False, color='skyblue', hist_kws={'linewidth': 15, 'alpha': 1}) ax.set(xlabel='Binomial Distribution', ylabel='Frequency')
code
33109223/cell_43
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import expon from scipy.stats import gamma from scipy.stats import norm from scipy.stats import poisson from scipy.stats import uniform import seaborn as sns n = 10000 start = 10 width = 20 data_uniform = uniform.rvs(size=n, loc=start, scale=width) ax = sns.distplot(data_uniform, bins=100, kde=True, color='skyblue', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Uniform Distribution ', ylabel='Frequency') data_normal = norm.rvs(size=10000, loc=0, scale=1) ax = sns.distplot(data_normal, bins=100, kde=True, color='skyblue', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Normal Distribution', ylabel='Frequency') data_gamma = gamma.rvs(a=5, size=10000) ax = sns.distplot(data_gamma, kde=True, bins=100, color='skyblue', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Gamma Distribution', ylabel='Frequency') data_expon = expon.rvs(scale=1, loc=0, size=1000) ax = sns.distplot(data_expon, kde=True, bins=100, color='skyblue', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Exponential Distribution', ylabel='Frequency') data_poisson = poisson.rvs(mu=3, size=10000) ax = sns.distplot(data_poisson, bins=30, kde=True, color='skyblue', hist_kws={'linewidth': 15, 'alpha': 1}) ax.set(xlabel='Poisson Distribution', ylabel='Frequency')
code
33109223/cell_22
[ "text_plain_output_1.png" ]
from scipy import stats
from scipy.stats import norm
data_normal = norm.rvs(size=10000, loc=0, scale=1)
stats.kstest(data_normal, 'norm')
stats.anderson(data_normal, dist='norm')
code
33109223/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import uniform
import seaborn as sns
n = 10000
start = 10
width = 20
data_uniform = uniform.rvs(size=n, loc=start, scale=width)
ax = sns.distplot(data_uniform, bins=100, kde=True, color='skyblue', hist_kws={'linewidth': 15, 'alpha': 1})
ax.set(xlabel='Uniform Distribution ', ylabel='Frequency')
code
33109223/cell_36
[ "text_plain_output_1.png", "image_output_1.png" ]
from scipy.stats import expon from scipy.stats import gamma from scipy.stats import norm from scipy.stats import uniform import seaborn as sns n = 10000 start = 10 width = 20 data_uniform = uniform.rvs(size=n, loc=start, scale=width) ax = sns.distplot(data_uniform, bins=100, kde=True, color='skyblue', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Uniform Distribution ', ylabel='Frequency') data_normal = norm.rvs(size=10000, loc=0, scale=1) ax = sns.distplot(data_normal, bins=100, kde=True, color='skyblue', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Normal Distribution', ylabel='Frequency') data_gamma = gamma.rvs(a=5, size=10000) ax = sns.distplot(data_gamma, kde=True, bins=100, color='skyblue', hist_kws={"linewidth": 15,'alpha':1}) ax.set(xlabel='Gamma Distribution', ylabel='Frequency') data_expon = expon.rvs(scale=1, loc=0, size=1000) ax = sns.distplot(data_expon, kde=True, bins=100, color='skyblue', hist_kws={'linewidth': 15, 'alpha': 1}) ax.set(xlabel='Exponential Distribution', ylabel='Frequency')
code
32068746/cell_13
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('../input/covid19-global-forecasting-week-4/test.csv')
submission = pd.read_csv('../input/covid19-global-forecasting-week-4/submission.csv')
train['Country_Region'] = train['Country_Region'] + ' ' + train['Province_State']
test['Country_Region'] = test['Country_Region'] + ' ' + test['Province_State']
train.drop(['Province_State'], axis=1, inplace=True)
test.drop(['Province_State'], axis=1, inplace=True)
split_data_train = train['Date'].str.split('-').to_list()
split_data_test = test['Date'].str.split('-').to_list()
train_date = pd.DataFrame(split_data_train, columns=['Year', 'Month', 'Date'])
test_date = pd.DataFrame(split_data_test, columns=['Year', 'Month', 'Date'])
del train_date['Year']
del test_date['Year']
train_date['Month'] = train_date['Month'].astype(int)
test_date['Month'] = test_date['Month'].astype(int)
train_date['Date'] = train_date['Date'].astype(int)
test_date['Date'] = test_date['Date'].astype(int)
del train['Date']
del test['Date']
train = pd.concat([train, train_date], axis=1)
test = pd.concat([test, test_date], axis=1)
train_x_full = train[['Country_Region', 'Month', 'Date']].copy()
train_y_full = train[['ConfirmedCases', 'Fatalities']].copy()
model = LinearRegression()
model.fit(x_train, y_train)
preds = model.predict(x_valid)
model = RandomForestRegressor()
model.fit(train_x_full, train_y_full)
preds = model.predict(x_valid)
print(mean_absolute_error(preds, y_valid))
code
32068746/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32068746/cell_12
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
model = LinearRegression()
model.fit(x_train, y_train)
preds = model.predict(x_valid)
print(mean_absolute_error(preds, y_valid))
code
128001996/cell_9
[ "text_plain_output_100.png", "text_plain_output_334.png", "application_vnd.jupyter.stderr_output_145.png", "application_vnd.jupyter.stderr_output_289.png", "application_vnd.jupyter.stderr_output_313.png", "application_vnd.jupyter.stderr_output_373.png", "text_plain_output_84.png", "text_plain_output_322.png", "application_vnd.jupyter.stderr_output_27.png", "application_vnd.jupyter.stderr_output_115.png", "application_vnd.jupyter.stderr_output_207.png", "application_vnd.jupyter.stderr_output_341.png", "text_plain_output_56.png", "text_plain_output_158.png", "text_plain_output_218.png", "text_plain_output_264.png", "application_vnd.jupyter.stderr_output_35.png", "text_plain_output_282.png", "text_plain_output_396.png", "text_plain_output_232.png", "text_plain_output_362.png", "application_vnd.jupyter.stderr_output_77.png", "text_plain_output_258.png", "application_vnd.jupyter.stderr_output_417.png", "text_plain_output_452.png", "text_plain_output_130.png", "application_vnd.jupyter.stderr_output_461.png", "application_vnd.jupyter.stderr_output_205.png", "application_vnd.jupyter.stderr_output_203.png", "text_plain_output_462.png", "text_plain_output_286.png", "application_vnd.jupyter.stderr_output_185.png", "application_vnd.jupyter.stderr_output_227.png", "text_plain_output_262.png", "application_vnd.jupyter.stderr_output_287.png", "application_vnd.jupyter.stderr_output_9.png", "text_plain_output_278.png", "text_plain_output_254.png", "application_vnd.jupyter.stderr_output_335.png", "application_vnd.jupyter.stderr_output_215.png", "application_vnd.jupyter.stderr_output_283.png", "application_vnd.jupyter.stderr_output_449.png", "text_plain_output_98.png", "text_plain_output_236.png", "application_vnd.jupyter.stderr_output_223.png", "application_vnd.jupyter.stderr_output_435.png", "application_vnd.jupyter.stderr_output_219.png", "application_vnd.jupyter.stderr_output_279.png", "application_vnd.jupyter.stderr_output_81.png", "application_vnd.jupyter.stderr_output_111.png", "text_plain_output_420.png", "application_vnd.jupyter.stderr_output_53.png", "application_vnd.jupyter.stderr_output_131.png", "application_vnd.jupyter.stderr_output_437.png", "text_plain_output_284.png", "text_plain_output_78.png", "application_vnd.jupyter.stderr_output_99.png", "text_plain_output_106.png", "text_plain_output_138.png", "application_vnd.jupyter.stderr_output_385.png", "text_plain_output_192.png", "text_plain_output_426.png", "application_vnd.jupyter.stderr_output_183.png", "application_vnd.jupyter.stderr_output_181.png", "application_vnd.jupyter.stderr_output_299.png", "application_vnd.jupyter.stderr_output_141.png", "text_plain_output_184.png", "text_plain_output_274.png", "text_plain_output_172.png", "application_vnd.jupyter.stderr_output_297.png", "application_vnd.jupyter.stderr_output_93.png", "text_plain_output_332.png", "text_plain_output_256.png", "text_plain_output_90.png", "application_vnd.jupyter.stderr_output_471.png", "application_vnd.jupyter.stderr_output_123.png", "application_vnd.jupyter.stderr_output_465.png", "text_plain_output_48.png", "text_plain_output_388.png", "application_vnd.jupyter.stderr_output_391.png", "text_plain_output_422.png", "text_plain_output_116.png", "text_plain_output_128.png", "text_plain_output_30.png", "text_plain_output_126.png", "application_vnd.jupyter.stderr_output_355.png", "application_vnd.jupyter.stderr_output_421.png", "application_vnd.jupyter.stderr_output_431.png", "text_plain_output_272.png", "application_vnd.jupyter.stderr_output_73.png", 
"application_vnd.jupyter.stderr_output_137.png", "application_vnd.jupyter.stderr_output_133.png", "application_vnd.jupyter.stderr_output_165.png", "application_vnd.jupyter.stderr_output_381.png", "application_vnd.jupyter.stderr_output_75.png", "text_plain_output_316.png", "application_vnd.jupyter.stderr_output_365.png", "text_plain_output_390.png", "text_plain_output_198.png", "application_vnd.jupyter.stderr_output_321.png", "application_vnd.jupyter.stderr_output_7.png", "application_vnd.jupyter.stderr_output_467.png", "application_vnd.jupyter.stderr_output_407.png", "text_plain_output_178.png", "text_plain_output_226.png", "text_plain_output_154.png", "text_plain_output_234.png", "text_plain_output_404.png", "text_plain_output_114.png", "application_vnd.jupyter.stderr_output_447.png", "application_vnd.jupyter.stderr_output_11.png", "application_vnd.jupyter.stderr_output_361.png", "application_vnd.jupyter.stderr_output_155.png", "text_plain_output_470.png", "text_plain_output_70.png", "text_plain_output_44.png", "application_vnd.jupyter.stderr_output_423.png", "application_vnd.jupyter.stderr_output_277.png", "application_vnd.jupyter.stderr_output_291.png", "application_vnd.jupyter.stderr_output_231.png", "application_vnd.jupyter.stderr_output_317.png", "application_vnd.jupyter.stderr_output_65.png", "application_vnd.jupyter.stderr_output_443.png", "application_vnd.jupyter.stderr_output_235.png", "text_plain_output_86.png", "text_plain_output_244.png", "text_plain_output_118.png", "application_vnd.jupyter.stderr_output_453.png", "application_vnd.jupyter.stderr_output_179.png", "application_vnd.jupyter.stderr_output_143.png", "application_vnd.jupyter.stderr_output_409.png", "text_plain_output_40.png", "text_plain_output_74.png", "application_vnd.jupyter.stderr_output_171.png", "text_plain_output_190.png", "text_plain_output_302.png", "text_plain_output_340.png", "application_vnd.jupyter.stderr_output_351.png", "application_vnd.jupyter.stderr_output_105.png", "text_plain_output_20.png", "application_vnd.jupyter.stderr_output_275.png", "application_vnd.jupyter.stderr_output_345.png", "text_plain_output_102.png", "application_vnd.jupyter.stderr_output_439.png", "text_plain_output_414.png", "application_vnd.jupyter.stderr_output_371.png", "text_plain_output_222.png", "application_vnd.jupyter.stderr_output_253.png", "text_plain_output_144.png", "application_vnd.jupyter.stderr_output_389.png", "application_vnd.jupyter.stderr_output_323.png", "application_vnd.jupyter.stderr_output_387.png", "text_plain_output_132.png", "text_plain_output_60.png", "application_vnd.jupyter.stderr_output_393.png", "application_vnd.jupyter.stderr_output_31.png", "application_vnd.jupyter.stderr_output_125.png", "text_plain_output_330.png", "text_plain_output_434.png", "application_vnd.jupyter.stderr_output_113.png", "text_plain_output_68.png", "text_plain_output_4.png", "text_plain_output_64.png", "application_vnd.jupyter.stderr_output_221.png", "application_vnd.jupyter.stderr_output_305.png", "text_plain_output_200.png", "application_vnd.jupyter.stderr_output_383.png", "application_vnd.jupyter.stderr_output_33.png", "text_plain_output_398.png", "text_plain_output_312.png", "text_plain_output_248.png", "application_vnd.jupyter.stderr_output_245.png", "text_plain_output_318.png", "application_vnd.jupyter.stderr_output_25.png", "text_plain_output_52.png", "application_vnd.jupyter.stderr_output_419.png", "text_plain_output_66.png", "text_plain_output_446.png", "application_vnd.jupyter.stderr_output_403.png", 
"application_vnd.jupyter.stderr_output_249.png", "application_vnd.jupyter.stderr_output_229.png", "application_vnd.jupyter.stderr_output_263.png", "text_plain_output_380.png", "text_plain_output_442.png", "application_vnd.jupyter.stderr_output_273.png", "application_vnd.jupyter.stderr_output_135.png", "text_plain_output_300.png", "application_vnd.jupyter.stderr_output_211.png", "application_vnd.jupyter.stderr_output_463.png", "application_vnd.jupyter.stderr_output_285.png", "application_vnd.jupyter.stderr_output_177.png", "text_plain_output_14.png", "text_plain_output_32.png", "text_plain_output_304.png", "text_plain_output_88.png", "text_plain_output_240.png", "application_vnd.jupyter.stderr_output_89.png", "text_plain_output_140.png", "application_vnd.jupyter.stderr_output_269.png", "text_plain_output_376.png", "text_plain_output_280.png", "text_plain_output_242.png", "text_plain_output_460.png", "application_vnd.jupyter.stderr_output_189.png", "text_plain_output_160.png", "text_plain_output_58.png", "application_vnd.jupyter.stderr_output_149.png", "application_vnd.jupyter.stderr_output_91.png", "application_vnd.jupyter.stderr_output_239.png", "text_plain_output_260.png", "application_vnd.jupyter.stderr_output_95.png", "text_plain_output_294.png", "text_plain_output_392.png", "text_plain_output_320.png", "application_vnd.jupyter.stderr_output_67.png", "application_vnd.jupyter.stderr_output_237.png", "application_vnd.jupyter.stderr_output_339.png", "text_plain_output_386.png", "text_plain_output_438.png", "text_plain_output_76.png", "text_plain_output_108.png", "application_vnd.jupyter.stderr_output_337.png", "text_plain_output_54.png", "text_plain_output_142.png", "text_plain_output_10.png", "text_plain_output_276.png", "text_plain_output_6.png", "application_vnd.jupyter.stderr_output_71.png", "text_plain_output_326.png", "application_vnd.jupyter.stderr_output_259.png", "text_plain_output_170.png", "text_plain_output_92.png", "text_plain_output_120.png", "application_vnd.jupyter.stderr_output_293.png", "text_plain_output_24.png", "application_vnd.jupyter.stderr_output_257.png", "application_vnd.jupyter.stderr_output_23.png", "application_vnd.jupyter.stderr_output_159.png", "text_plain_output_344.png", "application_vnd.jupyter.stderr_output_325.png", "application_vnd.jupyter.stderr_output_247.png", "text_plain_output_104.png", "text_plain_output_270.png", "text_plain_output_466.png", "application_vnd.jupyter.stderr_output_59.png", "text_plain_output_134.png", "text_plain_output_288.png", "application_vnd.jupyter.stderr_output_197.png", "application_vnd.jupyter.stderr_output_369.png", "application_vnd.jupyter.stderr_output_459.png", "text_plain_output_18.png", "text_plain_output_266.png", "text_plain_output_208.png", "text_plain_output_50.png", "text_plain_output_36.png", "application_vnd.jupyter.stderr_output_441.png", "application_vnd.jupyter.stderr_output_83.png", "text_plain_output_96.png", "application_vnd.jupyter.stderr_output_19.png", "text_plain_output_418.png", "text_plain_output_180.png", "text_plain_output_210.png", "text_plain_output_112.png", "application_vnd.jupyter.stderr_output_281.png", "text_plain_output_152.png", "application_vnd.jupyter.stderr_output_13.png", "application_vnd.jupyter.stderr_output_127.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_290.png", "application_vnd.jupyter.stderr_output_195.png", "application_vnd.jupyter.stderr_output_331.png", "text_plain_output_324.png", "text_plain_output_22.png", "text_plain_output_188.png", 
"text_plain_output_366.png", "application_vnd.jupyter.stderr_output_327.png", "application_vnd.jupyter.stderr_output_5.png", "application_vnd.jupyter.stderr_output_261.png", "text_plain_output_328.png", "application_vnd.jupyter.stderr_output_301.png", "text_plain_output_368.png", "application_vnd.jupyter.stderr_output_347.png", "text_plain_output_372.png", "application_vnd.jupyter.stderr_output_411.png", "text_plain_output_146.png", "text_plain_output_454.png", "application_vnd.jupyter.stderr_output_241.png", "application_vnd.jupyter.stderr_output_405.png", "application_vnd.jupyter.stderr_output_163.png", "text_plain_output_338.png", "application_vnd.jupyter.stderr_output_151.png", "application_vnd.jupyter.stderr_output_103.png", "application_vnd.jupyter.stderr_output_109.png", "text_plain_output_382.png", "text_plain_output_38.png", "application_vnd.jupyter.stderr_output_367.png", "text_plain_output_214.png", "text_plain_output_166.png", "text_plain_output_358.png", "application_vnd.jupyter.stderr_output_97.png", "application_vnd.jupyter.stderr_output_395.png", "text_plain_output_314.png", "text_plain_output_410.png", "text_plain_output_432.png", "application_vnd.jupyter.stderr_output_201.png", "application_vnd.jupyter.stderr_output_307.png", "text_plain_output_308.png", "text_plain_output_16.png", "text_plain_output_174.png", "text_plain_output_212.png", "text_plain_output_230.png", "application_vnd.jupyter.stderr_output_15.png", "text_plain_output_430.png", "text_plain_output_378.png", "text_plain_output_206.png", "text_plain_output_8.png", "text_plain_output_122.png", "application_vnd.jupyter.stderr_output_193.png", "text_plain_output_384.png", "application_vnd.jupyter.stderr_output_17.png", "application_vnd.jupyter.stderr_output_87.png", "text_plain_output_182.png", "text_plain_output_26.png", "application_vnd.jupyter.stderr_output_187.png", "text_plain_output_406.png", "application_vnd.jupyter.stderr_output_445.png", "text_plain_output_310.png", "text_plain_output_456.png", "application_vnd.jupyter.stderr_output_455.png", "application_vnd.jupyter.stderr_output_469.png", "text_plain_output_220.png", "application_vnd.jupyter.stderr_output_117.png", "application_vnd.jupyter.stderr_output_413.png", "text_plain_output_238.png", "application_vnd.jupyter.stderr_output_401.png", "text_plain_output_34.png", "text_plain_output_346.png", "text_plain_output_168.png", "application_vnd.jupyter.stderr_output_69.png", "text_plain_output_394.png", "text_plain_output_204.png", "text_plain_output_350.png", "application_vnd.jupyter.stderr_output_41.png", "application_vnd.jupyter.stderr_output_157.png", "text_plain_output_42.png", "text_plain_output_110.png", "text_plain_output_468.png", "text_plain_output_370.png", "application_vnd.jupyter.stderr_output_377.png", "text_plain_output_224.png", "application_vnd.jupyter.stderr_output_167.png", "application_vnd.jupyter.stderr_output_79.png", "application_vnd.jupyter.stderr_output_49.png", "application_vnd.jupyter.stderr_output_333.png", "application_vnd.jupyter.stderr_output_63.png", "application_vnd.jupyter.stderr_output_47.png", "application_vnd.jupyter.stderr_output_57.png", "application_vnd.jupyter.stderr_output_363.png", "application_vnd.jupyter.stderr_output_169.png", "text_plain_output_450.png", "text_plain_output_252.png", "application_vnd.jupyter.stderr_output_415.png", "application_vnd.jupyter.stderr_output_343.png", "text_plain_output_296.png", "text_plain_output_28.png", "text_plain_output_72.png", "application_vnd.jupyter.stderr_output_173.png", 
"application_vnd.jupyter.stderr_output_319.png", "application_vnd.jupyter.stderr_output_191.png", "application_vnd.jupyter.stderr_output_399.png", "text_plain_output_162.png", "text_plain_output_136.png", "text_plain_output_246.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_433.png", "application_vnd.jupyter.stderr_output_213.png", "application_vnd.jupyter.stderr_output_349.png", "application_vnd.jupyter.stderr_output_397.png", "application_vnd.jupyter.stderr_output_429.png", "text_plain_output_196.png", "text_plain_output_342.png", "application_vnd.jupyter.stderr_output_295.png", "application_vnd.jupyter.stderr_output_29.png", "application_vnd.jupyter.stderr_output_101.png", "application_vnd.jupyter.stderr_output_359.png", "application_vnd.jupyter.stderr_output_267.png", "application_vnd.jupyter.stderr_output_225.png", "application_vnd.jupyter.stderr_output_209.png", "application_vnd.jupyter.stderr_output_139.png", "text_plain_output_150.png", "application_vnd.jupyter.stderr_output_1.png", "text_plain_output_176.png", "application_vnd.jupyter.stderr_output_217.png", "application_vnd.jupyter.stderr_output_61.png", "text_plain_output_186.png", "application_vnd.jupyter.stderr_output_51.png", "application_vnd.jupyter.stderr_output_311.png", "text_plain_output_228.png", "text_plain_output_412.png", "application_vnd.jupyter.stderr_output_353.png", "text_plain_output_268.png", "text_plain_output_436.png", "text_plain_output_354.png", "text_plain_output_360.png", "text_plain_output_82.png", "text_plain_output_356.png", "text_plain_output_202.png", "application_vnd.jupyter.stderr_output_161.png", "application_vnd.jupyter.stderr_output_379.png", "text_plain_output_336.png", "application_vnd.jupyter.stderr_output_427.png", "text_plain_output_80.png", "text_plain_output_94.png", "text_plain_output_164.png", "application_vnd.jupyter.stderr_output_233.png", "text_plain_output_444.png", "application_vnd.jupyter.stderr_output_153.png", "text_plain_output_216.png", "text_plain_output_124.png", "application_vnd.jupyter.stderr_output_45.png", "text_plain_output_148.png", "text_plain_output_402.png", "text_plain_output_424.png", "text_plain_output_250.png", "application_vnd.jupyter.stderr_output_425.png", "application_vnd.jupyter.stderr_output_175.png", "text_plain_output_400.png", "application_vnd.jupyter.stderr_output_457.png", "text_plain_output_12.png", "application_vnd.jupyter.stderr_output_39.png", "text_plain_output_408.png", "application_vnd.jupyter.stderr_output_119.png", "application_vnd.jupyter.stderr_output_309.png", "application_vnd.jupyter.stderr_output_107.png", "text_plain_output_428.png", "application_vnd.jupyter.stderr_output_255.png", "application_vnd.jupyter.stderr_output_21.png", "application_vnd.jupyter.stderr_output_43.png", "application_vnd.jupyter.stderr_output_357.png", "text_plain_output_416.png", "application_vnd.jupyter.stderr_output_265.png", "text_plain_output_194.png", "application_vnd.jupyter.stderr_output_85.png", "text_plain_output_62.png", "text_plain_output_440.png", "text_plain_output_458.png", "application_vnd.jupyter.stderr_output_271.png", "application_vnd.jupyter.stderr_output_55.png", "text_plain_output_464.png", "application_vnd.jupyter.stderr_output_303.png", "text_plain_output_156.png", "application_vnd.jupyter.stderr_output_147.png", "application_vnd.jupyter.stderr_output_375.png", "text_plain_output_298.png", "application_vnd.jupyter.stderr_output_121.png", "text_plain_output_348.png", "application_vnd.jupyter.stderr_output_451.png", 
"text_plain_output_448.png", "text_plain_output_364.png", "application_vnd.jupyter.stderr_output_329.png", "application_vnd.jupyter.stderr_output_243.png", "text_plain_output_352.png", "application_vnd.jupyter.stderr_output_199.png", "text_plain_output_374.png", "text_plain_output_472.png", "application_vnd.jupyter.stderr_output_129.png", "application_vnd.jupyter.stderr_output_251.png", "text_plain_output_292.png", "application_vnd.jupyter.stderr_output_315.png", "application_vnd.jupyter.stderr_output_37.png", "text_plain_output_306.png", "text_plain_output_46.png" ]
import pandas as pd import seaborn as sns target = 'yield' df1 = pd.read_csv(TRAIN_CSV) df1.rename({'Id': 'id'}, axis=1, inplace=True) df1['test'] = 0 df1['gen'] = 1 df2 = pd.read_csv(TEST_CSV) df2.rename({'Id': 'id'}, axis=1, inplace=True) df2['test'] = 1 df2['gen'] = 1 df3 = pd.read_csv(EXTERNAL_CSV) df3.rename({'Row#': 'id'}, axis=1, inplace=True) df3['test'] = 0 df3['gen'] = 0 df = pd.concat([df1, df2, df3]) df.id.fillna(-1, inplace=True) df.id = df.id.astype(int) df.reset_index(inplace=True) df.drop('index', axis=1, inplace=True) sns.histplot(data=df, x=target, stat='density', color='b')
code
128001996/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd target = 'yield' df1 = pd.read_csv(TRAIN_CSV) df1.rename({'Id': 'id'}, axis=1, inplace=True) df1['test'] = 0 df1['gen'] = 1 df2 = pd.read_csv(TEST_CSV) df2.rename({'Id': 'id'}, axis=1, inplace=True) df2['test'] = 1 df2['gen'] = 1 df3 = pd.read_csv(EXTERNAL_CSV) df3.rename({'Row#': 'id'}, axis=1, inplace=True) df3['test'] = 0 df3['gen'] = 0 df = pd.concat([df1, df2, df3]) df.id.fillna(-1, inplace=True) df.id = df.id.astype(int) df.reset_index(inplace=True) df.drop('index', axis=1, inplace=True) df.head()
code
128001996/cell_23
[ "image_output_1.png" ]
from sklearn.metrics import roc_auc_score, mean_absolute_error
from sklearn.model_selection import train_test_split, RepeatedKFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler
from xgboost import XGBRegressor, XGBClassifier
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
target = 'yield'
df1 = pd.read_csv(TRAIN_CSV)
df1.rename({'Id': 'id'}, axis=1, inplace=True)
df1['test'] = 0
df1['gen'] = 1
df2 = pd.read_csv(TEST_CSV)
df2.rename({'Id': 'id'}, axis=1, inplace=True)
df2['test'] = 1
df2['gen'] = 1
df3 = pd.read_csv(EXTERNAL_CSV)
df3.rename({'Row#': 'id'}, axis=1, inplace=True)
df3['test'] = 0
df3['gen'] = 0
df = pd.concat([df1, df2, df3])
df.id.fillna(-1, inplace=True)
df.id = df.id.astype(int)
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
df.columns
if True:
    num_columns = ['fruitset', 'fruitmass', 'seeds',]
    ncols = 4
    for n, col in enumerate(num_columns):
        if n % ncols == 0:
            fig, axs = plt.subplots(ncols=ncols, figsize=(24,6))
        ax = axs[n % ncols]
        #sns.kdeplot(data=df, x=col, hue=target, ax=ax, palette='bright', common_norm=False, warn_singular=False)
        sns.histplot(data=df[col], ax=ax);

def explore_categorical_value(df, col, target='yield'):
    values = df[col].unique()
    values.sort()
    for value in values:
        select = df.loc[df[col] == value]
        mn = select[target].mean()
        cnt = len(select)
        cnt_test = len(select[select.test == 1])

cat_columns = ['clonesize', 'honeybee', 'bumbles', 'andrena', 'osmia', 'MaxOfUpperTRange', 'MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange', 'RainingDays', 'AverageRainingDays']
for col in cat_columns:
    explore_categorical_value(df, col)
if True:
    for col in num_columns + cat_columns:
        df[f'log_{col}'] = np.log(df[col] + 1e-06)
score_function = mean_absolute_error

def get_numpy_arrays(data):
    X = data.drop(['id', 'test', target], axis=1).to_numpy()
    y = data[target].to_numpy()
    scaler = StandardScaler()
    X = scaler.fit_transform(X)
    return (X, y)

def ensamble_pred(clfs, X):
    pred = 0
    for clf in clfs:
        pred += clf.predict_proba(X) if sklearn.base.is_classifier(clf) else clf.predict(X).squeeze()
    return np.argmax(pred, axis=1) if sklearn.base.is_classifier(clf) else pred / len(clfs)

def print_validation_score(clfs):
    pred = ensamble_pred(clfs, X_test)
    score = score_function(y_test, pred)
    return score

def examine_clf(X_train_val, y_train_val, clf, rkf, verbose=None):
    avg_score = 0
    clfs = []
    for n, (train_index, val_index) in enumerate(rkf.split(X_train_val, y_train_val)):
        X_train = X_train_val[train_index]
        y_train = y_train_val[train_index]
        X_val = X_train_val[val_index]
        y_val = y_train_val[val_index]
        clf = sklearn.base.clone(clf)
        if verbose is None:
            clf.fit(X_train, y_train)
        else:
            clf.fit(X_train, y_train, verbose=verbose)
        pred = np.argmax(clf.predict_proba(X_val), axis=1) if sklearn.base.is_classifier(clf) else clf.predict(X_val)
        score = score_function(y_val, pred)
        avg_score += score
        clfs.append(clf)
    avg_score /= n + 1
    return (clfs, avg_score)

def examine_clf_print(X_train_val, y_train_val, clf, rkf, verbose=None):
    clfs, avg_score = examine_clf(X_train_val, y_train_val, clf, rkf, verbose=verbose)
    return clfs

def adv_get_numpy_arrays(data):
    X = data.drop(['id', 'test', 'gen', target], axis=1).to_numpy()
    y = data['test'].to_numpy()
    scaler = StandardScaler()
    X = scaler.fit_transform(X)
    return (X, y)

def get_CV_results(X_train_val, y_train_val, clf, rkf, verbose=None):
    results = np.zeros_like(y_train_val, dtype=float)
    all_clfs = []
    for n, (train_index, val_index) in enumerate(rkf.split(X_train_val, y_train_val)):
        X_train = X_train_val[train_index]
        y_train = y_train_val[train_index]
        X_val = X_train_val[val_index]
        y_val = y_train_val[val_index]
        clf = sklearn.base.clone(clf)
        if verbose is None:
            clf.fit(X_train, y_train)
        else:
            clf.fit(X_train, y_train, verbose=verbose)
        pred = clf.predict_proba(X_val)[:, 1] if sklearn.base.is_classifier(clf) else clf.predict(X_val)
        results[val_index] += pred
        all_clfs.append(clf)
    return (results / n, all_clfs)

adv_df = df.copy()
adv_X, adv_y = adv_get_numpy_arrays(adv_df)
rkf = StratifiedKFold(n_splits=5, shuffle=True, random_state=10)
tree_method = 'gpu_hist' if gpu_available else 'hist'
xgbc = XGBClassifier(n_estimators=140, max_depth=3, tree_method=tree_method)
xgbc_results, xgbc_clfs = get_CV_results(adv_X, adv_y, xgbc, rkf)
print(roc_auc_score(adv_y, xgbc_results))
code
128001996/cell_6
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
target = 'yield'
df1 = pd.read_csv(TRAIN_CSV)
df1.rename({'Id': 'id'}, axis=1, inplace=True)
df1['test'] = 0
df1['gen'] = 1
df2 = pd.read_csv(TEST_CSV)
df2.rename({'Id': 'id'}, axis=1, inplace=True)
df2['test'] = 1
df2['gen'] = 1
df3 = pd.read_csv(EXTERNAL_CSV)
df3.rename({'Row#': 'id'}, axis=1, inplace=True)
df3['test'] = 0
df3['gen'] = 0
df = pd.concat([df1, df2, df3])
df.id.fillna(-1, inplace=True)
df.id = df.id.astype(int)
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
pd.isna(df).sum()
code
128001996/cell_2
[ "image_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import sklearn
from sklearn.model_selection import train_test_split, RepeatedKFold, StratifiedKFold
from sklearn.metrics import roc_auc_score, mean_absolute_error
from sklearn.preprocessing import StandardScaler
import optuna
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from xgboost import XGBRegressor, XGBClassifier
from catboost import CatBoostRegressor, CatBoostClassifier
from sklearn.linear_model import Lasso, Ridge, LogisticRegression
from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
from lightgbm import LGBMClassifier, LGBMRegressor
import torch
gpu_available = torch.cuda.is_available()
TRAIN_CSV = '/kaggle/input/playground-series-s3e14/train.csv'
TEST_CSV = '/kaggle/input/playground-series-s3e14/test.csv'
EXTERNAL_CSV = '/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv'
code
128001996/cell_31
[ "text_plain_output_1.png" ]
from sklearn.linear_model import Lasso, Ridge, LogisticRegression
from sklearn.metrics import roc_auc_score, mean_absolute_error
from sklearn.model_selection import train_test_split, RepeatedKFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
target = 'yield'
df1 = pd.read_csv(TRAIN_CSV)
df1.rename({'Id': 'id'}, axis=1, inplace=True)
df1['test'] = 0
df1['gen'] = 1
df2 = pd.read_csv(TEST_CSV)
df2.rename({'Id': 'id'}, axis=1, inplace=True)
df2['test'] = 1
df2['gen'] = 1
df3 = pd.read_csv(EXTERNAL_CSV)
df3.rename({'Row#': 'id'}, axis=1, inplace=True)
df3['test'] = 0
df3['gen'] = 0
df = pd.concat([df1, df2, df3])
df.id.fillna(-1, inplace=True)
df.id = df.id.astype(int)
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
df.columns
if True:
    num_columns = ['fruitset', 'fruitmass', 'seeds',]
    ncols = 4
    for n, col in enumerate(num_columns):
        if n % ncols == 0:
            fig, axs = plt.subplots(ncols=ncols, figsize=(24,6))
        ax = axs[n % ncols]
        #sns.kdeplot(data=df, x=col, hue=target, ax=ax, palette='bright', common_norm=False, warn_singular=False)
        sns.histplot(data=df[col], ax=ax);
def explore_categorical_value(df, col, target='yield'):
    values = df[col].unique()
    values.sort()
    for value in values:
        select = df.loc[df[col] == value]
        mn = select[target].mean()
        cnt = len(select)
        cnt_test = len(select[select.test == 1])
cat_columns = ['clonesize', 'honeybee', 'bumbles', 'andrena', 'osmia', 'MaxOfUpperTRange', 'MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange', 'RainingDays', 'AverageRainingDays']
for col in cat_columns:
    explore_categorical_value(df, col)
if True:
    for col in num_columns + cat_columns:
        df[f'log_{col}'] = np.log(df[col] + 1e-06)
score_function = mean_absolute_error
def get_numpy_arrays(data):
    X = data.drop(['id', 'test', target], axis=1).to_numpy()
    y = data[target].to_numpy()
    scaler = StandardScaler()
    X = scaler.fit_transform(X)
    return (X, y)
def ensamble_pred(clfs, X):
    pred = 0
    for clf in clfs:
        pred += clf.predict_proba(X) if sklearn.base.is_classifier(clf) else clf.predict(X).squeeze()
    return np.argmax(pred, axis=1) if sklearn.base.is_classifier(clf) else pred / len(clfs)
def print_validation_score(clfs):
    pred = ensamble_pred(clfs, X_test)
    score = score_function(y_test, pred)
    return score
def examine_clf(X_train_val, y_train_val, clf, rkf, verbose=None):
    avg_score = 0
    clfs = []
    for n, (train_index, val_index) in enumerate(rkf.split(X_train_val, y_train_val)):
        X_train = X_train_val[train_index]
        y_train = y_train_val[train_index]
        X_val = X_train_val[val_index]
        y_val = y_train_val[val_index]
        clf = sklearn.base.clone(clf)
        if verbose is None:
            clf.fit(X_train, y_train)
        else:
            clf.fit(X_train, y_train, verbose=verbose)
        pred = np.argmax(clf.predict_proba(X_val), axis=1) if sklearn.base.is_classifier(clf) else clf.predict(X_val)
        score = score_function(y_val, pred)
        avg_score += score
        clfs.append(clf)
    avg_score /= n + 1
    return (clfs, avg_score)
def examine_clf_print(X_train_val, y_train_val, clf, rkf, verbose=None):
    clfs, avg_score = examine_clf(X_train_val, y_train_val, clf, rkf, verbose=verbose)
    return clfs
def adv_get_numpy_arrays(data):
    X = data.drop(['id', 'test', 'gen', target], axis=1).to_numpy()
    y = data['test'].to_numpy()
    scaler = StandardScaler()
    X = scaler.fit_transform(X)
    return (X, y)
def get_CV_results(X_train_val, y_train_val, clf, rkf, verbose=None):
    results = np.zeros_like(y_train_val, dtype=float)
    all_clfs = []
    for n, (train_index, val_index) in enumerate(rkf.split(X_train_val, y_train_val)):
        X_train = X_train_val[train_index]
        y_train = y_train_val[train_index]
        X_val = X_train_val[val_index]
        y_val = y_train_val[val_index]
        clf = sklearn.base.clone(clf)
        if verbose is None:
            clf.fit(X_train, y_train)
        else:
            clf.fit(X_train, y_train, verbose=verbose)
        pred = clf.predict_proba(X_val)[:, 1] if sklearn.base.is_classifier(clf) else clf.predict(X_val)
        results[val_index] += pred
        all_clfs.append(clf)
    return (results / n, all_clfs)
adv_df = df.copy()
adv_X, adv_y = adv_get_numpy_arrays(adv_df)
rkf = StratifiedKFold(n_splits=5, shuffle=True, random_state=10)
validation_part = 0.2
adv_df['adv'] = rfc_results + xgbc_results + cbc_results
adv_df_train_val_test = adv_df[adv_df.test == 0].sort_values('adv', ascending=False).drop('adv', axis=1)
val_split = int(validation_part * len(adv_df_train_val_test))
data_train_val = adv_df_train_val_test[val_split:]
data_test = adv_df_train_val_test[:val_split]
X_train_val, y_train_val = get_numpy_arrays(data_train_val)
X_test, y_test = get_numpy_arrays(data_test)
data_submit = df[df.test == 1].copy()
X_submit, y_submit = get_numpy_arrays(data_submit)
rkf = RepeatedKFold(n_splits=5, n_repeats=3, random_state=0)
all_clfs = []
lasso_clfs = examine_clf_print(X_train_val, y_train_val, Lasso(alpha=0.01, max_iter=1000), rkf)
ridge_clfs = examine_clf_print(X_train_val, y_train_val, Ridge(alpha=2, max_iter=1000), rkf)
code
128001996/cell_24
[ "text_plain_output_1.png" ]
from catboost import CatBoostRegressor, CatBoostClassifier
from sklearn.metrics import roc_auc_score, mean_absolute_error
from sklearn.model_selection import train_test_split, RepeatedKFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
target = 'yield'
df1 = pd.read_csv(TRAIN_CSV)
df1.rename({'Id': 'id'}, axis=1, inplace=True)
df1['test'] = 0
df1['gen'] = 1
df2 = pd.read_csv(TEST_CSV)
df2.rename({'Id': 'id'}, axis=1, inplace=True)
df2['test'] = 1
df2['gen'] = 1
df3 = pd.read_csv(EXTERNAL_CSV)
df3.rename({'Row#': 'id'}, axis=1, inplace=True)
df3['test'] = 0
df3['gen'] = 0
df = pd.concat([df1, df2, df3])
df.id.fillna(-1, inplace=True)
df.id = df.id.astype(int)
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
df.columns
if True:
    num_columns = ['fruitset', 'fruitmass', 'seeds',]
    ncols = 4
    for n, col in enumerate(num_columns):
        if n % ncols == 0:
            fig, axs = plt.subplots(ncols=ncols, figsize=(24,6))
        ax = axs[n % ncols]
        #sns.kdeplot(data=df, x=col, hue=target, ax=ax, palette='bright', common_norm=False, warn_singular=False)
        sns.histplot(data=df[col], ax=ax);
def explore_categorical_value(df, col, target='yield'):
    values = df[col].unique()
    values.sort()
    for value in values:
        select = df.loc[df[col] == value]
        mn = select[target].mean()
        cnt = len(select)
        cnt_test = len(select[select.test == 1])
cat_columns = ['clonesize', 'honeybee', 'bumbles', 'andrena', 'osmia', 'MaxOfUpperTRange', 'MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange', 'RainingDays', 'AverageRainingDays']
for col in cat_columns:
    explore_categorical_value(df, col)
if True:
    for col in num_columns + cat_columns:
        df[f'log_{col}'] = np.log(df[col] + 1e-06)
score_function = mean_absolute_error
def get_numpy_arrays(data):
    X = data.drop(['id', 'test', target], axis=1).to_numpy()
    y = data[target].to_numpy()
    scaler = StandardScaler()
    X = scaler.fit_transform(X)
    return (X, y)
def ensamble_pred(clfs, X):
    pred = 0
    for clf in clfs:
        pred += clf.predict_proba(X) if sklearn.base.is_classifier(clf) else clf.predict(X).squeeze()
    return np.argmax(pred, axis=1) if sklearn.base.is_classifier(clf) else pred / len(clfs)
def print_validation_score(clfs):
    pred = ensamble_pred(clfs, X_test)
    score = score_function(y_test, pred)
    return score
def examine_clf(X_train_val, y_train_val, clf, rkf, verbose=None):
    avg_score = 0
    clfs = []
    for n, (train_index, val_index) in enumerate(rkf.split(X_train_val, y_train_val)):
        X_train = X_train_val[train_index]
        y_train = y_train_val[train_index]
        X_val = X_train_val[val_index]
        y_val = y_train_val[val_index]
        clf = sklearn.base.clone(clf)
        if verbose is None:
            clf.fit(X_train, y_train)
        else:
            clf.fit(X_train, y_train, verbose=verbose)
        pred = np.argmax(clf.predict_proba(X_val), axis=1) if sklearn.base.is_classifier(clf) else clf.predict(X_val)
        score = score_function(y_val, pred)
        avg_score += score
        clfs.append(clf)
    avg_score /= n + 1
    return (clfs, avg_score)
def examine_clf_print(X_train_val, y_train_val, clf, rkf, verbose=None):
    clfs, avg_score = examine_clf(X_train_val, y_train_val, clf, rkf, verbose=verbose)
    return clfs
def adv_get_numpy_arrays(data):
    X = data.drop(['id', 'test', 'gen', target], axis=1).to_numpy()
    y = data['test'].to_numpy()
    scaler = StandardScaler()
    X = scaler.fit_transform(X)
    return (X, y)
def get_CV_results(X_train_val, y_train_val, clf, rkf, verbose=None):
    results = np.zeros_like(y_train_val, dtype=float)
    all_clfs = []
    for n, (train_index, val_index) in enumerate(rkf.split(X_train_val, y_train_val)):
        X_train = X_train_val[train_index]
        y_train = y_train_val[train_index]
        X_val = X_train_val[val_index]
        y_val = y_train_val[val_index]
        clf = sklearn.base.clone(clf)
        if verbose is None:
            clf.fit(X_train, y_train)
        else:
            clf.fit(X_train, y_train, verbose=verbose)
        pred = clf.predict_proba(X_val)[:, 1] if sklearn.base.is_classifier(clf) else clf.predict(X_val)
        results[val_index] += pred
        all_clfs.append(clf)
    return (results / n, all_clfs)
adv_df = df.copy()
adv_X, adv_y = adv_get_numpy_arrays(adv_df)
rkf = StratifiedKFold(n_splits=5, shuffle=True, random_state=10)
catboost_params = {'task_type': 'GPU', 'devices': '0:1'} if gpu_available else {}
cbc = CatBoostClassifier(iterations=200, depth=2, learning_rate=0.1, loss_function='Logloss', **catboost_params)
cbc_results, cbc_clfs = get_CV_results(adv_X, adv_y, cbc, rkf, False)
print(roc_auc_score(adv_y, cbc_results))
code
128001996/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
target = 'yield'
df1 = pd.read_csv(TRAIN_CSV)
df1.rename({'Id': 'id'}, axis=1, inplace=True)
df1['test'] = 0
df1['gen'] = 1
df2 = pd.read_csv(TEST_CSV)
df2.rename({'Id': 'id'}, axis=1, inplace=True)
df2['test'] = 1
df2['gen'] = 1
df3 = pd.read_csv(EXTERNAL_CSV)
df3.rename({'Row#': 'id'}, axis=1, inplace=True)
df3['test'] = 0
df3['gen'] = 0
df = pd.concat([df1, df2, df3])
df.id.fillna(-1, inplace=True)
df.id = df.id.astype(int)
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
df.columns
def explore_categorical_value(df, col, target='yield'):
    print(f'{col}')
    values = df[col].unique()
    values.sort()
    for value in values:
        select = df.loc[df[col] == value]
        mn = select[target].mean()
        cnt = len(select)
        cnt_test = len(select[select.test == 1])
        print(f'\t{value:16}\t{mn:.3f}\t{cnt}\t{cnt_test}')
cat_columns = ['clonesize', 'honeybee', 'bumbles', 'andrena', 'osmia', 'MaxOfUpperTRange', 'MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange', 'RainingDays', 'AverageRainingDays']
print(f'\t \tMean \t\tCount\tIn test')
for col in cat_columns:
    explore_categorical_value(df, col)
code
128001996/cell_22
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.metrics import roc_auc_score, mean_absolute_error
from sklearn.model_selection import train_test_split, RepeatedKFold, StratifiedKFold
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
target = 'yield'
df1 = pd.read_csv(TRAIN_CSV)
df1.rename({'Id': 'id'}, axis=1, inplace=True)
df1['test'] = 0
df1['gen'] = 1
df2 = pd.read_csv(TEST_CSV)
df2.rename({'Id': 'id'}, axis=1, inplace=True)
df2['test'] = 1
df2['gen'] = 1
df3 = pd.read_csv(EXTERNAL_CSV)
df3.rename({'Row#': 'id'}, axis=1, inplace=True)
df3['test'] = 0
df3['gen'] = 0
df = pd.concat([df1, df2, df3])
df.id.fillna(-1, inplace=True)
df.id = df.id.astype(int)
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
df.columns
if True:
    num_columns = ['fruitset', 'fruitmass', 'seeds',]
    ncols = 4
    for n, col in enumerate(num_columns):
        if n % ncols == 0:
            fig, axs = plt.subplots(ncols=ncols, figsize=(24,6))
        ax = axs[n % ncols]
        #sns.kdeplot(data=df, x=col, hue=target, ax=ax, palette='bright', common_norm=False, warn_singular=False)
        sns.histplot(data=df[col], ax=ax);
def explore_categorical_value(df, col, target='yield'):
    values = df[col].unique()
    values.sort()
    for value in values:
        select = df.loc[df[col] == value]
        mn = select[target].mean()
        cnt = len(select)
        cnt_test = len(select[select.test == 1])
cat_columns = ['clonesize', 'honeybee', 'bumbles', 'andrena', 'osmia', 'MaxOfUpperTRange', 'MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange', 'RainingDays', 'AverageRainingDays']
for col in cat_columns:
    explore_categorical_value(df, col)
if True:
    for col in num_columns + cat_columns:
        df[f'log_{col}'] = np.log(df[col] + 1e-06)
score_function = mean_absolute_error
def get_numpy_arrays(data):
    X = data.drop(['id', 'test', target], axis=1).to_numpy()
    y = data[target].to_numpy()
    scaler = StandardScaler()
    X = scaler.fit_transform(X)
    return (X, y)
def ensamble_pred(clfs, X):
    pred = 0
    for clf in clfs:
        pred += clf.predict_proba(X) if sklearn.base.is_classifier(clf) else clf.predict(X).squeeze()
    return np.argmax(pred, axis=1) if sklearn.base.is_classifier(clf) else pred / len(clfs)
def print_validation_score(clfs):
    pred = ensamble_pred(clfs, X_test)
    score = score_function(y_test, pred)
    return score
def examine_clf(X_train_val, y_train_val, clf, rkf, verbose=None):
    avg_score = 0
    clfs = []
    for n, (train_index, val_index) in enumerate(rkf.split(X_train_val, y_train_val)):
        X_train = X_train_val[train_index]
        y_train = y_train_val[train_index]
        X_val = X_train_val[val_index]
        y_val = y_train_val[val_index]
        clf = sklearn.base.clone(clf)
        if verbose is None:
            clf.fit(X_train, y_train)
        else:
            clf.fit(X_train, y_train, verbose=verbose)
        pred = np.argmax(clf.predict_proba(X_val), axis=1) if sklearn.base.is_classifier(clf) else clf.predict(X_val)
        score = score_function(y_val, pred)
        avg_score += score
        clfs.append(clf)
    avg_score /= n + 1
    return (clfs, avg_score)
def examine_clf_print(X_train_val, y_train_val, clf, rkf, verbose=None):
    clfs, avg_score = examine_clf(X_train_val, y_train_val, clf, rkf, verbose=verbose)
    return clfs
def adv_get_numpy_arrays(data):
    X = data.drop(['id', 'test', 'gen', target], axis=1).to_numpy()
    y = data['test'].to_numpy()
    scaler = StandardScaler()
    X = scaler.fit_transform(X)
    return (X, y)
def get_CV_results(X_train_val, y_train_val, clf, rkf, verbose=None):
    results = np.zeros_like(y_train_val, dtype=float)
    all_clfs = []
    for n, (train_index, val_index) in enumerate(rkf.split(X_train_val, y_train_val)):
        X_train = X_train_val[train_index]
        y_train = y_train_val[train_index]
        X_val = X_train_val[val_index]
        y_val = y_train_val[val_index]
        clf = sklearn.base.clone(clf)
        if verbose is None:
            clf.fit(X_train, y_train)
        else:
            clf.fit(X_train, y_train, verbose=verbose)
        pred = clf.predict_proba(X_val)[:, 1] if sklearn.base.is_classifier(clf) else clf.predict(X_val)
        results[val_index] += pred
        all_clfs.append(clf)
    return (results / n, all_clfs)
adv_df = df.copy()
adv_X, adv_y = adv_get_numpy_arrays(adv_df)
rkf = StratifiedKFold(n_splits=5, shuffle=True, random_state=10)
rfc = RandomForestClassifier(n_estimators=300, max_depth=8, n_jobs=-1)
rfc_results, rfc_clfs = get_CV_results(adv_X, adv_y, rfc, rkf)
print(roc_auc_score(adv_y, rfc_results))
code
128001996/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
target = 'yield'
df1 = pd.read_csv(TRAIN_CSV)
df1.rename({'Id': 'id'}, axis=1, inplace=True)
df1['test'] = 0
df1['gen'] = 1
df2 = pd.read_csv(TEST_CSV)
df2.rename({'Id': 'id'}, axis=1, inplace=True)
df2['test'] = 1
df2['gen'] = 1
df3 = pd.read_csv(EXTERNAL_CSV)
df3.rename({'Row#': 'id'}, axis=1, inplace=True)
df3['test'] = 0
df3['gen'] = 0
df = pd.concat([df1, df2, df3])
df.id.fillna(-1, inplace=True)
df.id = df.id.astype(int)
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
df.columns
code