path: stringlengths 13-17
screenshot_names: sequencelengths 1-873
code: stringlengths 0-40.4k
cell_type: stringclasses, 1 value
18149558/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
test = pd.read_csv('../input/test.csv')
items_cats = pd.read_csv('../input/item_categories.csv')
items = pd.read_csv('../input/items.csv')
print('Items set shape', items.shape)
code
18149558/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
test = pd.read_csv('../input/test.csv')
items_cats = pd.read_csv('../input/item_categories.csv')
items = pd.read_csv('../input/items.csv')
shops = pd.read_csv('../input/shops.csv')
train.columns.values
shops_train = train.groupby(['shop_id']).groups.keys()
len(shops_train)
item_train = train.groupby(['item_id']).groups.keys()
len(item_train)
shops_test = test.groupby(['shop_id']).groups.keys()
len(shops_test)
items_test = test.groupby(['item_id']).groups.keys()
len(items_test)
test.head()
code
18149558/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
test = pd.read_csv('../input/test.csv')
items_cats = pd.read_csv('../input/item_categories.csv')
items = pd.read_csv('../input/items.csv')
shops = pd.read_csv('../input/shops.csv')
print('Shops set shape', shops.shape)
code
18149558/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
test = pd.read_csv('../input/test.csv')
shops_test = test.groupby(['shop_id']).groups.keys()
len(shops_test)
items_test = test.groupby(['item_id']).groups.keys()
len(items_test)
code
18149558/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
test = pd.read_csv('../input/test.csv')
items_cats = pd.read_csv('../input/item_categories.csv')
items = pd.read_csv('../input/items.csv')
shops = pd.read_csv('../input/shops.csv')
train.columns.values
shops_train = train.groupby(['shop_id']).groups.keys()
len(shops_train)
item_train = train.groupby(['item_id']).groups.keys()
len(item_train)
shops_test = test.groupby(['shop_id']).groups.keys()
len(shops_test)
items_test = test.groupby(['item_id']).groups.keys()
len(items_test)
print('Train DS:', train.columns.values)
print('Test DS:', test.columns.values)
print('Item cats DS:', items_cats.columns.values)
print('Items DS:', items.columns.values)
print('Shops DS:', shops.columns.values)
code
18149558/cell_38
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
test = pd.read_csv('../input/test.csv')
items_cats = pd.read_csv('../input/item_categories.csv')
items = pd.read_csv('../input/items.csv')
shops = pd.read_csv('../input/shops.csv')
train.columns.values
shops_train = train.groupby(['shop_id']).groups.keys()
len(shops_train)
item_train = train.groupby(['item_id']).groups.keys()
len(item_train)
shops_test = test.groupby(['shop_id']).groups.keys()
len(shops_test)
items_test = test.groupby(['item_id']).groups.keys()
len(items_test)
train_df = train.groupby(['shop_id', 'item_id', 'date_block_num']).sum().reset_index().sort_values(by=['item_id', 'shop_id'])
train_df['m1'] = train_df.groupby(['shop_id', 'item_id']).item_cnt_day.shift()
train_df['m1'].fillna(0, inplace=True)
train_df
train_df['m2'] = train_df.groupby(['shop_id', 'item_id']).m1.shift()
train_df['m2'].fillna(0, inplace=True)
train_df.rename(columns={'item_cnt_day': 'item_cnt_month'}, inplace=True)
finalDf = train_df[['shop_id', 'item_id', 'date_block_num', 'm1', 'm2', 'item_cnt_month']].reset_index()
finalDf.drop(['index'], axis=1, inplace=True)
newTest = pd.merge_asof(test, finalDf, left_index=True, right_index=True, on=['shop_id', 'item_id'])
y_train = finalDf['item_cnt_month']
newTest.drop(['item_cnt_month'], axis=1, inplace=True)
x_train = finalDf[['shop_id', 'item_id', 'm1', 'm2']]
x_test = newTest[['shop_id', 'item_id', 'm1', 'm2']]
x_test.shape
x_test_reshaped = x_test.values.reshape((x_test.values.shape[0], 1, x_test.values.shape[1]))
x_test_reshaped.shape
code
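Note on the reshape above: a (samples, 1, features) layout is the input shape recurrent layers expect. Below is a minimal sketch of how such an array could feed a small Keras LSTM regressor; the framework, the layer sizes, and the existence of a matching x_train_reshaped are assumptions, not part of the original cell.

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense

model = Sequential([
    LSTM(32, input_shape=(1, 4)),  # one timestep, four features (shop_id, item_id, m1, m2)
    Dense(1),                      # regression output: predicted monthly count
])
model.compile(loss='mse', optimizer='adam')
# model.fit(x_train_reshaped, y_train, epochs=5, batch_size=4096)  # x_train_reshaped is assumed
# preds = model.predict(x_test_reshaped)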
18149558/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
test = pd.read_csv('../input/test.csv')
items_cats = pd.read_csv('../input/item_categories.csv')
items = pd.read_csv('../input/items.csv')
shops = pd.read_csv('../input/shops.csv')
train.columns.values
shops_train = train.groupby(['shop_id']).groups.keys()
len(shops_train)
item_train = train.groupby(['item_id']).groups.keys()
len(item_train)
shops_test = test.groupby(['shop_id']).groups.keys()
len(shops_test)
items_test = test.groupby(['item_id']).groups.keys()
len(items_test)
train.head()
code
18149558/cell_31
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
test = pd.read_csv('../input/test.csv')
items_cats = pd.read_csv('../input/item_categories.csv')
items = pd.read_csv('../input/items.csv')
shops = pd.read_csv('../input/shops.csv')
train.columns.values
shops_train = train.groupby(['shop_id']).groups.keys()
len(shops_train)
item_train = train.groupby(['item_id']).groups.keys()
len(item_train)
shops_test = test.groupby(['shop_id']).groups.keys()
len(shops_test)
items_test = test.groupby(['item_id']).groups.keys()
len(items_test)
train_df = train.groupby(['shop_id', 'item_id', 'date_block_num']).sum().reset_index().sort_values(by=['item_id', 'shop_id'])
train_df['m1'] = train_df.groupby(['shop_id', 'item_id']).item_cnt_day.shift()
train_df['m1'].fillna(0, inplace=True)
train_df
train_df['m2'] = train_df.groupby(['shop_id', 'item_id']).m1.shift()
train_df['m2'].fillna(0, inplace=True)
train_df.rename(columns={'item_cnt_day': 'item_cnt_month'}, inplace=True)
finalDf = train_df[['shop_id', 'item_id', 'date_block_num', 'm1', 'm2', 'item_cnt_month']].reset_index()
finalDf.drop(['index'], axis=1, inplace=True)
newTest = pd.merge_asof(test, finalDf, left_index=True, right_index=True, on=['shop_id', 'item_id'])
newTest.head()
code
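Note: merge_asof is normally driven by a single ordered key plus by-groups; the call above instead combines index- and column-based matching. A self-contained toy example of the standard semantics (all names here are illustrative, not from the notebook): each left row picks up the nearest earlier right row on the ordered key.

import pandas as pd

left = pd.DataFrame({'t': [1, 5, 10], 'shop_id': [0, 0, 0]})
right = pd.DataFrame({'t': [2, 3, 7], 'shop_id': [0, 0, 0], 'm1': [1.0, 2.0, 3.0]})
# both frames must be sorted on the 'on' key
print(pd.merge_asof(left, right, on='t', by='shop_id'))
# t=1 has no earlier match (NaN); t=5 matches t=3; t=10 matches t=7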
18149558/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
test = pd.read_csv('../input/test.csv')
shops_test = test.groupby(['shop_id']).groups.keys()
len(shops_test)
code
18149558/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
train.columns.values
code
18149558/cell_27
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
test = pd.read_csv('../input/test.csv')
items_cats = pd.read_csv('../input/item_categories.csv')
items = pd.read_csv('../input/items.csv')
shops = pd.read_csv('../input/shops.csv')
train.columns.values
shops_train = train.groupby(['shop_id']).groups.keys()
len(shops_train)
item_train = train.groupby(['item_id']).groups.keys()
len(item_train)
shops_test = test.groupby(['shop_id']).groups.keys()
len(shops_test)
items_test = test.groupby(['item_id']).groups.keys()
len(items_test)
train_df = train.groupby(['shop_id', 'item_id', 'date_block_num']).sum().reset_index().sort_values(by=['item_id', 'shop_id'])
train_df['m1'] = train_df.groupby(['shop_id', 'item_id']).item_cnt_day.shift()
train_df['m1'].fillna(0, inplace=True)
train_df
train_df['m2'] = train_df.groupby(['shop_id', 'item_id']).m1.shift()
train_df['m2'].fillna(0, inplace=True)
train_df.rename(columns={'item_cnt_day': 'item_cnt_month'}, inplace=True)
train_df.head()
code
18149558/cell_37
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
test = pd.read_csv('../input/test.csv')
items_cats = pd.read_csv('../input/item_categories.csv')
items = pd.read_csv('../input/items.csv')
shops = pd.read_csv('../input/shops.csv')
train.columns.values
shops_train = train.groupby(['shop_id']).groups.keys()
len(shops_train)
item_train = train.groupby(['item_id']).groups.keys()
len(item_train)
shops_test = test.groupby(['shop_id']).groups.keys()
len(shops_test)
items_test = test.groupby(['item_id']).groups.keys()
len(items_test)
train_df = train.groupby(['shop_id', 'item_id', 'date_block_num']).sum().reset_index().sort_values(by=['item_id', 'shop_id'])
train_df['m1'] = train_df.groupby(['shop_id', 'item_id']).item_cnt_day.shift()
train_df['m1'].fillna(0, inplace=True)
train_df
train_df['m2'] = train_df.groupby(['shop_id', 'item_id']).m1.shift()
train_df['m2'].fillna(0, inplace=True)
train_df.rename(columns={'item_cnt_day': 'item_cnt_month'}, inplace=True)
finalDf = train_df[['shop_id', 'item_id', 'date_block_num', 'm1', 'm2', 'item_cnt_month']].reset_index()
finalDf.drop(['index'], axis=1, inplace=True)
newTest = pd.merge_asof(test, finalDf, left_index=True, right_index=True, on=['shop_id', 'item_id'])
y_train = finalDf['item_cnt_month']
newTest.drop(['item_cnt_month'], axis=1, inplace=True)
x_train = finalDf[['shop_id', 'item_id', 'm1', 'm2']]
x_test = newTest[['shop_id', 'item_id', 'm1', 'm2']]
x_test.shape
code
18149558/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
train.columns.values
shops_train = train.groupby(['shop_id']).groups.keys()
len(shops_train)
code
18149558/cell_5
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
test = pd.read_csv('../input/test.csv')
print('Testing set shape', test.shape)
code
121151039/cell_13
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression

model = LinearRegression()
model.fit(X_train, y_train)
code
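A minimal follow-up sketch (not in the original cell): scoring the fitted line on held-out data, assuming X_test and y_test came from an earlier train_test_split over the same features.

from sklearn.metrics import mean_squared_error, r2_score

y_pred = model.predict(X_test)
print('R^2 :', r2_score(y_test, y_pred))
print('RMSE:', mean_squared_error(y_test, y_pred) ** 0.5)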
121151039/cell_9
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import pandas as pd

data = pd.read_csv('/kaggle/input/wind-turbine-scada-dataset/T1.csv')
data = data[data['LV ActivePower (kW)'] > 0]
data = data.dropna()
data = pd.get_dummies(data)
scaler = StandardScaler()
data[['Wind Speed (m/s)', 'Wind Direction (°)']] = scaler.fit_transform(data[['Wind Speed (m/s)', 'Wind Direction (°)']])
print(data.head())
print(data.describe())
code
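A hedged sketch of the split that presumably produced the X_train/y_train used in the next cell; the choice of the two scaled columns as features and active power as target is an assumption about this notebook.

from sklearn.model_selection import train_test_split

X = data[['Wind Speed (m/s)', 'Wind Direction (°)']]
y = data['LV ActivePower (kW)']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)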
121151039/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('/kaggle/input/wind-turbine-scada-dataset/T1.csv')
print(data.head())
print(data.describe())
print(data.info())
code
88085617/cell_21
[ "text_plain_output_1.png" ]
from sklearn import svm, datasets
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import cross_val_score
import numpy as np
import pandas as pd

iris = datasets.load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['target'] = iris.target
df['target'] = df['target'].apply(lambda x: iris.target_names[x])
result = cross_val_score(svm.SVC(kernel='linear', C=10, gamma='auto'), iris.data, iris.target, cv=5)
np.mean(result)
result = cross_val_score(svm.SVC(kernel='rbf', C=5, gamma='auto'), iris.data, iris.target, cv=5)
np.mean(result)
kernels = ['rbf', 'linear']
C = [1, 10, 20, 30]
avg_scores = {}
for kval in kernels:
    for cval in C:
        cv_scores = cross_val_score(svm.SVC(kernel=kval, C=cval), iris.data, iris.target, cv=5)
        avg_scores[kval + '_' + str(cval)] = np.mean(cv_scores)
avg_scores
clf = GridSearchCV(svm.SVC(gamma='auto'), {'C': [1, 5, 10, 7, 3, 12], 'kernel': ['rbf', 'linear', 'sigmoid', 'poly'], 'max_iter': [1, 3, 5, 7, 9, 12, 25, 50, 100]}, cv=10)
clf.fit(iris.data, iris.target)
clf.cv_results_
df = pd.DataFrame(clf.cv_results_)
df
clf.best_score_
clf.best_params_
clf = RandomizedSearchCV(svm.SVC(gamma='auto'), {'C': [1, 5, 10, 7, 3, 12], 'kernel': ['rbf', 'linear', 'sigmoid', 'poly'], 'max_iter': [1, 3, 5, 7, 9, 12, 25, 50, 100]}, cv=10, n_iter=4)
clf.fit(iris.data, iris.target)
clf.cv_results_
code
88085617/cell_13
[ "text_plain_output_1.png" ]
from sklearn import svm, datasets
from sklearn.model_selection import cross_val_score
import numpy as np
import pandas as pd

iris = datasets.load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['target'] = iris.target
df['target'] = df['target'].apply(lambda x: iris.target_names[x])
result = cross_val_score(svm.SVC(kernel='linear', C=10, gamma='auto'), iris.data, iris.target, cv=5)
np.mean(result)
result = cross_val_score(svm.SVC(kernel='rbf', C=5, gamma='auto'), iris.data, iris.target, cv=5)
np.mean(result)
code
88085617/cell_23
[ "text_html_output_1.png" ]
from sklearn import svm, datasets
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import cross_val_score
import numpy as np
import pandas as pd

iris = datasets.load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['target'] = iris.target
df['target'] = df['target'].apply(lambda x: iris.target_names[x])
result = cross_val_score(svm.SVC(kernel='linear', C=10, gamma='auto'), iris.data, iris.target, cv=5)
np.mean(result)
result = cross_val_score(svm.SVC(kernel='rbf', C=5, gamma='auto'), iris.data, iris.target, cv=5)
np.mean(result)
kernels = ['rbf', 'linear']
C = [1, 10, 20, 30]
avg_scores = {}
for kval in kernels:
    for cval in C:
        cv_scores = cross_val_score(svm.SVC(kernel=kval, C=cval), iris.data, iris.target, cv=5)
        avg_scores[kval + '_' + str(cval)] = np.mean(cv_scores)
avg_scores
clf = GridSearchCV(svm.SVC(gamma='auto'), {'C': [1, 5, 10, 7, 3, 12], 'kernel': ['rbf', 'linear', 'sigmoid', 'poly'], 'max_iter': [1, 3, 5, 7, 9, 12, 25, 50, 100]}, cv=10)
clf.fit(iris.data, iris.target)
clf.cv_results_
df = pd.DataFrame(clf.cv_results_)
df
clf.best_score_
clf.best_params_
clf = RandomizedSearchCV(svm.SVC(gamma='auto'), {'C': [1, 5, 10, 7, 3, 12], 'kernel': ['rbf', 'linear', 'sigmoid', 'poly'], 'max_iter': [1, 3, 5, 7, 9, 12, 25, 50, 100]}, cv=10, n_iter=4)
clf.fit(iris.data, iris.target)
clf.cv_results_
df = pd.DataFrame(clf.cv_results_)
df[['param_C', 'param_kernel', 'param_max_iter', 'mean_test_score']]
clf.best_params_
code
88085617/cell_19
[ "text_plain_output_1.png" ]
from sklearn import svm, datasets
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
import numpy as np
import pandas as pd

iris = datasets.load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['target'] = iris.target
df['target'] = df['target'].apply(lambda x: iris.target_names[x])
result = cross_val_score(svm.SVC(kernel='linear', C=10, gamma='auto'), iris.data, iris.target, cv=5)
np.mean(result)
result = cross_val_score(svm.SVC(kernel='rbf', C=5, gamma='auto'), iris.data, iris.target, cv=5)
np.mean(result)
kernels = ['rbf', 'linear']
C = [1, 10, 20, 30]
avg_scores = {}
for kval in kernels:
    for cval in C:
        cv_scores = cross_val_score(svm.SVC(kernel=kval, C=cval), iris.data, iris.target, cv=5)
        avg_scores[kval + '_' + str(cval)] = np.mean(cv_scores)
avg_scores
clf = GridSearchCV(svm.SVC(gamma='auto'), {'C': [1, 5, 10, 7, 3, 12], 'kernel': ['rbf', 'linear', 'sigmoid', 'poly'], 'max_iter': [1, 3, 5, 7, 9, 12, 25, 50, 100]}, cv=10)
clf.fit(iris.data, iris.target)
clf.cv_results_
df = pd.DataFrame(clf.cv_results_)
df
clf.best_score_
clf.best_params_
code
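A short follow-on sketch: since GridSearchCV refits on the full data by default, the tuned model is already available as clf.best_estimator_; equivalently, best_params_ unpacks straight into the SVC constructor.

best_model = svm.SVC(gamma='auto', **clf.best_params_)
best_model.fit(iris.data, iris.target)
print(best_model.score(iris.data, iris.target))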
88085617/cell_7
[ "text_plain_output_1.png" ]
from sklearn import svm, datasets
import pandas as pd

iris = datasets.load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df
code
88085617/cell_18
[ "text_html_output_1.png" ]
from sklearn import svm, datasets
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
import numpy as np
import pandas as pd

iris = datasets.load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['target'] = iris.target
df['target'] = df['target'].apply(lambda x: iris.target_names[x])
result = cross_val_score(svm.SVC(kernel='linear', C=10, gamma='auto'), iris.data, iris.target, cv=5)
np.mean(result)
result = cross_val_score(svm.SVC(kernel='rbf', C=5, gamma='auto'), iris.data, iris.target, cv=5)
np.mean(result)
kernels = ['rbf', 'linear']
C = [1, 10, 20, 30]
avg_scores = {}
for kval in kernels:
    for cval in C:
        cv_scores = cross_val_score(svm.SVC(kernel=kval, C=cval), iris.data, iris.target, cv=5)
        avg_scores[kval + '_' + str(cval)] = np.mean(cv_scores)
avg_scores
clf = GridSearchCV(svm.SVC(gamma='auto'), {'C': [1, 5, 10, 7, 3, 12], 'kernel': ['rbf', 'linear', 'sigmoid', 'poly'], 'max_iter': [1, 3, 5, 7, 9, 12, 25, 50, 100]}, cv=10)
clf.fit(iris.data, iris.target)
clf.cv_results_
df = pd.DataFrame(clf.cv_results_)
df
clf.best_score_
code
88085617/cell_15
[ "text_plain_output_1.png" ]
from sklearn import svm, datasets
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
import numpy as np
import pandas as pd

iris = datasets.load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['target'] = iris.target
df['target'] = df['target'].apply(lambda x: iris.target_names[x])
result = cross_val_score(svm.SVC(kernel='linear', C=10, gamma='auto'), iris.data, iris.target, cv=5)
np.mean(result)
result = cross_val_score(svm.SVC(kernel='rbf', C=5, gamma='auto'), iris.data, iris.target, cv=5)
np.mean(result)
kernels = ['rbf', 'linear']
C = [1, 10, 20, 30]
avg_scores = {}
for kval in kernels:
    for cval in C:
        cv_scores = cross_val_score(svm.SVC(kernel=kval, C=cval), iris.data, iris.target, cv=5)
        avg_scores[kval + '_' + str(cval)] = np.mean(cv_scores)
avg_scores
clf = GridSearchCV(svm.SVC(gamma='auto'), {'C': [1, 5, 10, 7, 3, 12], 'kernel': ['rbf', 'linear', 'sigmoid', 'poly'], 'max_iter': [1, 3, 5, 7, 9, 12, 25, 50, 100]}, cv=10)
clf.fit(iris.data, iris.target)
clf.cv_results_
code
88085617/cell_16
[ "text_plain_output_1.png" ]
from sklearn import svm, datasets
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
import numpy as np
import pandas as pd

iris = datasets.load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['target'] = iris.target
df['target'] = df['target'].apply(lambda x: iris.target_names[x])
result = cross_val_score(svm.SVC(kernel='linear', C=10, gamma='auto'), iris.data, iris.target, cv=5)
np.mean(result)
result = cross_val_score(svm.SVC(kernel='rbf', C=5, gamma='auto'), iris.data, iris.target, cv=5)
np.mean(result)
kernels = ['rbf', 'linear']
C = [1, 10, 20, 30]
avg_scores = {}
for kval in kernels:
    for cval in C:
        cv_scores = cross_val_score(svm.SVC(kernel=kval, C=cval), iris.data, iris.target, cv=5)
        avg_scores[kval + '_' + str(cval)] = np.mean(cv_scores)
avg_scores
clf = GridSearchCV(svm.SVC(gamma='auto'), {'C': [1, 5, 10, 7, 3, 12], 'kernel': ['rbf', 'linear', 'sigmoid', 'poly'], 'max_iter': [1, 3, 5, 7, 9, 12, 25, 50, 100]}, cv=10)
clf.fit(iris.data, iris.target)
clf.cv_results_
df = pd.DataFrame(clf.cv_results_)
df
code
88085617/cell_17
[ "text_html_output_1.png" ]
from sklearn import svm, datasets
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
import numpy as np
import pandas as pd

iris = datasets.load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['target'] = iris.target
df['target'] = df['target'].apply(lambda x: iris.target_names[x])
result = cross_val_score(svm.SVC(kernel='linear', C=10, gamma='auto'), iris.data, iris.target, cv=5)
np.mean(result)
result = cross_val_score(svm.SVC(kernel='rbf', C=5, gamma='auto'), iris.data, iris.target, cv=5)
np.mean(result)
kernels = ['rbf', 'linear']
C = [1, 10, 20, 30]
avg_scores = {}
for kval in kernels:
    for cval in C:
        cv_scores = cross_val_score(svm.SVC(kernel=kval, C=cval), iris.data, iris.target, cv=5)
        avg_scores[kval + '_' + str(cval)] = np.mean(cv_scores)
avg_scores
clf = GridSearchCV(svm.SVC(gamma='auto'), {'C': [1, 5, 10, 7, 3, 12], 'kernel': ['rbf', 'linear', 'sigmoid', 'poly'], 'max_iter': [1, 3, 5, 7, 9, 12, 25, 50, 100]}, cv=10)
clf.fit(iris.data, iris.target)
clf.cv_results_
df = pd.DataFrame(clf.cv_results_)
df
df[['param_C', 'param_kernel', 'param_max_iter', 'mean_test_score']]
code
88085617/cell_14
[ "text_plain_output_1.png" ]
from sklearn import svm, datasets
from sklearn.model_selection import cross_val_score
import numpy as np
import pandas as pd

iris = datasets.load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['target'] = iris.target
df['target'] = df['target'].apply(lambda x: iris.target_names[x])
result = cross_val_score(svm.SVC(kernel='linear', C=10, gamma='auto'), iris.data, iris.target, cv=5)
np.mean(result)
result = cross_val_score(svm.SVC(kernel='rbf', C=5, gamma='auto'), iris.data, iris.target, cv=5)
np.mean(result)
kernels = ['rbf', 'linear']
C = [1, 10, 20, 30]
avg_scores = {}
for kval in kernels:
    for cval in C:
        cv_scores = cross_val_score(svm.SVC(kernel=kval, C=cval), iris.data, iris.target, cv=5)
        avg_scores[kval + '_' + str(cval)] = np.mean(cv_scores)
avg_scores
code
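A small follow-on sketch: the manual grid above leaves its results in a plain dict, so the best (kernel, C) combination can be read off directly.

best_combo = max(avg_scores, key=avg_scores.get)
print(best_combo, avg_scores[best_combo])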
88085617/cell_22
[ "text_plain_output_1.png" ]
from sklearn import svm, datasets
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import cross_val_score
import numpy as np
import pandas as pd

iris = datasets.load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['target'] = iris.target
df['target'] = df['target'].apply(lambda x: iris.target_names[x])
result = cross_val_score(svm.SVC(kernel='linear', C=10, gamma='auto'), iris.data, iris.target, cv=5)
np.mean(result)
result = cross_val_score(svm.SVC(kernel='rbf', C=5, gamma='auto'), iris.data, iris.target, cv=5)
np.mean(result)
kernels = ['rbf', 'linear']
C = [1, 10, 20, 30]
avg_scores = {}
for kval in kernels:
    for cval in C:
        cv_scores = cross_val_score(svm.SVC(kernel=kval, C=cval), iris.data, iris.target, cv=5)
        avg_scores[kval + '_' + str(cval)] = np.mean(cv_scores)
avg_scores
clf = GridSearchCV(svm.SVC(gamma='auto'), {'C': [1, 5, 10, 7, 3, 12], 'kernel': ['rbf', 'linear', 'sigmoid', 'poly'], 'max_iter': [1, 3, 5, 7, 9, 12, 25, 50, 100]}, cv=10)
clf.fit(iris.data, iris.target)
clf.cv_results_
df = pd.DataFrame(clf.cv_results_)
df
clf.best_score_
clf.best_params_
clf = RandomizedSearchCV(svm.SVC(gamma='auto'), {'C': [1, 5, 10, 7, 3, 12], 'kernel': ['rbf', 'linear', 'sigmoid', 'poly'], 'max_iter': [1, 3, 5, 7, 9, 12, 25, 50, 100]}, cv=10, n_iter=4)
clf.fit(iris.data, iris.target)
clf.cv_results_
df = pd.DataFrame(clf.cv_results_)
df[['param_C', 'param_kernel', 'param_max_iter', 'mean_test_score']]
code
88085617/cell_10
[ "text_html_output_1.png" ]
from sklearn import svm, datasets
import pandas as pd

iris = datasets.load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df
code
88085617/cell_12
[ "text_html_output_1.png" ]
from sklearn import svm, datasets
from sklearn.model_selection import cross_val_score
import numpy as np
import pandas as pd

iris = datasets.load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['target'] = iris.target
df['target'] = df['target'].apply(lambda x: iris.target_names[x])
result = cross_val_score(svm.SVC(kernel='linear', C=10, gamma='auto'), iris.data, iris.target, cv=5)
np.mean(result)
code
33116172/cell_13
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

nRowsRead = 1000
df1 = pd.read_csv('/kaggle/input/column_2C.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'column_2C.csv'
nRow, nCol = df1.shape
nRowsRead = 1000
df2 = pd.read_csv('/kaggle/input/column_3C.csv', delimiter=',', nrows=nRowsRead)
df2.dataframeName = 'column_3C.csv'
nRow, nCol = df2.shape
df2.head(5)
code
33116172/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

nRowsRead = 1000
df1 = pd.read_csv('/kaggle/input/column_2C.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'column_2C.csv'
nRow, nCol = df1.shape
df1.head(5)
code
33116172/cell_25
[ "text_plain_output_1.png" ]
(y_test.shape, X_test.shape)
X_test.min()
code
33116172/cell_23
[ "text_html_output_1.png" ]
from sklearn.metrics import accuracy_score
import xgboost as xgb

model = xgb.XGBClassifier()
model.fit(X_train, y_train)
model.save_model('model.bst')
y_pred = model.predict(X_test)
predictions = [round(value) for value in y_pred]
accuracy = accuracy_score(y_test, predictions)
print('Accuracy: %.2f%%' % (accuracy * 100.0))
code
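A minimal sketch, assuming the same xgboost version at load time: restore the model saved above and check that it reproduces the in-memory predictions.

import xgboost as xgb

restored = xgb.XGBClassifier()
restored.load_model('model.bst')  # file written by model.save_model above
assert (restored.predict(X_test) == model.predict(X_test)).all()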
33116172/cell_30
[ "text_plain_output_1.png" ]
from platform import python_version

print(python_version())
code
33116172/cell_29
[ "text_plain_output_1.png" ]
from sklearn import model_selection
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb

nRowsRead = 1000
df1 = pd.read_csv('/kaggle/input/column_2C.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'column_2C.csv'
nRow, nCol = df1.shape
X = df1[['pelvic_incidence', 'pelvic_tilt', 'lumbar_lordosis_angle', 'sacral_slope', 'pelvic_radius', 'degree_spondylolisthesis']]
Y = df1['class']
seed = 2020
test_size = 0.33
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)
model = xgb.XGBClassifier()
model.fit(X_train, y_train)
model.save_model('model.bst')
y_pred = model.predict(X_test)
predictions = [round(value) for value in y_pred]
accuracy = accuracy_score(y_test, predictions)
model = xgb.XGBClassifier()
kfold = KFold(n_splits=10, random_state=2020)
results = cross_val_score(model, X, Y, cv=kfold)
xgb.__version__
code
33116172/cell_26
[ "text_plain_output_1.png" ]
(y_test.shape, X_test.shape)
X_test.min()
X_test.max()
code
33116172/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

nRowsRead = 1000
df1 = pd.read_csv('/kaggle/input/column_2C.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'column_2C.csv'
nRow, nCol = df1.shape
nRowsRead = 1000
df2 = pd.read_csv('/kaggle/input/column_3C.csv', delimiter=',', nrows=nRowsRead)
df2.dataframeName = 'column_3C.csv'
nRow, nCol = df2.shape
print(f'There are {nRow} rows and {nCol} columns')
code
33116172/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

nRowsRead = 1000
df1 = pd.read_csv('/kaggle/input/column_2C.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'column_2C.csv'
nRow, nCol = df1.shape
print(f'There are {nRow} rows and {nCol} columns')
code
33116172/cell_28
[ "text_plain_output_1.png" ]
from sklearn import model_selection
from sklearn.metrics import accuracy_score
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb

nRowsRead = 1000
df1 = pd.read_csv('/kaggle/input/column_2C.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'column_2C.csv'
nRow, nCol = df1.shape
X = df1[['pelvic_incidence', 'pelvic_tilt', 'lumbar_lordosis_angle', 'sacral_slope', 'pelvic_radius', 'degree_spondylolisthesis']]
Y = df1['class']
seed = 2020
test_size = 0.33
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)
model = xgb.XGBClassifier()
model.fit(X_train, y_train)
model.save_model('model.bst')
y_pred = model.predict(X_test)
predictions = [round(value) for value in y_pred]
accuracy = accuracy_score(y_test, predictions)
model = xgb.XGBClassifier()
kfold = KFold(n_splits=10, random_state=2020)
results = cross_val_score(model, X, Y, cv=kfold)
print('Accuracy: %.2f%% (%.2f%%)' % (results.mean() * 100, results.std() * 100))
code
33116172/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

nRowsRead = 1000
df1 = pd.read_csv('/kaggle/input/column_2C.csv', delimiter=',', nrows=nRowsRead)
df1.dataframeName = 'column_2C.csv'
nRow, nCol = df1.shape
nRowsRead = 1000
df2 = pd.read_csv('/kaggle/input/column_3C.csv', delimiter=',', nrows=nRowsRead)
df2.dataframeName = 'column_3C.csv'
nRow, nCol = df2.shape
sns.pairplot(df2, hue='class', size=3, diag_kind='kde')
code
33116172/cell_24
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
(y_test.shape, X_test.shape)
code
33116172/cell_27
[ "text_plain_output_1.png" ]
(y_test.shape, X_test.shape)
X_test.min()
X_test.max()
X_test.mean()
code
72075480/cell_42
[ "text_plain_output_1.png" ]
from textblob import Word, TextBlob
from warnings import filterwarnings
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 200)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
df = pd.read_csv('../input/reviews/Restaurant_Reviews.tsv', delimiter='\t')
df.shape
drops = pd.Series(' '.join(df['Review']).split()).value_counts()[-250:]
df['Review'] = df['Review'].apply(lambda x: ' '.join((x for x in x.split() if x not in drops)))
df['Review'] = df['Review'].apply(lambda x: ' '.join([Word(word).lemmatize() for word in x.split()]))
tf = df['Review'].apply(lambda x: pd.value_counts(x.split(' '))).sum(axis=0).reset_index()
tf.columns = ['words', 'tf']
tf.shape
tf.sort_values('tf', ascending=False)
text = ' '.join((i for i in df.Review))
wordcloud = WordCloud().generate(text)
plt.axis('off')
wordcloud = WordCloud(max_font_size=50, max_words=100, background_color='white').generate(text)
plt.axis('off')
wordcloud.to_file('wordcloud.png')
df.head()
code
72075480/cell_21
[ "text_html_output_1.png" ]
from textblob import Word, TextBlob
from warnings import filterwarnings
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 200)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
df = pd.read_csv('../input/reviews/Restaurant_Reviews.tsv', delimiter='\t')
df.shape
drops = pd.Series(' '.join(df['Review']).split()).value_counts()[-250:]
df['Review'] = df['Review'].apply(lambda x: ' '.join((x for x in x.split() if x not in drops)))
df['Review'].apply(lambda x: TextBlob(x).words).head()
code
72075480/cell_9
[ "text_plain_output_1.png" ]
from warnings import filterwarnings
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 200)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
df = pd.read_csv('../input/reviews/Restaurant_Reviews.tsv', delimiter='\t')
df.shape
code
72075480/cell_34
[ "text_plain_output_1.png" ]
from textblob import Word, TextBlob
from warnings import filterwarnings
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 200)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
df = pd.read_csv('../input/reviews/Restaurant_Reviews.tsv', delimiter='\t')
df.shape
drops = pd.Series(' '.join(df['Review']).split()).value_counts()[-250:]
df['Review'] = df['Review'].apply(lambda x: ' '.join((x for x in x.split() if x not in drops)))
df['Review'] = df['Review'].apply(lambda x: ' '.join([Word(word).lemmatize() for word in x.split()]))
tf = df['Review'].apply(lambda x: pd.value_counts(x.split(' '))).sum(axis=0).reset_index()
tf.columns = ['words', 'tf']
tf.shape
tf.sort_values('tf', ascending=False)
tf[tf['tf'] > 25].plot.bar(x='words', y='tf')
plt.show()
code
72075480/cell_30
[ "text_plain_output_1.png" ]
from textblob import Word, TextBlob
from warnings import filterwarnings
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 200)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
df = pd.read_csv('../input/reviews/Restaurant_Reviews.tsv', delimiter='\t')
df.shape
drops = pd.Series(' '.join(df['Review']).split()).value_counts()[-250:]
df['Review'] = df['Review'].apply(lambda x: ' '.join((x for x in x.split() if x not in drops)))
df['Review'] = df['Review'].apply(lambda x: ' '.join([Word(word).lemmatize() for word in x.split()]))
tf = df['Review'].apply(lambda x: pd.value_counts(x.split(' '))).sum(axis=0).reset_index()
tf.columns = ['words', 'tf']
tf.shape
tf['words'].nunique()
code
72075480/cell_40
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
from nltk.sentiment import SentimentIntensityAnalyzer
from textblob import Word, TextBlob
from warnings import filterwarnings
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 200)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
df = pd.read_csv('../input/reviews/Restaurant_Reviews.tsv', delimiter='\t')
df.shape
drops = pd.Series(' '.join(df['Review']).split()).value_counts()[-250:]
df['Review'] = df['Review'].apply(lambda x: ' '.join((x for x in x.split() if x not in drops)))
df['Review'] = df['Review'].apply(lambda x: ' '.join([Word(word).lemmatize() for word in x.split()]))
tf = df['Review'].apply(lambda x: pd.value_counts(x.split(' '))).sum(axis=0).reset_index()
tf.columns = ['words', 'tf']
tf.shape
tf.sort_values('tf', ascending=False)
text = ' '.join((i for i in df.Review))
wordcloud = WordCloud().generate(text)
plt.axis('off')
wordcloud = WordCloud(max_font_size=50, max_words=100, background_color='white').generate(text)
plt.axis('off')
wordcloud.to_file('wordcloud.png')
sia = SentimentIntensityAnalyzer()
sia.polarity_scores('The food was awesome')
df['Review'].apply(lambda x: x.upper())
df['Review'][0:10].apply(lambda x: sia.polarity_scores(x))
code
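A hedged continuation, not in the original cell: collapsing VADER's compound score into a per-review label, using the conventional zero threshold (the column names here are illustrative).

df['polarity_score'] = df['Review'].apply(lambda x: sia.polarity_scores(x)['compound'])
df['sentiment_label'] = df['polarity_score'].apply(lambda s: 'pos' if s > 0 else 'neg')
df[['Review', 'polarity_score', 'sentiment_label']].head()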
72075480/cell_29
[ "text_html_output_1.png" ]
from textblob import Word, TextBlob
from warnings import filterwarnings
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 200)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
df = pd.read_csv('../input/reviews/Restaurant_Reviews.tsv', delimiter='\t')
df.shape
drops = pd.Series(' '.join(df['Review']).split()).value_counts()[-250:]
df['Review'] = df['Review'].apply(lambda x: ' '.join((x for x in x.split() if x not in drops)))
df['Review'] = df['Review'].apply(lambda x: ' '.join([Word(word).lemmatize() for word in x.split()]))
tf = df['Review'].apply(lambda x: pd.value_counts(x.split(' '))).sum(axis=0).reset_index()
tf.columns = ['words', 'tf']
tf.shape
code
72075480/cell_39
[ "image_output_1.png" ]
from textblob import Word, TextBlob
from warnings import filterwarnings
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 200)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
df = pd.read_csv('../input/reviews/Restaurant_Reviews.tsv', delimiter='\t')
df.shape
drops = pd.Series(' '.join(df['Review']).split()).value_counts()[-250:]
df['Review'] = df['Review'].apply(lambda x: ' '.join((x for x in x.split() if x not in drops)))
df['Review'] = df['Review'].apply(lambda x: ' '.join([Word(word).lemmatize() for word in x.split()]))
tf = df['Review'].apply(lambda x: pd.value_counts(x.split(' '))).sum(axis=0).reset_index()
tf.columns = ['words', 'tf']
tf.shape
tf.sort_values('tf', ascending=False)
text = ' '.join((i for i in df.Review))
wordcloud = WordCloud().generate(text)
plt.axis('off')
wordcloud = WordCloud(max_font_size=50, max_words=100, background_color='white').generate(text)
plt.axis('off')
wordcloud.to_file('wordcloud.png')
df['Review'].apply(lambda x: x.upper())
code
72075480/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72075480/cell_7
[ "text_plain_output_1.png" ]
from warnings import filterwarnings
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 200)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
df = pd.read_csv('../input/reviews/Restaurant_Reviews.tsv', delimiter='\t')
df.head()
code
72075480/cell_32
[ "text_plain_output_1.png" ]
from textblob import Word, TextBlob
from warnings import filterwarnings
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 200)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
df = pd.read_csv('../input/reviews/Restaurant_Reviews.tsv', delimiter='\t')
df.shape
drops = pd.Series(' '.join(df['Review']).split()).value_counts()[-250:]
df['Review'] = df['Review'].apply(lambda x: ' '.join((x for x in x.split() if x not in drops)))
df['Review'] = df['Review'].apply(lambda x: ' '.join([Word(word).lemmatize() for word in x.split()]))
tf = df['Review'].apply(lambda x: pd.value_counts(x.split(' '))).sum(axis=0).reset_index()
tf.columns = ['words', 'tf']
tf.shape
tf.sort_values('tf', ascending=False)
code
72075480/cell_28
[ "text_plain_output_1.png" ]
from textblob import Word, TextBlob
from warnings import filterwarnings
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 200)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
df = pd.read_csv('../input/reviews/Restaurant_Reviews.tsv', delimiter='\t')
df.shape
drops = pd.Series(' '.join(df['Review']).split()).value_counts()[-250:]
df['Review'] = df['Review'].apply(lambda x: ' '.join((x for x in x.split() if x not in drops)))
df['Review'] = df['Review'].apply(lambda x: ' '.join([Word(word).lemmatize() for word in x.split()]))
tf = df['Review'].apply(lambda x: pd.value_counts(x.split(' '))).sum(axis=0).reset_index()
tf.columns = ['words', 'tf']
tf.head()
code
72075480/cell_8
[ "text_html_output_1.png" ]
from warnings import filterwarnings
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 200)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
df = pd.read_csv('../input/reviews/Restaurant_Reviews.tsv', delimiter='\t')
df.info()
code
72075480/cell_38
[ "text_html_output_1.png" ]
from nltk.sentiment import SentimentIntensityAnalyzer

sia = SentimentIntensityAnalyzer()
sia.polarity_scores('The food was awesome')
code
72075480/cell_3
[ "text_plain_output_1.png" ]
from warnings import filterwarnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from PIL import Image
from nltk.corpus import stopwords
from nltk.sentiment import SentimentIntensityAnalyzer
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, GridSearchCV, cross_validate
from sklearn.preprocessing import LabelEncoder
from textblob import Word, TextBlob
from wordcloud import WordCloud
code
72075480/cell_31
[ "text_html_output_1.png" ]
from textblob import Word, TextBlob
from warnings import filterwarnings
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 200)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
df = pd.read_csv('../input/reviews/Restaurant_Reviews.tsv', delimiter='\t')
df.shape
drops = pd.Series(' '.join(df['Review']).split()).value_counts()[-250:]
df['Review'] = df['Review'].apply(lambda x: ' '.join((x for x in x.split() if x not in drops)))
df['Review'] = df['Review'].apply(lambda x: ' '.join([Word(word).lemmatize() for word in x.split()]))
tf = df['Review'].apply(lambda x: pd.value_counts(x.split(' '))).sum(axis=0).reset_index()
tf.columns = ['words', 'tf']
tf.shape
tf['tf'].describe([0.05, 0.1, 0.25, 0.5, 0.75, 0.8, 0.9, 0.95, 0.99]).T
code
72075480/cell_24
[ "text_plain_output_1.png" ]
from warnings import filterwarnings
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 200)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
df = pd.read_csv('../input/reviews/Restaurant_Reviews.tsv', delimiter='\t')
df.shape
df['Review'].head()
code
72075480/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
from warnings import filterwarnings
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 200)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
df = pd.read_csv('../input/reviews/Restaurant_Reviews.tsv', delimiter='\t')
df.shape
df.head()
code
72075480/cell_36
[ "text_plain_output_1.png" ]
from textblob import Word, TextBlob
from warnings import filterwarnings
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

filterwarnings('ignore')
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 200)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
df = pd.read_csv('../input/reviews/Restaurant_Reviews.tsv', delimiter='\t')
df.shape
drops = pd.Series(' '.join(df['Review']).split()).value_counts()[-250:]
df['Review'] = df['Review'].apply(lambda x: ' '.join((x for x in x.split() if x not in drops)))
df['Review'] = df['Review'].apply(lambda x: ' '.join([Word(word).lemmatize() for word in x.split()]))
tf = df['Review'].apply(lambda x: pd.value_counts(x.split(' '))).sum(axis=0).reset_index()
tf.columns = ['words', 'tf']
tf.shape
tf.sort_values('tf', ascending=False)
text = ' '.join((i for i in df.Review))
wordcloud = WordCloud().generate(text)
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.show()
wordcloud = WordCloud(max_font_size=50, max_words=100, background_color='white').generate(text)
plt.figure()
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.show()
wordcloud.to_file('wordcloud.png')
code
2014797/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

train = pd.read_csv('...train.csv')
test = pd.read_csv('...test.csv')
train = train.drop(train[['Cabin', 'Embarked', 'Name', 'Ticket']], axis=1)
test = test.drop(test[['Cabin', 'Embarked', 'Name', 'Ticket']], axis=1)
train.dropna(axis=0, how='any', inplace=True)
code
2014797/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

train = pd.read_csv('...train.csv')
test = pd.read_csv('...test.csv')
train.head(5)
code
2014797/cell_30
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('...train.csv')
test = pd.read_csv('...test.csv')
train = train.drop(train[['Cabin', 'Embarked', 'Name', 'Ticket']], axis=1)
test = test.drop(test[['Cabin', 'Embarked', 'Name', 'Ticket']], axis=1)
train.dropna(axis=0, how='any', inplace=True)
corr = train.corr()
train.drop(train[['Fare', 'SibSp', 'Parch']], axis=1, inplace=True)
test.drop(test[['Fare', 'SibSp', 'Parch']], axis=1, inplace=True)
sex_variable = train.pivot_table(index='Sex', values='Survived')
survived = train[train['Survived'] == 1]
died = train[train['Survived'] == 0]

def category_age(df, cut_points, label_names):
    df['Age'] = df['Age'].fillna(-0.5)
    df['Age_categories'] = pd.cut(df['Age'], cut_points, labels=label_names)
    return df

cut_points = [0, 5, 12, 18, 35, 60, 100]
label_names = ['Infant', 'Child', 'Teenager', 'Young Adult', 'Adult', 'Senior']
train = category_age(train, cut_points, label_names)
test = category_age(test, cut_points, label_names)
pivot = train.pivot_table(index='Age_categories', values='Survived')

def create_dummies(df, column_name):
    dummies = pd.get_dummies(df[column_name], prefix=column_name)
    df = pd.concat([df, dummies], axis=1)
    return df

for column in ['Pclass', 'Sex', 'Age_categories']:
    train = create_dummies(train, column)
    test = create_dummies(test, column)
train = train.drop(train[['Pclass', 'Sex', 'Age', 'Age_categories']], axis=1)
test = test.drop(test[['Pclass', 'Sex', 'Age', 'Age_categories']], axis=1)
lr = LogisticRegression()
train_y = train['Survived']
train_x = train.drop('Survived', axis=1)
scores = cross_val_score(lr, train_x, train_y, cv=10)
scores.sort()
accuracy = scores.mean()
print(scores)
print(accuracy)
code
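A hedged continuation, not in the original notebook: fit on the full training set and predict the held-out test file. This assumes test's dummy columns line up with train_x after the shared preprocessing, and that PassengerId (which survives the drops above) is the intended output key.

lr.fit(train_x, train_y)
holdout_predictions = lr.predict(test)
submission = pd.DataFrame({'PassengerId': test['PassengerId'], 'Survived': holdout_predictions})
submission.to_csv('submission.csv', index=False)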
2014797/cell_20
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('...train.csv')
test = pd.read_csv('...test.csv')
train = train.drop(train[['Cabin', 'Embarked', 'Name', 'Ticket']], axis=1)
test = test.drop(test[['Cabin', 'Embarked', 'Name', 'Ticket']], axis=1)
train.dropna(axis=0, how='any', inplace=True)
corr = train.corr()
train.drop(train[['Fare', 'SibSp', 'Parch']], axis=1, inplace=True)
test.drop(test[['Fare', 'SibSp', 'Parch']], axis=1, inplace=True)
sex_variable = train.pivot_table(index='Sex', values='Survived')
sex_variable.plot.bar()
plt.show()
code
2014797/cell_26
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('...train.csv')
test = pd.read_csv('...test.csv')
train = train.drop(train[['Cabin', 'Embarked', 'Name', 'Ticket']], axis=1)
test = test.drop(test[['Cabin', 'Embarked', 'Name', 'Ticket']], axis=1)
train.dropna(axis=0, how='any', inplace=True)
corr = train.corr()
train.drop(train[['Fare', 'SibSp', 'Parch']], axis=1, inplace=True)
test.drop(test[['Fare', 'SibSp', 'Parch']], axis=1, inplace=True)
sex_variable = train.pivot_table(index='Sex', values='Survived')
survived = train[train['Survived'] == 1]
died = train[train['Survived'] == 0]

def category_age(df, cut_points, label_names):
    df['Age'] = df['Age'].fillna(-0.5)
    df['Age_categories'] = pd.cut(df['Age'], cut_points, labels=label_names)
    return df

cut_points = [0, 5, 12, 18, 35, 60, 100]
label_names = ['Infant', 'Child', 'Teenager', 'Young Adult', 'Adult', 'Senior']
train = category_age(train, cut_points, label_names)
test = category_age(test, cut_points, label_names)
pivot = train.pivot_table(index='Age_categories', values='Survived')

def create_dummies(df, column_name):
    dummies = pd.get_dummies(df[column_name], prefix=column_name)
    df = pd.concat([df, dummies], axis=1)
    return df

for column in ['Pclass', 'Sex', 'Age_categories']:
    train = create_dummies(train, column)
    test = create_dummies(test, column)
code
2014797/cell_18
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import seaborn as sns

train = pd.read_csv('...train.csv')
test = pd.read_csv('...test.csv')
train = train.drop(train[['Cabin', 'Embarked', 'Name', 'Ticket']], axis=1)
test = test.drop(test[['Cabin', 'Embarked', 'Name', 'Ticket']], axis=1)
train.dropna(axis=0, how='any', inplace=True)
corr = train.corr()
train.drop(train[['Fare', 'SibSp', 'Parch']], axis=1, inplace=True)
test.drop(test[['Fare', 'SibSp', 'Parch']], axis=1, inplace=True)
train.head()
code
2014797/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

train = pd.read_csv('...train.csv')
test = pd.read_csv('...test.csv')
code
2014797/cell_16
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import seaborn as sns

train = pd.read_csv('...train.csv')
test = pd.read_csv('...test.csv')
train = train.drop(train[['Cabin', 'Embarked', 'Name', 'Ticket']], axis=1)
test = test.drop(test[['Cabin', 'Embarked', 'Name', 'Ticket']], axis=1)
train.dropna(axis=0, how='any', inplace=True)
corr = train.corr()
sns.heatmap(corr, xticklabels=corr.columns.values, yticklabels=corr.columns.values)
code
2014797/cell_24
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('...train.csv')
test = pd.read_csv('...test.csv')
train = train.drop(train[['Cabin', 'Embarked', 'Name', 'Ticket']], axis=1)
test = test.drop(test[['Cabin', 'Embarked', 'Name', 'Ticket']], axis=1)
train.dropna(axis=0, how='any', inplace=True)
corr = train.corr()
train.drop(train[['Fare', 'SibSp', 'Parch']], axis=1, inplace=True)
test.drop(test[['Fare', 'SibSp', 'Parch']], axis=1, inplace=True)
sex_variable = train.pivot_table(index='Sex', values='Survived')
survived = train[train['Survived'] == 1]
died = train[train['Survived'] == 0]

def category_age(df, cut_points, label_names):
    df['Age'] = df['Age'].fillna(-0.5)
    df['Age_categories'] = pd.cut(df['Age'], cut_points, labels=label_names)
    return df

cut_points = [0, 5, 12, 18, 35, 60, 100]
label_names = ['Infant', 'Child', 'Teenager', 'Young Adult', 'Adult', 'Senior']
train = category_age(train, cut_points, label_names)
test = category_age(test, cut_points, label_names)
pivot = train.pivot_table(index='Age_categories', values='Survived')
pivot.plot.bar()
plt.show()
code
2014797/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

train = pd.read_csv('...train.csv')
test = pd.read_csv('...test.csv')
train = train.drop(train[['Cabin', 'Embarked', 'Name', 'Ticket']], axis=1)
test = test.drop(test[['Cabin', 'Embarked', 'Name', 'Ticket']], axis=1)
train.dropna(axis=0, how='any', inplace=True)
train.head()
code
2014797/cell_22
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('...train.csv')
test = pd.read_csv('...test.csv')
train = train.drop(train[['Cabin', 'Embarked', 'Name', 'Ticket']], axis=1)
test = test.drop(test[['Cabin', 'Embarked', 'Name', 'Ticket']], axis=1)
train.dropna(axis=0, how='any', inplace=True)
corr = train.corr()
train.drop(train[['Fare', 'SibSp', 'Parch']], axis=1, inplace=True)
test.drop(test[['Fare', 'SibSp', 'Parch']], axis=1, inplace=True)
sex_variable = train.pivot_table(index='Sex', values='Survived')
survived = train[train['Survived'] == 1]
died = train[train['Survived'] == 0]
survived['Age'].plot.hist(alpha=0.5, color='red', bins=50)
died['Age'].plot.hist(alpha=0.5, color='blue', bins=50)
plt.legend(['Survived', 'Died'])
plt.show()
code
2014797/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

train = pd.read_csv('...train.csv')
test = pd.read_csv('...test.csv')
train.info()
code
2014797/cell_27
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

train = pd.read_csv('...train.csv')
test = pd.read_csv('...test.csv')
train = train.drop(train[['Cabin', 'Embarked', 'Name', 'Ticket']], axis=1)
test = test.drop(test[['Cabin', 'Embarked', 'Name', 'Ticket']], axis=1)
train.dropna(axis=0, how='any', inplace=True)
corr = train.corr()
train.drop(train[['Fare', 'SibSp', 'Parch']], axis=1, inplace=True)
test.drop(test[['Fare', 'SibSp', 'Parch']], axis=1, inplace=True)
sex_variable = train.pivot_table(index='Sex', values='Survived')
survived = train[train['Survived'] == 1]
died = train[train['Survived'] == 0]

def category_age(df, cut_points, label_names):
    df['Age'] = df['Age'].fillna(-0.5)
    df['Age_categories'] = pd.cut(df['Age'], cut_points, labels=label_names)
    return df

cut_points = [0, 5, 12, 18, 35, 60, 100]
label_names = ['Infant', 'Child', 'Teenager', 'Young Adult', 'Adult', 'Senior']
train = category_age(train, cut_points, label_names)
test = category_age(test, cut_points, label_names)
pivot = train.pivot_table(index='Age_categories', values='Survived')

def create_dummies(df, column_name):
    dummies = pd.get_dummies(df[column_name], prefix=column_name)
    df = pd.concat([df, dummies], axis=1)
    return df

for column in ['Pclass', 'Sex', 'Age_categories']:
    train = create_dummies(train, column)
    test = create_dummies(test, column)
train = train.drop(train[['Pclass', 'Sex', 'Age', 'Age_categories']], axis=1)
test = test.drop(test[['Pclass', 'Sex', 'Age', 'Age_categories']], axis=1)
code
17131726/cell_9
[ "image_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/matchesheader.csv')
names_col = list(train_df.columns.values)
new_names_col = map(lambda name: name.strip(), names_col)
train_df.columns = new_names_col
list(train_df.columns.values)
train_df.isna().sum()
labelEncoder = LabelEncoder()
train_df.team_1_name = labelEncoder.fit_transform(train_df.team_1_name)
train_df.team_2_name = labelEncoder.fit_transform(train_df.team_2_name)
# note: the result of drop() is not assigned, so train_df keeps these columns
train_df.drop(['id', 'queue_id', 'team_1_win', 'team_2_win', 'winning_team'], axis=1)
data = train_df.game_duration
values, base = np.histogram(data, bins=40)
cumulative = np.cumsum(values)
plt.plot(base[:-1], cumulative, c='blue')
plt.show()
code
17131726/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/matchesheader.csv')
names_col = list(train_df.columns.values)
new_names_col = map(lambda name: name.strip(), names_col)
train_df.columns = new_names_col
list(train_df.columns.values)
code
17131726/cell_6
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/matchesheader.csv')
names_col = list(train_df.columns.values)
new_names_col = map(lambda name: name.strip(), names_col)
train_df.columns = new_names_col
list(train_df.columns.values)
train_df.isna().sum()
labelEncoder = LabelEncoder()
train_df.team_1_name = labelEncoder.fit_transform(train_df.team_1_name)
train_df.team_2_name = labelEncoder.fit_transform(train_df.team_2_name)
train_df.head()
code
17131726/cell_2
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/matchesheader.csv')
train_df.head()
code
17131726/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

print(os.listdir('../input'))
code
17131726/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/matchesheader.csv')
names_col = list(train_df.columns.values)
new_names_col = map(lambda name: name.strip(), names_col)
train_df.columns = new_names_col
list(train_df.columns.values)
train_df.isna().sum()
labelEncoder = LabelEncoder()
train_df.team_1_name = labelEncoder.fit_transform(train_df.team_1_name)
train_df.team_2_name = labelEncoder.fit_transform(train_df.team_2_name)
train_df.drop(['id', 'queue_id', 'team_1_win', 'team_2_win', 'winning_team'], axis=1)
train_df.head()
code
17131726/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('../input/matchesheader.csv')
names_col = list(train_df.columns.values)
new_names_col = map(lambda name: name.strip(), names_col)
train_df.columns = new_names_col
list(train_df.columns.values)
train_df.isna().sum()
code
128022699/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
df = pd.read_csv('/content/drive/MyDrive/PRML LABs/PRML Major Project/audio_dataset.csv')
df
code
128022699/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
import os
import re
import librosa
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score, classification_report, precision_score, recall_score
from scipy.fft import fft
import seaborn as sns
import torch
import torch.nn as nn
import torch.nn.functional as f
import torch.optim as optim
from torch.utils.data import DataLoader, random_split, TensorDataset
code
90151187/cell_13
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import cudf
train = cudf.read_csv('../input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
train['customer_id'] = train['customer_id'].str[-16:].str.hex_to_int().astype('int64')
train['article_id'] = train.article_id.astype('int32')
train.t_dat = cudf.to_datetime(train.t_dat)
train = train[['t_dat', 'customer_id', 'article_id']]
train.to_parquet('train.pqt', index=False)
# keep only each customer's purchases from their final observed week
tmp = train.groupby('customer_id').t_dat.max().reset_index()
tmp.columns = ['customer_id', 'max_dat']
train = train.merge(tmp, on=['customer_id'], how='left')
train['diff_dat'] = (train.max_dat - train.t_dat).dt.days
train = train.loc[train['diff_dat'] <= 6]
# rank each customer's items by purchase count, then recency
tmp = train.groupby(['customer_id', 'article_id'])['t_dat'].agg('count').reset_index()
tmp.columns = ['customer_id', 'article_id', 'ct']
train = train.merge(tmp, on=['customer_id', 'article_id'], how='left')
train = train.sort_values(['ct', 't_dat'], ascending=False)
train = train.drop_duplicates(['customer_id', 'article_id'])
train = train.sort_values(['ct', 't_dat'], ascending=False)
import pandas as pd, numpy as np
train = train.to_pandas()
# extend each customer's items with their precomputed paired item
# (pairs_cudf.npy maps article_id -> partner article_id)
pairs = np.load('../input/hmitempairs/pairs_cudf.npy', allow_pickle=True).item()
train['article_id2'] = train.article_id.map(pairs)
train2 = train[['customer_id', 'article_id2']].copy()
train2 = train2.loc[train2.article_id2.notnull()]
train2 = train2.drop_duplicates(['customer_id', 'article_id2'])
train2 = train2.rename({'article_id2': 'article_id'}, axis=1)
train = train[['customer_id', 'article_id']]
train = pd.concat([train, train2], axis=0, ignore_index=True)
train.article_id = train.article_id.astype('int32')
train = train.drop_duplicates(['customer_id', 'article_id'])
# concatenate the article ids per customer into one space-separated prediction string
train.article_id = ' 0' + train.article_id.astype('str')
preds = cudf.DataFrame(train.groupby('customer_id').article_id.sum().reset_index())
preds.columns = ['customer_id', 'prediction']
preds.head()
code
90151187/cell_4
[ "text_html_output_1.png" ]
import cudf
train = cudf.read_csv('../input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
train['customer_id'] = train['customer_id'].str[-16:].str.hex_to_int().astype('int64')
train['article_id'] = train.article_id.astype('int32')
train.t_dat = cudf.to_datetime(train.t_dat)
train = train[['t_dat', 'customer_id', 'article_id']]
train.to_parquet('train.pqt', index=False)
print(train.shape)
train.head()
code
90151187/cell_6
[ "text_plain_output_1.png" ]
import cudf
train = cudf.read_csv('../input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
train['customer_id'] = train['customer_id'].str[-16:].str.hex_to_int().astype('int64')
train['article_id'] = train.article_id.astype('int32')
train.t_dat = cudf.to_datetime(train.t_dat)
train = train[['t_dat', 'customer_id', 'article_id']]
train.to_parquet('train.pqt', index=False)
tmp = train.groupby('customer_id').t_dat.max().reset_index()
tmp.columns = ['customer_id', 'max_dat']
train = train.merge(tmp, on=['customer_id'], how='left')
train['diff_dat'] = (train.max_dat - train.t_dat).dt.days
train = train.loc[train['diff_dat'] <= 6]
print('Train shape:', train.shape)
code
90151187/cell_8
[ "text_html_output_1.png" ]
import cudf
train = cudf.read_csv('../input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
train['customer_id'] = train['customer_id'].str[-16:].str.hex_to_int().astype('int64')
train['article_id'] = train.article_id.astype('int32')
train.t_dat = cudf.to_datetime(train.t_dat)
train = train[['t_dat', 'customer_id', 'article_id']]
train.to_parquet('train.pqt', index=False)
tmp = train.groupby('customer_id').t_dat.max().reset_index()
tmp.columns = ['customer_id', 'max_dat']
train = train.merge(tmp, on=['customer_id'], how='left')
train['diff_dat'] = (train.max_dat - train.t_dat).dt.days
train = train.loc[train['diff_dat'] <= 6]
tmp = train.groupby(['customer_id', 'article_id'])['t_dat'].agg('count').reset_index()
tmp.columns = ['customer_id', 'article_id', 'ct']
train = train.merge(tmp, on=['customer_id', 'article_id'], how='left')
train = train.sort_values(['ct', 't_dat'], ascending=False)
train = train.drop_duplicates(['customer_id', 'article_id'])
train = train.sort_values(['ct', 't_dat'], ascending=False)
train.head()
code
90151187/cell_15
[ "text_plain_output_1.png" ]
import cudf
train = cudf.read_csv('../input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
train['customer_id'] = train['customer_id'].str[-16:].str.hex_to_int().astype('int64')
train['article_id'] = train.article_id.astype('int32')
train.t_dat = cudf.to_datetime(train.t_dat)
train = train[['t_dat', 'customer_id', 'article_id']]
train.to_parquet('train.pqt', index=False)
tmp = train.groupby('customer_id').t_dat.max().reset_index()
tmp.columns = ['customer_id', 'max_dat']
train = train.merge(tmp, on=['customer_id'], how='left')
train['diff_dat'] = (train.max_dat - train.t_dat).dt.days
train = train.loc[train['diff_dat'] <= 6]
tmp = train.groupby(['customer_id', 'article_id'])['t_dat'].agg('count').reset_index()
tmp.columns = ['customer_id', 'article_id', 'ct']
train = train.merge(tmp, on=['customer_id', 'article_id'], how='left')
train = train.sort_values(['ct', 't_dat'], ascending=False)
train = train.drop_duplicates(['customer_id', 'article_id'])
train = train.sort_values(['ct', 't_dat'], ascending=False)
import pandas as pd, numpy as np
train = train.to_pandas()
pairs = np.load('../input/hmitempairs/pairs_cudf.npy', allow_pickle=True).item()
train['article_id2'] = train.article_id.map(pairs)
train2 = train[['customer_id', 'article_id2']].copy()
train2 = train2.loc[train2.article_id2.notnull()]
train2 = train2.drop_duplicates(['customer_id', 'article_id2'])
train2 = train2.rename({'article_id2': 'article_id'}, axis=1)
train = train[['customer_id', 'article_id']]
train = pd.concat([train, train2], axis=0, ignore_index=True)
train.article_id = train.article_id.astype('int32')
train = train.drop_duplicates(['customer_id', 'article_id'])
train.article_id = ' 0' + train.article_id.astype('str')
preds = cudf.DataFrame(train.groupby('customer_id').article_id.sum().reset_index())
preds.columns = ['customer_id', 'prediction']
train = cudf.read_parquet('train.pqt')
train.t_dat = cudf.to_datetime(train.t_dat)
train = train.loc[train.t_dat >= cudf.to_datetime('2020-09-16')]
top12 = ' 0' + ' 0'.join(train.article_id.value_counts().to_pandas().index.astype('str')[:12])
print("Last week's top 12 popular items:")
print(top12)
code
90151187/cell_17
[ "text_html_output_1.png" ]
import cudf
train = cudf.read_csv('../input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
train['customer_id'] = train['customer_id'].str[-16:].str.hex_to_int().astype('int64')
train['article_id'] = train.article_id.astype('int32')
train.t_dat = cudf.to_datetime(train.t_dat)
train = train[['t_dat', 'customer_id', 'article_id']]
train.to_parquet('train.pqt', index=False)
tmp = train.groupby('customer_id').t_dat.max().reset_index()
tmp.columns = ['customer_id', 'max_dat']
train = train.merge(tmp, on=['customer_id'], how='left')
train['diff_dat'] = (train.max_dat - train.t_dat).dt.days
train = train.loc[train['diff_dat'] <= 6]
tmp = train.groupby(['customer_id', 'article_id'])['t_dat'].agg('count').reset_index()
tmp.columns = ['customer_id', 'article_id', 'ct']
train = train.merge(tmp, on=['customer_id', 'article_id'], how='left')
train = train.sort_values(['ct', 't_dat'], ascending=False)
train = train.drop_duplicates(['customer_id', 'article_id'])
train = train.sort_values(['ct', 't_dat'], ascending=False)
import pandas as pd, numpy as np
train = train.to_pandas()
pairs = np.load('../input/hmitempairs/pairs_cudf.npy', allow_pickle=True).item()
train['article_id2'] = train.article_id.map(pairs)
train2 = train[['customer_id', 'article_id2']].copy()
train2 = train2.loc[train2.article_id2.notnull()]
train2 = train2.drop_duplicates(['customer_id', 'article_id2'])
train2 = train2.rename({'article_id2': 'article_id'}, axis=1)
train = train[['customer_id', 'article_id']]
train = pd.concat([train, train2], axis=0, ignore_index=True)
train.article_id = train.article_id.astype('int32')
train = train.drop_duplicates(['customer_id', 'article_id'])
train.article_id = ' 0' + train.article_id.astype('str')
preds = cudf.DataFrame(train.groupby('customer_id').article_id.sum().reset_index())
preds.columns = ['customer_id', 'prediction']
train = cudf.read_parquet('train.pqt')
train.t_dat = cudf.to_datetime(train.t_dat)
train = train.loc[train.t_dat >= cudf.to_datetime('2020-09-16')]
top12 = ' 0' + ' 0'.join(train.article_id.value_counts().to_pandas().index.astype('str')[:12])
sub = cudf.read_csv('../input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
sub = sub[['customer_id']]
sub['customer_id_2'] = sub['customer_id'].str[-16:].str.hex_to_int().astype('int64')
sub = sub.merge(preds.rename({'customer_id': 'customer_id_2'}, axis=1), on='customer_id_2', how='left').fillna('')
del sub['customer_id_2']
sub.prediction = sub.prediction + top12
sub.prediction = sub.prediction.str.strip()
sub.prediction = sub.prediction.str[:131]
sub.to_csv('submission.csv', index=False)
sub.head()
code
72120060/cell_40
[ "text_plain_output_1.png" ]
from catboost import CatBoostRegressor
from lightgbm import LGBMRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from xgboost import XGBRegressor
import skopt
knn_reg = KNeighborsRegressor(n_jobs=-1)
mlp_reg = MLPRegressor()
rf_reg = RandomForestRegressor(n_jobs=-1)
gb_reg = GradientBoostingRegressor()
xgb_reg = XGBRegressor(n_jobs=-1)
lgbm_reg = LGBMRegressor(n_jobs=-1)
cat_reg = CatBoostRegressor()
search_space = [skopt.space.Integer(4, 12, name='max_depth'), skopt.space.Integer(50, 200, name='n_estimators'), skopt.space.Integer(17, 24, name='max_features'), skopt.space.Real(0.0, 1.0, name='min_impurity_decrease'), skopt.space.Categorical(categories=[True, False], name='bootstrap')]
evaluator = Params_Evaluate(X_train, X_val, Y_train, Y_val)
evaluator.select_model(rf_reg)

@skopt.utils.use_named_args(search_space)
def objective(**params):
    return evaluator.evaluate_params(params)

def to_named_params(results, search_space):
    params = results.x
    param_dict = {}
    params_list = [(dimension.name, param) for dimension, param in zip(search_space, params)]
    for item in params_list:
        param_dict[item[0]] = item[1]
    return param_dict

search_space_xgb = [skopt.space.Integer(4, 12, name='max_depth'), skopt.space.Real(0.0, 1.0, name='eta'), skopt.space.Real(0.0, 1.0, name='subsample'), skopt.space.Categorical(categories=['gbtree', 'dart'], name='booster')]
best_params_gxb = to_named_params(results_xgb, search_space_xgb)
best_xgb_reg = xgb_reg.set_params(**best_params_gxb)
best_xgb_reg
code
72120060/cell_39
[ "text_plain_output_1.png" ]
results_xgb = skopt.forest_minimize(objective, search_space_xgb, **HPO_params)
code
72120060/cell_41
[ "text_plain_output_1.png" ]
import skopt
search_space = [skopt.space.Integer(4, 12, name='max_depth'), skopt.space.Integer(50, 200, name='n_estimators'), skopt.space.Integer(17, 24, name='max_features'), skopt.space.Real(0.0, 1.0, name='min_impurity_decrease'), skopt.space.Categorical(categories=[True, False], name='bootstrap')]

def to_named_params(results, search_space):
    params = results.x
    param_dict = {}
    params_list = [(dimension.name, param) for dimension, param in zip(search_space, params)]
    for item in params_list:
        param_dict[item[0]] = item[1]
    return param_dict

best_params = to_named_params(results, search_space)
best_params
code
72120060/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72120060/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
from warnings import filterwarnings
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
filterwarnings('ignore', category=DeprecationWarning)
filterwarnings('ignore', category=FutureWarning)
filterwarnings('ignore', category=UserWarning)
code
72120060/cell_16
[ "text_plain_output_1.png" ]
from catboost import CatBoostRegressor
from lightgbm import LGBMRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from xgboost import XGBRegressor
import matplotlib.pyplot as plt
import numpy as np # linear algebra

def check_rmse(model, x_val, y_val):
    pred = model.predict(x_val)
    return np.sqrt(mean_squared_error(y_val, pred))

knn_reg = KNeighborsRegressor(n_jobs=-1)
mlp_reg = MLPRegressor()
rf_reg = RandomForestRegressor(n_jobs=-1)
gb_reg = GradientBoostingRegressor()
xgb_reg = XGBRegressor(n_jobs=-1)
lgbm_reg = LGBMRegressor(n_jobs=-1)
cat_reg = CatBoostRegressor()
models = [knn_reg, mlp_reg, rf_reg, gb_reg, xgb_reg, lgbm_reg, cat_reg]
models_name = ['knn_reg', 'mlp_reg', 'rf_reg', 'gb_reg', 'xgb_reg', 'lgbm_reg', 'cat_reg']
rmse_error = []
for i, model in enumerate(models):
    model.fit(X_train, Y_train)
    rmse = check_rmse(model, X_val, Y_val)
    rmse_error.append(rmse)
plt.barh(models_name, rmse_error)
plt.ylabel('Models')
plt.xlabel('RMSE')
plt.show()
code
72120060/cell_27
[ "image_output_1.png" ]
code
2007618/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt def priceOverTime(data, label): """Plot price over time""" priceOverTime(newdf3, 'California') priceOverTime(newdf4, 'Colorado') priceOverTime(newdf5, 'Michigan') def priceOverTime2(data, label): data.groupby(data.Date.dt.year)['MedianSoldPrice_AllHomes'].mean().plot(kind='bar', figsize=(10, 6), color='grey', edgecolor='black', linewidth=2) plt.suptitle(label, fontsize=12) plt.ylabel('MedianSoldPrice_AllHomes') plt.xlabel('Year') plt.show() priceOverTime2(newdf6, 'San Francisco') priceOverTime2(newdf7, 'Denver') priceOverTime2(newdf8, 'Detroit')
code
2007618/cell_8
[ "image_output_1.png" ]
import matplotlib.pyplot as plt

def priceOverTime(data, label):
    """Plot price over time"""
    # group by sale year; the year labels come from the global newdf and
    # align to data's rows by index, since newdf3/4/5 are slices of the same frame
    data.groupby(newdf.Date.dt.year)['MedianSoldPrice_AllHomes'].mean().plot(kind='bar', figsize=(10, 6), color='grey', edgecolor='black', linewidth=2)
    plt.suptitle(label, fontsize=12)
    plt.ylabel('MedianSoldPrice_AllHomes')
    plt.xlabel('Year')
    plt.show()

priceOverTime(newdf3, 'California')
priceOverTime(newdf4, 'Colorado')
priceOverTime(newdf5, 'Michigan')
code
2007618/cell_3
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import seaborn as sns
state_data = '../input/State_time_series.csv'
df = pd.read_csv(state_data)
city_data = '../input/City_time_series.csv'
dfCity = pd.read_csv(city_data)
State_house = pd.read_csv('../input/State_time_series.csv', parse_dates=['Date'])
States = ['California', 'Colorado', 'Michigan']
newdf = df.loc[df['RegionName'].isin(States)]
newdf.Date = pd.to_datetime(newdf.Date)
newdf2 = newdf.loc[newdf['Date'].dt.year == 2016]
newdf3 = df.loc[df['RegionName'] == 'California']
newdf4 = df.loc[df['RegionName'] == 'Colorado']
newdf5 = df.loc[df['RegionName'] == 'Michigan']
newdf6 = dfCity.loc[dfCity['RegionName'] == 'san_franciscosan_franciscoca']
newdf6.Date = pd.to_datetime(newdf6.Date)
newdf7 = dfCity.loc[dfCity['RegionName'] == 'denverdenverco']
newdf7.Date = pd.to_datetime(newdf7.Date)
newdf8 = dfCity.loc[dfCity['RegionName'] == 'detroitwaynemi']
newdf8.Date = pd.to_datetime(newdf8.Date)
code