Dataset schema (one record per notebook cell; fields repeat in this order below):
  path              string, 13-17 chars (notebook id / cell id, e.g. 73067082/cell_7)
  screenshot_names  sequence of 1-873 output-image names
  code              string, 0-40.4k chars (the cell's source)
  cell_type         string, 1 class ("code")
73067082/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

X_full = pd.read_csv('../input/30-days-of-ml/train.csv')
X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv')

""" Checking for missing data """
missing_values_count = X_full.isnull().sum()
total_cells = np.prod(X_full.shape)  # np.product is a deprecated alias of np.prod
total_missing = missing_values_count.sum()
percent_missing = total_missing / total_cells * 100

X_full.dropna(axis=0, subset=['target'], inplace=True)
y = X_full.target
X_full.drop(['target'], axis=1, inplace=True)

X_train_full, X_valid_full, y_train, y_valid = train_test_split(X_full, y, train_size=0.8, test_size=0.2, random_state=0)

object_cols = [col for col in X_full.columns if X_full[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(X_valid_full[col]).issubset(set(X_train_full[col]))]
bad_label_cols = list(set(object_cols) - set(good_label_cols))

object_nunique = list(map(lambda col: X_full[col].nunique(), object_cols))
d = dict(zip(object_cols, object_nunique))
sorted(d.items(), key=lambda x: x[1])
code
73067082/cell_18
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error  # used below but missing from the original cell
from xgboost import XGBRegressor
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

X_full = pd.read_csv('../input/30-days-of-ml/train.csv')
X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv')

""" Checking for missing data """
missing_values_count = X_full.isnull().sum()
total_cells = np.prod(X_full.shape)  # np.product is a deprecated alias of np.prod
total_missing = missing_values_count.sum()
percent_missing = total_missing / total_cells * 100

X_full.dropna(axis=0, subset=['target'], inplace=True)
y = X_full.target
X_full.drop(['target'], axis=1, inplace=True)

X_train_full, X_valid_full, y_train, y_valid = train_test_split(X_full, y, train_size=0.8, test_size=0.2, random_state=0)

object_cols = [col for col in X_full.columns if X_full[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(X_valid_full[col]).issubset(set(X_train_full[col]))]
bad_label_cols = list(set(object_cols) - set(good_label_cols))

object_nunique = list(map(lambda col: X_full[col].nunique(), object_cols))
d = dict(zip(object_cols, object_nunique))
sorted(d.items(), key=lambda x: x[1])

label_encoder_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
one_hotting_cols = list(set(object_cols) - set(label_encoder_cols))
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]

my_cols = label_encoder_cols + one_hotting_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_test = X_test_full[my_cols].copy()

X_train_prepared = X_train_full[numerical_cols].copy()
X_valid_prepared = X_valid_full[numerical_cols].copy()
X_test_prepared = X_test_full[numerical_cols].copy()

# NOTE: X_train_new_columns / X_valid_new_columns / X_test_new_columns are never
# defined in the cells captured here; the encoding cell that builds them was
# presumably dropped from this dump (see the sketch after these records).
X_train_prepared = pd.concat([X_train_new_columns, X_train_prepared], axis=1)
X_valid_prepared = pd.concat([X_valid_new_columns, X_valid_prepared], axis=1)
X_test_prepared = pd.concat([X_test_new_columns, X_test_prepared], axis=1)

X_train_prepared = X_train_prepared.drop(['id'], axis=1)
X_valid_prepared = X_valid_prepared.drop(['id'], axis=1)
X_test_prepared = X_test_prepared.drop(['id'], axis=1)

xgb_gpu_model = XGBRegressor(random_state=1, n_jobs=4, n_estimators=5000, tree_method='gpu_hist', learning_rate=0.01, subsample=0.9, max_depth=5, colsample_bytree=0.5, reg_alpha=30, eval_metric='rmse')
xgb_gpu_model.fit(X_train_prepared, y_train)
predict_y_xgb = xgb_gpu_model.predict(X_valid_prepared)
rmse = mean_squared_error(y_valid, predict_y_xgb, squared=False)  # squared=False returns the *root* MSE
print('Root Mean Squared Error:', rmse)
code
73067082/cell_17
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

X_full = pd.read_csv('../input/30-days-of-ml/train.csv')
X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv')

""" Checking for missing data """
missing_values_count = X_full.isnull().sum()
total_cells = np.prod(X_full.shape)  # np.product is a deprecated alias of np.prod
total_missing = missing_values_count.sum()
percent_missing = total_missing / total_cells * 100

X_full.dropna(axis=0, subset=['target'], inplace=True)
y = X_full.target
X_full.drop(['target'], axis=1, inplace=True)

X_train_full, X_valid_full, y_train, y_valid = train_test_split(X_full, y, train_size=0.8, test_size=0.2, random_state=0)

object_cols = [col for col in X_full.columns if X_full[col].dtype == 'object']
good_label_cols = [col for col in object_cols if set(X_valid_full[col]).issubset(set(X_train_full[col]))]
bad_label_cols = list(set(object_cols) - set(good_label_cols))

object_nunique = list(map(lambda col: X_full[col].nunique(), object_cols))
d = dict(zip(object_cols, object_nunique))
sorted(d.items(), key=lambda x: x[1])

label_encoder_cols = [cname for cname in X_train_full.columns if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == 'object']
one_hotting_cols = list(set(object_cols) - set(label_encoder_cols))
numerical_cols = [cname for cname in X_train_full.columns if X_train_full[cname].dtype in ['int64', 'float64']]

my_cols = label_encoder_cols + one_hotting_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
X_test = X_test_full[my_cols].copy()

X_train_prepared = X_train_full[numerical_cols].copy()
X_valid_prepared = X_valid_full[numerical_cols].copy()
X_test_prepared = X_test_full[numerical_cols].copy()

# NOTE: X_train_new_columns / X_valid_new_columns / X_test_new_columns are never
# defined in the cells captured here; the encoding cell that builds them was
# presumably dropped from this dump (see the sketch after these records).
X_train_prepared = pd.concat([X_train_new_columns, X_train_prepared], axis=1)
X_valid_prepared = pd.concat([X_valid_new_columns, X_valid_prepared], axis=1)
X_test_prepared = pd.concat([X_test_new_columns, X_test_prepared], axis=1)

X_train_prepared = X_train_prepared.drop(['id'], axis=1)
X_valid_prepared = X_valid_prepared.drop(['id'], axis=1)
X_test_prepared = X_test_prepared.drop(['id'], axis=1)

X_train_prepared.head()
code
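The two cells above (73067082/cell_18 and cell_17) reference X_train_new_columns, X_valid_new_columns and X_test_new_columns without ever defining them; the defining cell is not part of this dump. A minimal sketch of how those frames were presumably produced, assuming the intent was to one-hot encode one_hotting_cols with scikit-learn's OneHotEncoder (the X_*_new_columns names and handle_unknown='ignore' are assumptions, not recovered code):

from sklearn.preprocessing import OneHotEncoder

# Hypothetical reconstruction of the omitted encoding cell.
# sparse=False is the pre-1.2 scikit-learn spelling (renamed sparse_output later).
oh_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
X_train_new_columns = pd.DataFrame(oh_encoder.fit_transform(X_train[one_hotting_cols]))
X_valid_new_columns = pd.DataFrame(oh_encoder.transform(X_valid[one_hotting_cols]))
X_test_new_columns = pd.DataFrame(oh_encoder.transform(X_test[one_hotting_cols]))
# One-hot encoding drops the index; restore it so pd.concat aligns rows.
X_train_new_columns.index = X_train.index
X_valid_new_columns.index = X_valid.index
X_test_new_columns.index = X_test.index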
73067082/cell_5
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

X_full = pd.read_csv('../input/30-days-of-ml/train.csv')
X_test_full = pd.read_csv('../input/30-days-of-ml/test.csv')

""" Checking for missing data """
missing_values_count = X_full.isnull().sum()
print('Total missing values %d' % missing_values_count.sum())
total_cells = np.prod(X_full.shape)  # np.product is a deprecated alias of np.prod
total_missing = missing_values_count.sum()
percent_missing = total_missing / total_cells * 100
print('Percent missing values %f' % percent_missing)
code
17120078/cell_13
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import log_loss
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import time
import warnings

data = pd.read_pickle('../input/515k-reviews-after-preprocessing/After_filling_Nans')
df = pd.read_pickle('../input/515k-reviews-after-preprocessing/After preprocessing')
summary = np.array(df.Summary)
score = df['score'].values

start_time = time.time()
best_params = []
parameters = {'alpha': [i for i in range(1, 100, 10)]}
acc = []
score = list(score)
for i in range(2000, 14000, 1000):
    vec = CountVectorizer(max_features=i)
    data = vec.fit_transform(summary)
    nb = MultinomialNB()
    clf = GridSearchCV(nb, parameters, cv=5)
    x_train, x_test, y_train, y_test = train_test_split(data, score, test_size=0.3, random_state=42)
    clf.fit(x_train, y_train)
    # NOTE: with 0/1 labels this records the share of predicted positives, not accuracy
    acc.append(100.0 * sum(clf.predict(x_test)) / len(clf.predict(x_test)))
    best_params.append(clf.best_params_)
    vec = 0
    data = 0

## Confusion matrix
def show_confusion_matrix(C, class_labels=['0', '1']):
    """
    C: ndarray, shape (2,2) as given by scikit-learn confusion_matrix function
    class_labels: list of strings, default simply labels 0 and 1.
    Draws confusion matrix with associated metrics.
    """
    assert C.shape == (2, 2), "Confusion matrix should be from binary classification only."
    # true negative, false positive, false negative, true positive
    tn = C[0, 0]
    fp = C[0, 1]
    fn = C[1, 0]
    tp = C[1, 1]
    NP = fn + tp  # Num positive examples
    NN = tn + fp  # Num negative examples
    N = NP + NN   # Total num of examples

    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111)
    ax.imshow(C, interpolation='nearest', cmap=plt.cm.gray)
    # Draw the grid boxes
    ax.set_xlim(-0.5, 2.5)
    ax.set_ylim(2.5, -0.5)
    ax.plot([-0.5, 2.5], [0.5, 0.5], '-k', lw=2)
    ax.plot([-0.5, 2.5], [1.5, 1.5], '-k', lw=2)
    ax.plot([0.5, 0.5], [-0.5, 2.5], '-k', lw=2)
    ax.plot([1.5, 1.5], [-0.5, 2.5], '-k', lw=2)
    # Set xlabels
    ax.set_xlabel('Predicted Label', fontsize=16)
    ax.set_xticks([0, 1, 2])
    ax.set_xticklabels(class_labels + [''])
    ax.xaxis.set_label_position('top')
    ax.xaxis.tick_top()
    # These coordinates might require some tinkering. Ditto for y, below.
    ax.xaxis.set_label_coords(0.34, 1.06)
    # Set ylabels
    ax.set_ylabel('True Label', fontsize=16, rotation=90)
    ax.set_yticklabels(class_labels + [''], rotation=90)
    ax.set_yticks([0, 1, 2])
    ax.yaxis.set_label_coords(-0.09, 0.65)
    # Fill in initial metrics: tp, tn, etc...
    ax.text(0, 0, 'True Neg: %d\n(Num Neg: %d)' % (tn, NN), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(0, 1, 'False Neg: %d' % fn, va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(1, 0, 'False Pos: %d' % fp, va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(1, 1, 'True Pos: %d\n(Num Pos: %d)' % (tp, NP), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    # Fill in secondary metrics: accuracy, true pos rate, etc...
    ax.text(2, 0, 'False Pos Rate: %.2f' % (fp / (fp + tn + 0.)), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(2, 1, 'True Pos Rate: %.2f' % (tp / (tp + fn + 0.)), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(2, 2, 'Accuracy: %.2f' % ((tp + tn + 0.) / N), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(0, 2, 'Neg Pre Val: %.2f' % (1 - fn / (fn + tn + 0.)), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(1, 2, 'Pos Pred Val: %.2f' % (tp / (tp + fp + 0.)), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    plt.tight_layout()
    plt.show()

start_time = time.time()
score_Log_reg = []
y_pred = clf.predict(x_test)
conf_NB = confusion_matrix(y_test, y_pred)

probs = clf.predict_proba(x_test)
preds = probs[:, 1]
fpr, tpr, threshold = roc_curve(y_test, preds)
roc_auc = auc(fpr, tpr)

plt.xlim([0, 1])
plt.ylim([0, 1])

a = log_loss(y_test, probs)
tn = conf_NB[0, 0]
fp = conf_NB[0, 1]
fn = conf_NB[1, 0]
tp = conf_NB[1, 1]
precision = 100 * float(tp) / (tp + fp)
recall = 100 * float(tp) / (tp + fn)
tp = conf_NB[0][0]
tn = conf_NB[1][1]

warnings.filterwarnings('ignore')
start_time = time.time()
best_params_logreg = []
parameters = {'loss': ['log'], 'penalty': ['l1', 'l2', 'elasticnet'], 'alpha': [float(i) / 10 for i in range(1, 10, 1)], 'n_jobs': [-1]}
clf = SGDClassifier()
clf = GridSearchCV(clf, parameters, cv=5)
clf.fit(x_train, y_train)
best_params_logreg.append(clf.best_params_)
print('Best parameters for Logistic Regression are:', best_params_logreg)
print('--- %s seconds ---' % (time.time() - start_time))
code
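As the comment in the grid-search loop above notes, the quantity stored in acc is the share of predicted positives rather than classification accuracy. A minimal corrected sketch under the same variable names (assuming the labels in y_test are the ground truth for x_test):

from sklearn.metrics import accuracy_score

# Accuracy of the fitted grid-search model on the held-out split
y_pred = clf.predict(x_test)
acc.append(100.0 * accuracy_score(y_test, y_pred))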
17120078/cell_9
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import time

data = pd.read_pickle('../input/515k-reviews-after-preprocessing/After_filling_Nans')
df = pd.read_pickle('../input/515k-reviews-after-preprocessing/After preprocessing')
summary = np.array(df.Summary)
score = df['score'].values

start_time = time.time()
best_params = []
parameters = {'alpha': [i for i in range(1, 100, 10)]}
acc = []
score = list(score)
for i in range(2000, 14000, 1000):
    vec = CountVectorizer(max_features=i)
    data = vec.fit_transform(summary)
    nb = MultinomialNB()
    clf = GridSearchCV(nb, parameters, cv=5)
    x_train, x_test, y_train, y_test = train_test_split(data, score, test_size=0.3, random_state=42)
    clf.fit(x_train, y_train)
    # NOTE: with 0/1 labels this records the share of predicted positives, not accuracy
    acc.append(100.0 * sum(clf.predict(x_test)) / len(clf.predict(x_test)))
    best_params.append(clf.best_params_)
    vec = 0
    data = 0
print('--- %s seconds ---' % (time.time() - start_time))
code
17120078/cell_2
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
import time
import re, nltk
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
from collections import Counter

print(check_output(['ls', '../input']).decode('utf8'))
code
17120078/cell_11
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix
from sklearn.metrics import log_loss
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import time

data = pd.read_pickle('../input/515k-reviews-after-preprocessing/After_filling_Nans')
df = pd.read_pickle('../input/515k-reviews-after-preprocessing/After preprocessing')
summary = np.array(df.Summary)
score = df['score'].values

start_time = time.time()
best_params = []
parameters = {'alpha': [i for i in range(1, 100, 10)]}
acc = []
score = list(score)
for i in range(2000, 14000, 1000):
    vec = CountVectorizer(max_features=i)
    data = vec.fit_transform(summary)
    nb = MultinomialNB()
    clf = GridSearchCV(nb, parameters, cv=5)
    x_train, x_test, y_train, y_test = train_test_split(data, score, test_size=0.3, random_state=42)
    clf.fit(x_train, y_train)
    # NOTE: with 0/1 labels this records the share of predicted positives, not accuracy
    acc.append(100.0 * sum(clf.predict(x_test)) / len(clf.predict(x_test)))
    best_params.append(clf.best_params_)
    vec = 0
    data = 0

## Confusion matrix
def show_confusion_matrix(C, class_labels=['0', '1']):
    """
    C: ndarray, shape (2,2) as given by scikit-learn confusion_matrix function
    class_labels: list of strings, default simply labels 0 and 1.
    Draws confusion matrix with associated metrics.
    """
    assert C.shape == (2, 2), "Confusion matrix should be from binary classification only."
    # true negative, false positive, false negative, true positive
    tn = C[0, 0]
    fp = C[0, 1]
    fn = C[1, 0]
    tp = C[1, 1]
    NP = fn + tp  # Num positive examples
    NN = tn + fp  # Num negative examples
    N = NP + NN   # Total num of examples

    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111)
    ax.imshow(C, interpolation='nearest', cmap=plt.cm.gray)
    # Draw the grid boxes
    ax.set_xlim(-0.5, 2.5)
    ax.set_ylim(2.5, -0.5)
    ax.plot([-0.5, 2.5], [0.5, 0.5], '-k', lw=2)
    ax.plot([-0.5, 2.5], [1.5, 1.5], '-k', lw=2)
    ax.plot([0.5, 0.5], [-0.5, 2.5], '-k', lw=2)
    ax.plot([1.5, 1.5], [-0.5, 2.5], '-k', lw=2)
    # Set xlabels
    ax.set_xlabel('Predicted Label', fontsize=16)
    ax.set_xticks([0, 1, 2])
    ax.set_xticklabels(class_labels + [''])
    ax.xaxis.set_label_position('top')
    ax.xaxis.tick_top()
    # These coordinates might require some tinkering. Ditto for y, below.
    ax.xaxis.set_label_coords(0.34, 1.06)
    # Set ylabels
    ax.set_ylabel('True Label', fontsize=16, rotation=90)
    ax.set_yticklabels(class_labels + [''], rotation=90)
    ax.set_yticks([0, 1, 2])
    ax.yaxis.set_label_coords(-0.09, 0.65)
    # Fill in initial metrics: tp, tn, etc...
    ax.text(0, 0, 'True Neg: %d\n(Num Neg: %d)' % (tn, NN), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(0, 1, 'False Neg: %d' % fn, va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(1, 0, 'False Pos: %d' % fp, va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(1, 1, 'True Pos: %d\n(Num Pos: %d)' % (tp, NP), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    # Fill in secondary metrics: accuracy, true pos rate, etc...
    ax.text(2, 0, 'False Pos Rate: %.2f' % (fp / (fp + tn + 0.)), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(2, 1, 'True Pos Rate: %.2f' % (tp / (tp + fn + 0.)), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(2, 2, 'Accuracy: %.2f' % ((tp + tn + 0.) / N), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(0, 2, 'Neg Pre Val: %.2f' % (1 - fn / (fn + tn + 0.)), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(1, 2, 'Pos Pred Val: %.2f' % (tp / (tp + fp + 0.)), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    plt.tight_layout()
    plt.show()

start_time = time.time()
score_Log_reg = []
y_pred = clf.predict(x_test)
conf_NB = confusion_matrix(y_test, y_pred)
print('Confusion matrix:\n', conf_NB)

probs = clf.predict_proba(x_test)
preds = probs[:, 1]
fpr, tpr, threshold = roc_curve(y_test, preds)
roc_auc = auc(fpr, tpr)

plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()

a = log_loss(y_test, probs)
print('The log loss for the Naive bayes is:', a)
show_confusion_matrix(conf_NB, ['Negative', 'Positive'])

tn = conf_NB[0, 0]
fp = conf_NB[0, 1]
fn = conf_NB[1, 0]
tp = conf_NB[1, 1]
precision = 100 * float(tp) / (tp + fp)
recall = 100 * float(tp) / (tp + fn)
print('Precision :', precision)
print('Recall :', recall)
# NOTE: the original reuses tp/tn here for the two diagonal cells of the matrix
tp = conf_NB[0][0]
tn = conf_NB[1][1]
print('The accuracy is {} %'.format(round(100.0 * (tp + tn) / len(y_test), 2)))
print('------------ %s seconds ------------' % (time.time() - start_time))
code
17120078/cell_16
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import log_loss
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import time
import warnings

data = pd.read_pickle('../input/515k-reviews-after-preprocessing/After_filling_Nans')
df = pd.read_pickle('../input/515k-reviews-after-preprocessing/After preprocessing')
summary = np.array(df.Summary)
score = df['score'].values

start_time = time.time()
best_params = []
parameters = {'alpha': [i for i in range(1, 100, 10)]}
acc = []
score = list(score)
for i in range(2000, 14000, 1000):
    vec = CountVectorizer(max_features=i)
    data = vec.fit_transform(summary)
    nb = MultinomialNB()
    clf = GridSearchCV(nb, parameters, cv=5)
    x_train, x_test, y_train, y_test = train_test_split(data, score, test_size=0.3, random_state=42)
    clf.fit(x_train, y_train)
    # NOTE: with 0/1 labels this records the share of predicted positives, not accuracy
    acc.append(100.0 * sum(clf.predict(x_test)) / len(clf.predict(x_test)))
    best_params.append(clf.best_params_)
    vec = 0
    data = 0

## Confusion matrix
def show_confusion_matrix(C, class_labels=['0', '1']):
    """
    C: ndarray, shape (2,2) as given by scikit-learn confusion_matrix function
    class_labels: list of strings, default simply labels 0 and 1.
    Draws confusion matrix with associated metrics.
    """
    assert C.shape == (2, 2), "Confusion matrix should be from binary classification only."
    # true negative, false positive, false negative, true positive
    tn = C[0, 0]
    fp = C[0, 1]
    fn = C[1, 0]
    tp = C[1, 1]
    NP = fn + tp  # Num positive examples
    NN = tn + fp  # Num negative examples
    N = NP + NN   # Total num of examples

    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111)
    ax.imshow(C, interpolation='nearest', cmap=plt.cm.gray)
    # Draw the grid boxes
    ax.set_xlim(-0.5, 2.5)
    ax.set_ylim(2.5, -0.5)
    ax.plot([-0.5, 2.5], [0.5, 0.5], '-k', lw=2)
    ax.plot([-0.5, 2.5], [1.5, 1.5], '-k', lw=2)
    ax.plot([0.5, 0.5], [-0.5, 2.5], '-k', lw=2)
    ax.plot([1.5, 1.5], [-0.5, 2.5], '-k', lw=2)
    # Set xlabels
    ax.set_xlabel('Predicted Label', fontsize=16)
    ax.set_xticks([0, 1, 2])
    ax.set_xticklabels(class_labels + [''])
    ax.xaxis.set_label_position('top')
    ax.xaxis.tick_top()
    # These coordinates might require some tinkering. Ditto for y, below.
    ax.xaxis.set_label_coords(0.34, 1.06)
    # Set ylabels
    ax.set_ylabel('True Label', fontsize=16, rotation=90)
    ax.set_yticklabels(class_labels + [''], rotation=90)
    ax.set_yticks([0, 1, 2])
    ax.yaxis.set_label_coords(-0.09, 0.65)
    # Fill in initial metrics: tp, tn, etc...
    ax.text(0, 0, 'True Neg: %d\n(Num Neg: %d)' % (tn, NN), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(0, 1, 'False Neg: %d' % fn, va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(1, 0, 'False Pos: %d' % fp, va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(1, 1, 'True Pos: %d\n(Num Pos: %d)' % (tp, NP), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    # Fill in secondary metrics: accuracy, true pos rate, etc...
    ax.text(2, 0, 'False Pos Rate: %.2f' % (fp / (fp + tn + 0.)), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(2, 1, 'True Pos Rate: %.2f' % (tp / (tp + fn + 0.)), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(2, 2, 'Accuracy: %.2f' % ((tp + tn + 0.) / N), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(0, 2, 'Neg Pre Val: %.2f' % (1 - fn / (fn + tn + 0.)), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(1, 2, 'Pos Pred Val: %.2f' % (tp / (tp + fp + 0.)), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    plt.tight_layout()
    plt.show()

start_time = time.time()
score_Log_reg = []
y_pred = clf.predict(x_test)
conf_NB = confusion_matrix(y_test, y_pred)

probs = clf.predict_proba(x_test)
preds = probs[:, 1]
fpr, tpr, threshold = roc_curve(y_test, preds)
roc_auc = auc(fpr, tpr)

plt.xlim([0, 1])
plt.ylim([0, 1])

a = log_loss(y_test, probs)
tn = conf_NB[0, 0]
fp = conf_NB[0, 1]
fn = conf_NB[1, 0]
tp = conf_NB[1, 1]
precision = 100 * float(tp) / (tp + fp)
recall = 100 * float(tp) / (tp + fn)
tp = conf_NB[0][0]
tn = conf_NB[1][1]

warnings.filterwarnings('ignore')
start_time = time.time()
best_params_logreg = []
parameters = {'loss': ['log'], 'penalty': ['l1', 'l2', 'elasticnet'], 'alpha': [float(i) / 10 for i in range(1, 10, 1)], 'n_jobs': [-1]}
clf = SGDClassifier()
clf = GridSearchCV(clf, parameters, cv=5)
clf.fit(x_train, y_train)
best_params_logreg.append(clf.best_params_)

clf = SGDClassifier(loss='log', penalty='l2', alpha=0.1, n_jobs=-1)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
conf_log_ref = confusion_matrix(y_test, y_pred)

probs = clf.predict_proba(x_test)
preds = probs[:, 1]
fpr, tpr, threshold = roc_curve(y_test, preds)
roc_auc = auc(fpr, tpr)

plt.xlim([0, 1])
plt.ylim([0, 1])

a = log_loss(y_test, probs)
tn = conf_log_ref[0, 0]
fp = conf_log_ref[0, 1]
fn = conf_log_ref[1, 0]
tp = conf_log_ref[1, 1]
precision = 100 * float(tp) / (tp + fp)
recall = 100 * float(tp) / (tp + fn)
tp = conf_log_ref[0][0]
tn = conf_log_ref[1][1]

start_time = time.time()
best_params_SVM = []
parameters = {'loss': ['hinge'], 'penalty': ['l1', 'l2', 'elasticnet'], 'alpha': [float(i) / 10 for i in range(1, 10, 1)], 'n_jobs': [-1]}
clf = SGDClassifier()
clf = GridSearchCV(clf, parameters, cv=5)
clf.fit(x_train, y_train)
best_params_SVM = clf.best_params_
print('Best hyperparameters for linear SVM:', best_params_SVM)
print('------{} seconds-------'.format(time.time() - start_time))
code
17120078/cell_14
[ "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import log_loss
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import time
import warnings

data = pd.read_pickle('../input/515k-reviews-after-preprocessing/After_filling_Nans')
df = pd.read_pickle('../input/515k-reviews-after-preprocessing/After preprocessing')
summary = np.array(df.Summary)
score = df['score'].values

start_time = time.time()
best_params = []
parameters = {'alpha': [i for i in range(1, 100, 10)]}
acc = []
score = list(score)
for i in range(2000, 14000, 1000):
    vec = CountVectorizer(max_features=i)
    data = vec.fit_transform(summary)
    nb = MultinomialNB()
    clf = GridSearchCV(nb, parameters, cv=5)
    x_train, x_test, y_train, y_test = train_test_split(data, score, test_size=0.3, random_state=42)
    clf.fit(x_train, y_train)
    # NOTE: with 0/1 labels this records the share of predicted positives, not accuracy
    acc.append(100.0 * sum(clf.predict(x_test)) / len(clf.predict(x_test)))
    best_params.append(clf.best_params_)
    vec = 0
    data = 0

## Confusion matrix
def show_confusion_matrix(C, class_labels=['0', '1']):
    """
    C: ndarray, shape (2,2) as given by scikit-learn confusion_matrix function
    class_labels: list of strings, default simply labels 0 and 1.
    Draws confusion matrix with associated metrics.
    """
    assert C.shape == (2, 2), "Confusion matrix should be from binary classification only."
    # true negative, false positive, false negative, true positive
    tn = C[0, 0]
    fp = C[0, 1]
    fn = C[1, 0]
    tp = C[1, 1]
    NP = fn + tp  # Num positive examples
    NN = tn + fp  # Num negative examples
    N = NP + NN   # Total num of examples

    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111)
    ax.imshow(C, interpolation='nearest', cmap=plt.cm.gray)
    # Draw the grid boxes
    ax.set_xlim(-0.5, 2.5)
    ax.set_ylim(2.5, -0.5)
    ax.plot([-0.5, 2.5], [0.5, 0.5], '-k', lw=2)
    ax.plot([-0.5, 2.5], [1.5, 1.5], '-k', lw=2)
    ax.plot([0.5, 0.5], [-0.5, 2.5], '-k', lw=2)
    ax.plot([1.5, 1.5], [-0.5, 2.5], '-k', lw=2)
    # Set xlabels
    ax.set_xlabel('Predicted Label', fontsize=16)
    ax.set_xticks([0, 1, 2])
    ax.set_xticklabels(class_labels + [''])
    ax.xaxis.set_label_position('top')
    ax.xaxis.tick_top()
    # These coordinates might require some tinkering. Ditto for y, below.
    ax.xaxis.set_label_coords(0.34, 1.06)
    # Set ylabels
    ax.set_ylabel('True Label', fontsize=16, rotation=90)
    ax.set_yticklabels(class_labels + [''], rotation=90)
    ax.set_yticks([0, 1, 2])
    ax.yaxis.set_label_coords(-0.09, 0.65)
    # Fill in initial metrics: tp, tn, etc...
    ax.text(0, 0, 'True Neg: %d\n(Num Neg: %d)' % (tn, NN), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(0, 1, 'False Neg: %d' % fn, va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(1, 0, 'False Pos: %d' % fp, va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(1, 1, 'True Pos: %d\n(Num Pos: %d)' % (tp, NP), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    # Fill in secondary metrics: accuracy, true pos rate, etc...
    ax.text(2, 0, 'False Pos Rate: %.2f' % (fp / (fp + tn + 0.)), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(2, 1, 'True Pos Rate: %.2f' % (tp / (tp + fn + 0.)), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(2, 2, 'Accuracy: %.2f' % ((tp + tn + 0.) / N), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(0, 2, 'Neg Pre Val: %.2f' % (1 - fn / (fn + tn + 0.)), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    ax.text(1, 2, 'Pos Pred Val: %.2f' % (tp / (tp + fp + 0.)), va='center', ha='center', bbox=dict(fc='w', boxstyle='round,pad=1'))
    plt.tight_layout()
    plt.show()

start_time = time.time()
score_Log_reg = []
y_pred = clf.predict(x_test)
conf_NB = confusion_matrix(y_test, y_pred)

probs = clf.predict_proba(x_test)
preds = probs[:, 1]
fpr, tpr, threshold = roc_curve(y_test, preds)
roc_auc = auc(fpr, tpr)

plt.xlim([0, 1])
plt.ylim([0, 1])

a = log_loss(y_test, probs)
tn = conf_NB[0, 0]
fp = conf_NB[0, 1]
fn = conf_NB[1, 0]
tp = conf_NB[1, 1]
precision = 100 * float(tp) / (tp + fp)
recall = 100 * float(tp) / (tp + fn)
tp = conf_NB[0][0]
tn = conf_NB[1][1]

warnings.filterwarnings('ignore')
start_time = time.time()
best_params_logreg = []
parameters = {'loss': ['log'], 'penalty': ['l1', 'l2', 'elasticnet'], 'alpha': [float(i) / 10 for i in range(1, 10, 1)], 'n_jobs': [-1]}
clf = SGDClassifier()
clf = GridSearchCV(clf, parameters, cv=5)
clf.fit(x_train, y_train)
best_params_logreg.append(clf.best_params_)

clf = SGDClassifier(loss='log', penalty='l2', alpha=0.1, n_jobs=-1)
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
conf_log_ref = confusion_matrix(y_test, y_pred)
print('Confusion matrix:\n', conf_log_ref)

probs = clf.predict_proba(x_test)
preds = probs[:, 1]
fpr, tpr, threshold = roc_curve(y_test, preds)
roc_auc = auc(fpr, tpr)

plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)
plt.legend(loc='lower right')
plt.plot([0, 1], [0, 1], 'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()

a = log_loss(y_test, probs)
print('The log loss for the Logistic regression is:', a)
show_confusion_matrix(conf_log_ref, ['Negative', 'Positive'])

tn = conf_log_ref[0, 0]
fp = conf_log_ref[0, 1]
fn = conf_log_ref[1, 0]
tp = conf_log_ref[1, 1]
precision = 100 * float(tp) / (tp + fp)
recall = 100 * float(tp) / (tp + fn)
print('Precision :', precision)
print('Recall :', recall)
# NOTE: the original reuses tp/tn here for the two diagonal cells of the matrix
tp = conf_log_ref[0][0]
tn = conf_log_ref[1][1]
print('The accuracy is {} %'.format(round(100.0 * (tp + tn) / len(y_test), 2)))
code
18136679/cell_13
[ "application_vnd.jupyter.stderr_output_9.png", "application_vnd.jupyter.stderr_output_7.png", "application_vnd.jupyter.stderr_output_11.png", "text_plain_output_4.png", "text_plain_output_10.png", "text_plain_output_6.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_5.png", "text_plain_output_8.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from gensim.models import Word2Vec
from tqdm import tqdm
import docx
import os
import re
import numpy as np
import pandas as pd

def read_data(file_path):
    text = []
    none = 0
    doc = docx.Document(file_path)
    for para in doc.paragraphs:
        content = para.text
        filter_ = re.compile(u'[^一-龥]')  # keep only CJK (Chinese) characters
        filtered_content = filter_.sub('', content)
        if len(filtered_content) > 0:
            text.append(filtered_content)
        else:
            none += 1
    for table in doc.tables:
        for row in table.rows:
            for cell in row.cells:
                content = cell.text
                filter_ = re.compile(u'[^一-龥]')
                filtered_content = filter_.sub('', content)
                if len(filtered_content) > 0:
                    text.append(filtered_content)
                else:
                    none += 1
    return text

path = '../input/research_data/research_data/'
doc = os.listdir('../input/research_data/research_data/')
doc

text = []
for doc_ in tqdm(doc):
    path_ = path + doc_
    text_ = read_data(path_)
    text += text_

text_split = []
for text_ in tqdm(text):
    text_split_ = []
    for i in range(len(text_)):
        text_split_.append(text_[i])  # split each passage into single characters
    text_split.append(text_split_)

# gensim < 4.0 API: size/iter were renamed vector_size/epochs in 4.0
model = Word2Vec(text_split, size=100, window=5, min_count=1, workers=1, iter=30)
model['公']
code
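The Word2Vec call above uses the pre-4.0 gensim API (size, iter, and indexing the model directly with model['公']). A sketch of the equivalent under gensim >= 4.0, where these were renamed (an assumption about the reader's environment, not a change to the recorded run):

from gensim.models import Word2Vec

# gensim >= 4.0: size -> vector_size, iter -> epochs
model = Word2Vec(text_split, vector_size=100, window=5, min_count=1, workers=1, epochs=30)
vector = model.wv['公']  # direct indexing on the model object was removed in 4.0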
18136679/cell_6
[ "text_plain_output_1.png" ]
from tqdm import tqdm
import docx
import os
import re
import numpy as np
import pandas as pd

def read_data(file_path):
    text = []
    none = 0
    doc = docx.Document(file_path)
    for para in doc.paragraphs:
        content = para.text
        filter_ = re.compile(u'[^一-龥]')  # keep only CJK (Chinese) characters
        filtered_content = filter_.sub('', content)
        if len(filtered_content) > 0:
            text.append(filtered_content)
        else:
            none += 1
    for table in doc.tables:
        for row in table.rows:
            for cell in row.cells:
                content = cell.text
                filter_ = re.compile(u'[^一-龥]')
                filtered_content = filter_.sub('', content)
                if len(filtered_content) > 0:
                    text.append(filtered_content)
                else:
                    none += 1
    return text

path = '../input/research_data/research_data/'
doc = os.listdir('../input/research_data/research_data/')
doc

text = []
for doc_ in tqdm(doc):
    path_ = path + doc_
    text_ = read_data(path_)
    print(str(len(text_)) + ' in document ' + str(doc_))
    text += text_
code
18136679/cell_2
[ "text_plain_output_1.png" ]
!pip install python-docx
code
18136679/cell_11
[ "text_plain_output_1.png" ]
from tqdm import tqdm
import docx
import os
import re
import numpy as np
import pandas as pd

def read_data(file_path):
    text = []
    none = 0
    doc = docx.Document(file_path)
    for para in doc.paragraphs:
        content = para.text
        filter_ = re.compile(u'[^一-龥]')
        filtered_content = filter_.sub('', content)
        if len(filtered_content) > 0:
            text.append(filtered_content)
        else:
            none += 1
    for table in doc.tables:
        for row in table.rows:
            for cell in row.cells:
                content = cell.text
                filter_ = re.compile(u'[^一-龥]')
                filtered_content = filter_.sub('', content)
                if len(filtered_content) > 0:
                    text.append(filtered_content)
                else:
                    none += 1
    return text

path = '../input/research_data/research_data/'
doc = os.listdir('../input/research_data/research_data/')
doc

text = []
for doc_ in tqdm(doc):
    path_ = path + doc_
    text_ = read_data(path_)
    text += text_

text_split = []
for text_ in tqdm(text):
    text_split_ = []
    for i in range(len(text_)):
        text_split_.append(text_[i])
    text_split.append(text_split_)

text_split[0]
code
18136679/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

print(os.listdir('../input/research_data/research_data/'))
code
18136679/cell_7
[ "text_plain_output_1.png" ]
from tqdm import tqdm
import docx
import jieba
import os
import re
import numpy as np
import pandas as pd

def read_data(file_path):
    text = []
    none = 0
    doc = docx.Document(file_path)
    for para in doc.paragraphs:
        content = para.text
        filter_ = re.compile(u'[^一-龥]')
        filtered_content = filter_.sub('', content)
        if len(filtered_content) > 0:
            text.append(filtered_content)
        else:
            none += 1
    for table in doc.tables:
        for row in table.rows:
            for cell in row.cells:
                content = cell.text
                filter_ = re.compile(u'[^一-龥]')
                filtered_content = filter_.sub('', content)
                if len(filtered_content) > 0:
                    text.append(filtered_content)
                else:
                    none += 1
    return text

path = '../input/research_data/research_data/'
doc = os.listdir('../input/research_data/research_data/')
doc

text = []
for doc_ in tqdm(doc):
    path_ = path + doc_
    text_ = read_data(path_)
    text += text_

words = set([])
for text_ in tqdm(text):
    words = words | set(jieba.cut(text_, cut_all=True))
words = list(words)
code
18136679/cell_15
[ "application_vnd.jupyter.stderr_output_1.png" ]
from tqdm import tqdm
import docx
import jieba
import os
import re
import numpy as np
import pandas as pd

def read_data(file_path):
    text = []
    none = 0
    doc = docx.Document(file_path)
    for para in doc.paragraphs:
        content = para.text
        filter_ = re.compile(u'[^一-龥]')
        filtered_content = filter_.sub('', content)
        if len(filtered_content) > 0:
            text.append(filtered_content)
        else:
            none += 1
    for table in doc.tables:
        for row in table.rows:
            for cell in row.cells:
                content = cell.text
                filter_ = re.compile(u'[^一-龥]')
                filtered_content = filter_.sub('', content)
                if len(filtered_content) > 0:
                    text.append(filtered_content)
                else:
                    none += 1
    return text

path = '../input/research_data/research_data/'
doc = os.listdir('../input/research_data/research_data/')
doc

text = []
for doc_ in tqdm(doc):
    path_ = path + doc_
    text_ = read_data(path_)
    text += text_

words = set([])
for text_ in tqdm(text):
    words = words | set(jieba.cut(text_, cut_all=True))
words = list(words)

len(words)
code
18136679/cell_17
[ "application_vnd.jupyter.stderr_output_1.png" ]
from tqdm import tqdm
import docx
import os
import re
import numpy as np
import pandas as pd

def read_data(file_path):
    text = []
    none = 0
    doc = docx.Document(file_path)
    for para in doc.paragraphs:
        content = para.text
        filter_ = re.compile(u'[^一-龥]')
        filtered_content = filter_.sub('', content)
        if len(filtered_content) > 0:
            text.append(filtered_content)
        else:
            none += 1
    for table in doc.tables:
        for row in table.rows:
            for cell in row.cells:
                content = cell.text
                filter_ = re.compile(u'[^一-龥]')
                filtered_content = filter_.sub('', content)
                if len(filtered_content) > 0:
                    text.append(filtered_content)
                else:
                    none += 1
    return text

path = '../input/research_data/research_data/'
doc = os.listdir('../input/research_data/research_data/')
doc

text = []
for doc_ in tqdm(doc):
    path_ = path + doc_
    text_ = read_data(path_)
    text += text_

length = [len(text_) for text_ in text]
max(length)
code
18136679/cell_10
[ "text_plain_output_1.png" ]
from tqdm import tqdm
import docx
import os
import re
import numpy as np
import pandas as pd

def read_data(file_path):
    text = []
    none = 0
    doc = docx.Document(file_path)
    for para in doc.paragraphs:
        content = para.text
        filter_ = re.compile(u'[^一-龥]')
        filtered_content = filter_.sub('', content)
        if len(filtered_content) > 0:
            text.append(filtered_content)
        else:
            none += 1
    for table in doc.tables:
        for row in table.rows:
            for cell in row.cells:
                content = cell.text
                filter_ = re.compile(u'[^一-龥]')
                filtered_content = filter_.sub('', content)
                if len(filtered_content) > 0:
                    text.append(filtered_content)
                else:
                    none += 1
    return text

path = '../input/research_data/research_data/'
doc = os.listdir('../input/research_data/research_data/')
doc

text = []
for doc_ in tqdm(doc):
    path_ = path + doc_
    text_ = read_data(path_)
    text += text_

text_split = []
for text_ in tqdm(text):
    text_split_ = []
    for i in range(len(text_)):
        text_split_.append(text_[i])
    text_split.append(text_split_)
code
18136679/cell_5
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import docx
import os
import re
import numpy as np
import pandas as pd

def read_data(file_path):
    text = []
    none = 0
    doc = docx.Document(file_path)
    for para in doc.paragraphs:
        content = para.text
        filter_ = re.compile(u'[^一-龥]')
        filtered_content = filter_.sub('', content)
        if len(filtered_content) > 0:
            text.append(filtered_content)
        else:
            none += 1
    for table in doc.tables:
        for row in table.rows:
            for cell in row.cells:
                content = cell.text
                filter_ = re.compile(u'[^一-龥]')
                filtered_content = filter_.sub('', content)
                if len(filtered_content) > 0:
                    text.append(filtered_content)
                else:
                    none += 1
    return text

path = '../input/research_data/research_data/'
doc = os.listdir('../input/research_data/research_data/')
doc
code
32070353/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/time_series_covid_19_confirmed.csv')
data.columns
us = data[data['Country/Region'] == 'US']
us
us = us.drop(['Province/State', 'Country/Region', 'Lat', 'Long'], axis=1)
us
code
32070353/cell_9
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/time_series_covid_19_confirmed.csv')
data.columns
us = data[data['Country/Region'] == 'US']
us
code
32070353/cell_25
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/time_series_covid_19_confirmed.csv')
data.columns
us = data[data['Country/Region'] == 'US']
us
us = us.drop(['Province/State', 'Country/Region', 'Lat', 'Long'], axis=1)
us.T
us = us.T.reset_index()
us
us = us.rename(columns={'index': 'date', 225: 'confirmed'})
us

plt.figure(figsize=(18, 5))
sns.set_style('whitegrid')
sns.barplot(x='date', y='confirmed', data=us)
plt.show()
code
32070353/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/time_series_covid_19_confirmed.csv')
data.head()
code
32070353/cell_23
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/time_series_covid_19_confirmed.csv')
data.columns
us = data[data['Country/Region'] == 'US']
us
us = us.drop(['Province/State', 'Country/Region', 'Lat', 'Long'], axis=1)
us.T
us = us.T.reset_index()
us
us = us.rename(columns={'index': 'date', 225: 'confirmed'})
us

plt.figure(figsize=(18, 5))
sns.barplot(x='date', y='confirmed', data=us)
plt.show()
code
32070353/cell_33
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/time_series_covid_19_confirmed.csv')
data.columns
italy = data[data['Country/Region'] == 'Italy']
italy
code
32070353/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/time_series_covid_19_confirmed.csv')
data.columns
code
32070353/cell_29
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/time_series_covid_19_confirmed.csv')
data.columns
us = data[data['Country/Region'] == 'US']
us
us = us.drop(['Province/State', 'Country/Region', 'Lat', 'Long'], axis=1)
us.T
us = us.T.reset_index()
us
us = us.rename(columns={'index': 'date', 225: 'confirmed'})
us

plt.figure(figsize=(18, 5))
sns.set_style('whitegrid')
sns.lineplot(x='date', y='confirmed', data=us)
plt.xticks(rotation=90)
plt.show()
code
32070353/cell_39
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/time_series_covid_19_confirmed.csv')
data.columns
italy = data[data['Country/Region'] == 'Italy']
italy
italy = italy.drop(['Province/State', 'Country/Region', 'Lat', 'Long'], axis=1)
italy
italy = italy.T
italy
italy = italy.reset_index()
italy
code
32070353/cell_41
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/time_series_covid_19_confirmed.csv')
data.columns
italy = data[data['Country/Region'] == 'Italy']
italy
italy = italy.drop(['Province/State', 'Country/Region', 'Lat', 'Long'], axis=1)
italy
italy = italy.T
italy
italy = italy.reset_index()
italy
italy = italy.rename(columns={'index': 'date', 137: 'confirmed'})
italy
code
32070353/cell_2
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32070353/cell_19
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/time_series_covid_19_confirmed.csv')
data.columns
us = data[data['Country/Region'] == 'US']
us
us = us.drop(['Province/State', 'Country/Region', 'Lat', 'Long'], axis=1)
us.T
us = us.T.reset_index()
us
us = us.rename(columns={'index': 'date', 225: 'confirmed'})
us
code
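The wide-to-long reshape above leans on the positional row label 225 surviving the transpose. A behaviour-equivalent sketch using pd.melt that avoids the hard-coded label (a suggestion, not the notebook's recorded approach):

us = data[data['Country/Region'] == 'US']
us = us.drop(['Province/State', 'Country/Region', 'Lat', 'Long'], axis=1)
# melt turns the one-row wide frame into (date, confirmed) pairs
# without needing to know the original row index
us = us.melt(var_name='date', value_name='confirmed')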
32070353/cell_15
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/time_series_covid_19_confirmed.csv')
data.columns
us = data[data['Country/Region'] == 'US']
us
us = us.drop(['Province/State', 'Country/Region', 'Lat', 'Long'], axis=1)
us.T
code
32070353/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/time_series_covid_19_confirmed.csv')
data.columns
us = data[data['Country/Region'] == 'US']
us
us = us.drop(['Province/State', 'Country/Region', 'Lat', 'Long'], axis=1)
us.T
us = us.T.reset_index()
us
code
32070353/cell_35
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/time_series_covid_19_confirmed.csv')
data.columns
italy = data[data['Country/Region'] == 'Italy']
italy
italy = italy.drop(['Province/State', 'Country/Region', 'Lat', 'Long'], axis=1)
italy
code
32070353/cell_31
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/time_series_covid_19_confirmed.csv')
data.columns
us = data[data['Country/Region'] == 'US']
us
us = us.drop(['Province/State', 'Country/Region', 'Lat', 'Long'], axis=1)
us.T
us = us.T.reset_index()
us
us = us.rename(columns={'index': 'date', 225: 'confirmed'})
us

plt.figure(figsize=(18, 5))
sns.set_style('whitegrid')
sns.scatterplot(x='date', y='confirmed', data=us)
plt.xticks(rotation=90)
plt.show()
code
32070353/cell_27
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/time_series_covid_19_confirmed.csv')
data.columns
us = data[data['Country/Region'] == 'US']
us
us = us.drop(['Province/State', 'Country/Region', 'Lat', 'Long'], axis=1)
us.T
us = us.T.reset_index()
us
us = us.rename(columns={'index': 'date', 225: 'confirmed'})
us

plt.figure(figsize=(18, 5))
sns.set_style('whitegrid')
sns.barplot(x='date', y='confirmed', data=us)
plt.xticks(rotation=90)
plt.show()
code
32070353/cell_37
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('/kaggle/input/novel-corona-virus-2019-dataset/time_series_covid_19_confirmed.csv')
data.columns
italy = data[data['Country/Region'] == 'Italy']
italy
italy = italy.drop(['Province/State', 'Country/Region', 'Lat', 'Long'], axis=1)
italy
italy = italy.T
italy
code
121150055/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/email-classification-nlp/SMS_train.csv', encoding='cp1252')
df2 = pd.read_csv('/kaggle/input/email-classification-nlp/SMS_test.csv', encoding='cp1252')
df2.isnull().sum()
code
121150055/cell_23
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/email-classification-nlp/SMS_train.csv', encoding='cp1252')
df2 = pd.read_csv('/kaggle/input/email-classification-nlp/SMS_test.csv', encoding='cp1252')
df.isnull().sum()

df = df.replace(to_replace='[^0-9a-zA-Z ]+', value='', regex=True)
df = df.applymap(lambda s: s.lower() if isinstance(s, str) else s)

count_vectorizer = CountVectorizer()
tfidf_vectorizer = TfidfVectorizer()
# NOTE: no cell captured here ever creates df['clean_text']; the cleaning cell
# was presumably dropped from this dump (see the sketch after this record).
X_train_count = count_vectorizer.fit_transform(df['clean_text'])
X_train_tfidf = tfidf_vectorizer.fit_transform(df['clean_text'])
X_train_combined = pd.concat([pd.DataFrame(X_train_count.toarray()), pd.DataFrame(X_train_tfidf.toarray())], axis=1)

svm_model = SVC(kernel='linear')
svm_model.fit(X_train_combined, df['Label'])
y_pred = svm_model.predict(X_train_combined)
accuracy = accuracy_score(df['Label'], y_pred)  # NOTE: accuracy on the *training* data, so optimistic
print('Accuracy:', accuracy)
code
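Every modelling cell in notebook 121150055 vectorises df['clean_text'], but no cell shown here creates that column; only the punctuation stripping and lower-casing survive. A hedged sketch of the presumably omitted step, assuming 'clean_text' was built from the Message_body column (the column name and the stopword/lemmatisation choices are assumptions, though they are consistent with the nltk downloads in cell_3):

from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize

stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()

def clean(text):
    # tokenize, drop stopwords, lemmatize, re-join
    tokens = word_tokenize(str(text).lower())
    return ' '.join(lemmatizer.lemmatize(t) for t in tokens if t not in stop_words)

# 'Message_body' is a hypothetical column name for the raw SMS text
df['clean_text'] = df['Message_body'].apply(clean)
df2['clean_text'] = df2['Message_body'].apply(clean)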
121150055/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/email-classification-nlp/SMS_train.csv', encoding='cp1252')
df2 = pd.read_csv('/kaggle/input/email-classification-nlp/SMS_test.csv', encoding='cp1252')
df2.head()
code
121150055/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
121150055/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/email-classification-nlp/SMS_train.csv', encoding='cp1252')
df2 = pd.read_csv('/kaggle/input/email-classification-nlp/SMS_test.csv', encoding='cp1252')
df.isnull().sum()
code
121150055/cell_15
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/email-classification-nlp/SMS_train.csv', encoding='cp1252')
df2 = pd.read_csv('/kaggle/input/email-classification-nlp/SMS_test.csv', encoding='cp1252')
df2.isnull().sum()
df2 = df2.replace(to_replace='[^0-9a-zA-Z ]+', value='', regex=True)
df2
code
121150055/cell_3
[ "text_html_output_1.png" ]
import nltk
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
code
121150055/cell_24
[ "text_plain_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/email-classification-nlp/SMS_train.csv', encoding='cp1252')
df2 = pd.read_csv('/kaggle/input/email-classification-nlp/SMS_test.csv', encoding='cp1252')
df.isnull().sum()
df2.isnull().sum()

df = df.replace(to_replace='[^0-9a-zA-Z ]+', value='', regex=True)
df2 = df2.replace(to_replace='[^0-9a-zA-Z ]+', value='', regex=True)
df = df.applymap(lambda s: s.lower() if isinstance(s, str) else s)
df2 = df2.applymap(lambda s: s.lower() if isinstance(s, str) else s)

count_vectorizer = CountVectorizer()
tfidf_vectorizer = TfidfVectorizer()
# NOTE: df['clean_text'] / df2['clean_text'] are never created in the cells shown
# (see the sketch after cell_23 above).
X_train_count = count_vectorizer.fit_transform(df['clean_text'])
X_train_tfidf = tfidf_vectorizer.fit_transform(df['clean_text'])
X_train_combined = pd.concat([pd.DataFrame(X_train_count.toarray()), pd.DataFrame(X_train_tfidf.toarray())], axis=1)

svm_model = SVC(kernel='linear')
svm_model.fit(X_train_combined, df['Label'])
y_pred = svm_model.predict(X_train_combined)
accuracy = accuracy_score(df['Label'], y_pred)

X_test_count = count_vectorizer.transform(df2['clean_text'])
X_test_tfidf = tfidf_vectorizer.transform(df2['clean_text'])
X_test_combined = pd.concat([pd.DataFrame(X_test_count.toarray()), pd.DataFrame(X_test_tfidf.toarray())], axis=1)
y_pred = svm_model.predict(X_test_combined)
accuracy = accuracy_score(df2['Label'], y_pred)
print('Accuracy:', accuracy)
code
121150055/cell_14
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/email-classification-nlp/SMS_train.csv', encoding='cp1252')
df2 = pd.read_csv('/kaggle/input/email-classification-nlp/SMS_test.csv', encoding='cp1252')
df.isnull().sum()
df = df.replace(to_replace='[^0-9a-zA-Z ]+', value='', regex=True)
df
code
121150055/cell_22
[ "text_html_output_1.png" ]
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.svm import SVC
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/email-classification-nlp/SMS_train.csv', encoding='cp1252')
df2 = pd.read_csv('/kaggle/input/email-classification-nlp/SMS_test.csv', encoding='cp1252')
df.isnull().sum()

df = df.replace(to_replace='[^0-9a-zA-Z ]+', value='', regex=True)
df = df.applymap(lambda s: s.lower() if isinstance(s, str) else s)

count_vectorizer = CountVectorizer()
tfidf_vectorizer = TfidfVectorizer()
# NOTE: df['clean_text'] is never created in the cells shown
# (see the sketch after cell_23 above).
X_train_count = count_vectorizer.fit_transform(df['clean_text'])
X_train_tfidf = tfidf_vectorizer.fit_transform(df['clean_text'])
X_train_combined = pd.concat([pd.DataFrame(X_train_count.toarray()), pd.DataFrame(X_train_tfidf.toarray())], axis=1)

svm_model = SVC(kernel='linear')
svm_model.fit(X_train_combined, df['Label'])
code
121150055/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/email-classification-nlp/SMS_train.csv', encoding='cp1252')
df2 = pd.read_csv('/kaggle/input/email-classification-nlp/SMS_test.csv', encoding='cp1252')
df.head()
code
32072941/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
train.head(2)
code
32072941/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32072941/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')
test = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')
test.head(2)
code
74060349/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pydicom
from pydicom.pixel_data_handlers.util import apply_voi_lut  # imported but unused in this cell

def make_lut(pixels, width, center, p_i):
    slope = 1.0
    intercept = 0.0
    min_pixel = int(np.amin(pixels))
    max_pixel = int(np.amax(pixels))
    lut = [0] * (max_pixel + 1)
    invert = False
    if p_i == 'MONOCHROME1':
        invert = True
    else:
        center = max_pixel - min_pixel - center
    # range stops at max_pixel + 1; the original stopped one short,
    # leaving the brightest pixel value mapped to 0
    for pix_value in range(min_pixel, max_pixel + 1):
        lut_value = pix_value * slope + intercept
        voi_value = ((lut_value - center) / width + 0.5) * 255.0
        clamped_value = min(max(voi_value, 0), 255)
        if invert:
            lut[pix_value] = round(255 - clamped_value)
        else:
            lut[pix_value] = round(clamped_value)
    return lut

image = pydicom.dcmread('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/00012/T1w/Image-17.dcm')
pixels = image.pixel_array
print('Min pixel value: ' + str(np.min(pixels)))
print('Max pixel value: ' + str(np.max(pixels)))
plt.figure(figsize=(6, 6))
plt.imshow(pixels, cmap='gray')
code
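The per-pixel LUT loop in make_lut can be collapsed into a vectorized NumPy transform; a sketch under the same linear VOI formula (a random array stands in for a DICOM frame):

import numpy as np

def window_image(pixels, width, center):
    # Same mapping as make_lut, applied to the whole array at once.
    voi = ((pixels.astype(np.float32) - center) / width + 0.5) * 255.0
    return np.clip(voi, 0, 255).astype(np.uint8)

frame = np.random.randint(0, 1200, size=(512, 512))  # stand-in for image.pixel_array
print(window_image(frame, width=900, center=450).max())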
74060349/cell_7
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pydicom
from pydicom.pixel_data_handlers.util import apply_voi_lut

def make_lut(pixels, width, center, p_i):
    slope = 1.0
    intercept = 0.0
    min_pixel = int(np.amin(pixels))
    max_pixel = int(np.amax(pixels))
    lut = [0] * (max_pixel + 1)
    invert = False
    if p_i == 'MONOCHROME1':
        invert = True
    else:
        center = max_pixel - min_pixel - center
    # end at max_pixel + 1 so the brightest pixel value also gets a LUT entry
    for pix_value in range(min_pixel, max_pixel + 1):
        lut_value = pix_value * slope + intercept
        voi_value = ((lut_value - center) / width + 0.5) * 255.0
        clamped_value = min(max(voi_value, 0), 255)
        if invert:
            lut[pix_value] = round(255 - clamped_value)
        else:
            lut[pix_value] = round(clamped_value)
    return lut

image = pydicom.dcmread('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/00012/T1w/Image-17.dcm')
pixels = image.pixel_array
fig, axes = plt.subplots(nrows=1, ncols=1, sharex=False, sharey=False, figsize=(10, 4))
plt.title('Pixel Range: ' + str(np.min(pixels)) + '-' + str(np.max(pixels)))
plt.hist(pixels.ravel(), np.max(pixels), (1, np.max(pixels)))
plt.tight_layout()
plt.show()
code
74060349/cell_10
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pydicom
from pydicom.pixel_data_handlers.util import apply_voi_lut

def make_lut(pixels, width, center, p_i):
    slope = 1.0
    intercept = 0.0
    min_pixel = int(np.amin(pixels))
    max_pixel = int(np.amax(pixels))
    lut = [0] * (max_pixel + 1)
    invert = False
    if p_i == 'MONOCHROME1':
        invert = True
    else:
        center = max_pixel - min_pixel - center
    # end at max_pixel + 1 so the brightest pixel value also gets a LUT entry
    for pix_value in range(min_pixel, max_pixel + 1):
        lut_value = pix_value * slope + intercept
        voi_value = ((lut_value - center) / width + 0.5) * 255.0
        clamped_value = min(max(voi_value, 0), 255)
        if invert:
            lut[pix_value] = round(255 - clamped_value)
        else:
            lut[pix_value] = round(clamped_value)
    return lut

def apply_lut(pixels_in, lut):
    pixels_in = pixels_in.flatten()
    pixels_out = [0] * len(pixels_in)
    for i in range(0, len(pixels_in)):
        pixel = pixels_in[i]
        pixels_out[i] = int(lut[pixel])
    return pixels_out

image = pydicom.dcmread('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/00012/T1w/Image-17.dcm')
pixels = image.pixel_array

# Plot a histogram of the raw pixel data
fig, axes = plt.subplots(nrows=1, ncols=1, sharex=False, sharey=False, figsize=(10, 4))
plt.title('Pixel Range: ' + str(np.min(pixels)) + '-' + str(np.max(pixels)))
plt.hist(pixels.ravel(), np.max(pixels), (1, np.max(pixels)))
plt.tight_layout()
plt.show()

window_width_1 = np.max(image.pixel_array)
window_center_1 = window_width_1 / 2
lut = make_lut(image.pixel_array, window_width_1, window_center_1, image.PhotometricInterpretation)
image1 = np.reshape(apply_lut(pixels, lut), (pixels.shape[0], pixels.shape[1]))
window_width_2 = 450
window_center_2 = 450
lut = make_lut(image.pixel_array, window_width_2, window_center_2, image.PhotometricInterpretation)
image2 = np.reshape(apply_lut(pixels, lut), (pixels.shape[0], pixels.shape[1]))
window_width_3 = 900
window_center_3 = 90
lut = make_lut(image.pixel_array, window_width_3, window_center_3, image.PhotometricInterpretation)
image3 = np.reshape(apply_lut(pixels, lut), (pixels.shape[0], pixels.shape[1]))

fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(12, 12))
ax = axes.ravel()
ax[0].set_title('Default Image')
ax[0].imshow(image.pixel_array, cmap='gray')
ax[1].set_title(f'Width: {window_width_1} / Center: {window_center_1}')
ax[1].imshow(image1, cmap='gray')
ax[2].set_title(f'Width: {window_width_2} / Center: {window_center_2}')
ax[2].imshow(image2, cmap='gray')
ax[3].set_title(f'Width: {window_width_3} / Center: {window_center_3}')
ax[3].imshow(image3, cmap='gray')
plt.tight_layout()
plt.show()
code
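apply_lut walks the flattened image in pure Python; NumPy fancy indexing performs the same table lookup in one shot and keeps the 2-D shape. A sketch, assuming a LUT built like make_lut's output:

import numpy as np

lut = np.arange(256)  # stand-in for a make_lut result
pixels = np.random.randint(0, 256, size=(4, 4))
mapped = lut[pixels]  # replaces the flatten / loop / reshape round-trip
print(mapped.shape)  # (4, 4)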
74060349/cell_12
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pydicom
from pydicom.pixel_data_handlers.util import apply_voi_lut

def make_lut(pixels, width, center, p_i):
    slope = 1.0
    intercept = 0.0
    min_pixel = int(np.amin(pixels))
    max_pixel = int(np.amax(pixels))
    lut = [0] * (max_pixel + 1)
    invert = False
    if p_i == 'MONOCHROME1':
        invert = True
    else:
        center = max_pixel - min_pixel - center
    # end at max_pixel + 1 so the brightest pixel value also gets a LUT entry
    for pix_value in range(min_pixel, max_pixel + 1):
        lut_value = pix_value * slope + intercept
        voi_value = ((lut_value - center) / width + 0.5) * 255.0
        clamped_value = min(max(voi_value, 0), 255)
        if invert:
            lut[pix_value] = round(255 - clamped_value)
        else:
            lut[pix_value] = round(clamped_value)
    return lut

def apply_lut(pixels_in, lut):
    pixels_in = pixels_in.flatten()
    pixels_out = [0] * len(pixels_in)
    for i in range(0, len(pixels_in)):
        pixel = pixels_in[i]
        pixels_out[i] = int(lut[pixel])
    return pixels_out

image = pydicom.dcmread('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/00012/T1w/Image-17.dcm')
pixels = image.pixel_array

# Plot a histogram of the raw pixel data
fig, axes = plt.subplots(nrows=1, ncols=1, sharex=False, sharey=False, figsize=(10, 4))
plt.title('Pixel Range: ' + str(np.min(pixels)) + '-' + str(np.max(pixels)))
plt.hist(pixels.ravel(), np.max(pixels), (1, np.max(pixels)))
plt.tight_layout()
plt.show()

window_width_1 = np.max(image.pixel_array)
window_center_1 = window_width_1 / 2
lut = make_lut(image.pixel_array, window_width_1, window_center_1, image.PhotometricInterpretation)
image1 = np.reshape(apply_lut(pixels, lut), (pixels.shape[0], pixels.shape[1]))
window_width_2 = 450
window_center_2 = 450
lut = make_lut(image.pixel_array, window_width_2, window_center_2, image.PhotometricInterpretation)
image2 = np.reshape(apply_lut(pixels, lut), (pixels.shape[0], pixels.shape[1]))
window_width_3 = 900
window_center_3 = 90
lut = make_lut(image.pixel_array, window_width_3, window_center_3, image.PhotometricInterpretation)
image3 = np.reshape(apply_lut(pixels, lut), (pixels.shape[0], pixels.shape[1]))

fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(12, 12))
ax = axes.ravel()
ax[0].set_title('Default Image')
ax[0].imshow(image.pixel_array, cmap='gray')
ax[1].set_title(f'Width: {window_width_1} / Center: {window_center_1}')
ax[1].imshow(image1, cmap='gray')
ax[2].set_title(f'Width: {window_width_2} / Center: {window_center_2}')
ax[2].imshow(image2, cmap='gray')
ax[3].set_title(f'Width: {window_width_3} / Center: {window_center_3}')
ax[3].imshow(image3, cmap='gray')
plt.tight_layout()
plt.show()

image = pydicom.dcmread('../input/rsna-miccai-brain-tumor-radiogenomic-classification/train/00014/FLAIR/Image-126.dcm')
pixels = image.pixel_array
print('Min pixel value: ' + str(np.min(pixels)))
print('Max pixel value: ' + str(np.max(pixels)))
plt.figure(figsize=(6, 6))
plt.imshow(pixels, cmap='gray')
code
104127018/cell_21
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
from scipy import stats
import datetime as dt
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import dendrogram
from scipy.cluster.hierarchy import linkage
from yellowbrick.cluster import KElbowVisualizer
from sklearn.cluster import AgglomerativeClustering
import seaborn as sns
import numpy as np
import sys

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
pd.set_option('display.width', 1000)

df_ = pd.read_csv('../input/data20k/flo_data_20k.csv')
df = df_.copy()

date_columns = df.columns[df.columns.str.contains('date')]
df[date_columns] = df[date_columns].apply(pd.to_datetime)
model_df = df[['order_num_total_ever_online', 'order_num_total_ever_offline', 'customer_value_total_ever_offline', 'customer_value_total_ever_online', 'recency', 'tenure']]
model_df['order_num_total_ever_online'] = np.log1p(model_df['order_num_total_ever_online'])
model_df['order_num_total_ever_offline'] = np.log1p(model_df['order_num_total_ever_offline'])
model_df['customer_value_total_ever_offline'] = np.log1p(model_df['customer_value_total_ever_offline'])
model_df['customer_value_total_ever_online'] = np.log1p(model_df['customer_value_total_ever_online'])
model_df['recency'] = np.log1p(model_df['recency'])
model_df['tenure'] = np.log1p(model_df['tenure'])
sc = MinMaxScaler((0, 1))
model_scaling = sc.fit_transform(model_df)
model_df = pd.DataFrame(model_scaling, columns=model_df.columns)

kmeans = KMeans()
elbow = KElbowVisualizer(kmeans, k=(2, 20))
elbow.fit(model_df)
elbow.show()
code
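KElbowVisualizer is a thin wrapper around fitting KMeans for each k and plotting the distortion; a bare-sklearn sketch of the same curve (random data stands in for the scaled model_df):

import numpy as np
from sklearn.cluster import KMeans

X = np.random.rand(200, 6)
inertias = {k: KMeans(n_clusters=k, random_state=42, n_init=10).fit(X).inertia_ for k in range(2, 20)}
# inertia always falls as k grows; the 'elbow' is where the drop flattens out,
# which is what the visualizer estimates automatically.
print(inertias[2], inertias[19])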
104127018/cell_9
[ "text_html_output_1.png" ]
import pandas as pd
from scipy import stats
import datetime as dt
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import dendrogram
from scipy.cluster.hierarchy import linkage
from yellowbrick.cluster import KElbowVisualizer
from sklearn.cluster import AgglomerativeClustering
import seaborn as sns
import numpy as np
import sys

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
pd.set_option('display.width', 1000)

df_ = pd.read_csv('../input/data20k/flo_data_20k.csv')
df = df_.copy()

def check_df(dataframe, head=5):
    print('##################### Shape #####################')
    print(dataframe.shape)
    print('##################### Types #####################')
    print(dataframe.dtypes)
    print('##################### Head #####################')
    print(dataframe.head(head))
    print('##################### Tail #####################')
    print(dataframe.tail(head))
    print('##################### is null? #####################')
    print(dataframe.isnull().sum())
    print('##################### Quantiles #####################')
    print(dataframe.quantile([0, 0.05, 0.5, 0.95, 0.99, 1]).T)
    print(dataframe.describe().T)

check_df(df)
code
104127018/cell_34
[ "image_output_1.png" ]
import pandas as pd
from scipy import stats
import datetime as dt
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import dendrogram
from scipy.cluster.hierarchy import linkage
from yellowbrick.cluster import KElbowVisualizer
from sklearn.cluster import AgglomerativeClustering
import seaborn as sns
import numpy as np
import sys

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
pd.set_option('display.width', 1000)

df_ = pd.read_csv('../input/data20k/flo_data_20k.csv')
df = df_.copy()

date_columns = df.columns[df.columns.str.contains('date')]
df[date_columns] = df[date_columns].apply(pd.to_datetime)
model_df = df[['order_num_total_ever_online', 'order_num_total_ever_offline', 'customer_value_total_ever_offline', 'customer_value_total_ever_online', 'recency', 'tenure']]
model_df['order_num_total_ever_online'] = np.log1p(model_df['order_num_total_ever_online'])
model_df['order_num_total_ever_offline'] = np.log1p(model_df['order_num_total_ever_offline'])
model_df['customer_value_total_ever_offline'] = np.log1p(model_df['customer_value_total_ever_offline'])
model_df['customer_value_total_ever_online'] = np.log1p(model_df['customer_value_total_ever_online'])
model_df['recency'] = np.log1p(model_df['recency'])
model_df['tenure'] = np.log1p(model_df['tenure'])
sc = MinMaxScaler((0, 1))
model_scaling = sc.fit_transform(model_df)
model_df = pd.DataFrame(model_scaling, columns=model_df.columns)

k_means = KMeans(n_clusters=7, random_state=42).fit(model_df)
segments = k_means.labels_
segments
final_df = df[['master_id', 'order_num_total_ever_online', 'order_num_total_ever_offline', 'customer_value_total_ever_offline', 'customer_value_total_ever_online', 'recency', 'tenure']]
final_df['segment'] = segments
final_df.groupby('segment').agg({'order_num_total_ever_online': ['mean', 'min', 'max'], 'order_num_total_ever_offline': ['mean', 'min', 'max'], 'customer_value_total_ever_offline': ['mean', 'min', 'max'], 'customer_value_total_ever_online': ['mean', 'min', 'max'], 'recency': ['mean', 'min', 'max'], 'tenure': ['mean', 'min', 'max', 'count']})

hc = AgglomerativeClustering(n_clusters=5)
segments = hc.fit_predict(model_df)
final_df = df[['master_id', 'order_num_total_ever_online', 'order_num_total_ever_offline', 'customer_value_total_ever_offline', 'customer_value_total_ever_online', 'recency', 'tenure']]
final_df['segment'] = segments
final_df.groupby('segment').agg({'order_num_total_ever_online': ['mean', 'min', 'max'], 'order_num_total_ever_offline': ['mean', 'min', 'max'], 'customer_value_total_ever_offline': ['mean', 'min', 'max'], 'customer_value_total_ever_online': ['mean', 'min', 'max'], 'recency': ['mean', 'min', 'max'], 'tenure': ['mean', 'min', 'max', 'count']})
code
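Beyond eyeballing the per-segment means, the two labelings can be scored directly; a sketch comparing them with silhouette_score (random data stands in for the scaled model_df):

import numpy as np
from sklearn.cluster import AgglomerativeClustering, KMeans
from sklearn.metrics import silhouette_score

X = np.random.rand(300, 6)
km_labels = KMeans(n_clusters=7, random_state=42, n_init=10).fit_predict(X)
hc_labels = AgglomerativeClustering(n_clusters=5).fit_predict(X)
# Higher silhouette (max 1.0) means tighter, better-separated clusters.
print(silhouette_score(X, km_labels), silhouette_score(X, hc_labels))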
104127018/cell_23
[ "text_html_output_1.png" ]
import pandas as pd
from scipy import stats
import datetime as dt
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import dendrogram
from scipy.cluster.hierarchy import linkage
from yellowbrick.cluster import KElbowVisualizer
from sklearn.cluster import AgglomerativeClustering
import seaborn as sns
import numpy as np
import sys

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
pd.set_option('display.width', 1000)

df_ = pd.read_csv('../input/data20k/flo_data_20k.csv')
df = df_.copy()

date_columns = df.columns[df.columns.str.contains('date')]
df[date_columns] = df[date_columns].apply(pd.to_datetime)
model_df = df[['order_num_total_ever_online', 'order_num_total_ever_offline', 'customer_value_total_ever_offline', 'customer_value_total_ever_online', 'recency', 'tenure']]
model_df['order_num_total_ever_online'] = np.log1p(model_df['order_num_total_ever_online'])
model_df['order_num_total_ever_offline'] = np.log1p(model_df['order_num_total_ever_offline'])
model_df['customer_value_total_ever_offline'] = np.log1p(model_df['customer_value_total_ever_offline'])
model_df['customer_value_total_ever_online'] = np.log1p(model_df['customer_value_total_ever_online'])
model_df['recency'] = np.log1p(model_df['recency'])
model_df['tenure'] = np.log1p(model_df['tenure'])
sc = MinMaxScaler((0, 1))
model_scaling = sc.fit_transform(model_df)
model_df = pd.DataFrame(model_scaling, columns=model_df.columns)

k_means = KMeans(n_clusters=7, random_state=42).fit(model_df)
segments = k_means.labels_
segments
code
104127018/cell_29
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
from scipy import stats
import datetime as dt
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import dendrogram
from scipy.cluster.hierarchy import linkage
from yellowbrick.cluster import KElbowVisualizer
from sklearn.cluster import AgglomerativeClustering
import seaborn as sns
import numpy as np
import sys

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
pd.set_option('display.width', 1000)

df_ = pd.read_csv('../input/data20k/flo_data_20k.csv')
df = df_.copy()

date_columns = df.columns[df.columns.str.contains('date')]
df[date_columns] = df[date_columns].apply(pd.to_datetime)
model_df = df[['order_num_total_ever_online', 'order_num_total_ever_offline', 'customer_value_total_ever_offline', 'customer_value_total_ever_online', 'recency', 'tenure']]
model_df['order_num_total_ever_online'] = np.log1p(model_df['order_num_total_ever_online'])
model_df['order_num_total_ever_offline'] = np.log1p(model_df['order_num_total_ever_offline'])
model_df['customer_value_total_ever_offline'] = np.log1p(model_df['customer_value_total_ever_offline'])
model_df['customer_value_total_ever_online'] = np.log1p(model_df['customer_value_total_ever_online'])
model_df['recency'] = np.log1p(model_df['recency'])
model_df['tenure'] = np.log1p(model_df['tenure'])
sc = MinMaxScaler((0, 1))
model_scaling = sc.fit_transform(model_df)
model_df = pd.DataFrame(model_scaling, columns=model_df.columns)

hc_complete = linkage(model_df, 'complete')
plt.figure(figsize=(7, 5))
plt.title('Dendrograms')
dend = dendrogram(hc_complete, truncate_mode='lastp', p=10, show_contracted=True, leaf_font_size=10)
plt.axhline(y=1.2, color='r', linestyle='--')
plt.show()
code
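The dendrogram is drawn from the 'complete' linkage matrix; scipy can also cut that matrix straight into flat cluster labels. A sketch with fcluster on toy data:

import numpy as np
from scipy.cluster.hierarchy import fcluster, linkage

X = np.random.rand(50, 6)  # stand-in for the scaled model_df
Z = linkage(X, 'complete')
labels = fcluster(Z, t=5, criterion='maxclust')  # cut the tree into at most 5 clusters
print(np.unique(labels))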
104127018/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd
from scipy import stats
import datetime as dt
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import dendrogram
from scipy.cluster.hierarchy import linkage
from yellowbrick.cluster import KElbowVisualizer
from sklearn.cluster import AgglomerativeClustering
import seaborn as sns
import numpy as np
import sys

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
pd.set_option('display.width', 1000)

df_ = pd.read_csv('../input/data20k/flo_data_20k.csv')
df = df_.copy()

date_columns = df.columns[df.columns.str.contains('date')]
df[date_columns] = df[date_columns].apply(pd.to_datetime)
model_df = df[['order_num_total_ever_online', 'order_num_total_ever_offline', 'customer_value_total_ever_offline', 'customer_value_total_ever_online', 'recency', 'tenure']]
model_df['order_num_total_ever_online'] = np.log1p(model_df['order_num_total_ever_online'])
model_df['order_num_total_ever_offline'] = np.log1p(model_df['order_num_total_ever_offline'])
model_df['customer_value_total_ever_offline'] = np.log1p(model_df['customer_value_total_ever_offline'])
model_df['customer_value_total_ever_online'] = np.log1p(model_df['customer_value_total_ever_online'])
model_df['recency'] = np.log1p(model_df['recency'])
model_df['tenure'] = np.log1p(model_df['tenure'])
sc = MinMaxScaler((0, 1))
model_scaling = sc.fit_transform(model_df)
model_df = pd.DataFrame(model_scaling, columns=model_df.columns)

k_means = KMeans(n_clusters=7, random_state=42).fit(model_df)
segments = k_means.labels_
segments
final_df = df[['master_id', 'order_num_total_ever_online', 'order_num_total_ever_offline', 'customer_value_total_ever_offline', 'customer_value_total_ever_online', 'recency', 'tenure']]
final_df['segment'] = segments
final_df.groupby('segment').agg({'order_num_total_ever_online': ['mean', 'min', 'max'], 'order_num_total_ever_offline': ['mean', 'min', 'max'], 'customer_value_total_ever_offline': ['mean', 'min', 'max'], 'customer_value_total_ever_online': ['mean', 'min', 'max'], 'recency': ['mean', 'min', 'max'], 'tenure': ['mean', 'min', 'max', 'count']})
code
104127018/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd
from scipy import stats
import datetime as dt
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import dendrogram
from scipy.cluster.hierarchy import linkage
from yellowbrick.cluster import KElbowVisualizer
from sklearn.cluster import AgglomerativeClustering
import seaborn as sns
import numpy as np
import sys

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
pd.set_option('display.width', 1000)

df_ = pd.read_csv('../input/data20k/flo_data_20k.csv')
df = df_.copy()

date_columns = df.columns[df.columns.str.contains('date')]
df[date_columns] = df[date_columns].apply(pd.to_datetime)
model_df = df[['order_num_total_ever_online', 'order_num_total_ever_offline', 'customer_value_total_ever_offline', 'customer_value_total_ever_online', 'recency', 'tenure']]
model_df['order_num_total_ever_online'] = np.log1p(model_df['order_num_total_ever_online'])
model_df['order_num_total_ever_offline'] = np.log1p(model_df['order_num_total_ever_offline'])
model_df['customer_value_total_ever_offline'] = np.log1p(model_df['customer_value_total_ever_offline'])
model_df['customer_value_total_ever_online'] = np.log1p(model_df['customer_value_total_ever_online'])
model_df['recency'] = np.log1p(model_df['recency'])
model_df['tenure'] = np.log1p(model_df['tenure'])
sc = MinMaxScaler((0, 1))
model_scaling = sc.fit_transform(model_df)
model_df = pd.DataFrame(model_scaling, columns=model_df.columns)
model_df.head()
code
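Because the features are log1p-transformed before min-max scaling, mapping a scaled row back to original units takes two inverse steps in reverse order. A self-contained sketch:

import numpy as np
from sklearn.preprocessing import MinMaxScaler

raw = np.array([[1.0, 10.0], [5.0, 200.0]])  # toy stand-in features
sc = MinMaxScaler((0, 1))
scaled = sc.fit_transform(np.log1p(raw))
restored = np.expm1(sc.inverse_transform(scaled))  # expm1 undoes log1p
print(np.allclose(restored, raw))  # True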
104127018/cell_7
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
from scipy import stats
import datetime as dt
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import dendrogram
from scipy.cluster.hierarchy import linkage
from yellowbrick.cluster import KElbowVisualizer
from sklearn.cluster import AgglomerativeClustering
import seaborn as sns
import numpy as np
import sys

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
pd.set_option('display.width', 1000)

df_ = pd.read_csv('../input/data20k/flo_data_20k.csv')
df = df_.copy()
df.head()
code
104127018/cell_32
[ "text_html_output_1.png" ]
import pandas as pd
from scipy import stats
import datetime as dt
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import dendrogram
from scipy.cluster.hierarchy import linkage
from yellowbrick.cluster import KElbowVisualizer
from sklearn.cluster import AgglomerativeClustering
import seaborn as sns
import numpy as np
import sys

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
pd.set_option('display.width', 1000)

df_ = pd.read_csv('../input/data20k/flo_data_20k.csv')
df = df_.copy()

date_columns = df.columns[df.columns.str.contains('date')]
df[date_columns] = df[date_columns].apply(pd.to_datetime)
model_df = df[['order_num_total_ever_online', 'order_num_total_ever_offline', 'customer_value_total_ever_offline', 'customer_value_total_ever_online', 'recency', 'tenure']]
model_df['order_num_total_ever_online'] = np.log1p(model_df['order_num_total_ever_online'])
model_df['order_num_total_ever_offline'] = np.log1p(model_df['order_num_total_ever_offline'])
model_df['customer_value_total_ever_offline'] = np.log1p(model_df['customer_value_total_ever_offline'])
model_df['customer_value_total_ever_online'] = np.log1p(model_df['customer_value_total_ever_online'])
model_df['recency'] = np.log1p(model_df['recency'])
model_df['tenure'] = np.log1p(model_df['tenure'])
sc = MinMaxScaler((0, 1))
model_scaling = sc.fit_transform(model_df)
model_df = pd.DataFrame(model_scaling, columns=model_df.columns)

k_means = KMeans(n_clusters=7, random_state=42).fit(model_df)
segments = k_means.labels_
segments
final_df = df[['master_id', 'order_num_total_ever_online', 'order_num_total_ever_offline', 'customer_value_total_ever_offline', 'customer_value_total_ever_online', 'recency', 'tenure']]
final_df['segment'] = segments
final_df.groupby('segment').agg({'order_num_total_ever_online': ['mean', 'min', 'max'], 'order_num_total_ever_offline': ['mean', 'min', 'max'], 'customer_value_total_ever_offline': ['mean', 'min', 'max'], 'customer_value_total_ever_online': ['mean', 'min', 'max'], 'recency': ['mean', 'min', 'max'], 'tenure': ['mean', 'min', 'max', 'count']})

hc = AgglomerativeClustering(n_clusters=5)
segments = hc.fit_predict(model_df)
final_df = df[['master_id', 'order_num_total_ever_online', 'order_num_total_ever_offline', 'customer_value_total_ever_offline', 'customer_value_total_ever_online', 'recency', 'tenure']]
final_df['segment'] = segments
final_df.head()
code
104127018/cell_16
[ "text_plain_output_1.png" ]
"""#SKEWNESS def check_skew(df_skew, column): skew = stats.skew(df_skew[column]) skewtest = stats.skewtest(df_skew[column]) plt.title('Distribution of ' + column) sns.histplot(df_skew[column],color = "g") print("{}'s: Skew: {}, : {}".format(column, skew, skewtest)) return plt.figure(figsize=(9, 9)) plt.subplot(6, 1, 1) check_skew(model_df,'order_num_total_ever_online') plt.subplot(6, 1, 2) check_skew(model_df,'order_num_total_ever_offline') plt.subplot(6, 1, 3) check_skew(model_df,'customer_value_total_ever_offline') plt.subplot(6, 1, 4) check_skew(model_df,'customer_value_total_ever_online') plt.subplot(6, 1, 5) check_skew(model_df,'recency') plt.subplot(6, 1, 6) check_skew(model_df,'tenure') plt.tight_layout() plt.savefig('before_transform.png', format='png', dpi=1000) plt.show() """
code
104127018/cell_17
[ "text_html_output_1.png" ]
import pandas as pd
from scipy import stats
import datetime as dt
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import dendrogram
from scipy.cluster.hierarchy import linkage
from yellowbrick.cluster import KElbowVisualizer
from sklearn.cluster import AgglomerativeClustering
import seaborn as sns
import numpy as np
import sys

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
pd.set_option('display.width', 1000)

df_ = pd.read_csv('../input/data20k/flo_data_20k.csv')
df = df_.copy()

date_columns = df.columns[df.columns.str.contains('date')]
df[date_columns] = df[date_columns].apply(pd.to_datetime)
model_df = df[['order_num_total_ever_online', 'order_num_total_ever_offline', 'customer_value_total_ever_offline', 'customer_value_total_ever_online', 'recency', 'tenure']]
model_df['order_num_total_ever_online'] = np.log1p(model_df['order_num_total_ever_online'])
model_df['order_num_total_ever_offline'] = np.log1p(model_df['order_num_total_ever_offline'])
model_df['customer_value_total_ever_offline'] = np.log1p(model_df['customer_value_total_ever_offline'])
model_df['customer_value_total_ever_online'] = np.log1p(model_df['customer_value_total_ever_online'])
model_df['recency'] = np.log1p(model_df['recency'])
model_df['tenure'] = np.log1p(model_df['tenure'])
model_df.head()
code
104127018/cell_24
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
from scipy import stats
import datetime as dt
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import dendrogram
from scipy.cluster.hierarchy import linkage
from yellowbrick.cluster import KElbowVisualizer
from sklearn.cluster import AgglomerativeClustering
import seaborn as sns
import numpy as np
import sys

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
pd.set_option('display.width', 1000)

df_ = pd.read_csv('../input/data20k/flo_data_20k.csv')
df = df_.copy()

date_columns = df.columns[df.columns.str.contains('date')]
df[date_columns] = df[date_columns].apply(pd.to_datetime)
model_df = df[['order_num_total_ever_online', 'order_num_total_ever_offline', 'customer_value_total_ever_offline', 'customer_value_total_ever_online', 'recency', 'tenure']]
model_df['order_num_total_ever_online'] = np.log1p(model_df['order_num_total_ever_online'])
model_df['order_num_total_ever_offline'] = np.log1p(model_df['order_num_total_ever_offline'])
model_df['customer_value_total_ever_offline'] = np.log1p(model_df['customer_value_total_ever_offline'])
model_df['customer_value_total_ever_online'] = np.log1p(model_df['customer_value_total_ever_online'])
model_df['recency'] = np.log1p(model_df['recency'])
model_df['tenure'] = np.log1p(model_df['tenure'])
sc = MinMaxScaler((0, 1))
model_scaling = sc.fit_transform(model_df)
model_df = pd.DataFrame(model_scaling, columns=model_df.columns)

k_means = KMeans(n_clusters=7, random_state=42).fit(model_df)
segments = k_means.labels_
segments
final_df = df[['master_id', 'order_num_total_ever_online', 'order_num_total_ever_offline', 'customer_value_total_ever_offline', 'customer_value_total_ever_online', 'recency', 'tenure']]
final_df['segment'] = segments
final_df.head()
code
104127018/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
from scipy import stats
import datetime as dt
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import dendrogram
from scipy.cluster.hierarchy import linkage
from yellowbrick.cluster import KElbowVisualizer
from sklearn.cluster import AgglomerativeClustering
import seaborn as sns
import numpy as np
import sys

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
pd.set_option('display.width', 1000)

df_ = pd.read_csv('../input/data20k/flo_data_20k.csv')
df = df_.copy()

date_columns = df.columns[df.columns.str.contains('date')]
df[date_columns] = df[date_columns].apply(pd.to_datetime)
model_df = df[['order_num_total_ever_online', 'order_num_total_ever_offline', 'customer_value_total_ever_offline', 'customer_value_total_ever_online', 'recency', 'tenure']]
model_df.head()
code
104127018/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
from scipy import stats
import datetime as dt
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
from scipy.cluster.hierarchy import dendrogram
from scipy.cluster.hierarchy import linkage
from yellowbrick.cluster import KElbowVisualizer
from sklearn.cluster import AgglomerativeClustering
import seaborn as sns
import numpy as np
import sys

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
pd.set_option('display.width', 1000)

df_ = pd.read_csv('../input/data20k/flo_data_20k.csv')
df = df_.copy()

date_columns = df.columns[df.columns.str.contains('date')]
df[date_columns] = df[date_columns].apply(pd.to_datetime)
df.info()
code
104114369/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
pd.read_csv('data/imdb/train.csv').sample(5)
pd.read_csv('data/imdb/valid.csv').sample(5)
code
104114369/cell_2
[ "text_html_output_1.png" ]
!pip install 'lightning-flash[text]' -q
code
104114369/cell_11
[ "text_plain_output_1.png" ]
from flash.text import TextClassificationData, TextClassifier
import flash
import torch

datamodule = TextClassificationData.from_csv('review', 'sentiment', train_file='data/imdb/train.csv', val_file='data/imdb/valid.csv', batch_size=4)
model = TextClassifier(backbone='gchhablani/bert-base-cased-finetuned-sst2', labels=datamodule.labels)
trainer = flash.Trainer(max_epochs=2, gpus=torch.cuda.device_count())
trainer.finetune(model, datamodule=datamodule, strategy='freeze')
code
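strategy='freeze' trains only the new classification head; lightning-flash also accepts staged strategies. A sketch assuming the same data files and the tuple form ('freeze_unfreeze', epoch), which keeps the backbone frozen up to the given epoch and then trains end-to-end (the smaller backbone name is a hypothetical stand-in):

import flash
import torch
from flash.text import TextClassificationData, TextClassifier

datamodule = TextClassificationData.from_csv('review', 'sentiment', train_file='data/imdb/train.csv', val_file='data/imdb/valid.csv', batch_size=4)
model = TextClassifier(backbone='prajjwal1/bert-tiny', labels=datamodule.labels)  # hypothetical smaller backbone
trainer = flash.Trainer(max_epochs=2, gpus=torch.cuda.device_count())
# Backbone stays frozen for epoch 0, then unfreezes from epoch 1 onward.
trainer.finetune(model, datamodule=datamodule, strategy=('freeze_unfreeze', 1))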
104114369/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
pd.read_csv('data/imdb/train.csv').sample(5)
code
104114369/cell_3
[ "text_html_output_1.png" ]
!pip install 'lightning-flash[serve]' -q
code
104114369/cell_17
[ "text_plain_output_4.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from flash.text import TextClassificationData, TextClassifier
import flash
import torch

datamodule = TextClassificationData.from_csv('review', 'sentiment', train_file='data/imdb/train.csv', val_file='data/imdb/valid.csv', batch_size=4)
model = TextClassifier(backbone='gchhablani/bert-base-cased-finetuned-sst2', labels=datamodule.labels)
trainer = flash.Trainer(max_epochs=2, gpus=torch.cuda.device_count())
trainer.finetune(model, datamodule=datamodule, strategy='freeze')
datamodule = TextClassificationData.from_lists(predict_data=["Joker's performance was outstanding!!!", 'This is the best movie ever!!!', 'This movie was terrible what a waste of time'], batch_size=10)
# 'text_classification_model.pt' is written by a save-checkpoint cell that is not part of this record.
reloaded_model = TextClassifier.load_from_checkpoint('text_classification_model.pt')
flash.Trainer().predict(reloaded_model, datamodule=datamodule, output='labels')
code
104114369/cell_10
[ "text_plain_output_1.png" ]
from flash.text import TextClassificationData, TextClassifier

datamodule = TextClassificationData.from_csv('review', 'sentiment', train_file='data/imdb/train.csv', val_file='data/imdb/valid.csv', batch_size=4)
model = TextClassifier(backbone='gchhablani/bert-base-cased-finetuned-sst2', labels=datamodule.labels)
code
105212784/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
place = input('Enter the place you want to visit:')
budget = int(input('Enter your budget:'))
if place == 'sea':
    print('we can go')
    if budget >= 3000:
        print('hurry up, tickets are ready')
code
48162853/cell_13
[ "text_plain_output_1.png" ]
from datetime import datetime
from sklearn.linear_model import LinearRegression, Lasso
import matplotlib.pyplot as plt
import pandas as pd
import random
import seaborn as sns

def evaluate_preds(train_true_values, train_pred_values, test_true_values, test_pred_values):
    pass

TRAIN_DATASET_PATH = '/kaggle/input/real-estate-price-prediction-moscow/train.csv'
TEST_DATASET_PATH = '/kaggle/input/real-estate-price-prediction-moscow/test.csv'

class DataPreprocessing:
    """Preparation of the raw data"""

    def __init__(self):
        """Class parameters"""
        self.medians = None
        self.kitchen_square_quantile = None

    def fit(self, X):
        """Save statistics"""
        self.medians = X.median()
        self.kitchen_square_quantile = X['KitchenSquare'].quantile(0.975)

    def transform(self, X):
        """Transform the data"""
        X['Rooms_outlier'] = 0
        X.loc[(X['Rooms'] == 0) | (X['Rooms'] >= 6), 'Rooms_outlier'] = 1
        X.loc[X['Rooms'] == 0, 'Rooms'] = 1
        X.loc[X['Rooms'] >= 6, 'Rooms'] = self.medians['Rooms']
        condition = X['KitchenSquare'].isna() | (X['KitchenSquare'] > 16)
        X.loc[condition, 'KitchenSquare'] = self.medians['KitchenSquare']
        X.loc[X['KitchenSquare'] < 6, 'KitchenSquare'] = 6
        X['HouseFloor_outlier'] = 0
        X.loc[X['HouseFloor'] == 0, 'HouseFloor_outlier'] = 1
        X.loc[X['Floor'] > X['HouseFloor'], 'HouseFloor_outlier'] = 1
        X.loc[X['HouseFloor'] == 0, 'HouseFloor'] = self.medians['HouseFloor']
        floor_outliers = X.loc[X['Floor'] > X['HouseFloor']].index
        X.loc[floor_outliers, 'Floor'] = X.loc[floor_outliers, 'HouseFloor'].apply(lambda x: random.randint(1, x))
        current_year = datetime.now().year
        X['HouseYear_outlier'] = 0
        X.loc[X['HouseYear'] > current_year, 'HouseYear_outlier'] = 1
        X.loc[X['HouseYear'] > current_year, 'HouseYear'] = current_year
        if 'Healthcare_1' in X.columns:
            X.drop('Healthcare_1', axis=1, inplace=True)
        X['LifeSquare_nan'] = X['LifeSquare'].isna() * 1
        condition = X['LifeSquare'].isna() & ~X['Square'].isna() & ~X['KitchenSquare'].isna()
        X.loc[condition, 'LifeSquare'] = X.loc[condition, 'Square'] - X.loc[condition, 'KitchenSquare'] - 10
        X.fillna(self.medians, inplace=True)
        X['TestSquare'] = X['Square'] - X['LifeSquare'] - X['KitchenSquare']
        condition1 = (X['TestSquare'] < 0) | ((X['LifeSquare'] < 10) & (X['LifeSquare'] > 0))
        X.loc[condition1, 'LifeSquare'] = X.loc[condition1, 'Square'] - X.loc[condition1, 'KitchenSquare'] - 10
        if 'TestSquare' in X.columns:
            X.drop('TestSquare', axis=1, inplace=True)
        return X

class FeatureGenetator:
    """Generation of new features"""

    def __init__(self):
        self.floor_max = None
        self.agg_table_rooms_district = None
        self.agg_table_rooms = None
        self.add_dummies_columns = None

    def fit(self, X, y=None):
        X = X.copy()
        df = X[(X['LifeSquare'] > 0) & (X['LifeSquare'] < 275)]
        df['min_square'] = df['Square']
        df['max_square'] = df['Square']
        df['avg_square'] = df['Square']
        df['min_price'] = df['Price']
        df['max_price'] = df['Price']
        df['avg_price'] = df['Price']
        if y is not None:
            self.agg_table_rooms_district = df.groupby(['DistrictId', 'Rooms'], as_index=False).agg({'Price': 'sum', 'Square': 'sum'}).rename(columns={'Price': 'SumPrice', 'Square': 'SumSquare'})
            self.agg_table_rooms = df.groupby(['Rooms'], as_index=False).agg({'min_square': 'min', 'min_price': 'min', 'max_square': 'max', 'max_price': 'max', 'avg_square': 'mean', 'avg_price': 'mean'})
        if y is not None:
            self.floor_max = df['Floor'].max()
            self.house_year_max = df['HouseYear'].max()
        df = self.floor_to_cat(df)
        df = self.year_to_cat(df)

    def transform(self, X):
        X = self.floor_to_cat(X)
        X = self.year_to_cat(X)
        if self.agg_table_rooms_district is not None:
            X = X.merge(self.agg_table_rooms_district, on=['DistrictId', 'Rooms'], how='left')
        if self.agg_table_rooms is not None:
            X = X.merge(self.agg_table_rooms, on=['Rooms'], how='left')
        if 'Price' in X.columns:
            X['PriceCoeff'] = (X['Price'] - X['min_price']) / (X['max_price'] - X['min_price'])
        else:
            X['PriceCoeff'] = random.random()
        condition = (X['LifeSquare'] < 0) | (X['LifeSquare'] > 275)
        X.loc[condition, 'Square'] = X.loc[condition, 'min_square'] + (X.loc[condition, 'max_square'] - X.loc[condition, 'min_square']) * X.loc[condition, 'PriceCoeff']
        X.loc[condition, 'LifeSquare'] = X.loc[condition, 'Square'] - X.loc[condition, 'KitchenSquare'] - 10
        X['base_price'] = X['SumPrice'] / X['SumSquare']
        X['base_price'].fillna(1000, inplace=True)
        X['SquareCoeff'] = (X['Square'] - X['min_square']) / (X['max_square'] - X['min_square'])
        X = self.SquareCoeff_to_cat(X)
        X = self.KitchenSquare_to_cat(X)
        X = self.HouseFloor_to_cat(X)
        X = self.Social_1_to_cat(X)
        X = self.Social_2_to_cat(X)
        X = self.Social_3_to_cat(X)
        columns_old = set(X.columns.tolist())
        X = pd.get_dummies(X.copy(), columns=['year_cat', 'floor_cat', 'Ecology_2', 'Ecology_3', 'Shops_2', 'SquareCoeff_cat', 'KitchenSquare_cat', 'house_floor_cat', 'Social_1_cat', 'Social_2_cat', 'Social_3_cat'])
        X['Ecology_1'] = X['Ecology_1'] * X['base_price'] * X['Square']
        columns_new = set(X.columns.tolist())
        self.add_dummies_columns = columns_new - columns_old
        for col_name in list(self.add_dummies_columns):
            X[col_name] = X[col_name] * X['base_price'] * X['Square']
        return X

    def Social_1_to_cat(self, X):
        bins = [0, 10, 25, 50, X['Social_1'].max()]
        X['Social_1_cat'] = pd.cut(X['Social_1'], bins=bins, labels=False)
        X['Social_1_cat'].fillna(0, inplace=True)
        return X

    def Social_2_to_cat(self, X):
        bins = [0, 500, 1000, 5000, 10000, X['Social_2'].max()]
        X['Social_2_cat'] = pd.cut(X['Social_2'], bins=bins, labels=False)
        X['Social_2_cat'].fillna(0, inplace=True)
        return X

    def Social_3_to_cat(self, X):
        bins = [0, 3, 20, 60, 100, X['Social_3'].max()]
        X['Social_3_cat'] = pd.cut(X['Social_3'], bins=bins, labels=False)
        X['Social_3_cat'].fillna(0, inplace=True)
        return X

    def KitchenSquare_to_cat(self, X):
        bins = [0, 9, 12, X['KitchenSquare'].max()]
        X['KitchenSquare_cat'] = pd.cut(X['KitchenSquare'], bins=bins, labels=False)
        X['KitchenSquare_cat'].fillna(0, inplace=True)
        return X

    def SquareCoeff_to_cat(self, X):
        bins = [0, 2, 4, 6, 8, 10]
        X['SquareCoeff_cat'] = pd.cut(X['SquareCoeff'] * 10, bins=bins, labels=False)
        X['SquareCoeff_cat'].fillna(0, inplace=True)
        return X

    def shops_to_cat(self, X):
        bins = [0, 2, 6, 16, self.floor_max]
        X['floor_cat'] = pd.cut(X['Shops_1'], bins=bins, labels=False)
        X['floor_cat'].fillna(-1, inplace=True)
        return X

    def HouseFloor_to_cat(self, X):
        bins = [0, 5, 9, 24, 35, X['HouseFloor'].max()]
        X['house_floor_cat'] = pd.cut(X['HouseFloor'], bins=bins, labels=False)
        X['house_floor_cat'].fillna(-1, inplace=True)
        return X

    def floor_to_cat(self, X):
        bins = [0, 3, 5, 9, 15, X['Floor'].max()]
        X['floor_cat'] = pd.cut(X['Floor'], bins=bins, labels=False)
        X['floor_cat'].fillna(-1, inplace=True)
        return X

    def year_to_cat(self, X):
        bins = [0, 1925, 1941, 1945, 1955, 1965, 1985, 1995, 2005, self.house_year_max]
        X['year_cat'] = pd.cut(X['HouseYear'], bins=bins, labels=False)
        X['year_cat'].fillna(-1, inplace=True)
        return X

train_df = pd.read_csv(TRAIN_DATASET_PATH)
test_df = pd.read_csv(TEST_DATASET_PATH)
preprocessor = DataPreprocessing()
preprocessor.fit(train_df)
train_df = preprocessor.transform(train_df)
test_df = preprocessor.transform(test_df)
features_gen = FeatureGenetator()
features_gen.fit(train_df, train_df['Price'])
train_df = features_gen.transform(train_df)
add_dummies_columns_train = features_gen.add_dummies_columns
test_df = features_gen.transform(test_df)
add_dummies_columns_test = features_gen.add_dummies_columns
feature_names_list = list(add_dummies_columns_train) + ['Ecology_1']
target_name = 'Price'
X = train_df[feature_names_list]
y = train_df[target_name]
test_df = test_df[feature_names_list]
model = Lasso(0.05)
model.fit(X_train, y_train)  # X_train/y_train come from a train_test_split cell not captured in this record
y_train_preds = model.predict(X_train)
y_test_preds = model.predict(X_test)
evaluate_preds(y_train, y_train_preds, y_test, y_test_preds)
predictions = model.predict(test_df)
predictions
code
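DataPreprocessing and FeatureGenetator hand-roll sklearn's fit/transform contract; wiring such steps into a Pipeline keeps train/test handling consistent. A minimal sketch of the same pattern on a toy frame:

import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.linear_model import Lasso
from sklearn.pipeline import Pipeline

class MedianImputer(BaseEstimator, TransformerMixin):
    # Tiny stand-in for DataPreprocessing: remember medians on fit, fill on transform.
    def fit(self, X, y=None):
        self.medians_ = X.median()
        return self
    def transform(self, X):
        return X.fillna(self.medians_)

X = pd.DataFrame({'Square': [40.0, None, 75.0], 'Rooms': [1, 2, 3]})
y = pd.Series([150000.0, 210000.0, 320000.0])
pipe = Pipeline([('prep', MedianImputer()), ('model', Lasso(0.05))])
pipe.fit(X, y)
print(pipe.predict(X))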
48162853/cell_9
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression, Lasso

model = Lasso(0.05)
model.fit(X_train, y_train)  # X_train/y_train are defined in earlier cells of the source notebook
code
48162853/cell_11
[ "text_plain_output_1.png", "image_output_1.png" ]
from datetime import datetime
from sklearn.linear_model import LinearRegression, Lasso
from sklearn.model_selection import KFold, GridSearchCV
from sklearn.model_selection import train_test_split, cross_val_score
import matplotlib.pyplot as plt
import pandas as pd
import random
import seaborn as sns

def evaluate_preds(train_true_values, train_pred_values, test_true_values, test_pred_values):
    pass

TRAIN_DATASET_PATH = '/kaggle/input/real-estate-price-prediction-moscow/train.csv'
TEST_DATASET_PATH = '/kaggle/input/real-estate-price-prediction-moscow/test.csv'

class DataPreprocessing:
    """Preparation of the raw data"""

    def __init__(self):
        """Class parameters"""
        self.medians = None
        self.kitchen_square_quantile = None

    def fit(self, X):
        """Save statistics"""
        self.medians = X.median()
        self.kitchen_square_quantile = X['KitchenSquare'].quantile(0.975)

    def transform(self, X):
        """Transform the data"""
        X['Rooms_outlier'] = 0
        X.loc[(X['Rooms'] == 0) | (X['Rooms'] >= 6), 'Rooms_outlier'] = 1
        X.loc[X['Rooms'] == 0, 'Rooms'] = 1
        X.loc[X['Rooms'] >= 6, 'Rooms'] = self.medians['Rooms']
        condition = X['KitchenSquare'].isna() | (X['KitchenSquare'] > 16)
        X.loc[condition, 'KitchenSquare'] = self.medians['KitchenSquare']
        X.loc[X['KitchenSquare'] < 6, 'KitchenSquare'] = 6
        X['HouseFloor_outlier'] = 0
        X.loc[X['HouseFloor'] == 0, 'HouseFloor_outlier'] = 1
        X.loc[X['Floor'] > X['HouseFloor'], 'HouseFloor_outlier'] = 1
        X.loc[X['HouseFloor'] == 0, 'HouseFloor'] = self.medians['HouseFloor']
        floor_outliers = X.loc[X['Floor'] > X['HouseFloor']].index
        X.loc[floor_outliers, 'Floor'] = X.loc[floor_outliers, 'HouseFloor'].apply(lambda x: random.randint(1, x))
        current_year = datetime.now().year
        X['HouseYear_outlier'] = 0
        X.loc[X['HouseYear'] > current_year, 'HouseYear_outlier'] = 1
        X.loc[X['HouseYear'] > current_year, 'HouseYear'] = current_year
        if 'Healthcare_1' in X.columns:
            X.drop('Healthcare_1', axis=1, inplace=True)
        X['LifeSquare_nan'] = X['LifeSquare'].isna() * 1
        condition = X['LifeSquare'].isna() & ~X['Square'].isna() & ~X['KitchenSquare'].isna()
        X.loc[condition, 'LifeSquare'] = X.loc[condition, 'Square'] - X.loc[condition, 'KitchenSquare'] - 10
        X.fillna(self.medians, inplace=True)
        X['TestSquare'] = X['Square'] - X['LifeSquare'] - X['KitchenSquare']
        condition1 = (X['TestSquare'] < 0) | ((X['LifeSquare'] < 10) & (X['LifeSquare'] > 0))
        X.loc[condition1, 'LifeSquare'] = X.loc[condition1, 'Square'] - X.loc[condition1, 'KitchenSquare'] - 10
        if 'TestSquare' in X.columns:
            X.drop('TestSquare', axis=1, inplace=True)
        return X

class FeatureGenetator:
    """Generation of new features"""

    def __init__(self):
        self.floor_max = None
        self.agg_table_rooms_district = None
        self.agg_table_rooms = None
        self.add_dummies_columns = None

    def fit(self, X, y=None):
        X = X.copy()
        df = X[(X['LifeSquare'] > 0) & (X['LifeSquare'] < 275)]
        df['min_square'] = df['Square']
        df['max_square'] = df['Square']
        df['avg_square'] = df['Square']
        df['min_price'] = df['Price']
        df['max_price'] = df['Price']
        df['avg_price'] = df['Price']
        if y is not None:
            self.agg_table_rooms_district = df.groupby(['DistrictId', 'Rooms'], as_index=False).agg({'Price': 'sum', 'Square': 'sum'}).rename(columns={'Price': 'SumPrice', 'Square': 'SumSquare'})
            self.agg_table_rooms = df.groupby(['Rooms'], as_index=False).agg({'min_square': 'min', 'min_price': 'min', 'max_square': 'max', 'max_price': 'max', 'avg_square': 'mean', 'avg_price': 'mean'})
        if y is not None:
            self.floor_max = df['Floor'].max()
            self.house_year_max = df['HouseYear'].max()
        df = self.floor_to_cat(df)
        df = self.year_to_cat(df)

    def transform(self, X):
        X = self.floor_to_cat(X)
        X = self.year_to_cat(X)
        if self.agg_table_rooms_district is not None:
            X = X.merge(self.agg_table_rooms_district, on=['DistrictId', 'Rooms'], how='left')
        if self.agg_table_rooms is not None:
            X = X.merge(self.agg_table_rooms, on=['Rooms'], how='left')
        if 'Price' in X.columns:
            X['PriceCoeff'] = (X['Price'] - X['min_price']) / (X['max_price'] - X['min_price'])
        else:
            X['PriceCoeff'] = random.random()
        condition = (X['LifeSquare'] < 0) | (X['LifeSquare'] > 275)
        X.loc[condition, 'Square'] = X.loc[condition, 'min_square'] + (X.loc[condition, 'max_square'] - X.loc[condition, 'min_square']) * X.loc[condition, 'PriceCoeff']
        X.loc[condition, 'LifeSquare'] = X.loc[condition, 'Square'] - X.loc[condition, 'KitchenSquare'] - 10
        X['base_price'] = X['SumPrice'] / X['SumSquare']
        X['base_price'].fillna(1000, inplace=True)
        X['SquareCoeff'] = (X['Square'] - X['min_square']) / (X['max_square'] - X['min_square'])
        X = self.SquareCoeff_to_cat(X)
        X = self.KitchenSquare_to_cat(X)
        X = self.HouseFloor_to_cat(X)
        X = self.Social_1_to_cat(X)
        X = self.Social_2_to_cat(X)
        X = self.Social_3_to_cat(X)
        columns_old = set(X.columns.tolist())
        X = pd.get_dummies(X.copy(), columns=['year_cat', 'floor_cat', 'Ecology_2', 'Ecology_3', 'Shops_2', 'SquareCoeff_cat', 'KitchenSquare_cat', 'house_floor_cat', 'Social_1_cat', 'Social_2_cat', 'Social_3_cat'])
        X['Ecology_1'] = X['Ecology_1'] * X['base_price'] * X['Square']
        columns_new = set(X.columns.tolist())
        self.add_dummies_columns = columns_new - columns_old
        for col_name in list(self.add_dummies_columns):
            X[col_name] = X[col_name] * X['base_price'] * X['Square']
        return X

    def Social_1_to_cat(self, X):
        bins = [0, 10, 25, 50, X['Social_1'].max()]
        X['Social_1_cat'] = pd.cut(X['Social_1'], bins=bins, labels=False)
        X['Social_1_cat'].fillna(0, inplace=True)
        return X

    def Social_2_to_cat(self, X):
        bins = [0, 500, 1000, 5000, 10000, X['Social_2'].max()]
        X['Social_2_cat'] = pd.cut(X['Social_2'], bins=bins, labels=False)
        X['Social_2_cat'].fillna(0, inplace=True)
        return X

    def Social_3_to_cat(self, X):
        bins = [0, 3, 20, 60, 100, X['Social_3'].max()]
        X['Social_3_cat'] = pd.cut(X['Social_3'], bins=bins, labels=False)
        X['Social_3_cat'].fillna(0, inplace=True)
        return X

    def KitchenSquare_to_cat(self, X):
        bins = [0, 9, 12, X['KitchenSquare'].max()]
        X['KitchenSquare_cat'] = pd.cut(X['KitchenSquare'], bins=bins, labels=False)
        X['KitchenSquare_cat'].fillna(0, inplace=True)
        return X

    def SquareCoeff_to_cat(self, X):
        bins = [0, 2, 4, 6, 8, 10]
        X['SquareCoeff_cat'] = pd.cut(X['SquareCoeff'] * 10, bins=bins, labels=False)
        X['SquareCoeff_cat'].fillna(0, inplace=True)
        return X

    def shops_to_cat(self, X):
        bins = [0, 2, 6, 16, self.floor_max]
        X['floor_cat'] = pd.cut(X['Shops_1'], bins=bins, labels=False)
        X['floor_cat'].fillna(-1, inplace=True)
        return X

    def HouseFloor_to_cat(self, X):
        bins = [0, 5, 9, 24, 35, X['HouseFloor'].max()]
        X['house_floor_cat'] = pd.cut(X['HouseFloor'], bins=bins, labels=False)
        X['house_floor_cat'].fillna(-1, inplace=True)
        return X

    def floor_to_cat(self, X):
        bins = [0, 3, 5, 9, 15, X['Floor'].max()]
        X['floor_cat'] = pd.cut(X['Floor'], bins=bins, labels=False)
        X['floor_cat'].fillna(-1, inplace=True)
        return X

    def year_to_cat(self, X):
        bins = [0, 1925, 1941, 1945, 1955, 1965, 1985, 1995, 2005, self.house_year_max]
        X['year_cat'] = pd.cut(X['HouseYear'], bins=bins, labels=False)
        X['year_cat'].fillna(-1, inplace=True)
        return X

train_df = pd.read_csv(TRAIN_DATASET_PATH)
test_df = pd.read_csv(TEST_DATASET_PATH)
preprocessor = DataPreprocessing()
preprocessor.fit(train_df)
train_df = preprocessor.transform(train_df)
test_df = preprocessor.transform(test_df)
features_gen = FeatureGenetator()
features_gen.fit(train_df, train_df['Price'])
train_df = features_gen.transform(train_df)
add_dummies_columns_train = features_gen.add_dummies_columns
test_df = features_gen.transform(test_df)
add_dummies_columns_test = features_gen.add_dummies_columns
feature_names_list = list(add_dummies_columns_train) + ['Ecology_1']
target_name = 'Price'
X = train_df[feature_names_list]
y = train_df[target_name]
test_df = test_df[feature_names_list]
model = Lasso(0.05)
model.fit(X_train, y_train)  # X_train/y_train come from a train_test_split cell not captured in this record
y_train_preds = model.predict(X_train)
y_test_preds = model.predict(X_test)
evaluate_preds(y_train, y_train_preds, y_test, y_test_preds)
cv_score = cross_val_score(model, X, y, scoring='r2', cv=KFold(n_splits=3, shuffle=True, random_state=34))
cv_score
code
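GridSearchCV is imported in this record but never used; a sketch of how it would tune the Lasso alpha with the same KFold splitter (synthetic data, hypothetical alpha grid):

import numpy as np
from sklearn.linear_model import Lasso
from sklearn.model_selection import GridSearchCV, KFold

X = np.random.rand(120, 5)
y = X @ np.array([3.0, 0.0, 1.5, 0.0, 2.0]) + np.random.randn(120) * 0.1
grid = GridSearchCV(Lasso(), param_grid={'alpha': [0.01, 0.05, 0.1, 1.0]}, scoring='r2', cv=KFold(n_splits=3, shuffle=True, random_state=34))
grid.fit(X, y)
print(grid.best_params_, round(grid.best_score_, 3))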
48162853/cell_10
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression, Lasso
import matplotlib.pyplot as plt
import seaborn as sns

def evaluate_preds(train_true_values, train_pred_values, test_true_values, test_pred_values):
    pass

TRAIN_DATASET_PATH = '/kaggle/input/real-estate-price-prediction-moscow/train.csv'
TEST_DATASET_PATH = '/kaggle/input/real-estate-price-prediction-moscow/test.csv'

model = Lasso(0.05)
model.fit(X_train, y_train)  # X_train/y_train are defined in earlier cells of the source notebook
y_train_preds = model.predict(X_train)
y_test_preds = model.predict(X_test)
evaluate_preds(y_train, y_train_preds, y_test, y_test_preds)
code
48162853/cell_12
[ "text_plain_output_1.png" ]
from datetime import datetime
from sklearn.linear_model import LinearRegression, Lasso
from sklearn.model_selection import KFold, GridSearchCV
from sklearn.model_selection import train_test_split, cross_val_score
import matplotlib.pyplot as plt
import pandas as pd
import random
import seaborn as sns

def evaluate_preds(train_true_values, train_pred_values, test_true_values, test_pred_values):
    pass

TRAIN_DATASET_PATH = '/kaggle/input/real-estate-price-prediction-moscow/train.csv'
TEST_DATASET_PATH = '/kaggle/input/real-estate-price-prediction-moscow/test.csv'

class DataPreprocessing:
    """Preparation of the raw data"""

    def __init__(self):
        """Class parameters"""
        self.medians = None
        self.kitchen_square_quantile = None

    def fit(self, X):
        """Save statistics"""
        self.medians = X.median()
        self.kitchen_square_quantile = X['KitchenSquare'].quantile(0.975)

    def transform(self, X):
        """Transform the data"""
        X['Rooms_outlier'] = 0
        X.loc[(X['Rooms'] == 0) | (X['Rooms'] >= 6), 'Rooms_outlier'] = 1
        X.loc[X['Rooms'] == 0, 'Rooms'] = 1
        X.loc[X['Rooms'] >= 6, 'Rooms'] = self.medians['Rooms']
        condition = X['KitchenSquare'].isna() | (X['KitchenSquare'] > 16)
        X.loc[condition, 'KitchenSquare'] = self.medians['KitchenSquare']
        X.loc[X['KitchenSquare'] < 6, 'KitchenSquare'] = 6
        X['HouseFloor_outlier'] = 0
        X.loc[X['HouseFloor'] == 0, 'HouseFloor_outlier'] = 1
        X.loc[X['Floor'] > X['HouseFloor'], 'HouseFloor_outlier'] = 1
        X.loc[X['HouseFloor'] == 0, 'HouseFloor'] = self.medians['HouseFloor']
        floor_outliers = X.loc[X['Floor'] > X['HouseFloor']].index
        X.loc[floor_outliers, 'Floor'] = X.loc[floor_outliers, 'HouseFloor'].apply(lambda x: random.randint(1, x))
        current_year = datetime.now().year
        X['HouseYear_outlier'] = 0
        X.loc[X['HouseYear'] > current_year, 'HouseYear_outlier'] = 1
        X.loc[X['HouseYear'] > current_year, 'HouseYear'] = current_year
        if 'Healthcare_1' in X.columns:
            X.drop('Healthcare_1', axis=1, inplace=True)
        X['LifeSquare_nan'] = X['LifeSquare'].isna() * 1
        condition = X['LifeSquare'].isna() & ~X['Square'].isna() & ~X['KitchenSquare'].isna()
        X.loc[condition, 'LifeSquare'] = X.loc[condition, 'Square'] - X.loc[condition, 'KitchenSquare'] - 10
        X.fillna(self.medians, inplace=True)
        X['TestSquare'] = X['Square'] - X['LifeSquare'] - X['KitchenSquare']
        condition1 = (X['TestSquare'] < 0) | ((X['LifeSquare'] < 10) & (X['LifeSquare'] > 0))
        X.loc[condition1, 'LifeSquare'] = X.loc[condition1, 'Square'] - X.loc[condition1, 'KitchenSquare'] - 10
        if 'TestSquare' in X.columns:
            X.drop('TestSquare', axis=1, inplace=True)
        return X

class FeatureGenetator:
    """Generation of new features"""

    def __init__(self):
        self.floor_max = None
        self.agg_table_rooms_district = None
        self.agg_table_rooms = None
        self.add_dummies_columns = None

    def fit(self, X, y=None):
        X = X.copy()
        df = X[(X['LifeSquare'] > 0) & (X['LifeSquare'] < 275)]
        df['min_square'] = df['Square']
        df['max_square'] = df['Square']
        df['avg_square'] = df['Square']
        df['min_price'] = df['Price']
        df['max_price'] = df['Price']
        df['avg_price'] = df['Price']
        if y is not None:
            self.agg_table_rooms_district = df.groupby(['DistrictId', 'Rooms'], as_index=False).agg({'Price': 'sum', 'Square': 'sum'}).rename(columns={'Price': 'SumPrice', 'Square': 'SumSquare'})
            self.agg_table_rooms = df.groupby(['Rooms'], as_index=False).agg({'min_square': 'min', 'min_price': 'min', 'max_square': 'max', 'max_price': 'max', 'avg_square': 'mean', 'avg_price': 'mean'})
        if y is not None:
            self.floor_max = df['Floor'].max()
            self.house_year_max = df['HouseYear'].max()
        df = self.floor_to_cat(df)
        df = self.year_to_cat(df)

    def transform(self, X):
        X = self.floor_to_cat(X)
        X = self.year_to_cat(X)
        if self.agg_table_rooms_district is not None:
            X = X.merge(self.agg_table_rooms_district, on=['DistrictId', 'Rooms'], how='left')
        if self.agg_table_rooms is not None:
            X = X.merge(self.agg_table_rooms, on=['Rooms'], how='left')
        if 'Price' in X.columns:
            X['PriceCoeff'] = (X['Price'] - X['min_price']) / (X['max_price'] - X['min_price'])
        else:
            X['PriceCoeff'] = random.random()
        condition = (X['LifeSquare'] < 0) | (X['LifeSquare'] > 275)
        X.loc[condition, 'Square'] = X.loc[condition, 'min_square'] + (X.loc[condition, 'max_square'] - X.loc[condition, 'min_square']) * X.loc[condition, 'PriceCoeff']
        X.loc[condition, 'LifeSquare'] = X.loc[condition, 'Square'] - X.loc[condition, 'KitchenSquare'] - 10
        X['base_price'] = X['SumPrice'] / X['SumSquare']
        X['base_price'].fillna(1000, inplace=True)
        X['SquareCoeff'] = (X['Square'] - X['min_square']) / (X['max_square'] - X['min_square'])
        X = self.SquareCoeff_to_cat(X)
        X = self.KitchenSquare_to_cat(X)
        X = self.HouseFloor_to_cat(X)
        X = self.Social_1_to_cat(X)
        X = self.Social_2_to_cat(X)
        X = self.Social_3_to_cat(X)
        columns_old = set(X.columns.tolist())
        X = pd.get_dummies(X.copy(), columns=['year_cat', 'floor_cat', 'Ecology_2', 'Ecology_3', 'Shops_2', 'SquareCoeff_cat', 'KitchenSquare_cat', 'house_floor_cat', 'Social_1_cat', 'Social_2_cat', 'Social_3_cat'])
        X['Ecology_1'] = X['Ecology_1'] * X['base_price'] * X['Square']
        columns_new = set(X.columns.tolist())
        self.add_dummies_columns = columns_new - columns_old
        for col_name in list(self.add_dummies_columns):
            X[col_name] = X[col_name] * X['base_price'] * X['Square']
        return X

    def Social_1_to_cat(self, X):
        bins = [0, 10, 25, 50, X['Social_1'].max()]
        X['Social_1_cat'] = pd.cut(X['Social_1'], bins=bins, labels=False)
        X['Social_1_cat'].fillna(0, inplace=True)
        return X

    def Social_2_to_cat(self, X):
        bins = [0, 500, 1000, 5000, 10000, X['Social_2'].max()]
        X['Social_2_cat'] = pd.cut(X['Social_2'], bins=bins, labels=False)
        X['Social_2_cat'].fillna(0, inplace=True)
        return X

    def Social_3_to_cat(self, X):
        bins = [0, 3, 20, 60, 100, X['Social_3'].max()]
        X['Social_3_cat'] = pd.cut(X['Social_3'], bins=bins, labels=False)
        X['Social_3_cat'].fillna(0, inplace=True)
        return X

    def KitchenSquare_to_cat(self, X):
        bins = [0, 9, 12, X['KitchenSquare'].max()]
        X['KitchenSquare_cat'] = pd.cut(X['KitchenSquare'], bins=bins, labels=False)
        X['KitchenSquare_cat'].fillna(0, inplace=True)
        return X

    def SquareCoeff_to_cat(self, X):
        bins = [0, 2, 4, 6, 8, 10]
        X['SquareCoeff_cat'] = pd.cut(X['SquareCoeff'] * 10, bins=bins, labels=False)
        X['SquareCoeff_cat'].fillna(0, inplace=True)
        return X

    def shops_to_cat(self, X):
        bins = [0, 2, 6, 16, self.floor_max]
        X['floor_cat'] = pd.cut(X['Shops_1'], bins=bins, labels=False)
        X['floor_cat'].fillna(-1, inplace=True)
        return X

    def HouseFloor_to_cat(self, X):
        bins = [0, 5, 9, 24, 35, X['HouseFloor'].max()]
        X['house_floor_cat'] = pd.cut(X['HouseFloor'], bins=bins, labels=False)
        X['house_floor_cat'].fillna(-1, inplace=True)
        return X

    def floor_to_cat(self, X):
        bins = [0, 3, 5, 9, 15, X['Floor'].max()]
        X['floor_cat'] = pd.cut(X['Floor'], bins=bins, labels=False)
        X['floor_cat'].fillna(-1, inplace=True)
        return X

    def year_to_cat(self, X):
        bins = [0, 1925, 1941, 1945, 1955, 1965, 1985, 1995, 2005, self.house_year_max]
        X['year_cat'] = pd.cut(X['HouseYear'], bins=bins, labels=False)
        X['year_cat'].fillna(-1, inplace=True)
        return X

train_df = pd.read_csv(TRAIN_DATASET_PATH)
test_df = pd.read_csv(TEST_DATASET_PATH)
preprocessor = DataPreprocessing()
preprocessor.fit(train_df)
train_df = preprocessor.transform(train_df)
test_df = preprocessor.transform(test_df)
features_gen = FeatureGenetator()
features_gen.fit(train_df, train_df['Price'])
train_df = features_gen.transform(train_df)
add_dummies_columns_train = features_gen.add_dummies_columns
test_df = features_gen.transform(test_df)
add_dummies_columns_test = features_gen.add_dummies_columns
feature_names_list = list(add_dummies_columns_train) + ['Ecology_1']
target_name = 'Price'
X = train_df[feature_names_list]
y = train_df[target_name]
test_df = test_df[feature_names_list]
model = Lasso(0.05)
model.fit(X_train, y_train)  # X_train/y_train come from a train_test_split cell not captured in this record
y_train_preds = model.predict(X_train)
y_test_preds = model.predict(X_test)
evaluate_preds(y_train, y_train_preds, y_test, y_test_preds)
cv_score = cross_val_score(model, X, y, scoring='r2', cv=KFold(n_splits=3, shuffle=True, random_state=34))
cv_score
cv_score.mean()
code
129033938/cell_21
[ "text_plain_output_1.png" ]
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications import EfficientNetB0,EfficientNetB3,vgg19
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing import image
import cv2
import cv2
import keras
import numpy as np # linear algebra
import os
import tensorflow as tf
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        os.path.join(dirname, filename)
np.random.seed(1234)
path = '/kaggle/input/skin-cancer/skin caner'
img_list = os.listdir(path)
lables = [i for i in range(len(img_list))]
label_dict = dict()
label_dict['Normal'] = 0
label_dict['melanoma'] = 1
label_dict['nevus'] = 2
label_dict['pigmented benign keratosis'] = 3
data = []
label = []
C = 0
for cat in img_list:
    C = 0
    pic_list = os.path.join(path, cat)
    for img in os.listdir(pic_list):
        image = os.path.join(pic_list, img)
        if image == '/kaggle/input/skin-cancer/skin caner/Normal/34.avif':
            continue
        else:
            image = cv2.imread(image)
            image = cv2.resize(image, (224, 224))
            data.append(image)
            label.append(label_dict[cat])
            C += 1
data = np.array(data)
data.shape
label = np.array(label)
label.shape
num_classes = 4
label = keras.utils.to_categorical(label, num_classes)
model = vgg19.VGG19(include_top=True, weights=None, input_shape=(224, 224, 3), classes=4)
model.compile(loss='categorical_crossentropy', optimizer=Adam(0.0001), metrics=['accuracy'])
acc = []
for i in range(5):
    x_train, x_test, y_train, y_test = train_test_split(data, label, test_size=0.2, random_state=np.random.randint(1, 1000, 1)[0])
    x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.1, random_state=np.random.randint(1, 1000, 1)[0])
    model.fit(x_train, y_train, validation_data=(x_valid, y_valid), epochs=15, batch_size=16, verbose=1)
y_pred = model.predict(x_test)
y_pred = np.argmax(y_pred, axis=1)
y_pred
y_test = np.argmax(y_test, axis=1)
img = cv2.imread('/kaggle/input/skin-cancer/skin caner/Normal/0_0_aidai_0029.jpg')
img = cv2.resize(img, (224, 224))
img = np.expand_dims(img, axis=0)
img_class = model.predict(img, batch_size=1)
score = tf.nn.softmax(img_class[0])
score = np.argmax(score)
score
code
129033938/cell_13
[ "text_plain_output_1.png" ]
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications import EfficientNetB0,EfficientNetB3,vgg19
import cv2
import keras
import numpy as np # linear algebra
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        os.path.join(dirname, filename)
np.random.seed(1234)
path = '/kaggle/input/skin-cancer/skin caner'
img_list = os.listdir(path)
lables = [i for i in range(len(img_list))]
label_dict = dict()
label_dict['Normal'] = 0
label_dict['melanoma'] = 1
label_dict['nevus'] = 2
label_dict['pigmented benign keratosis'] = 3
data = []
label = []
C = 0
for cat in img_list:
    C = 0
    pic_list = os.path.join(path, cat)
    for img in os.listdir(pic_list):
        image = os.path.join(pic_list, img)
        if image == '/kaggle/input/skin-cancer/skin caner/Normal/34.avif':
            continue
        else:
            image = cv2.imread(image)
            image = cv2.resize(image, (224, 224))
            data.append(image)
            label.append(label_dict[cat])
            C += 1
data = np.array(data)
data.shape
label = np.array(label)
label.shape
num_classes = 4
label = keras.utils.to_categorical(label, num_classes)
model = vgg19.VGG19(include_top=True, weights=None, input_shape=(224, 224, 3), classes=4)
model.compile(loss='categorical_crossentropy', optimizer=Adam(0.0001), metrics=['accuracy'])
acc = []
for i in range(5):
    x_train, x_test, y_train, y_test = train_test_split(data, label, test_size=0.2, random_state=np.random.randint(1, 1000, 1)[0])
    x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.1, random_state=np.random.randint(1, 1000, 1)[0])
    model.fit(x_train, y_train, validation_data=(x_valid, y_valid), epochs=15, batch_size=16, verbose=1)
y_pred = model.predict(x_test)
y_pred = np.argmax(y_pred, axis=1)
y_pred
code
129033938/cell_4
[ "text_plain_output_1.png" ]
import cv2
import numpy as np # linear algebra
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        os.path.join(dirname, filename)
np.random.seed(1234)
path = '/kaggle/input/skin-cancer/skin caner'
img_list = os.listdir(path)
lables = [i for i in range(len(img_list))]
label_dict = dict()
label_dict['Normal'] = 0
label_dict['melanoma'] = 1
label_dict['nevus'] = 2
label_dict['pigmented benign keratosis'] = 3
data = []
label = []
C = 0
for cat in img_list:
    C = 0
    pic_list = os.path.join(path, cat)
    for img in os.listdir(pic_list):
        image = os.path.join(pic_list, img)
        if image == '/kaggle/input/skin-cancer/skin caner/Normal/34.avif':
            continue
        else:
            image = cv2.imread(image)
            image = cv2.resize(image, (224, 224))
            data.append(image)
            label.append(label_dict[cat])
            C += 1
print(C)
code
129033938/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications import EfficientNetB0,EfficientNetB3,vgg19
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing import image
import cv2
import cv2
import keras
import numpy as np # linear algebra
import os
import tensorflow as tf
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        os.path.join(dirname, filename)
np.random.seed(1234)
path = '/kaggle/input/skin-cancer/skin caner'
img_list = os.listdir(path)
lables = [i for i in range(len(img_list))]
label_dict = dict()
label_dict['Normal'] = 0
label_dict['melanoma'] = 1
label_dict['nevus'] = 2
label_dict['pigmented benign keratosis'] = 3
data = []
label = []
C = 0
for cat in img_list:
    C = 0
    pic_list = os.path.join(path, cat)
    for img in os.listdir(pic_list):
        image = os.path.join(pic_list, img)
        if image == '/kaggle/input/skin-cancer/skin caner/Normal/34.avif':
            continue
        else:
            image = cv2.imread(image)
            image = cv2.resize(image, (224, 224))
            data.append(image)
            label.append(label_dict[cat])
            C += 1
data = np.array(data)
data.shape
label = np.array(label)
label.shape
num_classes = 4
label = keras.utils.to_categorical(label, num_classes)
model = vgg19.VGG19(include_top=True, weights=None, input_shape=(224, 224, 3), classes=4)
model.compile(loss='categorical_crossentropy', optimizer=Adam(0.0001), metrics=['accuracy'])
acc = []
for i in range(5):
    x_train, x_test, y_train, y_test = train_test_split(data, label, test_size=0.2, random_state=np.random.randint(1, 1000, 1)[0])
    x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.1, random_state=np.random.randint(1, 1000, 1)[0])
    model.fit(x_train, y_train, validation_data=(x_valid, y_valid), epochs=15, batch_size=16, verbose=1)
y_pred = model.predict(x_test)
y_pred = np.argmax(y_pred, axis=1)
y_pred
y_test = np.argmax(y_test, axis=1)
img = cv2.imread('/kaggle/input/skin-cancer/skin caner/Normal/0_0_aidai_0029.jpg')
img = cv2.resize(img, (224, 224))
img = np.expand_dims(img, axis=0)
img_class = model.predict(img, batch_size=1)
score = tf.nn.softmax(img_class[0])
score = np.argmax(score)
label = ''
if score == 0:
    label = 'Normal'
elif score == 1:
    label = 'melanoma'
elif score == 2:
    label = 'nevus'
elif score == 3:
    label = 'pigmented benign keratosis'
label
code
129033938/cell_6
[ "text_html_output_1.png" ]
import cv2
import numpy as np # linear algebra
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        os.path.join(dirname, filename)
np.random.seed(1234)
path = '/kaggle/input/skin-cancer/skin caner'
img_list = os.listdir(path)
lables = [i for i in range(len(img_list))]
label_dict = dict()
label_dict['Normal'] = 0
label_dict['melanoma'] = 1
label_dict['nevus'] = 2
label_dict['pigmented benign keratosis'] = 3
data = []
label = []
C = 0
for cat in img_list:
    C = 0
    pic_list = os.path.join(path, cat)
    for img in os.listdir(pic_list):
        image = os.path.join(pic_list, img)
        if image == '/kaggle/input/skin-cancer/skin caner/Normal/34.avif':
            continue
        else:
            image = cv2.imread(image)
            image = cv2.resize(image, (224, 224))
            data.append(image)
            label.append(label_dict[cat])
            C += 1
data = np.array(data)
data.shape
label = np.array(label)
label.shape
code
129033938/cell_26
[ "text_plain_output_1.png" ]
from keras.optimizers import Adam
from keras.utils import load_img, img_to_array
from mpl_toolkits.axes_grid1 import ImageGrid
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications import EfficientNetB0,EfficientNetB3,vgg19
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing import image
import cv2
import cv2
import keras
import keras.utils as image
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import tensorflow as tf
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        os.path.join(dirname, filename)
np.random.seed(1234)
path = '/kaggle/input/skin-cancer/skin caner'
img_list = os.listdir(path)
lables = [i for i in range(len(img_list))]
label_dict = dict()
label_dict['Normal'] = 0
label_dict['melanoma'] = 1
label_dict['nevus'] = 2
label_dict['pigmented benign keratosis'] = 3
data = []
label = []
C = 0
for cat in img_list:
    C = 0
    pic_list = os.path.join(path, cat)
    for img in os.listdir(pic_list):
        image = os.path.join(pic_list, img)
        if image == '/kaggle/input/skin-cancer/skin caner/Normal/34.avif':
            continue
        else:
            image = cv2.imread(image)
            image = cv2.resize(image, (224, 224))
            data.append(image)
            label.append(label_dict[cat])
            C += 1
data = np.array(data)
data.shape
label = np.array(label)
label.shape
num_classes = 4
label = keras.utils.to_categorical(label, num_classes)
model = vgg19.VGG19(include_top=True, weights=None, input_shape=(224, 224, 3), classes=4)
model.compile(loss='categorical_crossentropy', optimizer=Adam(0.0001), metrics=['accuracy'])
acc = []
for i in range(5):
    x_train, x_test, y_train, y_test = train_test_split(data, label, test_size=0.2, random_state=np.random.randint(1, 1000, 1)[0])
    x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.1, random_state=np.random.randint(1, 1000, 1)[0])
    model.fit(x_train, y_train, validation_data=(x_valid, y_valid), epochs=15, batch_size=16, verbose=1)
y_pred = model.predict(x_test)
y_pred = np.argmax(y_pred, axis=1)
y_pred
y_test = np.argmax(y_test, axis=1)
img = cv2.imread('/kaggle/input/skin-cancer/skin caner/Normal/0_0_aidai_0029.jpg')
img = cv2.resize(img, (224, 224))
img = np.expand_dims(img, axis=0)
img_class = model.predict(img, batch_size=1)
score = tf.nn.softmax(img_class[0])
score = np.argmax(score)
label = ''
if score == 0:
    label = 'Normal'
elif score == 1:
    label = 'melanoma'
elif score == 2:
    label = 'nevus'
elif score == 3:
    label = 'pigmented benign keratosis'
from keras.utils import load_img, img_to_array
import keras.utils as image

def predict_image_class(image_path, true_value):
    img = cv2.imread(image_path)
    img = cv2.resize(img, (224, 224))
    img = np.expand_dims(img, axis=0)
    img_class = model.predict(img, batch_size=1)
    score = tf.nn.softmax(img_class[0])
    score = np.argmax(score)
    label = ''
    if score == 0:
        label = 'Normal'
    elif score == 1:
        label = 'melanoma'
    elif score == 2:
        label = 'nevus'
    elif score == 3:
        label = 'pigmented benign keratosis'
    print('This image most likely belongs to {}'.format(label))
    # for folder_name in our_folders:
    fig = plt.figure(1, figsize=(10, 10))
    grid = ImageGrid(fig, 111, nrows_ncols=(1, 1), axes_pad=0.05)
    ax = grid[0]
    # Fixed: the original passed (224, 224) as the second positional argument,
    # which is not target_size in load_img's signature.
    img = load_img(image_path, target_size=(224, 224))
    img = np.array(img.convert('RGB'))
    img = image.img_to_array(img)
    ax.imshow(img / 255.)
    ax.text(10, 100, 'True Label: %s' % true_value.upper(), color='g', backgroundcolor='w', alpha=0.8, size=20)
    ax.text(10, 150, 'Predicted Label: %s' % label.upper(), color='k', backgroundcolor='w', alpha=0.8, size=20)
    ax.axis('off')
    plt.show()

predict_image_class('/kaggle/input/skin-cancer/skin caner/pigmented benign keratosis/ISIC_0024495.jpg', 'pigmented')
code
129033938/cell_2
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns
import glob
import cv2
code
129033938/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications import EfficientNetB0,EfficientNetB3,vgg19
import cv2
import keras
import numpy as np # linear algebra
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        os.path.join(dirname, filename)
np.random.seed(1234)
path = '/kaggle/input/skin-cancer/skin caner'
img_list = os.listdir(path)
lables = [i for i in range(len(img_list))]
label_dict = dict()
label_dict['Normal'] = 0
label_dict['melanoma'] = 1
label_dict['nevus'] = 2
label_dict['pigmented benign keratosis'] = 3
data = []
label = []
C = 0
for cat in img_list:
    C = 0
    pic_list = os.path.join(path, cat)
    for img in os.listdir(pic_list):
        image = os.path.join(pic_list, img)
        if image == '/kaggle/input/skin-cancer/skin caner/Normal/34.avif':
            continue
        else:
            image = cv2.imread(image)
            image = cv2.resize(image, (224, 224))
            data.append(image)
            label.append(label_dict[cat])
            C += 1
data = np.array(data)
data.shape
label = np.array(label)
label.shape
num_classes = 4
label = keras.utils.to_categorical(label, num_classes)
model = vgg19.VGG19(include_top=True, weights=None, input_shape=(224, 224, 3), classes=4)
model.compile(loss='categorical_crossentropy', optimizer=Adam(0.0001), metrics=['accuracy'])
acc = []
for i in range(5):
    x_train, x_test, y_train, y_test = train_test_split(data, label, test_size=0.2, random_state=np.random.randint(1, 1000, 1)[0])
    x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.1, random_state=np.random.randint(1, 1000, 1)[0])
    model.fit(x_train, y_train, validation_data=(x_valid, y_valid), epochs=15, batch_size=16, verbose=1)
code
129033938/cell_19
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications import EfficientNetB0,EfficientNetB3,vgg19
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing import image
import cv2
import cv2
import keras
import numpy as np # linear algebra
import os
import tensorflow as tf
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        os.path.join(dirname, filename)
np.random.seed(1234)
path = '/kaggle/input/skin-cancer/skin caner'
img_list = os.listdir(path)
lables = [i for i in range(len(img_list))]
label_dict = dict()
label_dict['Normal'] = 0
label_dict['melanoma'] = 1
label_dict['nevus'] = 2
label_dict['pigmented benign keratosis'] = 3
data = []
label = []
C = 0
for cat in img_list:
    C = 0
    pic_list = os.path.join(path, cat)
    for img in os.listdir(pic_list):
        image = os.path.join(pic_list, img)
        if image == '/kaggle/input/skin-cancer/skin caner/Normal/34.avif':
            continue
        else:
            image = cv2.imread(image)
            image = cv2.resize(image, (224, 224))
            data.append(image)
            label.append(label_dict[cat])
            C += 1
data = np.array(data)
data.shape
label = np.array(label)
label.shape
num_classes = 4
label = keras.utils.to_categorical(label, num_classes)
model = vgg19.VGG19(include_top=True, weights=None, input_shape=(224, 224, 3), classes=4)
model.compile(loss='categorical_crossentropy', optimizer=Adam(0.0001), metrics=['accuracy'])
acc = []
for i in range(5):
    x_train, x_test, y_train, y_test = train_test_split(data, label, test_size=0.2, random_state=np.random.randint(1, 1000, 1)[0])
    x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.1, random_state=np.random.randint(1, 1000, 1)[0])
    model.fit(x_train, y_train, validation_data=(x_valid, y_valid), epochs=15, batch_size=16, verbose=1)
y_pred = model.predict(x_test)
y_pred = np.argmax(y_pred, axis=1)
y_pred
y_test = np.argmax(y_test, axis=1)
img = cv2.imread('/kaggle/input/skin-cancer/skin caner/Normal/0_0_aidai_0029.jpg')
img = cv2.resize(img, (224, 224))
img = np.expand_dims(img, axis=0)
img_class = model.predict(img, batch_size=1)
score = tf.nn.softmax(img_class[0])
code
129033938/cell_28
[ "text_plain_output_1.png" ]
from IPython.display import FileLink
from IPython.display import FileLink
FileLink('cancer_detection_using_VGG19.h5')
code
129033938/cell_15
[ "text_plain_output_1.png" ]
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications import EfficientNetB0,EfficientNetB3,vgg19
import cv2
import keras
import numpy as np # linear algebra
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        os.path.join(dirname, filename)
np.random.seed(1234)
path = '/kaggle/input/skin-cancer/skin caner'
img_list = os.listdir(path)
lables = [i for i in range(len(img_list))]
label_dict = dict()
label_dict['Normal'] = 0
label_dict['melanoma'] = 1
label_dict['nevus'] = 2
label_dict['pigmented benign keratosis'] = 3
data = []
label = []
C = 0
for cat in img_list:
    C = 0
    pic_list = os.path.join(path, cat)
    for img in os.listdir(pic_list):
        image = os.path.join(pic_list, img)
        if image == '/kaggle/input/skin-cancer/skin caner/Normal/34.avif':
            continue
        else:
            image = cv2.imread(image)
            image = cv2.resize(image, (224, 224))
            data.append(image)
            label.append(label_dict[cat])
            C += 1
data = np.array(data)
data.shape
label = np.array(label)
label.shape
num_classes = 4
label = keras.utils.to_categorical(label, num_classes)
model = vgg19.VGG19(include_top=True, weights=None, input_shape=(224, 224, 3), classes=4)
model.compile(loss='categorical_crossentropy', optimizer=Adam(0.0001), metrics=['accuracy'])
acc = []
for i in range(5):
    x_train, x_test, y_train, y_test = train_test_split(data, label, test_size=0.2, random_state=np.random.randint(1, 1000, 1)[0])
    x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.1, random_state=np.random.randint(1, 1000, 1)[0])
    model.fit(x_train, y_train, validation_data=(x_valid, y_valid), epochs=15, batch_size=16, verbose=1)
y_pred = model.predict(x_test)
y_pred = np.argmax(y_pred, axis=1)
y_pred
y_test = np.argmax(y_test, axis=1)
from sklearn.metrics import *
print(classification_report(y_test, y_pred))
code
129033938/cell_16
[ "text_plain_output_1.png" ]
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix  # added: used below but missing from the original imports
from tensorflow.keras.applications import EfficientNetB0,EfficientNetB3,vgg19
import cv2
import keras
import numpy as np # linear algebra
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        os.path.join(dirname, filename)
np.random.seed(1234)
path = '/kaggle/input/skin-cancer/skin caner'
img_list = os.listdir(path)
lables = [i for i in range(len(img_list))]
label_dict = dict()
label_dict['Normal'] = 0
label_dict['melanoma'] = 1
label_dict['nevus'] = 2
label_dict['pigmented benign keratosis'] = 3
data = []
label = []
C = 0
for cat in img_list:
    C = 0
    pic_list = os.path.join(path, cat)
    for img in os.listdir(pic_list):
        image = os.path.join(pic_list, img)
        if image == '/kaggle/input/skin-cancer/skin caner/Normal/34.avif':
            continue
        else:
            image = cv2.imread(image)
            image = cv2.resize(image, (224, 224))
            data.append(image)
            label.append(label_dict[cat])
            C += 1
data = np.array(data)
data.shape
label = np.array(label)
label.shape
num_classes = 4
label = keras.utils.to_categorical(label, num_classes)
model = vgg19.VGG19(include_top=True, weights=None, input_shape=(224, 224, 3), classes=4)
model.compile(loss='categorical_crossentropy', optimizer=Adam(0.0001), metrics=['accuracy'])
acc = []
for i in range(5):
    x_train, x_test, y_train, y_test = train_test_split(data, label, test_size=0.2, random_state=np.random.randint(1, 1000, 1)[0])
    x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.1, random_state=np.random.randint(1, 1000, 1)[0])
    model.fit(x_train, y_train, validation_data=(x_valid, y_valid), epochs=15, batch_size=16, verbose=1)
y_pred = model.predict(x_test)
y_pred = np.argmax(y_pred, axis=1)
y_pred
y_test = np.argmax(y_test, axis=1)
cm = confusion_matrix(y_test, y_pred)
print(cm)
code
129033938/cell_17
[ "text_plain_output_1.png" ]
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix  # added: used below but missing from the original imports
from tensorflow.keras.applications import EfficientNetB0,EfficientNetB3,vgg19
import cv2
import keras
import numpy as np # linear algebra
import os
import seaborn as sns
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        os.path.join(dirname, filename)
np.random.seed(1234)
path = '/kaggle/input/skin-cancer/skin caner'
img_list = os.listdir(path)
lables = [i for i in range(len(img_list))]
label_dict = dict()
label_dict['Normal'] = 0
label_dict['melanoma'] = 1
label_dict['nevus'] = 2
label_dict['pigmented benign keratosis'] = 3
data = []
label = []
C = 0
for cat in img_list:
    C = 0
    pic_list = os.path.join(path, cat)
    for img in os.listdir(pic_list):
        image = os.path.join(pic_list, img)
        if image == '/kaggle/input/skin-cancer/skin caner/Normal/34.avif':
            continue
        else:
            image = cv2.imread(image)
            image = cv2.resize(image, (224, 224))
            data.append(image)
            label.append(label_dict[cat])
            C += 1
data = np.array(data)
data.shape
label = np.array(label)
label.shape
num_classes = 4
label = keras.utils.to_categorical(label, num_classes)
model = vgg19.VGG19(include_top=True, weights=None, input_shape=(224, 224, 3), classes=4)
model.compile(loss='categorical_crossentropy', optimizer=Adam(0.0001), metrics=['accuracy'])
acc = []
for i in range(5):
    x_train, x_test, y_train, y_test = train_test_split(data, label, test_size=0.2, random_state=np.random.randint(1, 1000, 1)[0])
    x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.1, random_state=np.random.randint(1, 1000, 1)[0])
    model.fit(x_train, y_train, validation_data=(x_valid, y_valid), epochs=15, batch_size=16, verbose=1)
y_pred = model.predict(x_test)
y_pred = np.argmax(y_pred, axis=1)
y_pred
y_test = np.argmax(y_test, axis=1)
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot=True, fmt='.1f')
code
129033938/cell_24
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        os.path.join(dirname, filename)
np.random.seed(1234)
path = '/kaggle/input/skin-cancer/skin caner'
img_list = os.listdir(path)
lables = [i for i in range(len(img_list))]
label_dict = dict()
label_dict['Normal'] = 0
label_dict['melanoma'] = 1
label_dict['nevus'] = 2
label_dict['pigmented benign keratosis'] = 3
label_dict['Normal']
code
129033938/cell_12
[ "text_plain_output_1.png" ]
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from tensorflow.keras.applications import EfficientNetB0,EfficientNetB3,vgg19
import cv2
import keras
import numpy as np # linear algebra
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        os.path.join(dirname, filename)
np.random.seed(1234)
path = '/kaggle/input/skin-cancer/skin caner'
img_list = os.listdir(path)
lables = [i for i in range(len(img_list))]
label_dict = dict()
label_dict['Normal'] = 0
label_dict['melanoma'] = 1
label_dict['nevus'] = 2
label_dict['pigmented benign keratosis'] = 3
data = []
label = []
C = 0
for cat in img_list:
    C = 0
    pic_list = os.path.join(path, cat)
    for img in os.listdir(pic_list):
        image = os.path.join(pic_list, img)
        if image == '/kaggle/input/skin-cancer/skin caner/Normal/34.avif':
            continue
        else:
            image = cv2.imread(image)
            image = cv2.resize(image, (224, 224))
            data.append(image)
            label.append(label_dict[cat])
            C += 1
data = np.array(data)
data.shape
label = np.array(label)
label.shape
num_classes = 4
label = keras.utils.to_categorical(label, num_classes)
model = vgg19.VGG19(include_top=True, weights=None, input_shape=(224, 224, 3), classes=4)
model.compile(loss='categorical_crossentropy', optimizer=Adam(0.0001), metrics=['accuracy'])
acc = []
for i in range(5):
    x_train, x_test, y_train, y_test = train_test_split(data, label, test_size=0.2, random_state=np.random.randint(1, 1000, 1)[0])
    x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=0.1, random_state=np.random.randint(1, 1000, 1)[0])
    model.fit(x_train, y_train, validation_data=(x_valid, y_valid), epochs=15, batch_size=16, verbose=1)
acc  # note: the loop never appends to acc, so this displays an empty list
code
129033938/cell_5
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import cv2
import numpy as np # linear algebra
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        os.path.join(dirname, filename)
np.random.seed(1234)
path = '/kaggle/input/skin-cancer/skin caner'
img_list = os.listdir(path)
lables = [i for i in range(len(img_list))]
label_dict = dict()
label_dict['Normal'] = 0
label_dict['melanoma'] = 1
label_dict['nevus'] = 2
label_dict['pigmented benign keratosis'] = 3
data = []
label = []
C = 0
for cat in img_list:
    C = 0
    pic_list = os.path.join(path, cat)
    for img in os.listdir(pic_list):
        image = os.path.join(pic_list, img)
        if image == '/kaggle/input/skin-cancer/skin caner/Normal/34.avif':
            continue
        else:
            image = cv2.imread(image)
            image = cv2.resize(image, (224, 224))
            data.append(image)
            label.append(label_dict[cat])
            C += 1
data = np.array(data)
data.shape
code
2042602/cell_13
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LinearRegression
import numpy as np # linear algebra
clf = LinearRegression()
clf.fit(X_train, y_train)
predictions = clf.predict(X_test)
print(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
code