Dataset schema (one notebook cell per row):

path: string (lengths 13-17)
screenshot_names: sequence of strings (lengths 1-873)
code: string (lengths 0-40.4k)
cell_type: string class (1 value: "code")
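The rows below follow this schema. As a minimal sketch of reading one row, assuming the table is published as a Hugging Face dataset (the id 'user/kaggle-notebook-cells' is a placeholder, not the real dataset name):

from datasets import load_dataset  # pip install datasets

# Placeholder dataset id; substitute the actual one.
ds = load_dataset('user/kaggle-notebook-cells', split='train')
row = ds[0]
print(row['path'])              # e.g. '104119399/cell_21'
print(row['screenshot_names'])  # e.g. ['image_output_1.png']
print(row['code'][:80])         # raw source of the extracted notebook cell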
104119399/cell_21
[ "image_output_1.png" ]
import matplotlib.pyplot as plt

classes = ['Python', 'R', 'AI', 'ML', 'DS']
class1_student = [45, 25, 35, 40, 30]
plt.pie(class1_student, labels=classes, autopct='%0.1f%%', explode=(0, 0, 0, 0.2, 0), pctdistance=0.8)
plt.show()
code
104119399/cell_13
[ "image_output_1.png" ]
import matplotlib.pyplot as plt

classes = ['Python', 'R', 'AI', 'ML', 'DS']
class1_student = [45, 25, 35, 40, 30]
plt.pie(class1_student, labels=classes)
plt.show()
code
104119399/cell_25
[ "image_output_1.png" ]
import matplotlib.pyplot as plt

classes = ['Python', 'R', 'AI', 'ML', 'DS']
class1_student = [45, 25, 35, 40, 30]
plt.pie(class1_student, labels=classes, autopct='%0.1f%%', explode=(0, 0, 0, 0.2, 0), pctdistance=0.8, labeldistance=1.3)
plt.show()
code
104119399/cell_23
[ "image_output_1.png" ]
import matplotlib.pyplot as plt

classes = ['Python', 'R', 'AI', 'ML', 'DS']
class1_student = [45, 25, 35, 40, 30]
plt.pie(class1_student, labels=classes, autopct='%0.1f%%', explode=(0, 0, 0, 0.2, 0), pctdistance=0.8, shadow=True)
plt.show()
code
104119399/cell_29
[ "image_output_1.png" ]
import matplotlib.pyplot as plt

classes = ['Python', 'R', 'AI', 'ML', 'DS']
class1_student = [45, 25, 35, 40, 30]
plt.pie(class1_student, labels=classes, autopct='%0.1f%%', explode=(0, 0, 0, 0.2, 0), pctdistance=0.8, radius=1.5)
plt.show()
code
104119399/cell_11
[ "image_output_1.png" ]
import matplotlib.pyplot as plt

classes = ['Python', 'R', 'AI', 'ML', 'DS']
class1_student = [45, 25, 35, 40, 30]
plt.pie(class1_student)
plt.show()
code
104119399/cell_19
[ "image_output_1.png" ]
import matplotlib.pyplot as plt

classes = ['Python', 'R', 'AI', 'ML', 'DS']
class1_student = [45, 25, 35, 40, 30]
plt.pie(class1_student, labels=classes, autopct='%0.1f%%', explode=(0, 0, 0, 0.2, 0))
plt.show()
code
104119399/cell_32
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np

classes = ['Python', 'R', 'AI', 'ML', 'DS']
class1_student = [45, 25, 35, 40, 30]

# Concentric rings: 20 equal slices alternating red/white, framed by solid color rings
label = np.ones(20)
colors = ['r', 'w'] * 10
plt.pie([1], colors='m', radius=2.2)
plt.pie([1], colors='r', radius=2.0)
plt.pie([1], colors='c', radius=1.8)
plt.pie(label, colors=colors, radius=1.6)
plt.pie([1], colors='y', radius=1.4)
plt.show()
code
104119399/cell_15
[ "image_output_1.png" ]
import matplotlib.pyplot as plt

classes = ['Python', 'R', 'AI', 'ML', 'DS']
class1_student = [45, 25, 35, 40, 30]
plt.pie(class1_student, labels=classes, colors=['r', 'peru', 'm', 'olivedrab', 'g'])
plt.show()
code
104119399/cell_17
[ "image_output_1.png" ]
import matplotlib.pyplot as plt

classes = ['Python', 'R', 'AI', 'ML', 'DS']
class1_student = [45, 25, 35, 40, 30]
plt.pie(class1_student, labels=classes, autopct='%0.1f%%')
plt.show()
code
104119399/cell_31
[ "image_output_1.png" ]
import matplotlib.pyplot as plt

classes = ['Python', 'R', 'AI', 'ML', 'DS']
class1_student = [45, 25, 35, 40, 30]
plt.pie(class1_student, labels=classes, autopct='%0.1f%%', explode=(0, 0, 0, 0.2, 0), pctdistance=0.8, counterclock=False)
plt.show()
code
104119399/cell_27
[ "image_output_1.png" ]
import matplotlib.pyplot as plt

classes = ['Python', 'R', 'AI', 'ML', 'DS']
class1_student = [45, 25, 35, 40, 30]
plt.pie(class1_student, labels=classes, autopct='%0.1f%%', explode=(0, 0, 0, 0.2, 0), pctdistance=0.8, startangle=90)
plt.show()
code
105187920/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
df = df[~(df['Area Type'] == 'Built Area')]
df = df[~(df['Point of Contact'] == 'Contact Builder')]
df = df[~(df['Bathroom'] > 6)]
df['Floor House'] = df['Floor'].str.split(' ').str.get(0)
df['Floor Building'] = df['Floor'].str.split(' ').str.get(-1)
df.drop('Floor', axis=1, inplace=True)
df.loc[df['Floor House'] == 'Ground', 'Floor House'] = 0
df.loc[df['Floor Building'] == 'Ground', 'Floor Building'] = 0
df.loc[df['Floor House'] == 'Lower', 'Floor House'] = 0
df.loc[df['Floor House'] == 'Upper', 'Floor House'] = df.loc[df['Floor House'] == 'Upper', 'Floor Building']
df['Area Locality'].value_counts(normalize=True) * 100
code
105187920/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
print(f'Dataset shape -> {df.shape}')
df.head()
code
105187920/cell_33
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
df = df[~(df['Area Type'] == 'Built Area')]
df = df[~(df['Point of Contact'] == 'Contact Builder')]
df = df[~(df['Bathroom'] > 6)]
df['Floor House'] = df['Floor'].str.split(' ').str.get(0)
df['Floor Building'] = df['Floor'].str.split(' ').str.get(-1)
df.drop('Floor', axis=1, inplace=True)
df.loc[df['Floor House'] == 'Ground', 'Floor House'] = 0
df.loc[df['Floor Building'] == 'Ground', 'Floor Building'] = 0
df.loc[df['Floor House'] == 'Lower', 'Floor House'] = 0
df.loc[df['Floor House'] == 'Upper', 'Floor House'] = df.loc[df['Floor House'] == 'Upper', 'Floor Building']
(df['Rent'] > 150000).sum()
code
105187920/cell_20
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
df = df[~(df['Area Type'] == 'Built Area')]
df = df[~(df['Point of Contact'] == 'Contact Builder')]
df = df[~(df['Bathroom'] > 6)]
df['Floor House'] = df['Floor'].str.split(' ').str.get(0)
df['Floor Building'] = df['Floor'].str.split(' ').str.get(-1)
print(df['Floor House'].unique())
print(df['Floor Building'].unique())
df.drop('Floor', axis=1, inplace=True)
code
105187920/cell_29
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
colors = ['#E557C4', '#57C4E5', '#293241']

def count(df, x, ax, main_color):
    # Only the axis styling survives in this extract; the plotting call itself
    # (presumably sns.countplot) appears to have been stripped.
    ax.bar_label(ax.containers[0], color='black', fontsize='large')
    ax.tick_params(axis='y', which='both', left=False, right=False, labelleft=False)
    ax.tick_params(axis='x', which='both', bottom=False, top=False)
    sns.despine(ax=ax, left=True)

count_cols = ['Area Type', 'City', 'Furnishing Status', 'Tenant Preferred', 'Bathroom', 'Point of Contact', 'BHK']
fig, axs = plt.subplots(4, 2, figsize=(20, 22), sharey=True)
for i, col in enumerate(count_cols):
    r_i = i // 2
    c_i = i % 2
    count(df, col, axs[r_i][c_i], colors[0])
fig.delaxes(axs[3, 1])
plt.show()

df = df[~(df['Area Type'] == 'Built Area')]
df = df[~(df['Point of Contact'] == 'Contact Builder')]
df = df[~(df['Bathroom'] > 6)]
df['Floor House'] = df['Floor'].str.split(' ').str.get(0)
df['Floor Building'] = df['Floor'].str.split(' ').str.get(-1)
df.drop('Floor', axis=1, inplace=True)
df.loc[df['Floor House'] == 'Ground', 'Floor House'] = 0
df.loc[df['Floor Building'] == 'Ground', 'Floor Building'] = 0
df.loc[df['Floor House'] == 'Lower', 'Floor House'] = 0
df.loc[df['Floor House'] == 'Upper', 'Floor House'] = df.loc[df['Floor House'] == 'Upper', 'Floor Building']

def hist(df, x, ax, main_color, meanline=True, mean_color=colors[2]):
    # As above, the histogram call itself appears to have been stripped.
    ax.tick_params(axis='y', which='both', left=False, right=False)
    sns.despine(ax=ax, left=True)

numeric_cols = ['Posted On', 'Size', 'Floor House', 'Floor Building']
fig, axs = plt.subplots(2, 2, figsize=(30, 15))
for i, num_col in enumerate(numeric_cols):
    is_not_date = num_col != 'Posted On'
    r_i = i // 2
    c_i = i % 2
    hist(df, num_col, axs[r_i][c_i], colors[1], meanline=is_not_date)
plt.show()
code
105187920/cell_2
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105187920/cell_19
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
df = df[~(df['Area Type'] == 'Built Area')]
df = df[~(df['Point of Contact'] == 'Contact Builder')]
df = df[~(df['Bathroom'] > 6)]
df['Floor'].nunique()
code
105187920/cell_32
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
colors = ['#E557C4', '#57C4E5', '#293241']

def count(df, x, ax, main_color):
    # Only the axis styling survives in this extract; the plotting call itself
    # (presumably sns.countplot) appears to have been stripped.
    ax.bar_label(ax.containers[0], color='black', fontsize='large')
    ax.tick_params(axis='y', which='both', left=False, right=False, labelleft=False)
    ax.tick_params(axis='x', which='both', bottom=False, top=False)
    sns.despine(ax=ax, left=True)

count_cols = ['Area Type', 'City', 'Furnishing Status', 'Tenant Preferred', 'Bathroom', 'Point of Contact', 'BHK']
fig, axs = plt.subplots(4, 2, figsize=(20, 22), sharey=True)
for i, col in enumerate(count_cols):
    r_i = i // 2
    c_i = i % 2
    count(df, col, axs[r_i][c_i], colors[0])
fig.delaxes(axs[3, 1])
plt.show()

df = df[~(df['Area Type'] == 'Built Area')]
df = df[~(df['Point of Contact'] == 'Contact Builder')]
df = df[~(df['Bathroom'] > 6)]
df['Floor House'] = df['Floor'].str.split(' ').str.get(0)
df['Floor Building'] = df['Floor'].str.split(' ').str.get(-1)
df.drop('Floor', axis=1, inplace=True)
df.loc[df['Floor House'] == 'Ground', 'Floor House'] = 0
df.loc[df['Floor Building'] == 'Ground', 'Floor Building'] = 0
df.loc[df['Floor House'] == 'Lower', 'Floor House'] = 0
df.loc[df['Floor House'] == 'Upper', 'Floor House'] = df.loc[df['Floor House'] == 'Upper', 'Floor Building']

def hist(df, x, ax, main_color, meanline=True, mean_color=colors[2]):
    # As above, the histogram call itself appears to have been stripped.
    ax.tick_params(axis='y', which='both', left=False, right=False)
    sns.despine(ax=ax, left=True)

numeric_cols = ['Posted On', 'Size', 'Floor House', 'Floor Building']
fig, axs = plt.subplots(2, 2, figsize=(30, 15))
for i, num_col in enumerate(numeric_cols):
    is_not_date = num_col != 'Posted On'
    r_i = i // 2
    c_i = i % 2
    hist(df, num_col, axs[r_i][c_i], colors[1], meanline=is_not_date)
plt.show()

fig, axs = plt.subplots(1, 2, figsize=(20, 6))
for ax in axs:
    sns.stripplot(y=df['Rent'], ax=ax, color=colors[1], size=3, alpha=0.7, linewidth=0.1, edgecolor='black')
    ax.axhline(np.mean(df['Rent']), linestyle='--', color=colors[0], label='Mean', linewidth=1)
    ax.axhline(np.median(df['Rent']), linestyle=':', color=colors[2], label='Median', linewidth=1)
    ax.set_ylabel('')
    ax.set_xlabel('Rent')
    ax.tick_params(axis='y', which='both', left=False, right=False)
    ax.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
    sns.despine(ax=ax, left=True, bottom=True)
    ax.grid(axis='y', linewidth=0.2)
    ax.legend()
axs[0].set(title='Without logarithmic scale on y-axis')
axs[1].set(yscale='log', title='With logarithmic scale on y-axis')
plt.show()
code
105187920/cell_31
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
df = df[~(df['Area Type'] == 'Built Area')]
df = df[~(df['Point of Contact'] == 'Contact Builder')]
df = df[~(df['Bathroom'] > 6)]
df['Floor House'] = df['Floor'].str.split(' ').str.get(0)
df['Floor Building'] = df['Floor'].str.split(' ').str.get(-1)
df.drop('Floor', axis=1, inplace=True)
df.loc[df['Floor House'] == 'Ground', 'Floor House'] = 0
df.loc[df['Floor Building'] == 'Ground', 'Floor Building'] = 0
df.loc[df['Floor House'] == 'Lower', 'Floor House'] = 0
df.loc[df['Floor House'] == 'Upper', 'Floor House'] = df.loc[df['Floor House'] == 'Upper', 'Floor Building']
df['Rent'].describe()
code
105187920/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

df = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
colors = ['#E557C4', '#57C4E5', '#293241']

def count(df, x, ax, main_color):
    # Only the axis styling survives in this extract; the plotting call itself
    # (presumably sns.countplot) appears to have been stripped.
    ax.bar_label(ax.containers[0], color='black', fontsize='large')
    ax.tick_params(axis='y', which='both', left=False, right=False, labelleft=False)
    ax.tick_params(axis='x', which='both', bottom=False, top=False)
    sns.despine(ax=ax, left=True)

count_cols = ['Area Type', 'City', 'Furnishing Status', 'Tenant Preferred', 'Bathroom', 'Point of Contact', 'BHK']
fig, axs = plt.subplots(4, 2, figsize=(20, 22), sharey=True)
for i, col in enumerate(count_cols):
    r_i = i // 2
    c_i = i % 2
    count(df, col, axs[r_i][c_i], colors[0])
fig.delaxes(axs[3, 1])
plt.show()
code
105187920/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
df = df[~(df['Area Type'] == 'Built Area')]
df = df[~(df['Point of Contact'] == 'Contact Builder')]
df = df[~(df['Bathroom'] > 6)]
df['Floor House'] = df['Floor'].str.split(' ').str.get(0)
df['Floor Building'] = df['Floor'].str.split(' ').str.get(-1)
df.drop('Floor', axis=1, inplace=True)
df.loc[df['Floor House'] == 'Ground', 'Floor House'] = 0
df.loc[df['Floor Building'] == 'Ground', 'Floor Building'] = 0
df.loc[df['Floor House'] == 'Lower', 'Floor House'] = 0
df.loc[df['Floor House'] == 'Upper', 'Floor House'] = df.loc[df['Floor House'] == 'Upper', 'Floor Building']
print(df['Floor House'].unique())
code
105187920/cell_37
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
df = df[~(df['Area Type'] == 'Built Area')]
df = df[~(df['Point of Contact'] == 'Contact Builder')]
df = df[~(df['Bathroom'] > 6)]
df['Floor House'] = df['Floor'].str.split(' ').str.get(0)
df['Floor Building'] = df['Floor'].str.split(' ').str.get(-1)
df.drop('Floor', axis=1, inplace=True)
df.loc[df['Floor House'] == 'Ground', 'Floor House'] = 0
df.loc[df['Floor Building'] == 'Ground', 'Floor Building'] = 0
df.loc[df['Floor House'] == 'Lower', 'Floor House'] = 0
df.loc[df['Floor House'] == 'Upper', 'Floor House'] = df.loc[df['Floor House'] == 'Upper', 'Floor Building']
df = df[df['Rent'] < 100000]
print(df.Rent.describe())
code
105187920/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.read_csv('/kaggle/input/house-rent-prediction-dataset/House_Rent_Dataset.csv')
df.info()
code
73063905/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

# Round-trip the submission file, dropping the index column
df = pd.read_csv('submission.csv')
df.to_csv('submission.csv', index=False)
code
2001825/cell_9
[ "text_plain_output_1.png" ]
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, make_scorer
# sklearn.cross_validation was removed in scikit-learn 0.20; StratifiedKFold now lives in model_selection
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier, plot_importance
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/Iris.csv')
lb_make = LabelEncoder()
dataset['Species'] = dataset['Species'].astype('category')
dataset['SepalRatio'] = np.divide(dataset['SepalLengthCm'], dataset['SepalWidthCm'])
dataset['PetalRatio'] = np.divide(dataset['PetalLengthCm'], dataset['PetalWidthCm'])
X_all = dataset[['Id', 'SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm', 'SepalRatio', 'PetalRatio']]
y_all = lb_make.fit_transform(dataset['Species'])
num_test = 0.2
X_train, X_test, y_train, y_test = train_test_split(X_all, y_all, test_size=num_test, random_state=88)
acc_scorer = make_scorer(accuracy_score)

clf = LogisticRegression()
parametersLR = {'C': [0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1]}
LRmodel = GridSearchCV(clf, parametersLR, scoring=acc_scorer)
_ = LRmodel.fit(X_all, y_all)

NBmodel = GaussianNB()
_ = NBmodel.fit(X_all, y_all)

clf = KNeighborsClassifier()
parametersKN = {'n_neighbors': [1, 2, 4, 8, 16]}
KNmodel = GridSearchCV(clf, parametersKN, scoring=acc_scorer)
_ = KNmodel.fit(X_all, y_all)

clf = DecisionTreeClassifier()
parametersDT = {'criterion': ['gini', 'entropy'], 'max_depth': [1, 2, 3, 4]}
DTmodel = GridSearchCV(clf, parametersDT, scoring=acc_scorer)
_ = DTmodel.fit(X_all, y_all)

clf = SVC()
parametersSV = {'C': [0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1], 'kernel': ['linear', 'poly', 'rbf', 'sigmoid']}
SVmodel = GridSearchCV(clf, parametersSV, scoring=acc_scorer)
_ = SVmodel.fit(X_all, y_all)

clf = XGBClassifier()
parametersXG = {'n_estimators': [50, 100, 150, 200], 'max_depth': [2, 4, 6, 8]}
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)  # the removed API took (y_all, n_folds=10, ...)
XGmodel = GridSearchCV(clf, parametersXG, scoring=acc_scorer, n_jobs=-1, cv=kfold, verbose=1)
_ = XGmodel.fit(X_all, y_all)

expected = y_all

LRpredicted = LRmodel.predict(X_all)
LRpredictions = pd.DataFrame(lb_make.inverse_transform(LRpredicted))
print('The results for the Logistic Regression are:\n')
print(metrics.classification_report(expected, LRpredicted))
print(metrics.confusion_matrix(expected, LRpredicted))

NBpredicted = NBmodel.predict(X_all)
NBpredictions = pd.DataFrame(lb_make.inverse_transform(NBpredicted))
print('The results for the Naive Bayes are:\n')
print(metrics.classification_report(expected, NBpredicted))
print(metrics.confusion_matrix(expected, NBpredicted))

KNpredicted = KNmodel.predict(X_all)
KNpredictions = pd.DataFrame(lb_make.inverse_transform(KNpredicted))
print('The results for kNN are:\n')
print(metrics.classification_report(expected, KNpredicted))
print(metrics.confusion_matrix(expected, KNpredicted))

DTpredicted = DTmodel.predict(X_all)
DTpredictions = pd.DataFrame(lb_make.inverse_transform(DTpredicted))
print('The results for the Decision tree are:\n')
print(metrics.classification_report(expected, DTpredicted))
print(metrics.confusion_matrix(expected, DTpredicted))

SVpredicted = SVmodel.predict(X_all)
SVpredictions = pd.DataFrame(lb_make.inverse_transform(SVpredicted))
print('The results for the support vector machine are:\n')
print(metrics.classification_report(expected, SVpredicted))
print(metrics.confusion_matrix(expected, SVpredicted))

XGpredicted = XGmodel.predict(X_all)
XGpredictions = pd.DataFrame(lb_make.inverse_transform(XGpredicted))
print('The results for the XGBoost are:\n')
print(metrics.classification_report(expected, XGpredicted))
print(metrics.confusion_matrix(expected, XGpredicted))

print('\nAcc. LogReg: {0}'.format(accuracy_score(expected, LRpredicted)))
print('\nAcc. NaiveBayes: {0}'.format(accuracy_score(expected, NBpredicted)))
print('\nAcc. kNN: {0}'.format(accuracy_score(expected, KNpredicted)))
print('\nAcc. DecTree: {0}'.format(accuracy_score(expected, DTpredicted)))
print('\nAcc. SVM: {0}'.format(accuracy_score(expected, SVpredicted)))
print('\nAcc. XGBoost: {0}'.format(accuracy_score(expected, XGpredicted)))
code
2001825/cell_2
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score, make_scorer
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import LabelEncoder
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/Iris.csv')
print(dataset.head())
lb_make = LabelEncoder()
dataset['Species'] = dataset['Species'].astype('category')
dataset['SepalRatio'] = np.divide(dataset['SepalLengthCm'], dataset['SepalWidthCm'])
dataset['PetalRatio'] = np.divide(dataset['PetalLengthCm'], dataset['PetalWidthCm'])
X_all = dataset[['Id', 'SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm', 'SepalRatio', 'PetalRatio']]
print(X_all.head())
y_all = lb_make.fit_transform(dataset['Species'])
num_test = 0.2
X_train, X_test, y_train, y_test = train_test_split(X_all, y_all, test_size=num_test, random_state=88)
acc_scorer = make_scorer(accuracy_score)
code
2001825/cell_1
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

print(check_output(['ls', '../input']).decode('utf8'))

from sklearn import datasets
from sklearn import metrics
from sklearn.metrics import accuracy_score, make_scorer
from sklearn import preprocessing
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import LabelEncoder
# sklearn.cross_validation was removed in scikit-learn 0.20; StratifiedKFold now lives in model_selection
from sklearn.model_selection import StratifiedKFold
code
2001825/cell_8
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, make_scorer
# sklearn.cross_validation was removed in scikit-learn 0.20; StratifiedKFold now lives in model_selection
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier, plot_importance
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/Iris.csv')
lb_make = LabelEncoder()
dataset['Species'] = dataset['Species'].astype('category')
dataset['SepalRatio'] = np.divide(dataset['SepalLengthCm'], dataset['SepalWidthCm'])
dataset['PetalRatio'] = np.divide(dataset['PetalLengthCm'], dataset['PetalWidthCm'])
X_all = dataset[['Id', 'SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm', 'SepalRatio', 'PetalRatio']]
y_all = lb_make.fit_transform(dataset['Species'])
num_test = 0.2
X_train, X_test, y_train, y_test = train_test_split(X_all, y_all, test_size=num_test, random_state=88)
acc_scorer = make_scorer(accuracy_score)

clf = LogisticRegression()
parametersLR = {'C': [0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1]}
LRmodel = GridSearchCV(clf, parametersLR, scoring=acc_scorer)
_ = LRmodel.fit(X_all, y_all)

clf = KNeighborsClassifier()
parametersKN = {'n_neighbors': [1, 2, 4, 8, 16]}
KNmodel = GridSearchCV(clf, parametersKN, scoring=acc_scorer)
_ = KNmodel.fit(X_all, y_all)

clf = DecisionTreeClassifier()
parametersDT = {'criterion': ['gini', 'entropy'], 'max_depth': [1, 2, 3, 4]}
DTmodel = GridSearchCV(clf, parametersDT, scoring=acc_scorer)
_ = DTmodel.fit(X_all, y_all)

clf = SVC()
parametersSV = {'C': [0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1], 'kernel': ['linear', 'poly', 'rbf', 'sigmoid']}
SVmodel = GridSearchCV(clf, parametersSV, scoring=acc_scorer)
_ = SVmodel.fit(X_all, y_all)

clf = XGBClassifier()
parametersXG = {'n_estimators': [50, 100, 150, 200], 'max_depth': [2, 4, 6, 8]}
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)  # the removed API took (y_all, n_folds=10, ...)
XGmodel = GridSearchCV(clf, parametersXG, scoring=acc_scorer, n_jobs=-1, cv=kfold, verbose=1)
_ = XGmodel.fit(X_all, y_all)
code
121151674/cell_13
[ "text_plain_output_1.png" ]
errors
code
121151674/cell_19
[ "text_plain_output_1.png" ]
import numpy as np

def params(num_neurons):
    W = [np.random.randn(y, x) for x, y in zip(num_neurons[:-1], num_neurons[1:])]
    b = [0.01 * np.random.randn(x, 1) for x in num_neurons[1:]]
    return (W, b)

def sigmoid(z):
    sig = 1.0 / (1.0 + np.exp(-z))
    return sig

def feedforward(X, W, b):
    n = W.shape[0]
    m = X.shape[0]
    activation = np.zeros([m, n])
    for i in range(n):
        a = sigmoid(np.dot(X, W[i, :]) + b[i])
        activation[:, i] = a
    return activation

def cost(yhat, y):
    err = -sum(y * np.log(yhat) + (1 - y) * np.log(1 - yhat)) / y.shape[0]
    return err

def one_hot(i):
    e = np.zeros(10)
    e[i] = 1.0
    return e

# train_data comes from an earlier (unshown) data-loading cell
y_train = np.array([one_hot(y) for y in train_data[1]])

def gradient(yhat, y, x2):
    common_on = (yhat - y) * (yhat * (1 - yhat))
    gw_l = np.zeros([yhat.shape[1], x2.shape[1]])
    for i in range(yhat.shape[1]):
        gw = common_on[:, i] * x2.T
        gw_l[i, :] = sum(gw.T) / y.shape[0]
    gb = sum(common_on) / y.shape[0]
    return (gw_l, gb.reshape(10, 1))

def gradient_l2(yhat, y, a1, X, w2):
    commo = (yhat - y) * (yhat * (1 - yhat))
    gb_l = np.zeros([a1.shape[1], 1])
    gw_l = np.zeros([a1.shape[1], X.shape[0]])
    for i in range(a1.shape[1]):
        gd_b = np.dot(commo, w2[:, i]) * (a1[:, i] * (1 - a1[:, i]))
        gw = gd_b * X
        gw_l[i, :] = sum(gw.T) / y.shape[0]
        gb_l[i] = sum(gd_b) / y.shape[0]
    return (gw_l, gb_l)

def gradient_descent(y, X, wo, bo, wh, bh, lr, epoch):
    costs = []
    cost_batch = []
    indexes = []
    for i in range(epoch):
        v1 = feedforward(train_data[0], wh, bh)
        v2 = feedforward(v1, wo, bo)
        err = cost(v2, y_train)
        costs.append(err)
        gwh, gbh = gradient_l2(v2, y, v1, X, wo)
        gwo, gbo = gradient(v2, y, v1)
        wh = wh - lr * gwh
        bh = bh - lr * gbh
        wo = wo - lr * gwo
        bo = bo - lr * gbo
    weight_params = {'out_layer': [wo, bo], 'hid_layer': [wh, bh]}
    return (weight_params, costs)

# `parameters` holds the trained weights from an earlier (unshown) training cell
v1 = feedforward(train_data[0], parameters['hid_layer'][0], parameters['hid_layer'][1])
v2 = feedforward(v1, parameters['out_layer'][0], parameters['out_layer'][1])
yhat = v2.tolist()
ineed = np.array([yhat[i].index(max(yhat[i])) for i in range(len(yhat))])
compaire = np.array([ineed, train_data[1]])
compaire = compaire.T
compaires = compaire[compaire[:, 0] == compaire[:, 1]]
compaires.shape
sumi = 0
wrong = 0
for i in range(compaire.shape[0]):
    if compaire[i, 0] == compaire[i, 1]:
        sumi += 1
    else:
        wrong += 1
print(f'model accuracy is: {sumi / compaire.shape[0]} and wrong prediction is: {wrong}/{compaire.shape[0]}')
code
121151674/cell_16
[ "image_output_1.png" ]
import matplotlib.pyplot as plt

def load_img(imge):
    img = imge.reshape(28, 28) * 255
    plt.gray()
    plt.imshow(img)  # presumed: the display call appears to have been stripped in extraction

load_img(test_data[0][100])  # test_data comes from an earlier (unshown) cell
code
121151674/cell_17
[ "text_plain_output_1.png" ]
import numpy as np

def params(num_neurons):
    W = [np.random.randn(y, x) for x, y in zip(num_neurons[:-1], num_neurons[1:])]
    b = [0.01 * np.random.randn(x, 1) for x in num_neurons[1:]]
    return (W, b)

def sigmoid(z):
    sig = 1.0 / (1.0 + np.exp(-z))
    return sig

def feedforward(X, W, b):
    n = W.shape[0]
    m = X.shape[0]
    activation = np.zeros([m, n])
    for i in range(n):
        a = sigmoid(np.dot(X, W[i, :]) + b[i])
        activation[:, i] = a
    return activation

def cost(yhat, y):
    err = -sum(y * np.log(yhat) + (1 - y) * np.log(1 - yhat)) / y.shape[0]
    return err

def one_hot(i):
    e = np.zeros(10)
    e[i] = 1.0
    return e

# train_data comes from an earlier (unshown) data-loading cell
y_train = np.array([one_hot(y) for y in train_data[1]])

def gradient(yhat, y, x2):
    common_on = (yhat - y) * (yhat * (1 - yhat))
    gw_l = np.zeros([yhat.shape[1], x2.shape[1]])
    for i in range(yhat.shape[1]):
        gw = common_on[:, i] * x2.T
        gw_l[i, :] = sum(gw.T) / y.shape[0]
    gb = sum(common_on) / y.shape[0]
    return (gw_l, gb.reshape(10, 1))

def gradient_l2(yhat, y, a1, X, w2):
    commo = (yhat - y) * (yhat * (1 - yhat))
    gb_l = np.zeros([a1.shape[1], 1])
    gw_l = np.zeros([a1.shape[1], X.shape[0]])
    for i in range(a1.shape[1]):
        gd_b = np.dot(commo, w2[:, i]) * (a1[:, i] * (1 - a1[:, i]))
        gw = gd_b * X
        gw_l[i, :] = sum(gw.T) / y.shape[0]
        gb_l[i] = sum(gd_b) / y.shape[0]
    return (gw_l, gb_l)

def gradient_descent(y, X, wo, bo, wh, bh, lr, epoch):
    costs = []
    cost_batch = []
    indexes = []
    for i in range(epoch):
        v1 = feedforward(train_data[0], wh, bh)
        v2 = feedforward(v1, wo, bo)
        err = cost(v2, y_train)
        costs.append(err)
        gwh, gbh = gradient_l2(v2, y, v1, X, wo)
        gwo, gbo = gradient(v2, y, v1)
        wh = wh - lr * gwh
        bh = bh - lr * gbh
        wo = wo - lr * gwo
        bo = bo - lr * gbo
    weight_params = {'out_layer': [wo, bo], 'hid_layer': [wh, bh]}
    return (weight_params, costs)

# `parameters` holds the trained weights from an earlier (unshown) training cell
v1 = feedforward(train_data[0], parameters['hid_layer'][0], parameters['hid_layer'][1])
v2 = feedforward(v1, parameters['out_layer'][0], parameters['out_layer'][1])
yhat = v2.tolist()
ineed = np.array([yhat[i].index(max(yhat[i])) for i in range(len(yhat))])
compaire = np.array([ineed, train_data[1]])
compaire = compaire.T
compaires = compaire[compaire[:, 0] == compaire[:, 1]]
compaires.shape
code
72092328/cell_21
[ "text_plain_output_1.png" ]
from glob import glob

paths = glob('../input/g2net-gravitational-wave-detection/train/*/*/*/*')
paths[0].split('/')[-1].split('.')[0]
code
72092328/cell_25
[ "text_html_output_1.png" ]
from glob import glob
import pandas as pd

paths = glob('../input/g2net-gravitational-wave-detection/train/*/*/*/*')
ids = [path.split('/')[-1].split('.')[0] for path in paths]
path_df = pd.DataFrame({'path': paths, 'id': ids})
path_df
labels = pd.read_csv('../input/g2net-gravitational-wave-detection/training_labels.csv')
train_df = pd.merge(left=labels, right=path_df, on='id')
train_df.shape
train_df.head()
code
72092328/cell_23
[ "text_plain_output_1.png" ]
from glob import glob
import pandas as pd

paths = glob('../input/g2net-gravitational-wave-detection/train/*/*/*/*')
ids = [path.split('/')[-1].split('.')[0] for path in paths]
path_df = pd.DataFrame({'path': paths, 'id': ids})
path_df
labels = pd.read_csv('../input/g2net-gravitational-wave-detection/training_labels.csv')
train_df = pd.merge(left=labels, right=path_df, on='id')
train_df.shape
code
72092328/cell_24
[ "text_html_output_1.png" ]
from glob import glob
import pandas as pd

paths = glob('../input/g2net-gravitational-wave-detection/train/*/*/*/*')
ids = [path.split('/')[-1].split('.')[0] for path in paths]
path_df = pd.DataFrame({'path': paths, 'id': ids})
path_df
labels = pd.read_csv('../input/g2net-gravitational-wave-detection/training_labels.csv')
train_df = pd.merge(left=labels, right=path_df, on='id')
train_df.shape
display(train_df.head())
code
72092328/cell_27
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from glob import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

paths = glob('../input/g2net-gravitational-wave-detection/train/*/*/*/*')
ids = [path.split('/')[-1].split('.')[0] for path in paths]
path_df = pd.DataFrame({'path': paths, 'id': ids})
path_df
labels = pd.read_csv('../input/g2net-gravitational-wave-detection/training_labels.csv')
train_df = pd.merge(left=labels, right=path_df, on='id')
train_df.shape
target_1 = train_df[train_df.target == 1]
target_0 = train_df[train_df.target == 0]
target_waves = target_1.sample(50).path.values
plt.figure(figsize=(20, 15))
# NOTE: 50 samples are drawn but the grid has only 10 subplot slots, so
# plt.subplot raises once i > 10 (this matches the recorded stderr output).
for i in range(1, len(target_waves) + 1):
    pos = np.load(target_waves[i - 1])
    plt.subplot(10, 1, i)
    plt.plot(pos[0], c='firebrick')
    plt.plot(pos[1], c='blue')
    plt.plot(pos[2], c='green')
pos.shape
code
16158861/cell_9
[ "text_html_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot  # offline plotting; the plotly.plotly (online) import is dropped, that module was removed in plotly 4
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objs as go
import seaborn as sns

data = pd.read_csv('../input/heart.csv', sep=',')
data = data.sort_values(by=['age'])
data['genderText'] = ['male' if 1 == each else 'female' for each in data.sex]

# correlation map
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(data.corr(), annot=True, linewidths=0.5, fmt='.2f', ax=ax)

trace1 = go.Scatter(x=data.age, y=data.trestbps, mode='lines', name='trestbps',
                    marker=dict(color='rgba(16, 112, 2, 0.8)'), text=data.genderText)
trace2 = go.Scatter(x=data.age, y=data.chol, mode='lines+markers', name='chol',
                    marker=dict(color='rgba(80, 26, 80, 0.8)'), text=data.genderText)
data2 = [trace1, trace2]
layout = dict(title='trestbps and chol according to age',
              xaxis=dict(title='Age', ticklen=5, zeroline=False))
fig = dict(data=data2, layout=layout)
iplot(fig)
code
16158861/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/heart.csv', sep=',')
data = data.sort_values(by=['age'])
data['genderText'] = ['male' if 1 == each else 'female' for each in data.sex]
data.head()
code
16158861/cell_11
[ "text_html_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot  # offline plotting; the plotly.plotly (online) import is dropped, that module was removed in plotly 4
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objs as go
import seaborn as sns

data = pd.read_csv('../input/heart.csv', sep=',')
data = data.sort_values(by=['age'])
data['genderText'] = ['male' if 1 == each else 'female' for each in data.sex]

# correlation map
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(data.corr(), annot=True, linewidths=0.5, fmt='.2f', ax=ax)

trace1 = go.Scatter(x=data.age, y=data.trestbps, mode='lines', name='trestbps',
                    marker=dict(color='rgba(16, 112, 2, 0.8)'), text=data.genderText)
trace2 = go.Scatter(x=data.age, y=data.chol, mode='lines+markers', name='chol',
                    marker=dict(color='rgba(80, 26, 80, 0.8)'), text=data.genderText)
data2 = [trace1, trace2]
layout = dict(title='trestbps and chol according to age',
              xaxis=dict(title='Age', ticklen=5, zeroline=False))
fig = dict(data=data2, layout=layout)

# filtering and joint plot
dataFilter1 = data[data.target == 1]
dataFilter0 = data[data.target == 0]
g = sns.jointplot(dataFilter1.age, dataFilter1.trestbps, kind='kde', size=7)  # `size` was renamed `height` in newer seaborn
# plt.savefig('graph.png')
plt.show()

dataFilterMale = data[data.sex == 1]
dataFilterFemale = data[data.sex == 0]
MaleThalach = pd.DataFrame(dataFilterMale.thalach)
FemaleThalach = pd.DataFrame(dataFilterFemale.thalach)
FemaleThalach.index = range(1, 97, 1)
MaleThalach.index = range(1, 208, 1)
dfMaleThalach = pd.DataFrame(MaleThalach).iloc[0:96, :]
dfFemaleThalach = pd.DataFrame(FemaleThalach)
unifiedThalach = pd.concat([dfMaleThalach, dfFemaleThalach], axis=1)
unifiedThalach.columns = ['Male thalach', 'Female thalach']
pal = sns.cubehelix_palette(2, rot=-0.5, dark=0.3)
sns.violinplot(data=unifiedThalach, palette=pal, inner='points')
plt.show()
code
16158861/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objs as go
from plotly import tools
# plotly.plotly (online mode) moved to chart_studio and is gone from plotly 4;
# offline iplot is what the notebook actually uses, so those imports are dropped.
from plotly.offline import init_notebook_mode, iplot

init_notebook_mode(connected=True)
from wordcloud import WordCloud

print(os.listdir('../input'))
code
16158861/cell_7
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('../input/heart.csv', sep=',')
data = data.sort_values(by=['age'])
data['genderText'] = ['male' if 1 == each else 'female' for each in data.sex]

# correlation map
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(data.corr(), annot=True, linewidths=0.5, fmt='.2f', ax=ax)

plt.hist(data.age, bins=50)
plt.xlabel('Age')
plt.ylabel('Frequency')
plt.title('histogram')
plt.show()
code
16158861/cell_3
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/heart.csv', sep=',')
data = data.sort_values(by=['age'])
data['genderText'] = ['male' if 1 == each else 'female' for each in data.sex]
data.info()
code
16158861/cell_10
[ "text_plain_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot  # offline plotting; the plotly.plotly (online) import is dropped, that module was removed in plotly 4
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objs as go
import seaborn as sns

data = pd.read_csv('../input/heart.csv', sep=',')
data = data.sort_values(by=['age'])
data['genderText'] = ['male' if 1 == each else 'female' for each in data.sex]

# correlation map
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(data.corr(), annot=True, linewidths=0.5, fmt='.2f', ax=ax)

trace1 = go.Scatter(x=data.age, y=data.trestbps, mode='lines', name='trestbps',
                    marker=dict(color='rgba(16, 112, 2, 0.8)'), text=data.genderText)
trace2 = go.Scatter(x=data.age, y=data.chol, mode='lines+markers', name='chol',
                    marker=dict(color='rgba(80, 26, 80, 0.8)'), text=data.genderText)
data2 = [trace1, trace2]
layout = dict(title='trestbps and chol according to age',
              xaxis=dict(title='Age', ticklen=5, zeroline=False))
fig = dict(data=data2, layout=layout)

dataFilter1 = data[data.target == 1]
dataFilter0 = data[data.target == 0]
g = sns.jointplot(dataFilter1.age, dataFilter1.trestbps, kind='kde', size=7)  # `size` was renamed `height` in newer seaborn
plt.show()
code
16158861/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot  # offline plotting; the plotly.plotly (online) import is dropped, that module was removed in plotly 4
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objs as go
import seaborn as sns

data = pd.read_csv('../input/heart.csv', sep=',')
data = data.sort_values(by=['age'])
data['genderText'] = ['male' if 1 == each else 'female' for each in data.sex]

# correlation map
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(data.corr(), annot=True, linewidths=0.5, fmt='.2f', ax=ax)

trace1 = go.Scatter(x=data.age, y=data.trestbps, mode='lines', name='trestbps',
                    marker=dict(color='rgba(16, 112, 2, 0.8)'), text=data.genderText)
trace2 = go.Scatter(x=data.age, y=data.chol, mode='lines+markers', name='chol',
                    marker=dict(color='rgba(80, 26, 80, 0.8)'), text=data.genderText)
data2 = [trace1, trace2]
layout = dict(title='trestbps and chol according to age',
              xaxis=dict(title='Age', ticklen=5, zeroline=False))
fig = dict(data=data2, layout=layout)

# filtering and joint plot
dataFilter1 = data[data.target == 1]
dataFilter0 = data[data.target == 0]
g = sns.jointplot(dataFilter1.age, dataFilter1.trestbps, kind='kde', size=7)
# plt.savefig('graph.png')
plt.show()

dataFilterMale = data[data.sex == 1]
dataFilterFemale = data[data.sex == 0]
MaleThalach = pd.DataFrame(dataFilterMale.thalach)
FemaleThalach = pd.DataFrame(dataFilterFemale.thalach)
FemaleThalach.index = range(1, 97, 1)
MaleThalach.index = range(1, 208, 1)
dfMaleThalach = pd.DataFrame(MaleThalach).iloc[0:96, :]
dfFemaleThalach = pd.DataFrame(FemaleThalach)
unifiedThalach = pd.concat([dfMaleThalach, dfFemaleThalach], axis=1)
unifiedThalach.columns = ['Male thalach', 'Female thalach']
pal = sns.cubehelix_palette(2, rot=-0.5, dark=0.3)

trace0 = go.Box(y=data.trestbps, name='trestbps', marker=dict(color='rgb(12, 12, 140)'))
trace1 = go.Box(y=data.chol, name='chol', marker=dict(color='rgb(12, 128, 128)'))
fig = [trace0, trace1]
iplot(fig)
code
16158861/cell_5
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('../input/heart.csv', sep=',')
data = data.sort_values(by=['age'])
data['genderText'] = ['male' if 1 == each else 'female' for each in data.sex]
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(data.corr(), annot=True, linewidths=0.5, fmt='.2f', ax=ax)
code
128030673/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/store-sales-time-series-forecasting/train.csv', index_col='id')
test_df = pd.read_csv('../input/store-sales-time-series-forecasting/test.csv', index_col='id')
train_df.date = pd.to_datetime(train_df.date)
test_df.date = pd.to_datetime(test_df.date)
train_df['year'] = train_df.date.dt.year
test_df['year'] = test_df.date.dt.year
train_df['month'] = train_df.date.dt.month
test_df['month'] = test_df.date.dt.month
train_df['dayofmonth'] = train_df.date.dt.day
test_df['dayofmonth'] = test_df.date.dt.day
train_df['dayofweek'] = train_df.date.dt.dayofweek
test_df['dayofweek'] = test_df.date.dt.dayofweek
train_df['dayname'] = train_df.date.dt.strftime('%A')
test_df['dayname'] = test_df.date.dt.strftime('%A')
train_df.family.unique()

fig = plt.figure(figsize=(10, 120))
fig.subplots_adjust(hspace=0.75)
for i, product in enumerate(train_df.family.unique()):
    ax = fig.add_subplot(33, 1, i + 1)
    select = train_df.query('family==@product')
    for year in [2013, 2014, 2015, 2016, 2017]:
        select.query('year==@year').groupby('month').sales.mean().plot(ax=ax, label=year)
    plt.title(product)
    ax.legend()

order = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
train_df.groupby('dayname').sales.mean().reindex(index=order).plot(kind='bar')
plt.title('Average Sales by Day of Week')
code
128030673/cell_9
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/store-sales-time-series-forecasting/train.csv', index_col='id')
test_df = pd.read_csv('../input/store-sales-time-series-forecasting/test.csv', index_col='id')
train_df.date = pd.to_datetime(train_df.date)
test_df.date = pd.to_datetime(test_df.date)
train_df['year'] = train_df.date.dt.year
test_df['year'] = test_df.date.dt.year
train_df['month'] = train_df.date.dt.month
test_df['month'] = test_df.date.dt.month
train_df['dayofmonth'] = train_df.date.dt.day
test_df['dayofmonth'] = test_df.date.dt.day
train_df['dayofweek'] = train_df.date.dt.dayofweek
test_df['dayofweek'] = test_df.date.dt.dayofweek
train_df['dayname'] = train_df.date.dt.strftime('%A')
test_df['dayname'] = test_df.date.dt.strftime('%A')
train_df.family.unique()

fig = plt.figure(figsize=(10, 120))
fig.subplots_adjust(hspace=0.75)
for i, product in enumerate(train_df.family.unique()):
    ax = fig.add_subplot(33, 1, i + 1)
    select = train_df.query('family==@product')
    for year in [2013, 2014, 2015, 2016, 2017]:
        select.query('year==@year').groupby('month').sales.mean().plot(ax=ax, label=year)
    plt.title(product)
    ax.legend()
code
128030673/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/store-sales-time-series-forecasting/train.csv', index_col='id')
test_df = pd.read_csv('../input/store-sales-time-series-forecasting/test.csv', index_col='id')
train_df.date = pd.to_datetime(train_df.date)
test_df.date = pd.to_datetime(test_df.date)
train_df['year'] = train_df.date.dt.year
test_df['year'] = test_df.date.dt.year
train_df['month'] = train_df.date.dt.month
test_df['month'] = test_df.date.dt.month
train_df['dayofmonth'] = train_df.date.dt.day
test_df['dayofmonth'] = test_df.date.dt.day
train_df['dayofweek'] = train_df.date.dt.dayofweek
test_df['dayofweek'] = test_df.date.dt.dayofweek
train_df['dayname'] = train_df.date.dt.strftime('%A')
test_df['dayname'] = test_df.date.dt.strftime('%A')
train_df.family.unique()

fig = plt.figure(figsize=(10, 120))
fig.subplots_adjust(hspace=0.75)
for i, product in enumerate(train_df.family.unique()):
    ax = fig.add_subplot(33, 1, i + 1)
    select = train_df.query('family==@product')
    for year in [2013, 2014, 2015, 2016, 2017]:
        select.query('year==@year').groupby('month').sales.mean().plot(ax=ax, label=year)
    plt.title(product)
    ax.legend()

order = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
oil = pd.read_csv('../input/store-sales-time-series-forecasting/oil.csv', index_col='date')
# NOTE: the oil.plot() call appears to have been stripped here in extraction (cell_16 has it)
plt.xticks(rotation=45)
stores = pd.read_csv('../input/store-sales-time-series-forecasting/stores.csv', index_col='store_nbr')
train_df = pd.merge(train_df, stores, how='left', on='store_nbr')
test_df = pd.merge(test_df, stores, how='left', on='store_nbr')
train_df.groupby(['type']).sales.mean().plot(kind='bar')
code
128030673/cell_6
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/store-sales-time-series-forecasting/train.csv', index_col='id')
test_df = pd.read_csv('../input/store-sales-time-series-forecasting/test.csv', index_col='id')
train_df.date = pd.to_datetime(train_df.date)
test_df.date = pd.to_datetime(test_df.date)
train_df['year'] = train_df.date.dt.year
test_df['year'] = test_df.date.dt.year
train_df['month'] = train_df.date.dt.month
test_df['month'] = test_df.date.dt.month
train_df['dayofmonth'] = train_df.date.dt.day
test_df['dayofmonth'] = test_df.date.dt.day
train_df['dayofweek'] = train_df.date.dt.dayofweek
test_df['dayofweek'] = test_df.date.dt.dayofweek
train_df['dayname'] = train_df.date.dt.strftime('%A')
test_df['dayname'] = test_df.date.dt.strftime('%A')
train_df
code
128030673/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/store-sales-time-series-forecasting/train.csv', index_col='id')
test_df = pd.read_csv('../input/store-sales-time-series-forecasting/test.csv', index_col='id')
train_df.date = pd.to_datetime(train_df.date)
test_df.date = pd.to_datetime(test_df.date)
train_df['year'] = train_df.date.dt.year
test_df['year'] = test_df.date.dt.year
train_df['month'] = train_df.date.dt.month
test_df['month'] = test_df.date.dt.month
train_df['dayofmonth'] = train_df.date.dt.day
test_df['dayofmonth'] = test_df.date.dt.day
train_df['dayofweek'] = train_df.date.dt.dayofweek
test_df['dayofweek'] = test_df.date.dt.dayofweek
train_df['dayname'] = train_df.date.dt.strftime('%A')
test_df['dayname'] = test_df.date.dt.strftime('%A')
train_df.family.unique()

fig = plt.figure(figsize=(10, 120))
fig.subplots_adjust(hspace=0.75)
for i, product in enumerate(train_df.family.unique()):
    ax = fig.add_subplot(33, 1, i + 1)
    select = train_df.query('family==@product')
    for year in [2013, 2014, 2015, 2016, 2017]:
        select.query('year==@year').groupby('month').sales.mean().plot(ax=ax, label=year)
    plt.title(product)
    ax.legend()

train_df.groupby('dayofmonth').sales.mean().plot(kind='bar')
plt.title('Sales Average by Day of Month')
code
128030673/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128030673/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/store-sales-time-series-forecasting/train.csv', index_col='id')
test_df = pd.read_csv('../input/store-sales-time-series-forecasting/test.csv', index_col='id')
train_df.date = pd.to_datetime(train_df.date)
test_df.date = pd.to_datetime(test_df.date)
train_df['year'] = train_df.date.dt.year
test_df['year'] = test_df.date.dt.year
train_df['month'] = train_df.date.dt.month
test_df['month'] = test_df.date.dt.month
train_df['dayofmonth'] = train_df.date.dt.day
test_df['dayofmonth'] = test_df.date.dt.day
train_df['dayofweek'] = train_df.date.dt.dayofweek
test_df['dayofweek'] = test_df.date.dt.dayofweek
train_df['dayname'] = train_df.date.dt.strftime('%A')
test_df['dayname'] = test_df.date.dt.strftime('%A')
train_df.family.unique()
code
128030673/cell_16
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/store-sales-time-series-forecasting/train.csv', index_col='id')
test_df = pd.read_csv('../input/store-sales-time-series-forecasting/test.csv', index_col='id')
train_df.date = pd.to_datetime(train_df.date)
test_df.date = pd.to_datetime(test_df.date)
train_df['year'] = train_df.date.dt.year
test_df['year'] = test_df.date.dt.year
train_df['month'] = train_df.date.dt.month
test_df['month'] = test_df.date.dt.month
train_df['dayofmonth'] = train_df.date.dt.day
test_df['dayofmonth'] = test_df.date.dt.day
train_df['dayofweek'] = train_df.date.dt.dayofweek
test_df['dayofweek'] = test_df.date.dt.dayofweek
train_df['dayname'] = train_df.date.dt.strftime('%A')
test_df['dayname'] = test_df.date.dt.strftime('%A')
train_df.family.unique()

fig = plt.figure(figsize=(10, 120))
fig.subplots_adjust(hspace=0.75)
for i, product in enumerate(train_df.family.unique()):
    ax = fig.add_subplot(33, 1, i + 1)
    select = train_df.query('family==@product')
    for year in [2013, 2014, 2015, 2016, 2017]:
        select.query('year==@year').groupby('month').sales.mean().plot(ax=ax, label=year)
    plt.title(product)
    ax.legend()

order = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
oil = pd.read_csv('../input/store-sales-time-series-forecasting/oil.csv', index_col='date')
oil.plot()
plt.xticks(rotation=45)
code
128030673/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/store-sales-time-series-forecasting/train.csv', index_col='id')
test_df = pd.read_csv('../input/store-sales-time-series-forecasting/test.csv', index_col='id')
train_df.head()
code
128030673/cell_22
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/store-sales-time-series-forecasting/train.csv', index_col='id')
test_df = pd.read_csv('../input/store-sales-time-series-forecasting/test.csv', index_col='id')
train_df.date = pd.to_datetime(train_df.date)
test_df.date = pd.to_datetime(test_df.date)
train_df['year'] = train_df.date.dt.year
test_df['year'] = test_df.date.dt.year
train_df['month'] = train_df.date.dt.month
test_df['month'] = test_df.date.dt.month
train_df['dayofmonth'] = train_df.date.dt.day
test_df['dayofmonth'] = test_df.date.dt.day
train_df['dayofweek'] = train_df.date.dt.dayofweek
test_df['dayofweek'] = test_df.date.dt.dayofweek
train_df['dayname'] = train_df.date.dt.strftime('%A')
test_df['dayname'] = test_df.date.dt.strftime('%A')
train_df.family.unique()

fig = plt.figure(figsize=(10, 120))
fig.subplots_adjust(hspace=0.75)
for i, product in enumerate(train_df.family.unique()):
    ax = fig.add_subplot(33, 1, i + 1)
    select = train_df.query('family==@product')
    for year in [2013, 2014, 2015, 2016, 2017]:
        select.query('year==@year').groupby('month').sales.mean().plot(ax=ax, label=year)
    plt.title(product)
    ax.legend()

order = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
oil = pd.read_csv('../input/store-sales-time-series-forecasting/oil.csv', index_col='date')
# NOTE: the oil.plot() call appears to have been stripped here in extraction (cell_16 has it)
plt.xticks(rotation=45)
stores = pd.read_csv('../input/store-sales-time-series-forecasting/stores.csv', index_col='store_nbr')
train_df = pd.merge(train_df, stores, how='left', on='store_nbr')
test_df = pd.merge(test_df, stores, how='left', on='store_nbr')

plt.figure(figsize=(10, 4))
ax1 = plt.subplot(1, 2, 1)
train_df.groupby(['city']).sales.mean().plot(kind='bar')
plt.title('Average Sales by City')
ax2 = plt.subplot(1, 2, 2)
train_df.groupby(['city'])['store_nbr'].nunique().plot(kind='bar')
plt.title('Number of Stores by City')
code
128030673/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('../input/store-sales-time-series-forecasting/train.csv', index_col='id')
test_df = pd.read_csv('../input/store-sales-time-series-forecasting/test.csv', index_col='id')
train_df.date = pd.to_datetime(train_df.date)
test_df.date = pd.to_datetime(test_df.date)
train_df['year'] = train_df.date.dt.year
test_df['year'] = test_df.date.dt.year
train_df['month'] = train_df.date.dt.month
test_df['month'] = test_df.date.dt.month
train_df['dayofmonth'] = train_df.date.dt.day
test_df['dayofmonth'] = test_df.date.dt.day
train_df['dayofweek'] = train_df.date.dt.dayofweek
test_df['dayofweek'] = test_df.date.dt.dayofweek
train_df['dayname'] = train_df.date.dt.strftime('%A')
test_df['dayname'] = test_df.date.dt.strftime('%A')
print('Train: ', min(train_df.date), max(train_df.date))
print('\n')
print('Test: ', min(test_df.date), max(test_df.date))
code
128018646/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

image_dir = '/kaggle/input/balanced-datasets/Adasyn_dataset'
df = pd.read_csv('/kaggle/input/balanced-datasets/Adasyn_dataset/labels.csv')

# Commented-out label preprocessing, kept as a string literal in the original cell:
'''
y_one_hot = np.array(df.drop(columns=["image"], axis=1))
y = np.argmax(y_one_hot, axis=1)
df["label"] = y
df["label"] = df["label"].astype(str)
df['image'] = df['image'] + '.jpg'
'''
code
128018646/cell_14
[ "text_plain_output_1.png" ]
from keras import models, layers, backend, optimizers, regularizers, metrics  # for model manipulation
from keras.applications import MobileNet
from keras.applications import VGG16
from keras.applications.resnet import ResNet50
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
# Assumed import: ImageDataGenerator is used below but its import is missing from the extracted cell
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split, StratifiedKFold, KFold, cross_val_score, GridSearchCV
from tensorflow.keras.applications import EfficientNetB0
import matplotlib.pyplot as plt  # for plotting results
import pandas as pd
import tensorflow as tf

image_dir = '/kaggle/input/balanced-datasets/Adasyn_dataset'
df = pd.read_csv('/kaggle/input/balanced-datasets/Adasyn_dataset/labels.csv')

# Commented-out label preprocessing, kept as a string literal in the original cell:
'''
y_one_hot = np.array(df.drop(columns=["image"], axis=1))
y = np.argmax(y_one_hot, axis=1)
df["label"] = y
df["label"] = df["label"].astype(str)
df['image'] = df['image'] + '.jpg'
'''

def choose_conv_base(name='from_scratch', input_shape=(224, 224, 3)):
    if name == 'from_scratch':
        conv_base = models.Sequential()
        conv_base.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))
        conv_base.add(layers.MaxPooling2D((2, 2)))
        conv_base.add(layers.Conv2D(64, (3, 3), activation='relu'))
        conv_base.add(layers.MaxPooling2D((2, 2)))
        conv_base.add(layers.Conv2D(128, (3, 3), activation='relu'))
        conv_base.add(layers.MaxPooling2D((2, 2)))
        conv_base.add(layers.Conv2D(128, (3, 3), activation='relu'))
        conv_base.add(layers.MaxPooling2D((2, 2)))
    elif name == 'mobilenet':
        conv_base = MobileNet(weights='imagenet', include_top=False, input_shape=input_shape)
        conv_base.trainable = False
    elif name == 'vgg16':
        conv_base = VGG16(weights='imagenet', include_top=False, input_shape=input_shape)
        conv_base.trainable = False
    elif name == 'resnet':
        conv_base = ResNet50(weights='imagenet', include_top=False, input_shape=input_shape)
        conv_base.trainable = False
    elif name == 'efficientnet':
        conv_base = EfficientNetB0(weights='imagenet', include_top=False, input_shape=input_shape)
        conv_base.trainable = False
    return conv_base

def build_model(input=(224, 224, 3), loss='categorical_crossentropy', optimizer=tf.keras.optimizers.experimental.AdamW()):
    model = models.Sequential()
    model.add(choose_conv_base(name='mobilenet'))
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dropout(0.2))
    model.add(layers.Dense(32, activation='relu'))
    model.add(layers.Dense(7, activation='softmax'))
    model.compile(optimizer='adam', loss=loss, metrics=['accuracy'])
    return model

es = EarlyStopping(monitor='val_accuracy', verbose=1, min_delta=0.01, patience=10)
mc = ModelCheckpoint(monitor='val_accuracy', verbose=1, filepath='./1_best.h5', save_best_only=True)
reducelr = ReduceLROnPlateau(monitor='val_accuracy', verbose=1, patience=5, factor=0.5, min_lr=1e-07)
cb = [mc]

batch_size = 32
epochs = 100

train_val_df, test_df = train_test_split(df, stratify=df['label'], test_size=0.1, random_state=42)
train_df, validation_df = train_test_split(train_val_df, stratify=train_val_df['label'], test_size=0.2, random_state=42)

augmented_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=40, width_shift_range=0.2,
                                       height_shift_range=0.2, shear_range=0.2, zoom_range=0.2,
                                       horizontal_flip=True, fill_mode='nearest')
datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = datagen.flow_from_dataframe(train_df, directory=image_dir, batch_size=batch_size,
                                              target_size=(224, 224), x_col='filename', y_col='label',
                                              class_mode='categorical', shuffle=True)
val_generator = datagen.flow_from_dataframe(validation_df, directory=image_dir, batch_size=batch_size,
                                            target_size=(224, 224), x_col='filename', y_col='label',
                                            class_mode='categorical', shuffle=True)

model = build_model()
history = model.fit(train_generator, steps_per_epoch=len(train_df) // batch_size, epochs=epochs,
                    validation_data=val_generator, validation_steps=len(validation_df) // batch_size,
                    callbacks=cb)

history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
acc_values = history_dict['accuracy']
val_acc_values = history_dict['val_accuracy']

def smooth_curve(points, factor=0.8):
    smoothed_points = []
    for point in points:
        if smoothed_points:
            previous = smoothed_points[-1]
            smoothed_points.append(previous * factor + point * (1 - factor))
        else:
            smoothed_points.append(point)
    return smoothed_points

epochs = range(1, len(loss_values) + 1)

datagen = ImageDataGenerator(rescale=1.0 / 255)
test_generator = datagen.flow_from_dataframe(test_df, directory=image_dir, x_col='filename', y_col='label',
                                             target_size=(224, 224), batch_size=20, class_mode='categorical')

print('last model testing:')
result = model.evaluate(test_generator)
print(result)
print('best model testing:')
best_model = models.load_model('./1_best.h5')
result = best_model.evaluate(test_generator)
print(result)
code
128018646/cell_10
[ "text_plain_output_1.png" ]
from keras import models, layers, backend, optimizers, regularizers, metrics  # for model manipulation
from keras.applications import MobileNet
from keras.applications import VGG16
from keras.applications.resnet import ResNet50
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator  # assumed import: ImageDataGenerator is used below but missing from the extracted source
from sklearn.model_selection import train_test_split, StratifiedKFold, KFold, cross_val_score, GridSearchCV
from tensorflow.keras.applications import EfficientNetB0
import matplotlib.pyplot as plt  # for plotting results
import pandas as pd
import tensorflow as tf

image_dir = '/kaggle/input/balanced-datasets/Adasyn_dataset'
df = pd.read_csv('/kaggle/input/balanced-datasets/Adasyn_dataset/labels.csv')
'\ny_one_hot = np.array(df.drop(columns = ["image"], axis = 1))\ny = np.argmax(y_one_hot, axis = 1)\ndf["label"] = y\ndf["label"] = df["label"].astype(str)\ndf[\'image\'] = df[\'image\']+\'.jpg\'\n'
from keras.applications import MobileNet
from keras.applications import VGG16
from keras.applications.resnet import ResNet50
from tensorflow.keras.applications import EfficientNetB0

def choose_conv_base(name='from_scratch', input_shape=(224, 224, 3)):
    # returns either a small CNN built from scratch or a frozen pretrained backbone
    if name == 'from_scratch':
        conv_base = models.Sequential()
        conv_base.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))
        conv_base.add(layers.MaxPooling2D((2, 2)))
        conv_base.add(layers.Conv2D(64, (3, 3), activation='relu'))
        conv_base.add(layers.MaxPooling2D((2, 2)))
        conv_base.add(layers.Conv2D(128, (3, 3), activation='relu'))
        conv_base.add(layers.MaxPooling2D((2, 2)))
        conv_base.add(layers.Conv2D(128, (3, 3), activation='relu'))
        conv_base.add(layers.MaxPooling2D((2, 2)))
    elif name == 'mobilenet':
        conv_base = MobileNet(weights='imagenet', include_top=False, input_shape=input_shape)
        conv_base.trainable = False
    elif name == 'vgg16':
        conv_base = VGG16(weights='imagenet', include_top=False, input_shape=input_shape)
        conv_base.trainable = False
    elif name == 'resnet':
        conv_base = ResNet50(weights='imagenet', include_top=False, input_shape=input_shape)
        conv_base.trainable = False
    elif name == 'efficientnet':
        conv_base = EfficientNetB0(weights='imagenet', include_top=False, input_shape=input_shape)
        conv_base.trainable = False
    return conv_base

def build_model(input=(224, 224, 3), loss='categorical_crossentropy', optimizer=tf.keras.optimizers.experimental.AdamW()):
    model = models.Sequential()
    model.add(choose_conv_base(name='mobilenet'))
    model.add(layers.GlobalAveragePooling2D())
    model.add(layers.Dropout(0.2))
    model.add(layers.Dense(32, activation='relu'))
    model.add(layers.Dense(7, activation='softmax'))
    model.compile(optimizer='adam', loss=loss, metrics=['accuracy'])
    return model

es = EarlyStopping(monitor='val_accuracy', verbose=1, min_delta=0.01, patience=10)
mc = ModelCheckpoint(monitor='val_accuracy', verbose=1, filepath='./1_best.h5', save_best_only=True)
reducelr = ReduceLROnPlateau(monitor='val_accuracy', verbose=1, patience=5, factor=0.5, min_lr=1e-07)
cb = [mc]
batch_size = 32
epochs = 100
""""""
train_val_df, test_df = train_test_split(df, stratify=df['label'], test_size=0.1, random_state=42)
train_df, validation_df = train_test_split(train_val_df, stratify=train_val_df['label'], test_size=0.2, random_state=42)
augmented_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest')
datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = datagen.flow_from_dataframe(train_df, directory=image_dir, batch_size=batch_size, target_size=(224, 224), x_col='filename', y_col='label', class_mode='categorical', shuffle=True)
val_generator = datagen.flow_from_dataframe(validation_df, directory=image_dir, batch_size=batch_size, target_size=(224, 224), x_col='filename', y_col='label', class_mode='categorical', shuffle=True)
model = build_model()
history = model.fit(train_generator, steps_per_epoch=len(train_df) // batch_size, epochs=epochs, validation_data=val_generator, validation_steps=len(validation_df) // batch_size, callbacks=cb)
history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
acc_values = history_dict['accuracy']
val_acc_values = history_dict['val_accuracy']

def smooth_curve(points, factor=0.8):
    # exponentially weighted smoothing of a metric curve
    smoothed_points = []
    for point in points:
        if smoothed_points:
            previous = smoothed_points[-1]
            smoothed_points.append(previous * factor + point * (1 - factor))
        else:
            smoothed_points.append(point)
    return smoothed_points

epochs = range(1, len(loss_values) + 1)
plt.subplot(1, 2, 1)
plt.plot(epochs, smooth_curve(loss_values), 'bo', label='training loss')
plt.plot(epochs, smooth_curve(val_loss_values), 'b', label='validation loss')
plt.title('training and validation loss')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(epochs, acc_values, 'ro', label='training accuracy')
plt.plot(epochs, val_acc_values, 'r', label='validation accuracy')
plt.title('training and validation accuracy')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend()
plt.show()
code
128018646/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
""" #kfold crossvalidation augmented_datagen = ImageDataGenerator(rescale=1./255, shear_range = 0.2 ,rotation_range=40, width_shift_range=0.2, height_shift_range=0.2, zoom_range=0.2, horizontal_flip=True) datagen = ImageDataGenerator(rescale=1./255) kf = StratifiedKFold(n_splits = 5, random_state = 7, shuffle = True) loss_values = val_loss_values = acc_values = val_acc_values = total_scores = [] i = 1 for train_index, val_index in kf.split(np.zeros(len(df)),df["label"]): print(f"processing fold: {i}") train_df = df.iloc[train_index] validation_df = df.iloc[val_index] train_generator = datagen.flow_from_dataframe(train_df, directory = image_dir, batch_size = batch_size, target_size = (224,224), x_col = "filename", y_col = "label", class_mode = "categorical", shuffle = True) val_generator = datagen.flow_from_dataframe(validation_df, directory = image_dir, batch_size = batch_size, target_size = (224,224), x_col = "filename", y_col = "label", class_mode = "categorical", shuffle = True) model = build_model() history = model.fit(train_generator, epochs = epochs, steps_per_epoch = train_generator.samples // batch_size, callbacks = cb, verbose = 1) val_results = model.evaluate(val_generator) #storing loss and acc, using the last value in history total_scores.append(val_results[-1]) print(f"accuracy for fold {i}: {total_scores[-1]}") i+=1 print(f"mean of all accuracies: {np.mean(total_scores)}") """
code
90131532/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 746
data = pd.read_csv('/kaggle/input/body-fat-prediction-dataset/bodyfat.csv')
data.drop('Density', axis=1, inplace=True)
data.isna().any()
code
90131532/cell_34
[ "text_plain_output_1.png" ]
from sklearn.linear_model import Lasso
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 746
data = pd.read_csv('/kaggle/input/body-fat-prediction-dataset/bodyfat.csv')
data.drop('Density', axis=1, inplace=True)
data.isna().any()
data.drop([171, 181], inplace=True)
data.corr()
X = data.iloc[:, 1:].values
y = data.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=SEED)
lasso_model = Lasso()
params = {'alpha': np.linspace(0, 2, 200)}
lasso_grid = GridSearchCV(lasso_model, param_grid=params, cv=4, refit=True, n_jobs=-1, verbose=2)
lasso_grid.fit(X_train, y_train)
lasso_grid_results = pd.DataFrame(lasso_grid.cv_results_)
lasso_grid_results = lasso_grid_results.sort_values('rank_test_score')
lasso_grid_results.head(10)
code
90131532/cell_23
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 746
data = pd.read_csv('/kaggle/input/body-fat-prediction-dataset/bodyfat.csv')
data.drop('Density', axis=1, inplace=True)
data.isna().any()
data.drop([171, 181], inplace=True)
data.corr()
plt.scatter(data['Chest'], data['BodyFat'])
plt.xlabel('Chest Circumference (cm)')
plt.ylabel('Percent Body Fat')
plt.show()
code
90131532/cell_33
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.linear_model import Lasso
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 746
data = pd.read_csv('/kaggle/input/body-fat-prediction-dataset/bodyfat.csv')
data.drop('Density', axis=1, inplace=True)
data.isna().any()
data.drop([171, 181], inplace=True)
data.corr()
X = data.iloc[:, 1:].values
y = data.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=SEED)
lasso_model = Lasso()
params = {'alpha': np.linspace(0, 2, 200)}
lasso_grid = GridSearchCV(lasso_model, param_grid=params, cv=4, refit=True, n_jobs=-1, verbose=2)
lasso_grid.fit(X_train, y_train)
print(lasso_grid.best_params_)
print(lasso_grid.best_score_)
code
90131532/cell_44
[ "image_output_1.png" ]
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Lasso
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 746
data = pd.read_csv('/kaggle/input/body-fat-prediction-dataset/bodyfat.csv')
data.drop('Density', axis=1, inplace=True)
data.isna().any()
data.drop([171, 181], inplace=True)
data.corr()
X = data.iloc[:, 1:].values
y = data.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=SEED)
lasso_model = Lasso()
params = {'alpha': np.linspace(0, 2, 200)}
lasso_grid = GridSearchCV(lasso_model, param_grid=params, cv=4, refit=True, n_jobs=-1, verbose=2)
lasso_grid.fit(X_train, y_train)
lasso_grid_results = pd.DataFrame(lasso_grid.cv_results_)
lasso_grid_results = lasso_grid_results.sort_values('rank_test_score')
lasso_predictions = lasso_grid.predict(X_test)
lasso_r2 = r2_score(y_test, lasso_predictions)
lasso_rmse = np.sqrt(mean_squared_error(y_test, lasso_predictions))
coefficients = lasso_grid.best_estimator_.coef_
coefficients = np.abs(coefficients)
names = data.columns[1:]
plt.xticks(rotation=60)
rf_model = RandomForestRegressor()
params = {'n_estimators': np.arange(50, 401, 100), 'max_features': ['auto', 'sqrt'], 'max_depth': np.arange(5, 11), 'min_samples_split': [2, 4, 6], 'min_samples_leaf': [1, 2, 4]}
rf_grid = GridSearchCV(estimator=rf_model, param_grid=params, cv=4, refit=True, n_jobs=-1, verbose=2)
rf_grid.fit(X_train, y_train)
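# Hedged follow-up sketch (not in the original cell): scoring the fitted random
# forest grid on the held-out split, mirroring the Lasso evaluation above.
# All names used (rf_grid, X_test, y_test, r2_score, mean_squared_error, np)
# are already defined or imported earlier in this cell.
rf_predictions = rf_grid.predict(X_test)
print('R2 Score: ', r2_score(y_test, rf_predictions))
print('RMSE Score: ', np.sqrt(mean_squared_error(y_test, rf_predictions)))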
code
90131532/cell_20
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 746
data = pd.read_csv('/kaggle/input/body-fat-prediction-dataset/bodyfat.csv')
data.drop('Density', axis=1, inplace=True)
data.isna().any()
data.drop([171, 181], inplace=True)
data.corr()
code
90131532/cell_40
[ "text_plain_output_1.png" ]
from sklearn.linear_model import Lasso
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 746
data = pd.read_csv('/kaggle/input/body-fat-prediction-dataset/bodyfat.csv')
data.drop('Density', axis=1, inplace=True)
data.isna().any()
data.drop([171, 181], inplace=True)
data.corr()
X = data.iloc[:, 1:].values
y = data.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=SEED)
lasso_model = Lasso()
params = {'alpha': np.linspace(0, 2, 200)}
lasso_grid = GridSearchCV(lasso_model, param_grid=params, cv=4, refit=True, n_jobs=-1, verbose=2)
lasso_grid.fit(X_train, y_train)
lasso_grid_results = pd.DataFrame(lasso_grid.cv_results_)
lasso_grid_results = lasso_grid_results.sort_values('rank_test_score')
lasso_predictions = lasso_grid.predict(X_test)
lasso_r2 = r2_score(y_test, lasso_predictions)
lasso_rmse = np.sqrt(mean_squared_error(y_test, lasso_predictions))
coefficients = lasso_grid.best_estimator_.coef_
coefficients = np.abs(coefficients)
names = data.columns[1:]
plt.plot(names, coefficients)
plt.xticks(rotation=60)
plt.show()
code
90131532/cell_11
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 746
data = pd.read_csv('/kaggle/input/body-fat-prediction-dataset/bodyfat.csv')
data.drop('Density', axis=1, inplace=True)
data.info()
code
90131532/cell_19
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 746
data = pd.read_csv('/kaggle/input/body-fat-prediction-dataset/bodyfat.csv')
data.drop('Density', axis=1, inplace=True)
data.isna().any()
data.drop([171, 181], inplace=True)
data.describe()
code
90131532/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90131532/cell_28
[ "image_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 746
data = pd.read_csv('/kaggle/input/body-fat-prediction-dataset/bodyfat.csv')
data.drop('Density', axis=1, inplace=True)
data.isna().any()
data.drop([171, 181], inplace=True)
data.corr()
X = data.iloc[:, 1:].values
y = data.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=SEED)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
code
90131532/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 746
data = pd.read_csv('/kaggle/input/body-fat-prediction-dataset/bodyfat.csv')
print(data.shape)
data.head()
code
90131532/cell_16
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 746
data = pd.read_csv('/kaggle/input/body-fat-prediction-dataset/bodyfat.csv')
data.drop('Density', axis=1, inplace=True)
data.isna().any()
data[data['BodyFat'] <= 5]
code
90131532/cell_31
[ "text_plain_output_1.png" ]
from sklearn.linear_model import Lasso
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 746
data = pd.read_csv('/kaggle/input/body-fat-prediction-dataset/bodyfat.csv')
data.drop('Density', axis=1, inplace=True)
data.isna().any()
data.drop([171, 181], inplace=True)
data.corr()
X = data.iloc[:, 1:].values
y = data.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=SEED)
lasso_model = Lasso()
params = {'alpha': np.linspace(0, 2, 200)}
lasso_grid = GridSearchCV(lasso_model, param_grid=params, cv=4, refit=True, n_jobs=-1, verbose=2)
lasso_grid.fit(X_train, y_train)
code
90131532/cell_24
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 746
data = pd.read_csv('/kaggle/input/body-fat-prediction-dataset/bodyfat.csv')
data.drop('Density', axis=1, inplace=True)
data.isna().any()
data.drop([171, 181], inplace=True)
data.corr()
plt.scatter(data['Abdomen'], data['BodyFat'])
plt.xlabel('Abdomen Circumference (cm)')
plt.ylabel('Percent Body Fat')
plt.show()
code
90131532/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 746
data = pd.read_csv('/kaggle/input/body-fat-prediction-dataset/bodyfat.csv')
data.drop('Density', axis=1, inplace=True)
data.isna().any()
data.describe()
code
90131532/cell_22
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 746
data = pd.read_csv('/kaggle/input/body-fat-prediction-dataset/bodyfat.csv')
data.drop('Density', axis=1, inplace=True)
data.isna().any()
data.drop([171, 181], inplace=True)
data.corr()
plt.scatter(data['Weight'], data['BodyFat'])
plt.xlabel('Body Weight (lbs)')
plt.ylabel('Percent Body Fat')
plt.show()
code
90131532/cell_37
[ "text_html_output_1.png" ]
from sklearn.linear_model import Lasso
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error

pd.options.display.max_rows = None
pd.options.display.max_columns = None
SEED = 746
data = pd.read_csv('/kaggle/input/body-fat-prediction-dataset/bodyfat.csv')
data.drop('Density', axis=1, inplace=True)
data.isna().any()
data.drop([171, 181], inplace=True)
data.corr()
X = data.iloc[:, 1:].values
y = data.iloc[:, 0].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=SEED)
lasso_model = Lasso()
params = {'alpha': np.linspace(0, 2, 200)}
lasso_grid = GridSearchCV(lasso_model, param_grid=params, cv=4, refit=True, n_jobs=-1, verbose=2)
lasso_grid.fit(X_train, y_train)
lasso_grid_results = pd.DataFrame(lasso_grid.cv_results_)
lasso_grid_results = lasso_grid_results.sort_values('rank_test_score')
lasso_predictions = lasso_grid.predict(X_test)
lasso_r2 = r2_score(y_test, lasso_predictions)
lasso_rmse = np.sqrt(mean_squared_error(y_test, lasso_predictions))
print('R2 Score: ', lasso_r2)
print('RMSE Score: ', lasso_rmse)
code
128014636/cell_21
[ "text_plain_output_1.png" ]
from collections import Counter
from collections import Counter
from itertools import permutations
from numpy.random import choice
from scipy.special import binom
import numpy as np  # linear algebra

n, m = (4, 2)

def functional(m, n):
    antennas = [0 for i in range(m)] + [1 for i in range(n - m)]
    network = set(permutations(antennas))
    failure = 0
    for i in network:
        for j in range(len(i) - 1):
            if [i[j], i[j + 1]] == [0, 0]:
                failure += 1
                break
    k = len(network)
    return (k - failure) / k

colors = ['clubs', 'diamonds', 'hearts', 'spades']

def draw():
    # np.random.choice(colors,size=5)
    return list(zip(np.random.choice(range(1, 14), size=5), np.random.choice(colors, size=5)))

d = draw()
d

def is_straight(draw):
    if len(set([i[1] for i in draw])) == 1:
        return False
    else:
        nums = np.sort([i[0] for i in draw])
        return np.all(nums == np.array([nums[0] + i for i in range(5)]))

is_straight(d)
# monte carlo simulation
n = 100_000
count = 0
for i in range(n):
    d = draw()
    if is_straight(d):
        count += 1
print(count/n)
'''A 5-card poker hand is said to be a full house if it consists of 3 cards of the same denomination and 2 other cards of the same denomination (of course, different from the first denomination). Thus, a full house is three of a kind plus a pair. What is the probability that one is dealt a full house?'''

def full_house(draw):
    return set(Counter([i[0] for i in draw]).values()) == {3,2}

d = draw()
full_house(d)
n = 100000
count = 0
for i in range(n):
    d = np.random.choice([i % 13 for i in range(52)], replace=False, size=52).reshape(4, 13)
    if np.all([0 in d[j] for j in range(4)]):
        count += 1
n = 100_000
count = 0
for i in range(n):
    d = draw()
    # print(np.random.randint(0,13,size=(4,13)))
    # 0 in draw[0]
    if full_house(d):
        # print(draw)
        count += 1
        # print(draw,'\n',i)
        # np.all([0 is in i[j]])
print(f'Probability of full house is : {count/n}')
coin = list('HT')
n = 100000
for i in range(4):
    count = 0
    j = 0
    while j < n:
        toss = choice(coin, size=3)
        if Counter(toss)['H'] == i:
            count += 1
        j += 1
balls = [i for i in range(1, 21)]
choice(balls, 4, replace=False)
n = 10000
for i in range(4, 21):
    j = 0
    count = 0
    while j < n:
        if choice(balls, 4, replace=False).max() == i:
            count += 1
        j += 1
    print(f'Probability of X={i} by simulation and by formula are : {(count / n, binom(i - 1, 3) / binom(20, 4))} respectively')
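# Why the closed form above works: the sample maximum equals i exactly when ball i
# is drawn and the other three draws all come from the i - 1 smaller balls, giving
# P(X = i) = C(i-1, 3) / C(20, 4). Worked check for the smallest case i = 4:
# C(3, 3) / C(20, 4) = 1 / 4845, roughly 0.000206.
print(binom(3, 3) / binom(20, 4))  # sanity check of the i = 4 case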
code
128014636/cell_13
[ "text_plain_output_1.png" ]
from collections import Counter
from itertools import permutations
from scipy.special import binom
import numpy as np  # linear algebra

n, m = (4, 2)

def functional(m, n):
    antennas = [0 for i in range(m)] + [1 for i in range(n - m)]
    network = set(permutations(antennas))
    failure = 0
    for i in network:
        for j in range(len(i) - 1):
            if [i[j], i[j + 1]] == [0, 0]:
                failure += 1
                break
    k = len(network)
    return (k - failure) / k

colors = ['clubs', 'diamonds', 'hearts', 'spades']

def draw():
    # np.random.choice(colors,size=5)
    return list(zip(np.random.choice(range(1, 14), size=5), np.random.choice(colors, size=5)))

d = draw()
d

def is_straight(draw):
    if len(set([i[1] for i in draw])) == 1:
        return False
    else:
        nums = np.sort([i[0] for i in draw])
        return np.all(nums == np.array([nums[0] + i for i in range(5)]))

is_straight(d)
# monte carlo simulation
n = 100_000
count = 0
for i in range(n):
    d = draw()
    if is_straight(d):
        count += 1
print(count/n)
'''A 5-card poker hand is said to be a full house if it consists of 3 cards of the same denomination and 2 other cards of the same denomination (of course, different from the first denomination). Thus, a full house is three of a kind plus a pair. What is the probability that one is dealt a full house?'''

def full_house(draw):
    return set(Counter([i[0] for i in draw]).values()) == {3,2}

d = draw()
full_house(d)
n = 100000
count = 0
for i in range(n):
    d = np.random.choice([i % 13 for i in range(52)], replace=False, size=52).reshape(4, 13)
    if np.all([0 in d[j] for j in range(4)]):
        count += 1
print(f'Probability that each player receives 1 ace is : {count / n}')
code
128014636/cell_9
[ "text_plain_output_1.png" ]
from itertools import permutations
from scipy.special import binom
import numpy as np  # linear algebra

n, m = (4, 2)

def functional(m, n):
    antennas = [0 for i in range(m)] + [1 for i in range(n - m)]
    network = set(permutations(antennas))
    failure = 0
    for i in network:
        for j in range(len(i) - 1):
            if [i[j], i[j + 1]] == [0, 0]:
                failure += 1
                break
    k = len(network)
    return (k - failure) / k

colors = ['clubs', 'diamonds', 'hearts', 'spades']

def draw():
    # np.random.choice(colors,size=5)
    return list(zip(np.random.choice(range(1, 14), size=5), np.random.choice(colors, size=5)))

d = draw()
d

def is_straight(draw):
    if len(set([i[1] for i in draw])) == 1:
        return False
    else:
        nums = np.sort([i[0] for i in draw])
        return np.all(nums == np.array([nums[0] + i for i in range(5)]))

is_straight(d)
n = 100000
count = 0
for i in range(n):
    d = draw()
    if is_straight(d):
        count += 1
print(count / n)
code
128014636/cell_11
[ "text_plain_output_1.png" ]
from collections import Counter
from itertools import permutations
from scipy.special import binom
import numpy as np  # linear algebra

n, m = (4, 2)

def functional(m, n):
    antennas = [0 for i in range(m)] + [1 for i in range(n - m)]
    network = set(permutations(antennas))
    failure = 0
    for i in network:
        for j in range(len(i) - 1):
            if [i[j], i[j + 1]] == [0, 0]:
                failure += 1
                break
    k = len(network)
    return (k - failure) / k

colors = ['clubs', 'diamonds', 'hearts', 'spades']

def draw():
    # np.random.choice(colors,size=5)
    return list(zip(np.random.choice(range(1, 14), size=5), np.random.choice(colors, size=5)))

d = draw()
d

def is_straight(draw):
    if len(set([i[1] for i in draw])) == 1:
        return False
    else:
        nums = np.sort([i[0] for i in draw])
        return np.all(nums == np.array([nums[0] + i for i in range(5)]))

is_straight(d)
# monte carlo simulation
n = 100_000
count = 0
for i in range(n):
    d = draw()
    if is_straight(d):
        count += 1
print(count/n)
"""A 5-card poker hand is said to be a full house if it consists of 3 cards of the same denomination and 2 other cards of the same denomination (of course, different from the first denomination). Thus, a full house is three of a kind plus a pair. What is the probability that one is dealt a full house?"""

def full_house(draw):
    return set(Counter([i[0] for i in draw]).values()) == {3, 2}

d = draw()
full_house(d)
code
128014636/cell_19
[ "text_plain_output_1.png" ]
from collections import Counter
from collections import Counter
from itertools import permutations
from numpy.random import choice
from scipy.special import binom
import numpy as np  # linear algebra

n, m = (4, 2)

def functional(m, n):
    antennas = [0 for i in range(m)] + [1 for i in range(n - m)]
    network = set(permutations(antennas))
    failure = 0
    for i in network:
        for j in range(len(i) - 1):
            if [i[j], i[j + 1]] == [0, 0]:
                failure += 1
                break
    k = len(network)
    return (k - failure) / k

colors = ['clubs', 'diamonds', 'hearts', 'spades']

def draw():
    # np.random.choice(colors,size=5)
    return list(zip(np.random.choice(range(1, 14), size=5), np.random.choice(colors, size=5)))

d = draw()
d

def is_straight(draw):
    if len(set([i[1] for i in draw])) == 1:
        return False
    else:
        nums = np.sort([i[0] for i in draw])
        return np.all(nums == np.array([nums[0] + i for i in range(5)]))

is_straight(d)
# monte carlo simulation
n = 100_000
count = 0
for i in range(n):
    d = draw()
    if is_straight(d):
        count += 1
print(count/n)
'''A 5-card poker hand is said to be a full house if it consists of 3 cards of the same denomination and 2 other cards of the same denomination (of course, different from the first denomination). Thus, a full house is three of a kind plus a pair. What is the probability that one is dealt a full house?'''

def full_house(draw):
    return set(Counter([i[0] for i in draw]).values()) == {3,2}

d = draw()
full_house(d)
n = 100000
count = 0
for i in range(n):
    d = np.random.choice([i % 13 for i in range(52)], replace=False, size=52).reshape(4, 13)
    if np.all([0 in d[j] for j in range(4)]):
        count += 1
n = 100_000
count = 0
for i in range(n):
    d = draw()
    # print(np.random.randint(0,13,size=(4,13)))
    # 0 in draw[0]
    if full_house(d):
        # print(draw)
        count += 1
        # print(draw,'\n',i)
        # np.all([0 is in i[j]])
print(f'Probability of full house is : {count/n}')
coin = list('HT')
n = 100000
for i in range(4):
    count = 0
    j = 0
    while j < n:
        toss = choice(coin, size=3)
        if Counter(toss)['H'] == i:
            count += 1
        j += 1
    print(f'Getting {i} heads in 3 tosses, has probability {count / n}')
code
128014636/cell_7
[ "text_plain_output_1.png" ]
from itertools import permutations
from scipy.special import binom
import numpy as np  # linear algebra

n, m = (4, 2)

def functional(m, n):
    antennas = [0 for i in range(m)] + [1 for i in range(n - m)]
    network = set(permutations(antennas))
    failure = 0
    for i in network:
        for j in range(len(i) - 1):
            if [i[j], i[j + 1]] == [0, 0]:
                failure += 1
                break
    k = len(network)
    return (k - failure) / k

colors = ['clubs', 'diamonds', 'hearts', 'spades']

def draw():
    # np.random.choice(colors,size=5)
    return list(zip(np.random.choice(range(1, 14), size=5), np.random.choice(colors, size=5)))

d = draw()
d
code
128014636/cell_8
[ "text_plain_output_1.png" ]
from itertools import permutations
from scipy.special import binom
import numpy as np  # linear algebra

n, m = (4, 2)

def functional(m, n):
    antennas = [0 for i in range(m)] + [1 for i in range(n - m)]
    network = set(permutations(antennas))
    failure = 0
    for i in network:
        for j in range(len(i) - 1):
            if [i[j], i[j + 1]] == [0, 0]:
                failure += 1
                break
    k = len(network)
    return (k - failure) / k

colors = ['clubs', 'diamonds', 'hearts', 'spades']

def draw():
    # np.random.choice(colors,size=5)
    return list(zip(np.random.choice(range(1, 14), size=5), np.random.choice(colors, size=5)))

d = draw()
d

def is_straight(draw):
    if len(set([i[1] for i in draw])) == 1:
        return False
    else:
        nums = np.sort([i[0] for i in draw])
        return np.all(nums == np.array([nums[0] + i for i in range(5)]))

is_straight(d)
code
128014636/cell_16
[ "text_plain_output_1.png" ]
from collections import Counter
from itertools import permutations
from scipy.special import binom
import numpy as np  # linear algebra

n, m = (4, 2)

def functional(m, n):
    antennas = [0 for i in range(m)] + [1 for i in range(n - m)]
    network = set(permutations(antennas))
    failure = 0
    for i in network:
        for j in range(len(i) - 1):
            if [i[j], i[j + 1]] == [0, 0]:
                failure += 1
                break
    k = len(network)
    return (k - failure) / k

colors = ['clubs', 'diamonds', 'hearts', 'spades']

def draw():
    # np.random.choice(colors,size=5)
    return list(zip(np.random.choice(range(1, 14), size=5), np.random.choice(colors, size=5)))

d = draw()
d

def is_straight(draw):
    if len(set([i[1] for i in draw])) == 1:
        return False
    else:
        nums = np.sort([i[0] for i in draw])
        return np.all(nums == np.array([nums[0] + i for i in range(5)]))

is_straight(d)
# monte carlo simulation
n = 100_000
count = 0
for i in range(n):
    d = draw()
    if is_straight(d):
        count += 1
print(count/n)
'''A 5-card poker hand is said to be a full house if it consists of 3 cards of the same denomination and 2 other cards of the same denomination (of course, different from the first denomination). Thus, a full house is three of a kind plus a pair. What is the probability that one is dealt a full house?'''

def full_house(draw):
    return set(Counter([i[0] for i in draw]).values()) == {3,2}

d = draw()
full_house(d)
n = 100000
count = 0
for i in range(n):
    d = np.random.choice([i % 13 for i in range(52)], replace=False, size=52).reshape(4, 13)
    if np.all([0 in d[j] for j in range(4)]):
        count += 1
n = 100_000
count = 0
for i in range(n):
    d = draw()
    # print(np.random.randint(0,13,size=(4,13)))
    # 0 in draw[0]
    if full_house(d):
        # print(draw)
        count += 1
        # print(draw,'\n',i)
        # np.all([0 is in i[j]])
print(f'Probability of full house is : {count/n}')
earn = 0
for i in range(n):
    earn += np.random.choice(range(1, 7))
earn / n
code
128014636/cell_14
[ "text_plain_output_1.png" ]
from collections import Counter
from itertools import permutations
from scipy.special import binom
import numpy as np  # linear algebra

n, m = (4, 2)

def functional(m, n):
    antennas = [0 for i in range(m)] + [1 for i in range(n - m)]
    network = set(permutations(antennas))
    failure = 0
    for i in network:
        for j in range(len(i) - 1):
            if [i[j], i[j + 1]] == [0, 0]:
                failure += 1
                break
    k = len(network)
    return (k - failure) / k

colors = ['clubs', 'diamonds', 'hearts', 'spades']

def draw():
    # np.random.choice(colors,size=5)
    return list(zip(np.random.choice(range(1, 14), size=5), np.random.choice(colors, size=5)))

d = draw()
d

def is_straight(draw):
    if len(set([i[1] for i in draw])) == 1:
        return False
    else:
        nums = np.sort([i[0] for i in draw])
        return np.all(nums == np.array([nums[0] + i for i in range(5)]))

is_straight(d)
# monte carlo simulation
n = 100_000
count = 0
for i in range(n):
    d = draw()
    if is_straight(d):
        count += 1
print(count/n)
'''A 5-card poker hand is said to be a full house if it consists of 3 cards of the same denomination and 2 other cards of the same denomination (of course, different from the first denomination). Thus, a full house is three of a kind plus a pair. What is the probability that one is dealt a full house?'''

def full_house(draw):
    return set(Counter([i[0] for i in draw]).values()) == {3,2}

d = draw()
full_house(d)
n = 100000
count = 0
for i in range(n):
    d = np.random.choice([i % 13 for i in range(52)], replace=False, size=52).reshape(4, 13)
    if np.all([0 in d[j] for j in range(4)]):
        count += 1
n = 100000
count = 0
for i in range(n):
    d = draw()
    if full_house(d):
        count += 1
print(f'Probability of full house is : {count / n}')
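# For comparison, the exact value for a genuine 5-card deal without replacement:
# choose the triple's rank and suits, then the pair's rank and suits.
# Note that draw() above samples ranks and suits with replacement, so the
# simulated frequency will not match this figure exactly.
exact = 13 * binom(4, 3) * 12 * binom(4, 2) / binom(52, 5)
print(f'Exact full-house probability for a true deal: {exact}')  # about 0.00144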
code
128014636/cell_5
[ "text_plain_output_1.png" ]
from itertools import permutations
from scipy.special import binom
import numpy as np  # linear algebra

n, m = (4, 2)

def functional(m, n):
    antennas = [0 for i in range(m)] + [1 for i in range(n - m)]
    network = set(permutations(antennas))
    failure = 0
    for i in network:
        for j in range(len(i) - 1):
            if [i[j], i[j + 1]] == [0, 0]:
                failure += 1
                break
    k = len(network)
    return (k - failure) / k

for i in range(4, 10):
    for j in range(2, int(np.ceil(i / 2)) + 1):
        print(f'No. of Antennas, defective, Probability of functional: {(i, j, functional(j, i), binom(i - j + 1, j) / binom(i, j))}')
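# Worked instance of the identity being checked above: with 5 antennas and 2
# defective, the arrangements with no two defectives adjacent number
# C(5 - 2 + 1, 2) = C(4, 2) = 6 out of C(5, 2) = 10 total, so the functional
# probability is 0.6. Both expressions below should print 0.6.
print(functional(2, 5), binom(4, 2) / binom(5, 2))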
code
18149558/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
test = pd.read_csv('../input/test.csv')
items_cats = pd.read_csv('../input/item_categories.csv')
items = pd.read_csv('../input/items.csv')
shops = pd.read_csv('../input/shops.csv')
train.columns.values
shops_train = train.groupby(['shop_id']).groups.keys()
len(shops_train)
item_train = train.groupby(['item_id']).groups.keys()
len(item_train)
shops_test = test.groupby(['shop_id']).groups.keys()
len(shops_test)
items_test = test.groupby(['item_id']).groups.keys()
len(items_test)
train_df = train.groupby(['shop_id', 'item_id', 'date_block_num']).sum().reset_index().sort_values(by=['item_id', 'shop_id'])
train_df.head()
code
18149558/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
train.columns.values
shops_train = train.groupby(['shop_id']).groups.keys()
len(shops_train)
item_train = train.groupby(['item_id']).groups.keys()
len(item_train)
code
18149558/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
test = pd.read_csv('../input/test.csv')
items_cats = pd.read_csv('../input/item_categories.csv')
items = pd.read_csv('../input/items.csv')
shops = pd.read_csv('../input/shops.csv')
train.columns.values
shops_train = train.groupby(['shop_id']).groups.keys()
len(shops_train)
item_train = train.groupby(['item_id']).groups.keys()
len(item_train)
shops_test = test.groupby(['shop_id']).groups.keys()
len(shops_test)
items_test = test.groupby(['item_id']).groups.keys()
len(items_test)
train_df = train.groupby(['shop_id', 'item_id', 'date_block_num']).sum().reset_index().sort_values(by=['item_id', 'shop_id'])
train_df['m1'] = train_df.groupby(['shop_id', 'item_id']).item_cnt_day.shift()
train_df['m1'].fillna(0, inplace=True)
train_df
train_df['m2'] = train_df.groupby(['shop_id', 'item_id']).m1.shift()
train_df['m2'].fillna(0, inplace=True)
train_df.head()
code
18149558/cell_4
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
print('Training set shape:', train.shape)
code
18149558/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
test = pd.read_csv('../input/test.csv')
items_cats = pd.read_csv('../input/item_categories.csv')
items = pd.read_csv('../input/items.csv')
shops = pd.read_csv('../input/shops.csv')
train.columns.values
shops_train = train.groupby(['shop_id']).groups.keys()
len(shops_train)
item_train = train.groupby(['item_id']).groups.keys()
len(item_train)
shops_test = test.groupby(['shop_id']).groups.keys()
len(shops_test)
items_test = test.groupby(['item_id']).groups.keys()
len(items_test)
train_df = train.groupby(['shop_id', 'item_id', 'date_block_num']).sum().reset_index().sort_values(by=['item_id', 'shop_id'])
train_df['m1'] = train_df.groupby(['shop_id', 'item_id']).item_cnt_day.shift()
train_df['m1'].fillna(0, inplace=True)
train_df
code
18149558/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
test = pd.read_csv('../input/test.csv')
items_cats = pd.read_csv('../input/item_categories.csv')
print('Item categories:', items_cats.shape)
code
18149558/cell_40
[ "text_html_output_1.png" ]
from keras.layers import Dense
from keras.layers import LSTM
from keras.models import Sequential
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
test = pd.read_csv('../input/test.csv')
items_cats = pd.read_csv('../input/item_categories.csv')
items = pd.read_csv('../input/items.csv')
shops = pd.read_csv('../input/shops.csv')
train.columns.values
shops_train = train.groupby(['shop_id']).groups.keys()
len(shops_train)
item_train = train.groupby(['item_id']).groups.keys()
len(item_train)
shops_test = test.groupby(['shop_id']).groups.keys()
len(shops_test)
items_test = test.groupby(['item_id']).groups.keys()
len(items_test)
train_df = train.groupby(['shop_id', 'item_id', 'date_block_num']).sum().reset_index().sort_values(by=['item_id', 'shop_id'])
train_df['m1'] = train_df.groupby(['shop_id', 'item_id']).item_cnt_day.shift()
train_df['m1'].fillna(0, inplace=True)
train_df
train_df['m2'] = train_df.groupby(['shop_id', 'item_id']).m1.shift()
train_df['m2'].fillna(0, inplace=True)
train_df.rename(columns={'item_cnt_day': 'item_cnt_month'}, inplace=True)
finalDf = train_df[['shop_id', 'item_id', 'date_block_num', 'm1', 'm2', 'item_cnt_month']].reset_index()
finalDf.drop(['index'], axis=1, inplace=True)
newTest = pd.merge_asof(test, finalDf, left_index=True, right_index=True, on=['shop_id', 'item_id'])
model_lstm = Sequential()
model_lstm.add(LSTM(64, input_shape=(1, 4)))
model_lstm.add(Dense(1))
model_lstm.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
y_train = finalDf['item_cnt_month']
newTest.drop(['item_cnt_month'], axis=1, inplace=True)
x_train = finalDf[['shop_id', 'item_id', 'm1', 'm2']]
# assumed reshape (x_train_reshaped was undefined in the source): the LSTM's
# input_shape=(1, 4) implies a (samples, timesteps=1, features=4) layout
x_train_reshaped = x_train.values.reshape((x_train.shape[0], 1, x_train.shape[1]))
history = model_lstm.fit(x_train_reshaped, y_train, epochs=20, batch_size=100, shuffle=False)
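# Hedged follow-up sketch (not in the original cell): scoring the merged test
# frame would use the same (samples, timesteps=1, features) layout assumed for
# training; the column choice mirrors x_train above.
x_test = newTest[['shop_id', 'item_id', 'm1', 'm2']].values
x_test_reshaped = x_test.reshape((x_test.shape[0], 1, x_test.shape[1]))
test_preds = model_lstm.predict(x_test_reshaped)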
code
18149558/cell_29
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/sales_train.csv')
test = pd.read_csv('../input/test.csv')
items_cats = pd.read_csv('../input/item_categories.csv')
items = pd.read_csv('../input/items.csv')
shops = pd.read_csv('../input/shops.csv')
train.columns.values
shops_train = train.groupby(['shop_id']).groups.keys()
len(shops_train)
item_train = train.groupby(['item_id']).groups.keys()
len(item_train)
shops_test = test.groupby(['shop_id']).groups.keys()
len(shops_test)
items_test = test.groupby(['item_id']).groups.keys()
len(items_test)
train_df = train.groupby(['shop_id', 'item_id', 'date_block_num']).sum().reset_index().sort_values(by=['item_id', 'shop_id'])
train_df['m1'] = train_df.groupby(['shop_id', 'item_id']).item_cnt_day.shift()
train_df['m1'].fillna(0, inplace=True)
train_df
train_df['m2'] = train_df.groupby(['shop_id', 'item_id']).m1.shift()
train_df['m2'].fillna(0, inplace=True)
train_df.rename(columns={'item_cnt_day': 'item_cnt_month'}, inplace=True)
finalDf = train_df[['shop_id', 'item_id', 'date_block_num', 'm1', 'm2', 'item_cnt_month']].reset_index()
finalDf.drop(['index'], axis=1, inplace=True)
finalDf.head()
code
18149558/cell_2
[ "text_plain_output_1.png" ]
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
import pandas as pd
import numpy as np
code