path: string (length 13 to 17)
screenshot_names: list (length 1 to 873)
code: string (length 0 to 40.4k)
cell_type: string (1 value)
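Each record below pairs a notebook cell path with the names of its rendered output screenshots, its source code, and its cell type. As a minimal sketch of reading rows that follow this schema, assuming they have been exported to a JSON Lines file named cells.jsonl (the filename and storage format are assumptions, not part of the listing above):

import json

# Read one JSON object per line, each with the fields described above:
# path, screenshot_names, code, cell_type.
with open('cells.jsonl', 'r', encoding='utf-8') as f:
    records = [json.loads(line) for line in f]

# Inspect the first record: cell path, number of output screenshots,
# cell type, and the first line of its source code (if any).
first = records[0]
print(first['path'], len(first['screenshot_names']), first['cell_type'])
print(first['code'].splitlines()[0] if first['code'] else '<empty cell>')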
89133561/cell_3
[ "text_plain_output_1.png" ]
# Install pycocotools
!pip install pycocotools
code
89133561/cell_12
[ "text_plain_output_1.png" ]
from joblib import Parallel, delayed
from tqdm.notebook import tqdm
from pycocotools import mask as maskUtils  # used below; pycocotools is installed in an earlier cell
import json
import numpy as np
import os
import pandas as pd

thingClasses = ['Aortic enlargement', 'Atelectasis', 'Calcification', 'Cardiomegaly', 'Consolidation', 'ILD', 'Infiltration', 'Lung Opacity', 'Nodule/Mass', 'Other lesion', 'Pleural effusion', 'Pleural thickening', 'Pneumothorax', 'Pulmonary fibrosis', 'No finding']

cfgDict = {'dicomPath': None, 'orgDataPath': '../input/sartorius-cell-instance-segmentation/', 'newDataPath': None, 'cachePath': './', 'trainDataName': 'vinbigdataTrain', 'validDataName': 'vinbigdataValid', 'sampleSize': 1000, 'imSize': 256, 'modelName': 'COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml', 'debug': False, 'outdir': './results/', 'logFile': 'log.txt', 'splitMode': True, 'seed': 111, 'device': 'cuda', 'iter': 1000, 'ims_per_batch': 16, 'roi_batch_size_per_image': 512, 'eval_period': 20, 'lr_scheduler_name': 'WarmupCosineLR', 'base_lr': 0.001, 'checkpoint_period': 500, 'num_workers': 4, 'score_thresh_test': 0.05, 'augKwargs': {'RandomFlip': {'prob': 0.5}, 'RandomRotation': {'angle': [0, 360]}}}

def rle2mask(rle, h, w):
    # Decode a run-length-encoded annotation ("start length ..." pairs, 1-indexed) into a binary mask.
    rleArray = np.fromiter(rle.split(), dtype=np.uint)
    rleArray = rleArray.reshape((-1, 2)).T
    rleArray[0] = rleArray[0] - 1
    starts, lengths = rleArray
    rleArray = np.concatenate([np.arange(s, s + l, dtype=np.uint) for s, l in zip(starts, lengths)])
    mask = np.zeros(h * w, dtype=np.uint8)
    mask[rleArray] = 1
    mask = mask.reshape((h, w))
    mask = np.asfortranarray(mask)
    return mask

def mask2annotation(idx, row, catIds):
    mask = rle2mask(row['annotation'], row['height'], row['width'])
    rle = maskUtils.encode(mask)
    rle['counts'] = rle['counts'].decode('utf-8')
    area = maskUtils.area(rle).item()
    bbox = maskUtils.toBbox(rle).astype(int).tolist()
    annotation = {'segmentation': rle, 'bbox': bbox, 'area': area, 'image_id': row['id'], 'category_id': catIds[row['cell_type']], 'iscrowd': 0, 'id': idx}
    return annotation

def df2COCO(cfg, df, workers=4):
    catIds = {name: id + 1 for id, name in enumerate(df.cell_type.unique())}
    cats = [{'name': name, 'id': id} for name, id in catIds.items()]
    images = [{'id': id, 'width': row.width, 'height': row.height, 'file_name': f'train/{id}.png'} for id, row in df.groupby('id').agg('first').iterrows()]
    annotations = Parallel(n_jobs=workers)(delayed(mask2annotation)(idx, row, catIds) for idx, row in tqdm(df.iterrows(), total=len(df)))
    return {'categories': cats, 'images': images, 'annotations': annotations}

df = pd.read_csv(os.path.join(cfgDict['orgDataPath'], 'train.csv'))
df.head()
root = df2COCO(cfgDict, df[:cfgDict['sampleSize']])
with open('annotations_train.json', 'w', encoding='utf-8') as f:
    json.dump(root, f, ensure_ascii=True, indent=4)
code
105185383/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv', index_col='row_id')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv', index_col='row_id')
train.dtypes
train.nunique()

plt.figure(figsize=(16, 5))
ax = sns.barplot(data=train, x='product', y='num_sold', hue='country')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.title('Product distribution grouped by country')
plt.show()
code
105185383/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv', index_col='row_id')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv', index_col='row_id')
train.dtypes
code
105185383/cell_20
[ "image_output_1.png" ]
from scipy.stats import pearsonr
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv', index_col='row_id')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv', index_col='row_id')
train['date'] = pd.to_datetime(train['date'])
test['date'] = pd.to_datetime(test['date'])
train.dtypes
train.nunique()

plt.figure(figsize=(16, 5))
ax = sns.barplot(data=train, x='product', y='num_sold', hue='country')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.title('Product distribution grouped by country')
plt.show()

store_corr = pearsonr(train.loc[train['store'] == 'KaggleMart', 'num_sold'], train.loc[train['store'] == 'KaggleRama', 'num_sold'])[0]
mult_factor = train.loc[train['store'] == 'KaggleMart', 'num_sold'].sum() / train.loc[train['store'] == 'KaggleRama', 'num_sold'].sum()

plt.figure(figsize=(12, 5))
ax = sns.lineplot(data=train.groupby(['product', 'date']).sum() / train.groupby(['date']).sum(), x='date', y='num_sold', hue='product')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.title('Ratio of sales by product')
plt.ylabel('Ratio')
plt.show()

def get_fourier_features(df):
    dayofbiyear = df['date'].dt.dayofyear + 365 * (1 - df['date'].dt.year % 2)
    for k in [1, 2, 4]:
        df[f'sin{k}'] = np.sin(2 * np.pi * k * dayofbiyear / (2 * 365))
        df[f'cos{k}'] = np.cos(2 * np.pi * k * dayofbiyear / (2 * 365))
        for product in df['product'].unique():
            df[f'sin_{k}_{product}'] = df[f'sin{k}'] * (df['product'] == product)
            df[f'cos_{k}_{product}'] = df[f'cos{k}'] * (df['product'] == product)
        df = df.drop([f'sin{k}', f'cos{k}'], axis=1)
    return df

def get_GDP_corr(df):
    feat_corr = []
    df['year'] = df['date'].dt.year
    GDP = pd.read_csv('../input/gdp-of-european-countries/GDP_table.csv', index_col='year')
    GDP_PC = pd.read_csv('../input/gdp-of-european-countries/GDP_per_capita_table.csv', index_col='year')
    GDP_dict = GDP.unstack().to_dict()
    GDP_PC_dict = GDP_PC.unstack().to_dict()
    df['GDP'] = df.set_index(['country', 'year']).index.map(GDP_dict.get)
    df['GDP_PC'] = df.set_index(['country', 'year']).index.map(GDP_PC_dict.get)
    for country in df['country'].unique():
        subset = df[(df['country'] == country) & (df['year'] <= 2019)].groupby(['year']).agg(S=('S', 'sum'), GDP=('GDP', 'mean'), GDP_PC=('GDP_PC', 'mean'))
        r1 = pearsonr(subset['S'], subset['GDP'])[0]
        r2 = pearsonr(subset['S'], subset['GDP_PC'])[0]
        feat_corr.append([f'{country}', r1, r2])
    return pd.DataFrame(feat_corr, columns=['Country', 'GDP_corr', 'GDP_PC_corr'])

corr_df = get_GDP_corr(train)
corr_df
code
105185383/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv', index_col='row_id')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv', index_col='row_id')
train.dtypes
train.nunique()

print('Countries:', list(train['country'].unique()), '\n')
print('Stores:', list(train['store'].unique()), '\n')
print('Products:', list(train['product'].unique()))
code
105185383/cell_19
[ "text_plain_output_1.png" ]
""" def get_holidays(df): years_list = [2017, 2018, 2019, 2020, 2021] holiday_BE = holidays.CountryHoliday('BE', years = years_list) holiday_FR = holidays.CountryHoliday('FR', years = years_list) holiday_DE = holidays.CountryHoliday('DE', years = years_list) holiday_IT = holidays.CountryHoliday('IT', years = years_list) holiday_PL = holidays.CountryHoliday('PL', years = years_list) holiday_ES = holidays.CountryHoliday('ES', years = years_list) holiday_dict = holiday_BE.copy() holiday_dict.update(holiday_FR) holiday_dict.update(holiday_DE) holiday_dict.update(holiday_IT) holiday_dict.update(holiday_PL) holiday_dict.update(holiday_ES) df['holiday_name'] = df['date'].map(holiday_dict) df['is_holiday'] = np.where(df['holiday_name'].notnull(), 1, 0) df['holiday_name'] = df['holiday_name'].fillna('Not Holiday') return df """
code
105185383/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
105185383/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv', index_col='row_id')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv', index_col='row_id')
print('Train set shape:', train.shape)
print('Test set shape:', test.shape)
train.head(3)
code
105185383/cell_15
[ "image_output_1.png" ]
from scipy.stats import pearsonr
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv', index_col='row_id')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv', index_col='row_id')
train.dtypes
train.nunique()

plt.figure(figsize=(16, 5))
ax = sns.barplot(data=train, x='product', y='num_sold', hue='country')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.title('Product distribution grouped by country')
plt.show()

store_corr = pearsonr(train.loc[train['store'] == 'KaggleMart', 'num_sold'], train.loc[train['store'] == 'KaggleRama', 'num_sold'])[0]
print(f'Store correlation: {store_corr:.4f}')
mult_factor = train.loc[train['store'] == 'KaggleMart', 'num_sold'].sum() / train.loc[train['store'] == 'KaggleRama', 'num_sold'].sum()
print(f'Multiplicative factor: {mult_factor:.4f}')
code
105185383/cell_16
[ "image_output_1.png" ]
from scipy.stats import pearsonr
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv', index_col='row_id')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv', index_col='row_id')
train.dtypes
train.nunique()

plt.figure(figsize=(16, 5))
ax = sns.barplot(data=train, x='product', y='num_sold', hue='country')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.title('Product distribution grouped by country')
plt.show()

store_corr = pearsonr(train.loc[train['store'] == 'KaggleMart', 'num_sold'], train.loc[train['store'] == 'KaggleRama', 'num_sold'])[0]
mult_factor = train.loc[train['store'] == 'KaggleMart', 'num_sold'].sum() / train.loc[train['store'] == 'KaggleRama', 'num_sold'].sum()

plt.figure(figsize=(12, 5))
ax = sns.lineplot(data=train.groupby(['product', 'date']).sum() / train.groupby(['date']).sum(), x='date', y='num_sold', hue='product')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.title('Ratio of sales by product')
plt.ylabel('Ratio')
plt.show()
code
105185383/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv', index_col='row_id')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv', index_col='row_id')
train.dtypes
train.nunique()

plt.figure(figsize=(16, 5))
ax = sns.barplot(data=train, x='product', y='num_sold', hue='country')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.title('Product distribution grouped by country')
plt.show()

plt.figure(figsize=(12, 5))
sns.lineplot(data=train.groupby(['date', 'store']).sum(), x='date', y='num_sold', hue='store')
plt.title('Sales by store')
plt.show()
code
105185383/cell_10
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv', index_col='row_id')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv', index_col='row_id')
train.dtypes
train.nunique()
code
105185383/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/tabular-playground-series-sep-2022/train.csv', index_col='row_id')
test = pd.read_csv('../input/tabular-playground-series-sep-2022/test.csv', index_col='row_id')
train.dtypes
train.nunique()

print('TRAIN:')
print('Min date', train['date'].min())
print('Max date', train['date'].max())
print('')
print('TEST:')
print('Min date', test['date'].min())
print('Max date', test['date'].max())
code
48165933/cell_33
[ "text_plain_output_1.png", "image_output_1.png" ]
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize, sent_tokenize
from sklearn import metrics
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from time import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
import seaborn as sns

twenty_train = fetch_20newsgroups(subset='train', shuffle=True, random_state=42)
twenty_test = fetch_20newsgroups(subset='test')

def clean_tag(text):
    return re.sub('<.*?>', '', text)

def clean_url(text):
    return re.sub('http\\S+', '', text)

def clean_special_character(text):
    return re.sub('[^a-zA-Z]', ' ', text)

def clean_uppercase(text):
    return str(text).lower()

def sent_tokenization(text):
    return sent_tokenize(text)

def tokenization(text):
    return word_tokenize(text)

def clean_stop_word(tokens):
    stop_words = set(stopwords.words('english'))
    return [token for token in tokens if token not in stop_words]

def steam(tokens):
    return [PorterStemmer().stem(token) for token in tokens]

def lenmatization(tokens):
    return [WordNetLemmatizer().lemmatize(word=token, pos='v') for token in tokens]

def clean_length(tokens):
    return [token for token in tokens if len(token) > 2]

def convert_2_string(text):
    return ' '.join(text)

def clean(text):
    res = clean_url(text)
    res = clean_special_character(res)
    res = clean_uppercase(res)
    res = tokenization(res)
    res = clean_stop_word(res)
    res = lenmatization(res)
    res = clean_length(res)
    return convert_2_string(res)

example = twenty_train.data[0]
after_clean = clean(example)
processed_train_data = [clean(letter) for letter in twenty_train.data]
processed_test_data = [clean(letter) for letter in twenty_test.data]

reports = []

def draw_confusion_matrix(target, predicted, target_names=twenty_test.target_names, normalize=None):
    cm = metrics.confusion_matrix(target, predicted, normalize=normalize)
    df_cm = pd.DataFrame(cm, index=[i for i in target_names], columns=target_names)

def benchmark(pipeline, clf_name, X_train=processed_train_data, y_train=twenty_train.target, X_test=processed_test_data, y_test=twenty_test.target):
    report = []
    report.append(clf_name)
    t0 = time()
    pipeline.fit(X_train, y_train)
    train_time = time() - t0
    report.append(train_time)
    t0 = time()
    pred = pipeline.predict(X_test)
    test_time = time() - t0
    report.append(test_time)
    accuracy = metrics.accuracy_score(y_test, pred)
    report.append(accuracy)
    precision = metrics.precision_score(y_test, pred, average='micro')
    report.append(precision)
    recall = metrics.recall_score(y_test, pred, average='micro')  # was precision_score; recall_score is the intended metric
    report.append(recall)
    f1_score = metrics.f1_score(y_test, pred, average='micro')
    report.append(f1_score)
    mathew = metrics.matthews_corrcoef(y_test, pred)
    report.append(mathew)
    reports.append(report)
    clf = pipeline.named_steps.clf
    vectorizer = pipeline.named_steps.vect
    feature_names = vectorizer.get_feature_names()
    if hasattr(clf, 'coef_'):
        for i, label in enumerate(twenty_train.target_names):
            top10 = np.argsort(clf.coef_[i])[-10:]

linearSVC_pip = Pipeline([('vect', CountVectorizer(max_df=0.75, ngram_range=(1, 2))), ('tf', TfidfTransformer()), ('clf', LinearSVC(C=10))])
benchmark(linearSVC_pip, 'LinearSVC')
code
48165933/cell_29
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize, sent_tokenize
from sklearn import metrics
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from time import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
import seaborn as sns

twenty_train = fetch_20newsgroups(subset='train', shuffle=True, random_state=42)
twenty_test = fetch_20newsgroups(subset='test')

def clean_tag(text):
    return re.sub('<.*?>', '', text)

def clean_url(text):
    return re.sub('http\\S+', '', text)

def clean_special_character(text):
    return re.sub('[^a-zA-Z]', ' ', text)

def clean_uppercase(text):
    return str(text).lower()

def sent_tokenization(text):
    return sent_tokenize(text)

def tokenization(text):
    return word_tokenize(text)

def clean_stop_word(tokens):
    stop_words = set(stopwords.words('english'))
    return [token for token in tokens if token not in stop_words]

def steam(tokens):
    return [PorterStemmer().stem(token) for token in tokens]

def lenmatization(tokens):
    return [WordNetLemmatizer().lemmatize(word=token, pos='v') for token in tokens]

def clean_length(tokens):
    return [token for token in tokens if len(token) > 2]

def convert_2_string(text):
    return ' '.join(text)

def clean(text):
    res = clean_url(text)
    res = clean_special_character(res)
    res = clean_uppercase(res)
    res = tokenization(res)
    res = clean_stop_word(res)
    res = lenmatization(res)
    res = clean_length(res)
    return convert_2_string(res)

example = twenty_train.data[0]
after_clean = clean(example)
processed_train_data = [clean(letter) for letter in twenty_train.data]
processed_test_data = [clean(letter) for letter in twenty_test.data]

reports = []

def draw_confusion_matrix(target, predicted, target_names=twenty_test.target_names, normalize=None):
    cm = metrics.confusion_matrix(target, predicted, normalize=normalize)
    df_cm = pd.DataFrame(cm, index=[i for i in target_names], columns=target_names)

def benchmark(pipeline, clf_name, X_train=processed_train_data, y_train=twenty_train.target, X_test=processed_test_data, y_test=twenty_test.target):
    report = []
    report.append(clf_name)
    t0 = time()
    pipeline.fit(X_train, y_train)
    train_time = time() - t0
    report.append(train_time)
    t0 = time()
    pred = pipeline.predict(X_test)
    test_time = time() - t0
    report.append(test_time)
    accuracy = metrics.accuracy_score(y_test, pred)
    report.append(accuracy)
    precision = metrics.precision_score(y_test, pred, average='micro')
    report.append(precision)
    recall = metrics.recall_score(y_test, pred, average='micro')  # was precision_score; recall_score is the intended metric
    report.append(recall)
    f1_score = metrics.f1_score(y_test, pred, average='micro')
    report.append(f1_score)
    mathew = metrics.matthews_corrcoef(y_test, pred)
    report.append(mathew)
    reports.append(report)
    clf = pipeline.named_steps.clf
    vectorizer = pipeline.named_steps.vect
    feature_names = vectorizer.get_feature_names()
    if hasattr(clf, 'coef_'):
        for i, label in enumerate(twenty_train.target_names):
            top10 = np.argsort(clf.coef_[i])[-10:]

KNN_pip = Pipeline([('vect', CountVectorizer(max_df=0.75, ngram_range=(1, 2))), ('tf', TfidfTransformer()), ('clf', KNeighborsClassifier(n_neighbors=10))])
benchmark(KNN_pip, 'K Neighbors Classifier')
code
48165933/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import re
import nltk
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords

nltk.download('stopwords')
nltk.download('punkt')
nltk.download('wordnet')
code
48165933/cell_10
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize, sent_tokenize
from sklearn.datasets import fetch_20newsgroups
import re

twenty_train = fetch_20newsgroups(subset='train', shuffle=True, random_state=42)

def clean_tag(text):
    return re.sub('<.*?>', '', text)

def clean_url(text):
    return re.sub('http\\S+', '', text)

def clean_special_character(text):
    return re.sub('[^a-zA-Z]', ' ', text)

def clean_uppercase(text):
    return str(text).lower()

def sent_tokenization(text):
    return sent_tokenize(text)

def tokenization(text):
    return word_tokenize(text)

def clean_stop_word(tokens):
    stop_words = set(stopwords.words('english'))
    return [token for token in tokens if token not in stop_words]

def steam(tokens):
    return [PorterStemmer().stem(token) for token in tokens]

def lenmatization(tokens):
    return [WordNetLemmatizer().lemmatize(word=token, pos='v') for token in tokens]

def clean_length(tokens):
    return [token for token in tokens if len(token) > 2]

def convert_2_string(text):
    return ' '.join(text)

def clean(text):
    res = clean_url(text)
    res = clean_special_character(res)
    res = clean_uppercase(res)
    res = tokenization(res)
    res = clean_stop_word(res)
    res = lenmatization(res)
    res = clean_length(res)
    return convert_2_string(res)

example = twenty_train.data[0]
after_clean = clean(example)
print(example, after_clean)
code
90153165/cell_13
[ "text_plain_output_1.png" ]
from scipy.io import arff
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = arff.loadarff('../input/dry-bean-dataset/Dry_Bean_Dataset.arff')
df = pd.DataFrame(data[0])
df.isnull().sum()
df.corr()

fig, ax = plt.subplots(4, 2, figsize=(20, 20))
sns.distplot(df.Area, bins=40, ax=ax[0, 0])
sns.distplot(df.Perimeter, bins=40, ax=ax[0, 1])
sns.distplot(df.Eccentricity, bins=40, ax=ax[1, 0])
sns.distplot(df.roundness, bins=40, ax=ax[1, 1])
sns.distplot(df.ConvexArea, bins=40, ax=ax[2, 0])
sns.distplot(df.Extent, bins=40, ax=ax[2, 1])
sns.distplot(df.Solidity, bins=40, ax=ax[3, 0])
sns.distplot(df.roundness, bins=40, ax=ax[3, 1])

# Check for outliers
fig, ax = plt.subplots(4, 2, figsize=(20, 20))
sns.boxplot(df.Area, ax=ax[0, 0])
sns.boxplot(df.Perimeter, ax=ax[0, 1])
sns.boxplot(df.Eccentricity, ax=ax[1, 0])
sns.boxplot(df.roundness, ax=ax[1, 1])
sns.boxplot(df.ConvexArea, ax=ax[2, 0])
sns.boxplot(df.Extent, ax=ax[2, 1])
sns.boxplot(df.Solidity, ax=ax[3, 0])
sns.boxplot(df.roundness, ax=ax[3, 1])

# Drop rows outside the 1.5 * IQR fences and report the percentage removed
df_without_outliers = df.copy()
Q1 = df_without_outliers.quantile(q=0.25)
Q3 = df_without_outliers.quantile(q=0.75)
IQR = Q3 - Q1
df_without_outliers = df_without_outliers[~((df_without_outliers < Q1 - 1.5 * IQR) | (df_without_outliers > Q3 + 1.5 * IQR)).any(axis=1)]
round((df.shape[0] - df_without_outliers.shape[0]) * 100 / df.shape[0])
code
90153165/cell_9
[ "text_plain_output_100.png", "text_plain_output_334.png", "text_plain_output_770.png", "text_plain_output_743.png", "text_plain_output_673.png", "text_plain_output_445.png", "text_plain_output_640.png", "text_plain_output_822.png", "text_plain_output_201.png", "text_plain_output_586.png", "text_plain_output_261.png", "text_plain_output_775.png", "text_plain_output_819.png", "text_plain_output_565.png", "text_plain_output_522.png", "text_plain_output_84.png", "text_plain_output_624.png", "text_plain_output_521.png", "text_plain_output_322.png", "text_plain_output_769.png", "text_plain_output_205.png", "text_plain_output_826.png", "text_plain_output_693.png", "text_plain_output_828.png", "text_plain_output_824.png", "text_plain_output_511.png", "text_plain_output_608.png", "text_plain_output_271.png", "text_plain_output_56.png", "text_plain_output_475.png", "text_plain_output_158.png", "text_plain_output_455.png", "text_plain_output_223.png", "text_plain_output_218.png", "text_plain_output_264.png", "text_plain_output_715.png", "text_plain_output_282.png", "text_plain_output_579.png", "text_plain_output_793.png", "text_plain_output_629.png", "text_plain_output_396.png", "text_plain_output_287.png", "text_plain_output_232.png", "text_plain_output_181.png", "text_plain_output_137.png", "text_plain_output_139.png", "text_plain_output_362.png", "text_plain_output_813.png", "text_plain_output_35.png", "text_plain_output_697.png", "text_plain_output_501.png", "text_plain_output_593.png", "text_plain_output_258.png", "text_plain_output_685.png", "text_plain_output_452.png", "text_plain_output_130.png", "text_plain_output_598.png", "text_plain_output_490.png", "text_plain_output_790.png", "text_plain_output_449.png", "text_plain_output_462.png", "text_plain_output_117.png", "text_plain_output_286.png", "text_plain_output_853.png", "text_plain_output_367.png", "text_plain_output_750.png", "text_plain_output_262.png", "text_plain_output_278.png", "text_plain_output_588.png", "text_plain_output_395.png", "text_plain_output_617.png", "text_plain_output_254.png", "text_plain_output_307.png", "text_plain_output_570.png", "text_plain_output_799.png", "text_plain_output_674.png", "text_plain_output_833.png", "text_plain_output_98.png", "text_plain_output_399.png", "text_plain_output_671.png", "text_plain_output_718.png", "text_plain_output_236.png", "text_plain_output_195.png", "text_plain_output_756.png", "text_plain_output_678.png", "text_plain_output_688.png", "text_plain_output_471.png", "text_plain_output_219.png", "text_plain_output_614.png", "text_plain_output_768.png", "text_plain_output_420.png", "text_plain_output_514.png", "text_plain_output_485.png", "text_plain_output_237.png", "text_plain_output_43.png", "text_plain_output_284.png", "text_plain_output_187.png", "text_plain_output_309.png", "text_plain_output_576.png", "text_plain_output_78.png", "text_plain_output_143.png", "text_plain_output_106.png", "text_plain_output_37.png", "text_plain_output_138.png", "text_plain_output_670.png", "text_plain_output_544.png", "text_plain_output_192.png", "text_plain_output_426.png", "text_plain_output_184.png", "text_plain_output_477.png", "text_plain_output_274.png", "text_plain_output_172.png", "text_plain_output_664.png", "text_plain_output_716.png", "text_plain_output_627.png", "text_plain_output_613.png", "text_plain_output_736.png", "text_plain_output_332.png", "text_plain_output_147.png", "text_plain_output_443.png", "text_plain_output_327.png", "text_plain_output_684.png", 
"text_plain_output_774.png", "text_plain_output_256.png", "text_plain_output_90.png", "text_plain_output_79.png", "text_plain_output_331.png", "text_plain_output_809.png", "text_plain_output_851.png", "text_plain_output_5.png", "text_plain_output_846.png", "text_plain_output_642.png", "text_plain_output_550.png", "text_plain_output_75.png", "text_plain_output_48.png", "text_plain_output_388.png", "text_plain_output_422.png", "text_plain_output_116.png", "text_plain_output_128.png", "text_plain_output_30.png", "text_plain_output_167.png", "text_plain_output_213.png", "text_plain_output_73.png", "text_plain_output_126.png", "text_plain_output_676.png", "text_plain_output_704.png", "text_plain_output_687.png", "text_plain_output_776.png", "text_plain_output_492.png", "text_plain_output_321.png", "text_plain_output_272.png", "text_plain_output_115.png", "text_plain_output_748.png", "text_plain_output_474.png", "text_plain_output_407.png", "text_plain_output_482.png", "text_plain_output_316.png", "text_plain_output_634.png", "text_plain_output_656.png", "text_plain_output_355.png", "text_plain_output_15.png", "text_plain_output_390.png", "text_plain_output_133.png", "text_plain_output_771.png", "text_plain_output_651.png", "text_plain_output_437.png", "text_plain_output_198.png", "text_plain_output_699.png", "text_plain_output_387.png", "text_plain_output_555.png", "text_plain_output_548.png", "text_plain_output_759.png", "text_plain_output_178.png", "text_plain_output_226.png", "text_plain_output_154.png", "text_plain_output_234.png", "text_plain_output_375.png", "text_plain_output_404.png", "text_plain_output_831.png", "text_plain_output_114.png", "text_plain_output_659.png", "text_plain_output_515.png", "text_plain_output_157.png", "text_plain_output_773.png", "text_plain_output_494.png", "text_plain_output_317.png", "text_plain_output_251.png", "text_plain_output_470.png", "text_plain_output_496.png", "text_plain_output_836.png", "text_plain_output_423.png", "text_plain_output_70.png", "text_plain_output_9.png", "text_plain_output_712.png", "text_plain_output_484.png", "text_plain_output_44.png", "text_plain_output_633.png", "text_plain_output_796.png", "text_plain_output_325.png", "text_plain_output_785.png", "text_plain_output_203.png", "text_plain_output_505.png", "text_plain_output_603.png", "text_plain_output_655.png", "text_plain_output_119.png", "text_plain_output_546.png", "text_plain_output_540.png", "text_plain_output_373.png", "text_plain_output_504.png", "text_plain_output_86.png", "text_plain_output_244.png", "text_plain_output_741.png", "text_plain_output_118.png", "text_plain_output_551.png", "text_plain_output_583.png", "text_plain_output_855.png", "text_plain_output_131.png", "text_plain_output_817.png", "text_plain_output_40.png", "text_plain_output_343.png", "text_plain_output_123.png", "text_plain_output_74.png", "text_plain_output_734.png", "text_plain_output_190.png", "text_plain_output_302.png", "text_plain_output_604.png", "text_plain_output_31.png", "text_plain_output_340.png", "text_plain_output_379.png", "text_plain_output_281.png", "text_plain_output_639.png", "text_plain_output_20.png", "text_plain_output_557.png", "text_plain_output_273.png", "text_plain_output_706.png", "text_plain_output_263.png", "text_plain_output_102.png", "text_plain_output_229.png", "text_plain_output_111.png", "text_plain_output_686.png", "text_plain_output_802.png", "text_plain_output_753.png", "text_plain_output_669.png", "text_plain_output_414.png", "text_plain_output_461.png", 
"text_plain_output_510.png", "text_plain_output_222.png", "text_plain_output_589.png", "text_plain_output_101.png", "text_plain_output_530.png", "text_plain_output_169.png", "text_plain_output_531.png", "text_plain_output_144.png", "text_plain_output_161.png", "text_plain_output_489.png", "text_plain_output_305.png", "text_plain_output_275.png", "text_plain_output_779.png", "text_plain_output_795.png", "text_plain_output_725.png", "text_plain_output_301.png", "text_plain_output_132.png", "text_plain_output_60.png", "text_plain_output_691.png", "text_plain_output_764.png", "text_plain_output_467.png", "text_plain_output_502.png", "text_plain_output_794.png", "text_plain_output_221.png", "text_plain_output_596.png", "text_plain_output_564.png", "text_plain_output_552.png", "text_plain_output_720.png", "text_plain_output_654.png", "text_plain_output_330.png", "text_plain_output_155.png", "text_plain_output_638.png", "text_plain_output_434.png", "text_plain_output_68.png", "text_plain_output_4.png", "text_plain_output_65.png", "text_plain_output_618.png", "text_plain_output_64.png", "text_plain_output_803.png", "text_plain_output_419.png", "text_plain_output_215.png", "text_plain_output_818.png", "text_plain_output_532.png", "text_plain_output_189.png", "text_plain_output_415.png", "text_plain_output_637.png", "text_plain_output_13.png", "text_plain_output_200.png", "text_plain_output_666.png", "text_plain_output_107.png", "text_plain_output_746.png", "text_plain_output_567.png", "text_plain_output_628.png", "text_plain_output_398.png", "text_plain_output_312.png", "text_plain_output_248.png", "text_plain_output_695.png", "text_plain_output_318.png", "text_plain_output_808.png", "text_plain_output_417.png", "text_plain_output_707.png", "text_plain_output_690.png", "text_plain_output_52.png", "text_plain_output_545.png", "text_plain_output_393.png", "text_plain_output_758.png", "text_plain_output_858.png", "text_plain_output_572.png", "text_plain_output_594.png", "text_plain_output_66.png", "text_plain_output_446.png", "text_plain_output_243.png", "text_plain_output_611.png", "text_plain_output_45.png", "text_plain_output_380.png", "text_plain_output_599.png", "text_plain_output_692.png", "text_plain_output_442.png", "text_plain_output_665.png", "text_plain_output_300.png", "text_plain_output_660.png", "text_plain_output_257.png", "text_plain_output_405.png", "text_plain_output_353.png", "text_plain_output_476.png", "text_plain_output_277.png", "text_plain_output_457.png", "text_plain_output_739.png", "text_plain_output_740.png", "text_plain_output_361.png", "text_plain_output_171.png", "text_plain_output_837.png", "text_plain_output_518.png", "text_plain_output_561.png", "text_plain_output_431.png", "text_plain_output_14.png", "text_plain_output_159.png", "text_plain_output_32.png", "text_plain_output_516.png", "text_plain_output_304.png", "text_plain_output_88.png", "text_plain_output_240.png", "text_plain_output_713.png", "text_plain_output_29.png", "text_plain_output_359.png", "text_plain_output_529.png", "text_plain_output_347.png", "text_plain_output_140.png", "text_plain_output_606.png", "text_plain_output_763.png", "text_plain_output_376.png", "text_plain_output_280.png", "text_plain_output_783.png", "text_plain_output_129.png", "text_plain_output_728.png", "text_plain_output_349.png", "text_plain_output_242.png", "text_plain_output_483.png", "text_plain_output_460.png", "text_plain_output_363.png", "text_plain_output_289.png", "text_plain_output_255.png", "text_plain_output_160.png", 
"text_plain_output_58.png", "text_plain_output_680.png", "text_plain_output_622.png", "text_plain_output_329.png", "text_plain_output_49.png", "text_plain_output_791.png", "text_plain_output_708.png", "text_plain_output_63.png", "text_plain_output_260.png", "text_plain_output_294.png", "text_plain_output_27.png", "text_plain_output_392.png", "text_plain_output_320.png", "text_plain_output_177.png", "text_plain_output_607.png", "text_plain_output_386.png", "text_plain_output_438.png", "text_plain_output_76.png", "text_plain_output_681.png", "text_plain_output_850.png", "text_plain_output_333.png", "text_plain_output_108.png", "text_plain_output_581.png", "text_plain_output_54.png", "text_plain_output_142.png", "text_plain_output_10.png", "text_plain_output_700.png", "text_plain_output_854.png", "text_plain_output_269.png", "text_plain_output_276.png", "text_plain_output_6.png", "text_plain_output_326.png", "text_plain_output_744.png", "text_plain_output_503.png", "text_plain_output_578.png", "text_plain_output_735.png", "text_plain_output_153.png", "text_plain_output_170.png", "text_plain_output_92.png", "text_plain_output_658.png", "text_plain_output_57.png", "text_plain_output_120.png", "text_plain_output_469.png", "text_plain_output_24.png", "text_plain_output_357.png", "text_plain_output_21.png", "text_plain_output_344.png", "text_plain_output_849.png", "text_plain_output_104.png", "text_plain_output_784.png", "text_plain_output_270.png", "text_plain_output_47.png", "text_plain_output_623.png", "text_plain_output_466.png", "text_plain_output_568.png", "text_plain_output_121.png", "text_plain_output_25.png", "text_plain_output_134.png", "text_plain_output_523.png", "text_plain_output_401.png", "text_plain_output_77.png", "text_plain_output_421.png", "text_plain_output_288.png", "text_plain_output_535.png", "text_plain_output_842.png", "text_plain_output_527.png", "text_plain_output_488.png", "text_plain_output_18.png", "text_plain_output_183.png", "text_plain_output_266.png", "text_plain_output_149.png", "text_plain_output_839.png", "text_plain_output_208.png", "text_plain_output_50.png", "text_plain_output_36.png", "text_plain_output_646.png", "text_plain_output_383.png", "text_plain_output_207.png", "text_plain_output_766.png", "text_plain_output_391.png", "text_plain_output_413.png", "text_plain_output_709.png", "text_plain_output_96.png", "text_plain_output_847.png", "text_plain_output_663.png", "text_plain_output_87.png", "text_plain_output_3.png", "text_plain_output_217.png", "text_plain_output_418.png", "text_plain_output_657.png", "text_plain_output_427.png", "text_plain_output_180.png", "text_plain_output_556.png", "text_plain_output_141.png", "text_plain_output_210.png", "text_plain_output_112.png", "text_plain_output_152.png", "text_plain_output_225.png", "text_plain_output_701.png", "text_plain_output_191.png", "text_plain_output_609.png", "text_plain_output_737.png", "text_plain_output_821.png", "text_plain_output_259.png", "text_plain_output_798.png", "text_plain_output_447.png", "text_plain_output_290.png", "text_plain_output_801.png", "text_plain_output_283.png", "text_plain_output_495.png", "text_plain_output_247.png", "text_plain_output_835.png", "text_plain_output_113.png", "text_plain_output_371.png", "text_plain_output_827.png", "text_plain_output_479.png", "text_plain_output_324.png", "text_plain_output_22.png", "text_plain_output_188.png", "text_plain_output_366.png", "text_plain_output_328.png", "text_plain_output_81.png", "text_plain_output_730.png", 
"text_plain_output_69.png", "text_plain_output_368.png", "text_plain_output_667.png", "text_plain_output_372.png", "text_plain_output_175.png", "text_plain_output_165.png", "text_plain_output_767.png", "text_plain_output_542.png", "text_plain_output_146.png", "text_plain_output_145.png", "text_plain_output_125.png", "text_plain_output_754.png", "text_plain_output_454.png", "text_plain_output_806.png", "text_plain_output_814.png", "text_plain_output_487.png", "text_plain_output_595.png", "text_plain_output_643.png", "text_plain_output_338.png", "text_plain_output_575.png", "text_plain_output_197.png", "text_plain_output_843.png", "text_plain_output_512.png", "text_plain_output_777.png", "text_plain_output_738.png", "text_plain_output_382.png", "text_plain_output_315.png", "text_plain_output_429.png", "text_plain_output_38.png", "text_plain_output_517.png", "text_plain_output_823.png", "text_plain_output_682.png", "text_plain_output_433.png", "text_plain_output_7.png", "text_plain_output_528.png", "text_plain_output_648.png", "text_plain_output_214.png", "text_plain_output_166.png", "text_plain_output_358.png", "text_plain_output_726.png", "text_plain_output_513.png", "text_plain_output_714.png", "text_plain_output_314.png", "text_plain_output_745.png", "text_plain_output_592.png", "text_plain_output_410.png", "text_plain_output_432.png", "text_plain_output_645.png", "text_plain_output_411.png", "text_plain_output_91.png", "text_plain_output_787.png", "text_plain_output_308.png", "text_plain_output_245.png", "text_plain_output_16.png", "text_plain_output_497.png", "text_plain_output_789.png", "text_plain_output_174.png", "text_plain_output_845.png", "text_plain_output_812.png", "text_plain_output_212.png", "text_plain_output_652.png", "text_plain_output_644.png", "text_plain_output_230.png", "text_plain_output_265.png", "text_plain_output_430.png", "text_plain_output_742.png", "text_plain_output_630.png", "text_plain_output_778.png", "text_plain_output_435.png", "text_plain_output_689.png", "text_plain_output_378.png", "text_plain_output_59.png", "text_plain_output_580.png", "text_plain_output_409.png", "text_plain_output_844.png", "text_plain_output_206.png", "text_plain_output_103.png", "text_plain_output_71.png", "text_plain_output_788.png", "text_plain_output_732.png", "text_plain_output_751.png", "text_plain_output_848.png", "text_plain_output_539.png", "text_plain_output_8.png", "text_plain_output_122.png", "text_plain_output_384.png", "text_plain_output_498.png", "text_plain_output_211.png", "text_plain_output_662.png", "text_plain_output_780.png", "text_plain_output_182.png", "text_plain_output_26.png", "text_plain_output_601.png", "text_plain_output_830.png", "text_plain_output_554.png", "text_plain_output_536.png", "text_plain_output_620.png", "text_plain_output_406.png", "text_plain_output_310.png", "text_plain_output_760.png", "text_plain_output_456.png", "text_plain_output_541.png", "text_plain_output_558.png", "text_plain_output_668.png", "text_plain_output_702.png", "text_plain_output_724.png", "text_plain_output_220.png", "text_plain_output_749.png", "text_plain_output_834.png", "text_plain_output_653.png", "text_plain_output_543.png", "text_plain_output_781.png", "text_plain_output_451.png", "text_plain_output_109.png", "text_plain_output_459.png", "text_plain_output_238.png", "text_plain_output_520.png", "text_plain_output_616.png", "text_plain_output_615.png", "text_plain_output_41.png", "text_plain_output_34.png", "text_plain_output_612.png", "text_plain_output_832.png", 
"text_plain_output_253.png", "text_plain_output_346.png", "text_plain_output_723.png", "text_plain_output_291.png", "text_plain_output_168.png", "text_plain_output_800.png", "text_plain_output_394.png", "text_plain_output_204.png", "text_plain_output_241.png", "text_plain_output_231.png", "text_plain_output_533.png", "text_plain_output_345.png", "text_plain_output_649.png", "text_plain_output_350.png", "text_plain_output_209.png", "text_plain_output_185.png", "text_plain_output_85.png", "text_plain_output_765.png", "text_plain_output_636.png", "text_plain_output_42.png", "text_plain_output_110.png", "text_plain_output_605.png", "text_plain_output_549.png", "text_plain_output_67.png", "text_plain_output_797.png", "text_plain_output_508.png", "text_plain_output_573.png", "text_plain_output_468.png", "text_plain_output_370.png", "text_plain_output_297.png", "text_plain_output_53.png", "text_plain_output_313.png", "text_plain_output_224.png", "text_plain_output_635.png", "text_plain_output_703.png", "text_plain_output_711.png", "text_plain_output_193.png", "text_plain_output_441.png", "text_plain_output_403.png", "text_plain_output_696.png", "text_plain_output_816.png", "text_plain_output_23.png", "text_plain_output_761.png", "text_plain_output_610.png", "text_plain_output_173.png", "text_plain_output_683.png", "text_plain_output_235.png", "text_plain_output_151.png", "text_plain_output_89.png", "text_plain_output_299.png", "text_plain_output_632.png", "text_plain_output_51.png", "text_plain_output_677.png", "text_plain_output_626.png", "text_plain_output_450.png", "text_plain_output_252.png", "text_plain_output_825.png", "text_plain_output_296.png", "text_plain_output_525.png", "text_plain_output_731.png", "text_plain_output_672.png", "text_plain_output_705.png", "text_plain_output_840.png", "text_plain_output_28.png", "text_plain_output_72.png", "text_plain_output_99.png", "text_plain_output_381.png", "text_plain_output_571.png", "text_plain_output_163.png", "text_plain_output_179.png", "text_plain_output_820.png", "text_plain_output_537.png", "text_plain_output_162.png", "text_plain_output_136.png", "text_plain_output_602.png", "text_plain_output_246.png", "text_plain_output_2.png", "text_plain_output_569.png", "text_plain_output_772.png", "text_plain_output_239.png", "text_plain_output_127.png", "text_plain_output_559.png", "text_plain_output_311.png", "text_plain_output_710.png", "text_plain_output_500.png", "text_plain_output_719.png", "text_plain_output_295.png", "text_plain_output_279.png", "text_plain_output_507.png", "text_plain_output_590.png", "text_plain_output_509.png", "text_plain_output_337.png", "text_plain_output_562.png", "text_plain_output_499.png", "text_plain_output_196.png", "text_plain_output_807.png", "text_plain_output_342.png", "text_plain_output_563.png", "text_plain_output_97.png", "text_plain_output_729.png", "text_plain_output_717.png", "text_plain_output_786.png", "text_plain_output_227.png", "text_plain_output_453.png", "text_plain_output_33.png", "text_plain_output_650.png", "text_plain_output_150.png", "application_vnd.jupyter.stderr_output_1.png", "text_plain_output_631.png", "text_plain_output_39.png", "text_plain_output_752.png", "text_plain_output_176.png", "text_plain_output_584.png", "text_plain_output_335.png", "text_plain_output_186.png", "text_plain_output_233.png", "text_plain_output_228.png", "text_plain_output_473.png", "text_plain_output_385.png", "text_plain_output_478.png", "text_plain_output_762.png", "text_plain_output_55.png", 
"text_plain_output_412.png", "text_plain_output_293.png", "text_plain_output_268.png", "text_plain_output_436.png", "text_plain_output_841.png", "text_plain_output_199.png", "text_plain_output_354.png", "text_plain_output_463.png", "text_plain_output_360.png", "text_plain_output_319.png", "text_plain_output_82.png", "text_plain_output_805.png", "text_plain_output_356.png", "text_plain_output_829.png", "text_plain_output_202.png", "text_plain_output_93.png", "text_plain_output_698.png", "text_plain_output_336.png", "text_plain_output_19.png", "text_plain_output_439.png", "text_plain_output_341.png", "text_plain_output_105.png", "text_plain_output_465.png", "text_plain_output_80.png", "text_plain_output_491.png", "text_plain_output_679.png", "text_plain_output_641.png", "text_plain_output_857.png", "text_plain_output_94.png", "text_plain_output_164.png", "text_plain_output_249.png", "text_plain_output_534.png", "text_plain_output_444.png", "text_plain_output_619.png", "text_plain_output_216.png", "text_plain_output_124.png", "text_plain_output_17.png", "text_plain_output_148.png", "text_plain_output_323.png", "text_plain_output_694.png", "text_plain_output_402.png", "text_plain_output_755.png", "text_plain_output_722.png", "text_plain_output_424.png", "text_plain_output_486.png", "text_plain_output_597.png", "text_plain_output_250.png", "text_plain_output_11.png", "text_plain_output_481.png", "text_plain_output_560.png", "text_plain_output_526.png", "text_plain_output_400.png", "text_plain_output_804.png", "text_plain_output_524.png", "text_plain_output_538.png", "text_plain_output_12.png", "text_plain_output_267.png", "text_plain_output_553.png", "text_plain_output_838.png", "text_plain_output_408.png", "text_plain_output_425.png", "text_plain_output_591.png", "text_plain_output_811.png", "text_plain_output_428.png", "text_plain_output_416.png", "text_plain_output_856.png", "text_plain_output_625.png", "text_plain_output_194.png", "text_plain_output_577.png", "text_plain_output_727.png", "text_plain_output_747.png", "text_plain_output_782.png", "text_plain_output_519.png", "text_plain_output_62.png", "text_plain_output_733.png", "text_plain_output_721.png", "text_plain_output_480.png", "text_plain_output_757.png", "text_plain_output_303.png", "text_plain_output_810.png", "text_plain_output_621.png", "text_plain_output_377.png", "text_plain_output_440.png", "text_plain_output_95.png", "text_plain_output_339.png", "text_plain_output_458.png", "text_plain_output_464.png", "text_plain_output_156.png", "text_plain_output_547.png", "text_plain_output_298.png", "text_plain_output_369.png", "text_plain_output_348.png", "text_plain_output_587.png", "text_plain_output_448.png", "text_plain_output_364.png", "text_plain_output_365.png", "text_plain_output_815.png", "text_plain_output_61.png", "text_plain_output_792.png", "text_plain_output_585.png", "text_plain_output_352.png", "text_plain_output_83.png", "text_plain_output_374.png", "text_plain_output_647.png", "text_plain_output_472.png", "text_plain_output_566.png", "text_plain_output_397.png", "text_plain_output_600.png", "text_plain_output_661.png", "text_plain_output_389.png", "text_plain_output_292.png", "text_plain_output_351.png", "text_plain_output_852.png", "text_plain_output_135.png", "text_plain_output_285.png", "text_plain_output_574.png", "text_plain_output_582.png", "text_plain_output_306.png", "text_plain_output_675.png", "text_plain_output_493.png", "text_plain_output_46.png" ]
from scipy.io import arff
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = arff.loadarff('../input/dry-bean-dataset/Dry_Bean_Dataset.arff')
df = pd.DataFrame(data[0])
df.isnull().sum()
df.corr()

fig, ax = plt.subplots(4, 2, figsize=(20, 20))
sns.distplot(df.Area, bins=40, ax=ax[0, 0])
sns.distplot(df.Perimeter, bins=40, ax=ax[0, 1])
sns.distplot(df.Eccentricity, bins=40, ax=ax[1, 0])
sns.distplot(df.roundness, bins=40, ax=ax[1, 1])
sns.distplot(df.ConvexArea, bins=40, ax=ax[2, 0])
sns.distplot(df.Extent, bins=40, ax=ax[2, 1])
sns.distplot(df.Solidity, bins=40, ax=ax[3, 0])
sns.distplot(df.roundness, bins=40, ax=ax[3, 1])
code
90153165/cell_4
[ "text_plain_output_1.png" ]
from scipy.io import arff
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = arff.loadarff('../input/dry-bean-dataset/Dry_Bean_Dataset.arff')
df = pd.DataFrame(data[0])
df.head()
code
90153165/cell_6
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_4.png", "text_plain_output_6.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_5.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from scipy.io import arff
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = arff.loadarff('../input/dry-bean-dataset/Dry_Bean_Dataset.arff')
df = pd.DataFrame(data[0])
df.describe().transpose()
code
90153165/cell_11
[ "text_html_output_1.png" ]
from scipy.io import arff
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = arff.loadarff('../input/dry-bean-dataset/Dry_Bean_Dataset.arff')
df = pd.DataFrame(data[0])
df.isnull().sum()
df.corr()

fig, ax = plt.subplots(4, 2, figsize=(20, 20))
sns.distplot(df.Area, bins=40, ax=ax[0, 0])
sns.distplot(df.Perimeter, bins=40, ax=ax[0, 1])
sns.distplot(df.Eccentricity, bins=40, ax=ax[1, 0])
sns.distplot(df.roundness, bins=40, ax=ax[1, 1])
sns.distplot(df.ConvexArea, bins=40, ax=ax[2, 0])
sns.distplot(df.Extent, bins=40, ax=ax[2, 1])
sns.distplot(df.Solidity, bins=40, ax=ax[3, 0])
sns.distplot(df.roundness, bins=40, ax=ax[3, 1])

fig, ax = plt.subplots(4, 2, figsize=(20, 20))
sns.boxplot(df.Area, ax=ax[0, 0])
sns.boxplot(df.Perimeter, ax=ax[0, 1])
sns.boxplot(df.Eccentricity, ax=ax[1, 0])
sns.boxplot(df.roundness, ax=ax[1, 1])
sns.boxplot(df.ConvexArea, ax=ax[2, 0])
sns.boxplot(df.Extent, ax=ax[2, 1])
sns.boxplot(df.Solidity, ax=ax[3, 0])
sns.boxplot(df.roundness, ax=ax[3, 1])
code
90153165/cell_7
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from scipy.io import arff
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = arff.loadarff('../input/dry-bean-dataset/Dry_Bean_Dataset.arff')
df = pd.DataFrame(data[0])
df.isnull().sum()
code
90153165/cell_8
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from scipy.io import arff
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = arff.loadarff('../input/dry-bean-dataset/Dry_Bean_Dataset.arff')
df = pd.DataFrame(data[0])
df.isnull().sum()
df.corr()
code
90153165/cell_17
[ "text_html_output_1.png" ]
from sklearn.preprocessing import RobustScaler
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential

scaler = RobustScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=25)

model = Sequential()
model.add(Dense(17, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(9, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(7, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')

model.fit(x=X_train, y=y_train, epochs=600, batch_size=32, validation_data=(X_test, y_test), callbacks=[early_stop])
code
90153165/cell_5
[ "text_html_output_1.png" ]
from scipy.io import arff
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = arff.loadarff('../input/dry-bean-dataset/Dry_Bean_Dataset.arff')
df = pd.DataFrame(data[0])
df.info()
code
32070358/cell_4
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt

plt.imread('../input/ckplus/CK+48/fear/S091_001_00000013.png').shape
code
32070358/cell_19
[ "text_plain_output_1.png" ]
from keras import callbacks
from keras.layers import Dense, Activation, Dropout, Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
import cv2
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os

data_path = '../input/ckplus/CK+48/'
for dir1 in os.listdir(data_path):
    count = 0
    for f in os.listdir(data_path + dir1):
        count += 1

plt.imread('../input/ckplus/CK+48/fear/S091_001_00000013.png').shape

data_path = '../input/ckplus/CK+48'
data_dir_list = os.listdir(data_path)
img_rows = 256
img_cols = 256
num_channel = 1
num_epoch = 10

img_data_list = []
for dataset in data_dir_list:
    img_list = os.listdir(data_path + '/' + dataset)
    for img in img_list:
        input_img = cv2.imread(data_path + '/' + dataset + '/' + img)
        input_img_resize = cv2.resize(input_img, (48, 48))
        img_data_list.append(input_img_resize)

img_data = np.array(img_data_list)
img_data = img_data.astype('float32')
img_data = img_data / 255
img_data.shape

num_classes = 7
num_of_samples = img_data.shape[0]
labels = np.ones((num_of_samples,), dtype='int64')
labels[0:134] = 0
labels[135:188] = 1
labels[189:365] = 2
labels[366:440] = 3
labels[441:647] = 4
labels[648:731] = 5
labels[732:980] = 6
names = ['anger', 'contempt', 'disgust', 'fear', 'happy', 'sadness', 'surprise']

def getLabel(id):
    return ['anger', 'contempt', 'disgust', 'fear', 'happy', 'sadness', 'surprise'][id]

trainAug = ImageDataGenerator(rotation_range=30, zoom_range=0.15, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.15, horizontal_flip=True, fill_mode='nearest')
valAug = ImageDataGenerator()

input_shape = (48, 48, 3)

"""
model = Sequential()
model.add(Conv2D(6, (5, 5), input_shape=input_shape, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(16, (5, 5), padding='same', activation='relu'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())

model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(7, activation='softmax'))
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
"""

def build_cnn(input_shape, show_arch=True):
    net = Sequential(name='DCNN')
    net.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=input_shape, activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_1'))
    net.add(BatchNormalization(name='batchnorm_1'))
    net.add(Conv2D(filters=64, kernel_size=(3, 3), activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_2'))
    net.add(BatchNormalization(name='batchnorm_2'))
    net.add(MaxPooling2D(pool_size=(2, 2), name='maxpool2d_1'))
    net.add(Dropout(0.45, name='dropout_1'))
    net.add(Conv2D(filters=128, kernel_size=(3, 3), activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_3'))
    net.add(BatchNormalization(name='batchnorm_3'))
    net.add(Conv2D(filters=128, kernel_size=(3, 3), activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_4'))
    net.add(BatchNormalization(name='batchnorm_4'))
    net.add(MaxPooling2D(pool_size=(2, 2), name='maxpool2d_2'))
    net.add(Dropout(0.45, name='dropout_2'))
    net.add(Conv2D(filters=256, kernel_size=(3, 3), activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_5'))
    net.add(BatchNormalization(name='batchnorm_5'))
    net.add(Conv2D(filters=256, kernel_size=(3, 3), activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_6'))
    net.add(BatchNormalization(name='batchnorm_6'))
    net.add(MaxPooling2D(pool_size=(2, 2), name='maxpool2d_3'))
    net.add(Dropout(0.5, name='dropout_3'))
    net.add(Flatten(name='flatten'))
    net.add(Dense(128, activation='elu', kernel_initializer='he_normal', name='dense_1'))
    net.add(BatchNormalization(name='batchnorm_7'))
    net.add(Dropout(0.6, name='dropout_4'))
    net.add(Dense(num_classes, activation='softmax', name='out_layer'))
    net.summary()
    return net

model = build_cnn(input_shape)
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
model.summary()

filename = 'model_train_new.csv'
filepath = 'Best-weights-my_model-{epoch:03d}-{loss:.4f}-{acc:.4f}.hdf5'
csv_log = callbacks.CSVLogger(filename, separator=',', append=False)
checkpoint = callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [csv_log, checkpoint]
callbacks_list = [csv_log]

hist = model.fit_generator(trainAug.flow(X_train, y_train, batch_size=7), steps_per_epoch=len(X_train) // 7, validation_data=valAug.flow(X_test, y_test), validation_steps=len(X_test) // 7, epochs=50, callbacks=callbacks_list)

score = model.evaluate(X_test, y_test, verbose=0)
np.argmax(y_test[9])

test_image = X_test[0:1]
print(test_image.shape)
print(model.predict(test_image))
print(model.predict_classes(test_image))
print(y_test[0:1])

res = model.predict_classes(X_test[9:18])
plt.figure(figsize=(10, 10))
for i in range(0, 9):
    plt.subplot(330 + 1 + i)
    plt.imshow(X_test[i + 9], cmap=plt.get_cmap('gray'))  # original used undefined x_test[i]; X_test[i + 9] matches the labels and predictions shown
    plt.gca().get_xaxis().set_ticks([])
    plt.gca().get_yaxis().set_ticks([])
    plt.xlabel('true = %s' % getLabel(np.argmax(y_test[i + 9])))
    plt.ylabel('prediction = %s' % getLabel(res[i]), fontsize=14)
plt.show()
code
32070358/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
"\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n for filename in filenames:\n print(os.path.join(dirname, filename))\n"
code
32070358/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import cv2
import numpy as np
import numpy as np # linear algebra
import os
data_path = '../input/ckplus/CK+48/'
for dir1 in os.listdir(data_path):
    count = 0
    for f in os.listdir(data_path + dir1):
        count += 1
data_path = '../input/ckplus/CK+48'
data_dir_list = os.listdir(data_path)
img_rows = 256
img_cols = 256
num_channel = 1
num_epoch = 10
img_data_list = []
for dataset in data_dir_list:
    img_list = os.listdir(data_path + '/' + dataset)
    for img in img_list:
        input_img = cv2.imread(data_path + '/' + dataset + '/' + img)
        input_img_resize = cv2.resize(input_img, (48, 48))
        img_data_list.append(input_img_resize)
img_data = np.array(img_data_list)
img_data = img_data.astype('float32')
img_data = img_data / 255
img_data.shape
num_classes = 7
num_of_samples = img_data.shape[0]
labels = np.ones((num_of_samples,), dtype='int64')
labels[0:134] = 0
labels[135:188] = 1
labels[189:365] = 2
labels[366:440] = 3
labels[441:647] = 4
labels[648:731] = 5
labels[732:980] = 6
names = ['anger', 'contempt', 'disgust', 'fear', 'happy', 'sadness', 'surprise']
def getLabel(id):
    return ['anger', 'contempt', 'disgust', 'fear', 'happy', 'sadness', 'surprise'][id]
img_data.shape[0]
code
32070358/cell_18
[ "text_plain_output_1.png" ]
import cv2
import numpy as np
import numpy as np # linear algebra
import os
data_path = '../input/ckplus/CK+48/'
for dir1 in os.listdir(data_path):
    count = 0
    for f in os.listdir(data_path + dir1):
        count += 1
data_path = '../input/ckplus/CK+48'
data_dir_list = os.listdir(data_path)
img_rows = 256
img_cols = 256
num_channel = 1
num_epoch = 10
img_data_list = []
for dataset in data_dir_list:
    img_list = os.listdir(data_path + '/' + dataset)
    for img in img_list:
        input_img = cv2.imread(data_path + '/' + dataset + '/' + img)
        input_img_resize = cv2.resize(input_img, (48, 48))
        img_data_list.append(input_img_resize)
img_data = np.array(img_data_list)
img_data = img_data.astype('float32')
img_data = img_data / 255
img_data.shape
num_classes = 7
num_of_samples = img_data.shape[0]
labels = np.ones((num_of_samples,), dtype='int64')
labels[0:134] = 0
labels[135:188] = 1
labels[189:365] = 2
labels[366:440] = 3
labels[441:647] = 4
labels[648:731] = 5
labels[732:980] = 6
names = ['anger', 'contempt', 'disgust', 'fear', 'happy', 'sadness', 'surprise']
def getLabel(id):
    return ['anger', 'contempt', 'disgust', 'fear', 'happy', 'sadness', 'surprise'][id]
np.argmax(y_test[9])
code
32070358/cell_15
[ "text_plain_output_1.png" ]
from keras import callbacks
from keras.layers import Dense , Activation , Dropout ,Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
import cv2
import numpy as np
import numpy as np # linear algebra
import os
data_path = '../input/ckplus/CK+48/'
for dir1 in os.listdir(data_path):
    count = 0
    for f in os.listdir(data_path + dir1):
        count += 1
data_path = '../input/ckplus/CK+48'
data_dir_list = os.listdir(data_path)
img_rows = 256
img_cols = 256
num_channel = 1
num_epoch = 10
img_data_list = []
for dataset in data_dir_list:
    img_list = os.listdir(data_path + '/' + dataset)
    for img in img_list:
        input_img = cv2.imread(data_path + '/' + dataset + '/' + img)
        input_img_resize = cv2.resize(input_img, (48, 48))
        img_data_list.append(input_img_resize)
img_data = np.array(img_data_list)
img_data = img_data.astype('float32')
img_data = img_data / 255
img_data.shape
num_classes = 7
num_of_samples = img_data.shape[0]
labels = np.ones((num_of_samples,), dtype='int64')
labels[0:134] = 0
labels[135:188] = 1
labels[189:365] = 2
labels[366:440] = 3
labels[441:647] = 4
labels[648:731] = 5
labels[732:980] = 6
names = ['anger', 'contempt', 'disgust', 'fear', 'happy', 'sadness', 'surprise']
def getLabel(id):
    return ['anger', 'contempt', 'disgust', 'fear', 'happy', 'sadness', 'surprise'][id]
trainAug = ImageDataGenerator(rotation_range=30, zoom_range=0.15, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.15, horizontal_flip=True, fill_mode='nearest')
valAug = ImageDataGenerator()
input_shape = (48, 48, 3)
"\nmodel = Sequential()\nmodel.add(Conv2D(6, (5, 5), input_shape=input_shape, padding='same', activation = 'relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(16, (5, 5), padding='same', activation = 'relu'))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(64, (3, 3), activation = 'relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\n\nmodel.add(Dense(128, activation = 'relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(7, activation = 'softmax'))\nmodel.compile(loss='categorical_crossentropy', metrics=['accuracy'],optimizer='adam')\n"
def build_cnn(input_shape, show_arch=True):
    net = Sequential(name='DCNN')
    net.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=input_shape, activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_1'))
    net.add(BatchNormalization(name='batchnorm_1'))
    net.add(Conv2D(filters=64, kernel_size=(3, 3), activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_2'))
    net.add(BatchNormalization(name='batchnorm_2'))
    net.add(MaxPooling2D(pool_size=(2, 2), name='maxpool2d_1'))
    net.add(Dropout(0.45, name='dropout_1'))
    net.add(Conv2D(filters=128, kernel_size=(3, 3), activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_3'))
    net.add(BatchNormalization(name='batchnorm_3'))
    net.add(Conv2D(filters=128, kernel_size=(3, 3), activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_4'))
    net.add(BatchNormalization(name='batchnorm_4'))
    net.add(MaxPooling2D(pool_size=(2, 2), name='maxpool2d_2'))
    net.add(Dropout(0.45, name='dropout_2'))
    net.add(Conv2D(filters=256, kernel_size=(3, 3), activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_5'))
    net.add(BatchNormalization(name='batchnorm_5'))
    net.add(Conv2D(filters=256, kernel_size=(3, 3), activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_6'))
    net.add(BatchNormalization(name='batchnorm_6'))
    net.add(MaxPooling2D(pool_size=(2, 2), name='maxpool2d_3'))
    net.add(Dropout(0.5, name='dropout_3'))
    net.add(Flatten(name='flatten'))
    net.add(Dense(128, activation='elu', kernel_initializer='he_normal', name='dense_1'))
    net.add(BatchNormalization(name='batchnorm_7'))
    net.add(Dropout(0.6, name='dropout_4'))
    net.add(Dense(num_classes, activation='softmax', name='out_layer'))
    net.summary()
    return net
model = build_cnn(input_shape)
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
model.summary()
from keras import callbacks
filename = 'model_train_new.csv'
filepath = 'Best-weights-my_model-{epoch:03d}-{loss:.4f}-{acc:.4f}.hdf5'
csv_log = callbacks.CSVLogger(filename, separator=',', append=False)
checkpoint = callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [csv_log, checkpoint]
callbacks_list = [csv_log]
hist = model.fit_generator(trainAug.flow(X_train, y_train, batch_size=7), steps_per_epoch=len(X_train) // 7, validation_data=valAug.flow(X_test, y_test), validation_steps=len(X_test) // 7, epochs=50, callbacks=callbacks_list)
code
32070358/cell_16
[ "text_plain_output_1.png" ]
from keras import callbacks
from keras.layers import Dense , Activation , Dropout ,Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
import cv2
import numpy as np
import numpy as np # linear algebra
import os
data_path = '../input/ckplus/CK+48/'
for dir1 in os.listdir(data_path):
    count = 0
    for f in os.listdir(data_path + dir1):
        count += 1
data_path = '../input/ckplus/CK+48'
data_dir_list = os.listdir(data_path)
img_rows = 256
img_cols = 256
num_channel = 1
num_epoch = 10
img_data_list = []
for dataset in data_dir_list:
    img_list = os.listdir(data_path + '/' + dataset)
    for img in img_list:
        input_img = cv2.imread(data_path + '/' + dataset + '/' + img)
        input_img_resize = cv2.resize(input_img, (48, 48))
        img_data_list.append(input_img_resize)
img_data = np.array(img_data_list)
img_data = img_data.astype('float32')
img_data = img_data / 255
img_data.shape
num_classes = 7
num_of_samples = img_data.shape[0]
labels = np.ones((num_of_samples,), dtype='int64')
labels[0:134] = 0
labels[135:188] = 1
labels[189:365] = 2
labels[366:440] = 3
labels[441:647] = 4
labels[648:731] = 5
labels[732:980] = 6
names = ['anger', 'contempt', 'disgust', 'fear', 'happy', 'sadness', 'surprise']
def getLabel(id):
    return ['anger', 'contempt', 'disgust', 'fear', 'happy', 'sadness', 'surprise'][id]
trainAug = ImageDataGenerator(rotation_range=30, zoom_range=0.15, width_shift_range=0.2, height_shift_range=0.2, shear_range=0.15, horizontal_flip=True, fill_mode='nearest')
valAug = ImageDataGenerator()
input_shape = (48, 48, 3)
"\nmodel = Sequential()\nmodel.add(Conv2D(6, (5, 5), input_shape=input_shape, padding='same', activation = 'relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(16, (5, 5), padding='same', activation = 'relu'))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(64, (3, 3), activation = 'relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\n\nmodel.add(Dense(128, activation = 'relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(7, activation = 'softmax'))\nmodel.compile(loss='categorical_crossentropy', metrics=['accuracy'],optimizer='adam')\n"
def build_cnn(input_shape, show_arch=True):
    net = Sequential(name='DCNN')
    net.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=input_shape, activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_1'))
    net.add(BatchNormalization(name='batchnorm_1'))
    net.add(Conv2D(filters=64, kernel_size=(3, 3), activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_2'))
    net.add(BatchNormalization(name='batchnorm_2'))
    net.add(MaxPooling2D(pool_size=(2, 2), name='maxpool2d_1'))
    net.add(Dropout(0.45, name='dropout_1'))
    net.add(Conv2D(filters=128, kernel_size=(3, 3), activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_3'))
    net.add(BatchNormalization(name='batchnorm_3'))
    net.add(Conv2D(filters=128, kernel_size=(3, 3), activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_4'))
    net.add(BatchNormalization(name='batchnorm_4'))
    net.add(MaxPooling2D(pool_size=(2, 2), name='maxpool2d_2'))
    net.add(Dropout(0.45, name='dropout_2'))
    net.add(Conv2D(filters=256, kernel_size=(3, 3), activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_5'))
    net.add(BatchNormalization(name='batchnorm_5'))
    net.add(Conv2D(filters=256, kernel_size=(3, 3), activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_6'))
    net.add(BatchNormalization(name='batchnorm_6'))
    net.add(MaxPooling2D(pool_size=(2, 2), name='maxpool2d_3'))
    net.add(Dropout(0.5, name='dropout_3'))
    net.add(Flatten(name='flatten'))
    net.add(Dense(128, activation='elu', kernel_initializer='he_normal', name='dense_1'))
    net.add(BatchNormalization(name='batchnorm_7'))
    net.add(Dropout(0.6, name='dropout_4'))
    net.add(Dense(num_classes, activation='softmax', name='out_layer'))
    net.summary()
    return net
model = build_cnn(input_shape)
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
model.summary()
from keras import callbacks
filename = 'model_train_new.csv'
filepath = 'Best-weights-my_model-{epoch:03d}-{loss:.4f}-{acc:.4f}.hdf5'
csv_log = callbacks.CSVLogger(filename, separator=',', append=False)
checkpoint = callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [csv_log, checkpoint]
callbacks_list = [csv_log]
hist = model.fit_generator(trainAug.flow(X_train, y_train, batch_size=7), steps_per_epoch=len(X_train) // 7, validation_data=valAug.flow(X_test, y_test), validation_steps=len(X_test) // 7, epochs=50, callbacks=callbacks_list)
score = model.evaluate(X_test, y_test, verbose=0)
print('Test Loss:', score[0])
print('Test accuracy:', score[1])
code
32070358/cell_3
[ "text_plain_output_1.png" ]
import os
data_path = '../input/ckplus/CK+48/'
for dir1 in os.listdir(data_path):
    count = 0
    for f in os.listdir(data_path + dir1):
        count += 1
    print(f'{dir1} has {count} images')
code
32070358/cell_17
[ "text_plain_output_1.png" ]
y_test[9:18]
code
32070358/cell_14
[ "text_plain_output_1.png" ]
from keras import callbacks
from keras import callbacks
filename = 'model_train_new.csv'
filepath = 'Best-weights-my_model-{epoch:03d}-{loss:.4f}-{acc:.4f}.hdf5'
csv_log = callbacks.CSVLogger(filename, separator=',', append=False)
checkpoint = callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [csv_log, checkpoint]
callbacks_list = [csv_log]
callbacks_list
code
32070358/cell_12
[ "text_plain_output_1.png" ]
from keras.layers import Dense , Activation , Dropout ,Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
import cv2
import numpy as np
import numpy as np # linear algebra
import os
data_path = '../input/ckplus/CK+48/'
for dir1 in os.listdir(data_path):
    count = 0
    for f in os.listdir(data_path + dir1):
        count += 1
data_path = '../input/ckplus/CK+48'
data_dir_list = os.listdir(data_path)
img_rows = 256
img_cols = 256
num_channel = 1
num_epoch = 10
img_data_list = []
for dataset in data_dir_list:
    img_list = os.listdir(data_path + '/' + dataset)
    for img in img_list:
        input_img = cv2.imread(data_path + '/' + dataset + '/' + img)
        input_img_resize = cv2.resize(input_img, (48, 48))
        img_data_list.append(input_img_resize)
img_data = np.array(img_data_list)
img_data = img_data.astype('float32')
img_data = img_data / 255
img_data.shape
num_classes = 7
num_of_samples = img_data.shape[0]
labels = np.ones((num_of_samples,), dtype='int64')
labels[0:134] = 0
labels[135:188] = 1
labels[189:365] = 2
labels[366:440] = 3
labels[441:647] = 4
labels[648:731] = 5
labels[732:980] = 6
names = ['anger', 'contempt', 'disgust', 'fear', 'happy', 'sadness', 'surprise']
def getLabel(id):
    return ['anger', 'contempt', 'disgust', 'fear', 'happy', 'sadness', 'surprise'][id]
input_shape = (48, 48, 3)
"\nmodel = Sequential()\nmodel.add(Conv2D(6, (5, 5), input_shape=input_shape, padding='same', activation = 'relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(16, (5, 5), padding='same', activation = 'relu'))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(64, (3, 3), activation = 'relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\n\nmodel.add(Dense(128, activation = 'relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(7, activation = 'softmax'))\nmodel.compile(loss='categorical_crossentropy', metrics=['accuracy'],optimizer='adam')\n"
def build_cnn(input_shape, show_arch=True):
    net = Sequential(name='DCNN')
    net.add(Conv2D(filters=64, kernel_size=(3, 3), input_shape=input_shape, activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_1'))
    net.add(BatchNormalization(name='batchnorm_1'))
    net.add(Conv2D(filters=64, kernel_size=(3, 3), activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_2'))
    net.add(BatchNormalization(name='batchnorm_2'))
    net.add(MaxPooling2D(pool_size=(2, 2), name='maxpool2d_1'))
    net.add(Dropout(0.45, name='dropout_1'))
    net.add(Conv2D(filters=128, kernel_size=(3, 3), activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_3'))
    net.add(BatchNormalization(name='batchnorm_3'))
    net.add(Conv2D(filters=128, kernel_size=(3, 3), activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_4'))
    net.add(BatchNormalization(name='batchnorm_4'))
    net.add(MaxPooling2D(pool_size=(2, 2), name='maxpool2d_2'))
    net.add(Dropout(0.45, name='dropout_2'))
    net.add(Conv2D(filters=256, kernel_size=(3, 3), activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_5'))
    net.add(BatchNormalization(name='batchnorm_5'))
    net.add(Conv2D(filters=256, kernel_size=(3, 3), activation='elu', padding='same', kernel_initializer='he_normal', name='conv2d_6'))
    net.add(BatchNormalization(name='batchnorm_6'))
    net.add(MaxPooling2D(pool_size=(2, 2), name='maxpool2d_3'))
    net.add(Dropout(0.5, name='dropout_3'))
    net.add(Flatten(name='flatten'))
    net.add(Dense(128, activation='elu', kernel_initializer='he_normal', name='dense_1'))
    net.add(BatchNormalization(name='batchnorm_7'))
    net.add(Dropout(0.6, name='dropout_4'))
    net.add(Dense(num_classes, activation='softmax', name='out_layer'))
    net.summary()
    return net
model = build_cnn(input_shape)
model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
model.summary()
code
32070358/cell_5
[ "text_plain_output_1.png" ]
import cv2
import numpy as np
import numpy as np # linear algebra
import os
data_path = '../input/ckplus/CK+48/'
for dir1 in os.listdir(data_path):
    count = 0
    for f in os.listdir(data_path + dir1):
        count += 1
data_path = '../input/ckplus/CK+48'
data_dir_list = os.listdir(data_path)
img_rows = 256
img_cols = 256
num_channel = 1
num_epoch = 10
img_data_list = []
for dataset in data_dir_list:
    img_list = os.listdir(data_path + '/' + dataset)
    for img in img_list:
        input_img = cv2.imread(data_path + '/' + dataset + '/' + img)
        input_img_resize = cv2.resize(input_img, (48, 48))
        img_data_list.append(input_img_resize)
img_data = np.array(img_data_list)
img_data = img_data.astype('float32')
img_data = img_data / 255
img_data.shape
code
16118884/cell_63
[ "text_html_output_1.png" ]
import pandas as pd
app_data = pd.read_csv('../input/googleplaystore.csv')
app_data.isnull().sum()
app_data = app_data.sort_values(by='Installs', ascending=False)
app_data = app_data.sort_values(by='Installs')
nan_rows = list(app_data[app_data['Rating'].isna()].index)
nan_rows.append(10472)
app_data = app_data.drop(nan_rows, axis=0)
app_data = app_data.drop(columns=['Size', 'Current Ver', 'Android Ver'])
app_data = app_data.sort_values(by='Installs', ascending=False)
top_10_df = app_data[app_data['Installs'] >= 50000]
top_10_df[top_10_df['ReviewRatio'] > 1]
code
16118884/cell_13
[ "text_html_output_1.png" ]
import pandas as pd
app_data = pd.read_csv('../input/googleplaystore.csv')
app_data.head(3)
code
16118884/cell_56
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
app_data = pd.read_csv('../input/googleplaystore.csv')
app_data.isnull().sum()
app_data = app_data.sort_values(by='Installs', ascending=False)
app_data = app_data.sort_values(by='Installs')
nan_rows = list(app_data[app_data['Rating'].isna()].index)
nan_rows.append(10472)
app_data = app_data.drop(nan_rows, axis=0)
app_data = app_data.drop(columns=['Size', 'Current Ver', 'Android Ver'])
app_data = app_data.sort_values(by='Installs', ascending=False)
top_10_df = app_data[app_data['Installs'] >= 50000]
paid_apps = top_10_df['Price']
paid_apps = top_10_df[top_10_df['Price'] > 0]
paid_apps[paid_apps['Price'] > 50]
code
16118884/cell_26
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
app_data = pd.read_csv('../input/googleplaystore.csv')
app_data.isnull().sum()
app_data = app_data.sort_values(by='Installs', ascending=False)
app_data = app_data.sort_values(by='Installs')
nan_rows = list(app_data[app_data['Rating'].isna()].index)
nan_rows.append(10472)
app_data = app_data.drop(nan_rows, axis=0)
app_data = app_data.drop(columns=['Size', 'Current Ver', 'Android Ver'])
app_data = app_data.sort_values(by='Installs', ascending=False)
plt.figure(figsize=(13, 8))
app_data['Installs'].value_counts().plot(kind='bar')
plt.title('Count of Popular Apps in our Dataset')
plt.ylabel('Count')
plt.xlabel('Installs')
plt.show()
code
16118884/cell_65
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
app_data = pd.read_csv('../input/googleplaystore.csv')
app_data.isnull().sum()
app_data = app_data.sort_values(by='Installs', ascending=False)
app_data = app_data.sort_values(by='Installs')
nan_rows = list(app_data[app_data['Rating'].isna()].index)
nan_rows.append(10472)
app_data = app_data.drop(nan_rows, axis=0)
app_data = app_data.drop(columns=['Size', 'Current Ver', 'Android Ver'])
app_data = app_data.sort_values(by='Installs', ascending=False)
top_10_df = app_data[app_data['Installs'] >= 50000]
paid_apps = top_10_df['Price']
paid_apps = top_10_df[top_10_df['Price'] > 0]
paid_apps[paid_apps['Price'] > 50]
top_10_df['ReviewRatio'].hist()
plt.title('Review Ratio Distribution for Apps in Top 10 Bins')
plt.xlabel('Review Ratio')
plt.ylabel('App Count')
plt.show()
code
16118884/cell_54
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
app_data = pd.read_csv('../input/googleplaystore.csv')
app_data.isnull().sum()
app_data = app_data.sort_values(by='Installs', ascending=False)
app_data = app_data.sort_values(by='Installs')
nan_rows = list(app_data[app_data['Rating'].isna()].index)
nan_rows.append(10472)
app_data = app_data.drop(nan_rows, axis=0)
app_data = app_data.drop(columns=['Size', 'Current Ver', 'Android Ver'])
app_data = app_data.sort_values(by='Installs', ascending=False)
top_10_df = app_data[app_data['Installs'] >= 50000]
paid_apps = top_10_df['Price']
paid_apps.hist()
plt.title('Pricing Distribution For Apps In Top 10 Bins')
plt.ylabel('App Count')
plt.xlabel('Price (U.S $)')
plt.show()
code
16118884/cell_67
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
app_data = pd.read_csv('../input/googleplaystore.csv')
app_data.isnull().sum()
app_data = app_data.sort_values(by='Installs', ascending=False)
app_data = app_data.sort_values(by='Installs')
nan_rows = list(app_data[app_data['Rating'].isna()].index)
nan_rows.append(10472)
app_data = app_data.drop(nan_rows, axis=0)
app_data = app_data.drop(columns=['Size', 'Current Ver', 'Android Ver'])
app_data = app_data.sort_values(by='Installs', ascending=False)
top_10 = np.unique(app_data['Installs'])
top_10 = sorted(top_10, key=len, reverse=True)[:10]
del top_10
installs = [np.int(i.replace('+', '').replace(',', '')) for i in app_data['Installs']]
app_data['Installs'] = [i for i in installs]
del installs
top_10_df = app_data[app_data['Installs'] >= 50000]
# A dataframe to contain the most reviewed app from each bin:
most_reviewed = pd.DataFrame()
# Get the most reviewed app from each bin and add it to the most_reviewed dataframe
for bins in np.unique(top_10_df["Installs"]):
    top_row = top_10_df[top_10_df["Installs"] == bins]
    top_row = top_row.sort_values(by="ReviewRatio", ascending=False)
    top_row = top_row.head(1)
    most_reviewed = most_reviewed.append(top_row)
# Clear this dataframe of irrelevant columns for enhanced visibility
most_reviewed = most_reviewed.drop(columns=["Category","Type","Price","Content Rating","Genres","Last Updated"])
most_reviewed
print('68th: {}'.format(round(np.percentile(top_10_df['ReviewRatio'], 68), 4)))
print('95th: {}'.format(round(np.percentile(top_10_df['ReviewRatio'], 95), 4)))
print('99th: {}'.format(round(np.percentile(top_10_df['ReviewRatio'], 99), 4)))
print('Max: {}'.format(max(top_10_df['ReviewRatio'])))
code
16118884/cell_60
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
app_data = pd.read_csv('../input/googleplaystore.csv')
app_data.isnull().sum()
app_data = app_data.sort_values(by='Installs', ascending=False)
app_data = app_data.sort_values(by='Installs')
nan_rows = list(app_data[app_data['Rating'].isna()].index)
nan_rows.append(10472)
app_data = app_data.drop(nan_rows, axis=0)
app_data = app_data.drop(columns=['Size', 'Current Ver', 'Android Ver'])
app_data = app_data.sort_values(by='Installs', ascending=False)
top_10_df = app_data[app_data['Installs'] >= 50000]
paid_apps = top_10_df['Price']
paid_apps = top_10_df[top_10_df['Price'] > 0]
paid_apps[paid_apps['Price'] > 50]
print('$ {}'.format(paid_apps['Price'].min()))
code
16118884/cell_19
[ "text_html_output_1.png" ]
import pandas as pd
app_data = pd.read_csv('../input/googleplaystore.csv')
app_data.isnull().sum()
app_data = app_data.sort_values(by='Installs', ascending=False)
app_data = app_data.sort_values(by='Installs')
app_data.head()
code
16118884/cell_45
[ "text_plain_output_1.png" ]
import pandas as pd
app_data = pd.read_csv('../input/googleplaystore.csv')
app_data.isnull().sum()
app_data = app_data.sort_values(by='Installs', ascending=False)
app_data = app_data.sort_values(by='Installs')
nan_rows = list(app_data[app_data['Rating'].isna()].index)
nan_rows.append(10472)
app_data = app_data.drop(nan_rows, axis=0)
app_data = app_data.drop(columns=['Size', 'Current Ver', 'Android Ver'])
app_data = app_data.sort_values(by='Installs', ascending=False)
top_10_df = app_data[app_data['Installs'] >= 50000]
top_10_df['Rating'].describe()
code
16118884/cell_49
[ "text_html_output_1.png" ]
import pandas as pd
app_data = pd.read_csv('../input/googleplaystore.csv')
app_data.isnull().sum()
app_data = app_data.sort_values(by='Installs', ascending=False)
app_data = app_data.sort_values(by='Installs')
nan_rows = list(app_data[app_data['Rating'].isna()].index)
nan_rows.append(10472)
app_data = app_data.drop(nan_rows, axis=0)
app_data = app_data.drop(columns=['Size', 'Current Ver', 'Android Ver'])
app_data = app_data.sort_values(by='Installs', ascending=False)
top_10_df = app_data[app_data['Installs'] >= 50000]
top_10_df[top_10_df['Rating'] > 4.7].sort_values(by=['Rating', 'ReviewRatio'], ascending=False).head()
code
16118884/cell_32
[ "text_plain_output_1.png" ]
import pandas as pd
app_data = pd.read_csv('../input/googleplaystore.csv')
app_data.isnull().sum()
app_data = app_data.sort_values(by='Installs', ascending=False)
app_data = app_data.sort_values(by='Installs')
nan_rows = list(app_data[app_data['Rating'].isna()].index)
nan_rows.append(10472)
app_data = app_data.drop(nan_rows, axis=0)
app_data = app_data.drop(columns=['Size', 'Current Ver', 'Android Ver'])
app_data = app_data.sort_values(by='Installs', ascending=False)
top_10_df = app_data[app_data['Installs'] >= 50000]
print(str(round(len(top_10_df) / len(app_data) * 100, 0)) + '%')
code
16118884/cell_58
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
app_data = pd.read_csv('../input/googleplaystore.csv')
app_data.isnull().sum()
app_data = app_data.sort_values(by='Installs', ascending=False)
app_data = app_data.sort_values(by='Installs')
nan_rows = list(app_data[app_data['Rating'].isna()].index)
nan_rows.append(10472)
app_data = app_data.drop(nan_rows, axis=0)
app_data = app_data.drop(columns=['Size', 'Current Ver', 'Android Ver'])
app_data = app_data.sort_values(by='Installs', ascending=False)
top_10_df = app_data[app_data['Installs'] >= 50000]
paid_apps = top_10_df['Price']
paid_apps = top_10_df[top_10_df['Price'] > 0]
paid_apps[paid_apps['Price'] > 50]
paid_apps[paid_apps['Price'] < 40]['Price'].hist()
plt.title('Pricing Distribution For Apps Costing Less Than $40')
plt.ylabel('App Count')
plt.xlabel('Price (U.S $)')
code
16118884/cell_28
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
app_data = pd.read_csv('../input/googleplaystore.csv')
app_data.isnull().sum()
app_data = app_data.sort_values(by='Installs', ascending=False)
app_data = app_data.sort_values(by='Installs')
nan_rows = list(app_data[app_data['Rating'].isna()].index)
nan_rows.append(10472)
app_data = app_data.drop(nan_rows, axis=0)
app_data = app_data.drop(columns=['Size', 'Current Ver', 'Android Ver'])
app_data = app_data.sort_values(by='Installs', ascending=False)
top_10 = np.unique(app_data['Installs'])
top_10 = sorted(top_10, key=len, reverse=True)[:10]
print(top_10)
del top_10
code
16118884/cell_15
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
app_data = pd.read_csv('../input/googleplaystore.csv')
print('Missing Values' + '\n' + '-' * 15)
app_data.isnull().sum()
code
16118884/cell_38
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
app_data = pd.read_csv('../input/googleplaystore.csv')
app_data.isnull().sum()
app_data = app_data.sort_values(by='Installs', ascending=False)
app_data = app_data.sort_values(by='Installs')
nan_rows = list(app_data[app_data['Rating'].isna()].index)
nan_rows.append(10472)
app_data = app_data.drop(nan_rows, axis=0)
app_data = app_data.drop(columns=['Size', 'Current Ver', 'Android Ver'])
app_data = app_data.sort_values(by='Installs', ascending=False)
top_10 = np.unique(app_data['Installs'])
top_10 = sorted(top_10, key=len, reverse=True)[:10]
del top_10
installs = [np.int(i.replace('+', '').replace(',', '')) for i in app_data['Installs']]
app_data['Installs'] = [i for i in installs]
del installs
top_10_df = app_data[app_data['Installs'] >= 50000]
# A dataframe to contain the most reviewed app from each bin:
most_reviewed = pd.DataFrame()
# Get the most reviewed app from each bin and add it to the most_reviewed dataframe
for bins in np.unique(top_10_df["Installs"]):
    top_row = top_10_df[top_10_df["Installs"] == bins]
    top_row = top_row.sort_values(by="ReviewRatio", ascending=False)
    top_row = top_row.head(1)
    most_reviewed = most_reviewed.append(top_row)
# Clear this dataframe of irrelevant columns for enhanced visibility
most_reviewed = most_reviewed.drop(columns=["Category","Type","Price","Content Rating","Genres","Last Updated"])
most_reviewed
highest_rated = most_reviewed.sort_values(by='Rating', ascending=False).head(3)
highest_rated
code
16118884/cell_47
[ "text_plain_output_1.png" ]
import pandas as pd
app_data = pd.read_csv('../input/googleplaystore.csv')
app_data.isnull().sum()
app_data = app_data.sort_values(by='Installs', ascending=False)
app_data = app_data.sort_values(by='Installs')
nan_rows = list(app_data[app_data['Rating'].isna()].index)
nan_rows.append(10472)
app_data = app_data.drop(nan_rows, axis=0)
app_data = app_data.drop(columns=['Size', 'Current Ver', 'Android Ver'])
app_data = app_data.sort_values(by='Installs', ascending=False)
top_10_df = app_data[app_data['Installs'] >= 50000]
len(top_10_df[top_10_df['Rating'] > 4.7])
code
16118884/cell_17
[ "text_html_output_1.png" ]
import pandas as pd
app_data = pd.read_csv('../input/googleplaystore.csv')
app_data.isnull().sum()
app_data = app_data.sort_values(by='Installs', ascending=False)
app_data.head(4)
code
16118884/cell_43
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
app_data = pd.read_csv('../input/googleplaystore.csv')
app_data.isnull().sum()
app_data = app_data.sort_values(by='Installs', ascending=False)
app_data = app_data.sort_values(by='Installs')
nan_rows = list(app_data[app_data['Rating'].isna()].index)
nan_rows.append(10472)
app_data = app_data.drop(nan_rows, axis=0)
app_data = app_data.drop(columns=['Size', 'Current Ver', 'Android Ver'])
app_data = app_data.sort_values(by='Installs', ascending=False)
top_10_df = app_data[app_data['Installs'] >= 50000]
top_10_df['Rating'].hist()
plt.title(' Google Play Apps Rating Distribution')
plt.ylabel('App Count')
plt.xlabel('Rating out of 5.0')
plt.show()
code
16118884/cell_36
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
app_data = pd.read_csv('../input/googleplaystore.csv')
app_data.isnull().sum()
app_data = app_data.sort_values(by='Installs', ascending=False)
app_data = app_data.sort_values(by='Installs')
nan_rows = list(app_data[app_data['Rating'].isna()].index)
nan_rows.append(10472)
app_data = app_data.drop(nan_rows, axis=0)
app_data = app_data.drop(columns=['Size', 'Current Ver', 'Android Ver'])
app_data = app_data.sort_values(by='Installs', ascending=False)
top_10 = np.unique(app_data['Installs'])
top_10 = sorted(top_10, key=len, reverse=True)[:10]
del top_10
installs = [np.int(i.replace('+', '').replace(',', '')) for i in app_data['Installs']]
app_data['Installs'] = [i for i in installs]
del installs
top_10_df = app_data[app_data['Installs'] >= 50000]
most_reviewed = pd.DataFrame()
for bins in np.unique(top_10_df['Installs']):
    top_row = top_10_df[top_10_df['Installs'] == bins]
    top_row = top_row.sort_values(by='ReviewRatio', ascending=False)
    top_row = top_row.head(1)
    most_reviewed = most_reviewed.append(top_row)
most_reviewed = most_reviewed.drop(columns=['Category', 'Type', 'Price', 'Content Rating', 'Genres', 'Last Updated'])
most_reviewed
code
2036883/cell_13
[ "text_html_output_1.png" ]
from matplotlib import style
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import style
style.use('ggplot')
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import scale
from collections import Counter
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from subprocess import check_output
train = pd.read_csv('../input/training-set/Train_A102.csv')
test = pd.read_csv('../input/test-a102csv/Test_A102 (1).csv')
test['Item_Outlet_Sales'] = np.nan
combined_set = train.append(test)
combined_set.replace('', np.NaN)
combined_meanset = combined_set.copy(deep=True)
combined_meanset['Item_Weight'].fillna(value=12.792854, inplace=True)
combined_medianset = combined_set.copy(deep=True)
combined_medianset['Item_Weight'].fillna(value=12.6, inplace=True)
combined_set['Item_Weight'].fillna(value=12.6, inplace=True)
train.plot.density()
combined_set['Outlet_Size'].fillna(value='Medium', inplace=True)
combined_set['Item_Outlet_Sales'].fillna(value=-999, inplace=True)
X = train.iloc[:, 0:].values
item_outlet_sales = X[:, 11]
Outliers = item_outlet_sales > 6501
combined_set['Item_Fat_Content'] = combined_set['Item_Fat_Content'].replace({'low fat': 'LF', 'Low Fat': 'LF', 'Regular': 'reg'})
combined_mean_modelset = combined_set.copy(deep=True)
combined_mean_modelset['Item_Outlet_Sales'] = combined_mean_modelset['Item_Outlet_Sales'].replace(to_replace=-999, value=2181.3)
X = combined_mean_modelset.iloc[:, :-1].values
Y = combined_mean_modelset.iloc[:, 11].values
X = pd.DataFrame(X)
X.dtypes
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:, 0] = labelencoder_X.fit_transform(X[:, 0])
X = pd.DataFrame(X)
X_extra = pd.get_dummies(X.iloc[:, 0])
X_extra.head()
code
2036883/cell_9
[ "text_html_output_1.png" ]
from matplotlib import style
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import style
style.use('ggplot')
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import scale
from collections import Counter
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from subprocess import check_output
train = pd.read_csv('../input/training-set/Train_A102.csv')
test = pd.read_csv('../input/test-a102csv/Test_A102 (1).csv')
test['Item_Outlet_Sales'] = np.nan
combined_set = train.append(test)
combined_set.replace('', np.NaN)
combined_meanset = combined_set.copy(deep=True)
combined_meanset['Item_Weight'].fillna(value=12.792854, inplace=True)
combined_medianset = combined_set.copy(deep=True)
combined_medianset['Item_Weight'].fillna(value=12.6, inplace=True)
combined_set['Item_Weight'].fillna(value=12.6, inplace=True)
train.plot.density()
combined_set['Outlet_Size'].fillna(value='Medium', inplace=True)
combined_set['Item_Outlet_Sales'].fillna(value=-999, inplace=True)
X = train.iloc[:, 0:].values
item_outlet_sales = X[:, 11]
Outliers = item_outlet_sales > 6501
combined_set['Item_Fat_Content'] = combined_set['Item_Fat_Content'].replace({'low fat': 'LF', 'Low Fat': 'LF', 'Regular': 'reg'})
combined_mean_modelset = combined_set.copy(deep=True)
combined_mean_modelset['Item_Outlet_Sales'] = combined_mean_modelset['Item_Outlet_Sales'].replace(to_replace=-999, value=2181.3)
train_new = combined_mean_modelset[0:8523]
test_new = combined_mean_modelset[8523:]
test_new.shape
code
2036883/cell_4
[ "text_plain_output_1.png" ]
from matplotlib import style
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import style
style.use('ggplot')
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import scale
from collections import Counter
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from subprocess import check_output
train = pd.read_csv('../input/training-set/Train_A102.csv')
test = pd.read_csv('../input/test-a102csv/Test_A102 (1).csv')
test['Item_Outlet_Sales'] = np.nan
combined_set = train.append(test)
combined_set.replace('', np.NaN)
combined_meanset = combined_set.copy(deep=True)
combined_meanset['Item_Weight'].fillna(value=12.792854, inplace=True)
combined_medianset = combined_set.copy(deep=True)
combined_medianset['Item_Weight'].fillna(value=12.6, inplace=True)
combined_set['Item_Weight'].fillna(value=12.6, inplace=True)
train.plot.density()
combined_set['Outlet_Size'].fillna(value='Medium', inplace=True)
combined_set['Item_Outlet_Sales'].fillna(value=-999, inplace=True)
X = train.iloc[:, 0:].values
item_outlet_sales = X[:, 11]
Outliers = item_outlet_sales > 6501
combined_set['Item_Fat_Content'] = combined_set['Item_Fat_Content'].replace({'low fat': 'LF', 'Low Fat': 'LF', 'Regular': 'reg'})
combined_mean_modelset = combined_set.copy(deep=True)
combined_mean_modelset['Item_Outlet_Sales'] = combined_mean_modelset['Item_Outlet_Sales'].replace(to_replace=-999, value=2181.3)
combined_mean_modelset.tail()
code
2036883/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
from matplotlib import style
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import style
style.use('ggplot')
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import scale
from collections import Counter
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from subprocess import check_output
train = pd.read_csv('../input/training-set/Train_A102.csv')
test = pd.read_csv('../input/test-a102csv/Test_A102 (1).csv')
test['Item_Outlet_Sales'] = np.nan
combined_set = train.append(test)
combined_set.replace('', np.NaN)
combined_meanset = combined_set.copy(deep=True)
combined_meanset['Item_Weight'].fillna(value=12.792854, inplace=True)
combined_medianset = combined_set.copy(deep=True)
combined_medianset['Item_Weight'].fillna(value=12.6, inplace=True)
combined_set['Item_Weight'].fillna(value=12.6, inplace=True)
train.plot.density()
combined_set['Outlet_Size'].fillna(value='Medium', inplace=True)
combined_set['Item_Outlet_Sales'].fillna(value=-999, inplace=True)
X = train.iloc[:, 0:].values
item_outlet_sales = X[:, 11]
Outliers = item_outlet_sales > 6501
combined_set['Item_Fat_Content'] = combined_set['Item_Fat_Content'].replace({'low fat': 'LF', 'Low Fat': 'LF', 'Regular': 'reg'})
combined_mean_modelset = combined_set.copy(deep=True)
combined_mean_modelset['Item_Outlet_Sales'] = combined_mean_modelset['Item_Outlet_Sales'].replace(to_replace=-999, value=2181.3)
combined_median_modelset = combined_set.copy(deep=True)
combined_median_modelset['Item_Outlet_Sales'] = combined_median_modelset['Item_Outlet_Sales'].replace(to_replace=-999, value=1794.3)
combined_median_modelset.tail()
code
2036883/cell_11
[ "text_html_output_1.png" ]
from matplotlib import style
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import style
style.use('ggplot')
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import scale
from collections import Counter
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from subprocess import check_output
train = pd.read_csv('../input/training-set/Train_A102.csv')
test = pd.read_csv('../input/test-a102csv/Test_A102 (1).csv')
test['Item_Outlet_Sales'] = np.nan
combined_set = train.append(test)
combined_set.replace('', np.NaN)
combined_meanset = combined_set.copy(deep=True)
combined_meanset['Item_Weight'].fillna(value=12.792854, inplace=True)
combined_medianset = combined_set.copy(deep=True)
combined_medianset['Item_Weight'].fillna(value=12.6, inplace=True)
combined_set['Item_Weight'].fillna(value=12.6, inplace=True)
train.plot.density()
combined_set['Outlet_Size'].fillna(value='Medium', inplace=True)
combined_set['Item_Outlet_Sales'].fillna(value=-999, inplace=True)
X = train.iloc[:, 0:].values
item_outlet_sales = X[:, 11]
Outliers = item_outlet_sales > 6501
combined_set['Item_Fat_Content'] = combined_set['Item_Fat_Content'].replace({'low fat': 'LF', 'Low Fat': 'LF', 'Regular': 'reg'})
combined_mean_modelset = combined_set.copy(deep=True)
combined_mean_modelset['Item_Outlet_Sales'] = combined_mean_modelset['Item_Outlet_Sales'].replace(to_replace=-999, value=2181.3)
X = combined_mean_modelset.iloc[:, :-1].values
print(X)
Y = combined_mean_modelset.iloc[:, 11].values
print(Y)
code
2036883/cell_1
[ "image_output_5.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from matplotlib import style
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import style
style.use('ggplot')
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import scale
from collections import Counter
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
train = pd.read_csv('../input/training-set/Train_A102.csv')
test = pd.read_csv('../input/test-a102csv/Test_A102 (1).csv')
print(train.head())
print(test.head())
train.info()
print(train.dtypes)
print(train.describe(include='all'))
print(train.shape)
test.info()
print(test.dtypes)
print(test.describe(include='all'))
print(test.shape)
test['Item_Outlet_Sales'] = np.nan
print(test.head())
combined_set = train.append(test)
print(combined_set.head())
print(combined_set.describe(include='all'))
print(combined_set.shape)
print(combined_set.max())
print(combined_set.min())
combined_set.replace('', np.NaN)
combined_meanset = combined_set.copy(deep=True)
combined_meanset['Item_Weight'].fillna(value=12.792854, inplace=True)
print(combined_meanset.head(10))
combined_medianset = combined_set.copy(deep=True)
combined_medianset['Item_Weight'].fillna(value=12.6, inplace=True)
print(combined_medianset.head(10))
print(combined_set.head(10))
combined_meanset['Item_Weight'].hist(color='white', edgecolor='green')
plt.title('Mean Histogram')
plt.xlabel('X-axis')
plt.ylabel('Item_Weight')
plt.show()
combined_medianset['Item_Weight'].hist(color='white', edgecolor='green')
plt.title('Median Histogram')
plt.xlabel('X-axis')
plt.ylabel('Item_Weight')
plt.show()
combined_set['Item_Weight'].hist(color='white', edgecolor='green')
plt.title('Actual Value Histogram')
plt.xlabel('X-axis')
plt.ylabel('Item_Weight')
plt.show()
combined_set['Item_Weight'].fillna(value=12.6, inplace=True)
train.plot.density()
plt.show()
combined_set.boxplot()
plt.plot()
combined_set.boxplot('Item_Visibility', figsize=(20, 8))
plt.plot()
combined_set.boxplot('Outlet_Establishment_Year', figsize=(12, 8))
plt.plot()
print(combined_set.groupby('Item_Fat_Content').Outlet_Size.value_counts(dropna=False))
print(combined_set.groupby('Item_Type').Outlet_Size.value_counts(dropna=False))
print(combined_set.groupby('Outlet_Identifier').Outlet_Size.value_counts(dropna=False))
print(combined_set.groupby('Outlet_Type').Outlet_Size.value_counts(dropna=False))
print(combined_set.groupby('Outlet_Location_Type').Outlet_Size.value_counts(dropna=False))
combined_set['Outlet_Size'].fillna(value='Medium', inplace=True)
print(combined_set.head())
combined_set['Outlet_Size'].hist(color='white', edgecolor='blue')
plt.title('Histogram of Outlet_Size')
plt.xlabel('X-axis')
plt.ylabel('Outlet_Size')
plt.show()
combined_set['Item_Outlet_Sales'].fillna(value=-999, inplace=True)
print(combined_set.tail())
print(combined_set.shape[0])
X = train.iloc[:, 0:].values
item_outlet_sales = X[:, 11]
Outliers = item_outlet_sales > 6501
print(train[Outliers])
print(combined_set['Item_Fat_Content'].value_counts())
combined_set['Item_Fat_Content'] = combined_set['Item_Fat_Content'].replace({'low fat': 'LF', 'Low Fat': 'LF', 'Regular': 'reg'})
print(combined_set.head())
code
2036883/cell_7
[ "text_html_output_1.png" ]
from matplotlib import style
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import style
style.use('ggplot')
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import scale
from collections import Counter
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from subprocess import check_output
train = pd.read_csv('../input/training-set/Train_A102.csv')
test = pd.read_csv('../input/test-a102csv/Test_A102 (1).csv')
test['Item_Outlet_Sales'] = np.nan
combined_set = train.append(test)
combined_set.replace('', np.NaN)
combined_meanset = combined_set.copy(deep=True)
combined_meanset['Item_Weight'].fillna(value=12.792854, inplace=True)
combined_medianset = combined_set.copy(deep=True)
combined_medianset['Item_Weight'].fillna(value=12.6, inplace=True)
combined_set['Item_Weight'].fillna(value=12.6, inplace=True)
train.plot.density()
combined_set['Outlet_Size'].fillna(value='Medium', inplace=True)
combined_set['Item_Outlet_Sales'].fillna(value=-999, inplace=True)
X = train.iloc[:, 0:].values
item_outlet_sales = X[:, 11]
Outliers = item_outlet_sales > 6501
combined_set['Item_Fat_Content'] = combined_set['Item_Fat_Content'].replace({'low fat': 'LF', 'Low Fat': 'LF', 'Regular': 'reg'})
combined_mean_modelset = combined_set.copy(deep=True)
combined_mean_modelset['Item_Outlet_Sales'] = combined_mean_modelset['Item_Outlet_Sales'].replace(to_replace=-999, value=2181.3)
train_new = combined_mean_modelset[0:8523]
print(train_new.tail())
test_new = combined_mean_modelset[8523:]
print(test_new.tail())
code
2036883/cell_8
[ "text_html_output_1.png" ]
from matplotlib import style
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import style
style.use('ggplot')
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import scale
from collections import Counter
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from subprocess import check_output
train = pd.read_csv('../input/training-set/Train_A102.csv')
test = pd.read_csv('../input/test-a102csv/Test_A102 (1).csv')
test['Item_Outlet_Sales'] = np.nan
combined_set = train.append(test)
combined_set.replace('', np.NaN)
combined_meanset = combined_set.copy(deep=True)
combined_meanset['Item_Weight'].fillna(value=12.792854, inplace=True)
combined_medianset = combined_set.copy(deep=True)
combined_medianset['Item_Weight'].fillna(value=12.6, inplace=True)
combined_set['Item_Weight'].fillna(value=12.6, inplace=True)
train.plot.density()
combined_set['Outlet_Size'].fillna(value='Medium', inplace=True)
combined_set['Item_Outlet_Sales'].fillna(value=-999, inplace=True)
X = train.iloc[:, 0:].values
item_outlet_sales = X[:, 11]
Outliers = item_outlet_sales > 6501
combined_set['Item_Fat_Content'] = combined_set['Item_Fat_Content'].replace({'low fat': 'LF', 'Low Fat': 'LF', 'Regular': 'reg'})
combined_mean_modelset = combined_set.copy(deep=True)
combined_mean_modelset['Item_Outlet_Sales'] = combined_mean_modelset['Item_Outlet_Sales'].replace(to_replace=-999, value=2181.3)
train_new = combined_mean_modelset[0:8523]
test_new = combined_mean_modelset[8523:]
train_new.shape
code
2036883/cell_15
[ "text_plain_output_1.png" ]
from matplotlib import style
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import style
style.use('ggplot')
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import scale
from collections import Counter
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from subprocess import check_output
train = pd.read_csv('../input/training-set/Train_A102.csv')
test = pd.read_csv('../input/test-a102csv/Test_A102 (1).csv')
test['Item_Outlet_Sales'] = np.nan
combined_set = train.append(test)
combined_set.replace('', np.NaN)
combined_meanset = combined_set.copy(deep=True)
combined_meanset['Item_Weight'].fillna(value=12.792854, inplace=True)
combined_medianset = combined_set.copy(deep=True)
combined_medianset['Item_Weight'].fillna(value=12.6, inplace=True)
combined_set['Item_Weight'].fillna(value=12.6, inplace=True)
train.plot.density()
combined_set['Outlet_Size'].fillna(value='Medium', inplace=True)
combined_set['Item_Outlet_Sales'].fillna(value=-999, inplace=True)
X = train.iloc[:, 0:].values
item_outlet_sales = X[:, 11]
Outliers = item_outlet_sales > 6501
combined_set['Item_Fat_Content'] = combined_set['Item_Fat_Content'].replace({'low fat': 'LF', 'Low Fat': 'LF', 'Regular': 'reg'})
combined_mean_modelset = combined_set.copy(deep=True)
combined_mean_modelset['Item_Outlet_Sales'] = combined_mean_modelset['Item_Outlet_Sales'].replace(to_replace=-999, value=2181.3)
X = combined_mean_modelset.iloc[:, :-1].values
Y = combined_mean_modelset.iloc[:, 11].values
X = pd.DataFrame(X)
X.dtypes
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:, 0] = labelencoder_X.fit_transform(X[:, 0])
X = pd.DataFrame(X)
X_extra = pd.get_dummies(X.iloc[:, 0])
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
X.iloc[:, 2] = labelencoder_X.fit_transform(X.iloc[:, 2])
X_extra = pd.get_dummies(X.iloc[:, 2])
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
X.iloc[:, 4] = labelencoder_X.fit_transform(X.iloc[:, 4])
X_extra = pd.get_dummies(X.iloc[:, 4])
X_extra.head()
code
2036883/cell_3
[ "text_html_output_1.png" ]
from matplotlib import style
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import style
style.use('ggplot')
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import scale
from collections import Counter
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from subprocess import check_output
train = pd.read_csv('../input/training-set/Train_A102.csv')
test = pd.read_csv('../input/test-a102csv/Test_A102 (1).csv')
test['Item_Outlet_Sales'] = np.nan
combined_set = train.append(test)
combined_set.replace('', np.NaN)
combined_meanset = combined_set.copy(deep=True)
combined_meanset['Item_Weight'].fillna(value=12.792854, inplace=True)
combined_medianset = combined_set.copy(deep=True)
combined_medianset['Item_Weight'].fillna(value=12.6, inplace=True)
combined_set['Item_Weight'].fillna(value=12.6, inplace=True)
train.plot.density()
combined_set['Outlet_Size'].fillna(value='Medium', inplace=True)
combined_set['Item_Outlet_Sales'].fillna(value=-999, inplace=True)
X = train.iloc[:, 0:].values
item_outlet_sales = X[:, 11]
Outliers = item_outlet_sales > 6501
combined_set['Item_Fat_Content'] = combined_set['Item_Fat_Content'].replace({'low fat': 'LF', 'Low Fat': 'LF', 'Regular': 'reg'})
combined_mean_modelset = combined_set.copy(deep=True)
combined_mean_modelset['Item_Outlet_Sales'] = combined_mean_modelset['Item_Outlet_Sales'].replace(to_replace=-999, value=2181.3)
print(combined_mean_modelset.tail())
code
2036883/cell_14
[ "text_plain_output_1.png" ]
from matplotlib import style
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import style
style.use('ggplot')
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import scale
from collections import Counter
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from subprocess import check_output
train = pd.read_csv('../input/training-set/Train_A102.csv')
test = pd.read_csv('../input/test-a102csv/Test_A102 (1).csv')
test['Item_Outlet_Sales'] = np.nan
combined_set = train.append(test)
combined_set.replace('', np.NaN)
combined_meanset = combined_set.copy(deep=True)
combined_meanset['Item_Weight'].fillna(value=12.792854, inplace=True)
combined_medianset = combined_set.copy(deep=True)
combined_medianset['Item_Weight'].fillna(value=12.6, inplace=True)
combined_set['Item_Weight'].fillna(value=12.6, inplace=True)
train.plot.density()
combined_set['Outlet_Size'].fillna(value='Medium', inplace=True)
combined_set['Item_Outlet_Sales'].fillna(value=-999, inplace=True)
X = train.iloc[:, 0:].values
item_outlet_sales = X[:, 11]
Outliers = item_outlet_sales > 6501
combined_set['Item_Fat_Content'] = combined_set['Item_Fat_Content'].replace({'low fat': 'LF', 'Low Fat': 'LF', 'Regular': 'reg'})
combined_mean_modelset = combined_set.copy(deep=True)
combined_mean_modelset['Item_Outlet_Sales'] = combined_mean_modelset['Item_Outlet_Sales'].replace(to_replace=-999, value=2181.3)
X = combined_mean_modelset.iloc[:, :-1].values
Y = combined_mean_modelset.iloc[:, 11].values
X = pd.DataFrame(X)
X.dtypes
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:, 0] = labelencoder_X.fit_transform(X[:, 0])
X = pd.DataFrame(X)
X_extra = pd.get_dummies(X.iloc[:, 0])
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
X.iloc[:, 2] = labelencoder_X.fit_transform(X.iloc[:, 2])
X_extra = pd.get_dummies(X.iloc[:, 2])
X_extra.head()
code
2036883/cell_10
[ "text_plain_output_1.png" ]
from matplotlib import style
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib import style
style.use('ggplot')
from sklearn.preprocessing import Imputer
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import scale
from collections import Counter
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from subprocess import check_output
train = pd.read_csv('../input/training-set/Train_A102.csv')
test = pd.read_csv('../input/test-a102csv/Test_A102 (1).csv')
test['Item_Outlet_Sales'] = np.nan
combined_set = train.append(test)
combined_set.replace('', np.NaN)
combined_meanset = combined_set.copy(deep=True)
combined_meanset['Item_Weight'].fillna(value=12.792854, inplace=True)
combined_medianset = combined_set.copy(deep=True)
combined_medianset['Item_Weight'].fillna(value=12.6, inplace=True)
combined_set['Item_Weight'].fillna(value=12.6, inplace=True)
train.plot.density()
combined_set['Outlet_Size'].fillna(value='Medium', inplace=True)
combined_set['Item_Outlet_Sales'].fillna(value=-999, inplace=True)
X = train.iloc[:, 0:].values
item_outlet_sales = X[:, 11]
Outliers = item_outlet_sales > 6501
combined_set['Item_Fat_Content'] = combined_set['Item_Fat_Content'].replace({'low fat': 'LF', 'Low Fat': 'LF', 'Regular': 'reg'})
combined_mean_modelset = combined_set.copy(deep=True)
combined_mean_modelset['Item_Outlet_Sales'] = combined_mean_modelset['Item_Outlet_Sales'].replace(to_replace=-999, value=2181.3)
combined_mean_modelset.tail()
code
2036883/cell_12
[ "text_plain_output_1.png" ]
from matplotlib import style from subprocess import check_output import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd from matplotlib import pyplot as plt from matplotlib import style style.use('ggplot') from sklearn.preprocessing import Imputer from sklearn.linear_model import LinearRegression from sklearn.preprocessing import scale from collections import Counter import seaborn as sns from sklearn.preprocessing import LabelEncoder, OneHotEncoder from subprocess import check_output train = pd.read_csv('../input/training-set/Train_A102.csv') test = pd.read_csv('../input/test-a102csv/Test_A102 (1).csv') test['Item_Outlet_Sales'] = np.nan combined_set = train.append(test) combined_set.replace('', np.NaN) combined_meanset = combined_set.copy(deep=True) combined_meanset['Item_Weight'].fillna(value=12.792854, inplace=True) combined_medianset = combined_set.copy(deep=True) combined_medianset['Item_Weight'].fillna(value=12.6, inplace=True) combined_set['Item_Weight'].fillna(value=12.6, inplace=True) train.plot.density() combined_set['Outlet_Size'].fillna(value='Medium', inplace=True) combined_set['Item_Outlet_Sales'].fillna(value=-999, inplace=True) X = train.iloc[:, 0:].values item_outlet_sales = X[:, 11] Outliers = item_outlet_sales > 6501 combined_set['Item_Fat_Content'] = combined_set['Item_Fat_Content'].replace({'low fat': 'LF', 'Low Fat': 'LF', 'Regular': 'reg'}) combined_mean_modelset = combined_set.copy(deep=True) combined_mean_modelset['Item_Outlet_Sales'] = combined_mean_modelset['Item_Outlet_Sales'].replace(to_replace=-999, value=2181.3) X = combined_mean_modelset.iloc[:, :-1].values Y = combined_mean_modelset.iloc[:, 11].values X = pd.DataFrame(X) X.dtypes
code
2036883/cell_5
[ "text_plain_output_1.png" ]
from matplotlib import style from subprocess import check_output import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import numpy as np import pandas as pd from matplotlib import pyplot as plt from matplotlib import style style.use('ggplot') from sklearn.preprocessing import Imputer from sklearn.linear_model import LinearRegression from sklearn.preprocessing import scale from collections import Counter import seaborn as sns from sklearn.preprocessing import LabelEncoder, OneHotEncoder from subprocess import check_output train = pd.read_csv('../input/training-set/Train_A102.csv') test = pd.read_csv('../input/test-a102csv/Test_A102 (1).csv') test['Item_Outlet_Sales'] = np.nan combined_set = train.append(test) combined_set.replace('', np.NaN) combined_meanset = combined_set.copy(deep=True) combined_meanset['Item_Weight'].fillna(value=12.792854, inplace=True) combined_medianset = combined_set.copy(deep=True) combined_medianset['Item_Weight'].fillna(value=12.6, inplace=True) combined_set['Item_Weight'].fillna(value=12.6, inplace=True) train.plot.density() combined_set['Outlet_Size'].fillna(value='Medium', inplace=True) combined_set['Item_Outlet_Sales'].fillna(value=-999, inplace=True) X = train.iloc[:, 0:].values item_outlet_sales = X[:, 11] Outliers = item_outlet_sales > 6501 combined_set['Item_Fat_Content'] = combined_set['Item_Fat_Content'].replace({'low fat': 'LF', 'Low Fat': 'LF', 'Regular': 'reg'}) combined_mean_modelset = combined_set.copy(deep=True) combined_mean_modelset['Item_Outlet_Sales'] = combined_mean_modelset['Item_Outlet_Sales'].replace(to_replace=-999, value=2181.3) combined_median_modelset = combined_set.copy(deep=True) combined_median_modelset['Item_Outlet_Sales'] = combined_median_modelset['Item_Outlet_Sales'].replace(to_replace=-999, value=1794.3) print(combined_median_modelset.tail())
code
129018697/cell_9
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv') sample = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv') train.describe().T train.isna().mean() plt.figure(figsize=(16, 6)) heatmap = sns.heatmap(train.corr(), vmin=-1, vmax=1, annot=True) heatmap.set_title('Correlation Heatmap', fontdict={'fontsize':12}, pad=12); pd.plotting.scatter_matrix(train[['MaxOfUpperTRange', 'MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange']], figsize=(10, 10), diagonal='hist') plt.show()
code
129018697/cell_4
[ "image_output_1.png" ]
import pandas as pd train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv') sample = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv') train.head()
code
129018697/cell_6
[ "image_output_1.png" ]
import pandas as pd train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv') sample = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv') train.describe().T train.isna().mean()
code
129018697/cell_19
[ "text_plain_output_1.png" ]
from sklearn.decomposition import PCA from sklearn.metrics import mean_absolute_error from sklearn.model_selection import KFold from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler import lightgbm as lgb import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv') sample = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv') train.describe().T train.isna().mean() plt.figure(figsize=(16, 6)) heatmap = sns.heatmap(train.corr(), vmin=-1, vmax=1, annot=True) heatmap.set_title('Correlation Heatmap', fontdict={'fontsize':12}, pad=12); from sklearn.decomposition import PCA def perform_feature_engineering(df): df['total_bees'] = df['honeybee'] + df['bumbles'] + df['andrena'] + df['osmia'] df['temperature_range'] = df['MaxOfUpperTRange'] - df['MinOfUpperTRange'] + df['MaxOfLowerTRange'] - df['MinOfLowerTRange'] df['rainfall_intensity'] = df['RainingDays'] * df['AverageRainingDays'] df['fruit_quality'] = df['fruitset'] * df['fruitmass'] df['seed_to_fruit_ratio'] = df['seeds'] / df['fruitmass'] temp_range_features = ['MaxOfUpperTRange', 'MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange'] pca = PCA(n_components=1) df[['temperature_pca']] = pca.fit_transform(df[temp_range_features]) df = df.drop(temp_range_features, axis=1) return df train = perform_feature_engineering(train) test = perform_feature_engineering(test) train.columns features = ['clonesize', 'honeybee', 'bumbles', 'andrena', 'osmia', 'RainingDays', 'AverageRainingDays', 'fruitset', 'fruitmass', 'seeds', 'total_bees', 'temperature_range', 'rainfall_intensity', 'fruit_quality', 'seed_to_fruit_ratio', 'temperature_pca'] target = 'yield' from sklearn.model_selection import train_test_split X = train[features] y = train[target] import lightgbm as lgb from sklearn.model_selection import KFold from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.metrics import mean_absolute_error params_lgb = {'n_estimators': 9999, 'learning_rate': 0.03, 'max_depth': 5, 'num_leaves': 16, 'subsample': 0.7, 'colsample_bytree': 0.8, 'reg_lambda': 5e-07, 'objective': 'regression_l1', 'metric': 'mean_absolute_error', 'boosting_type': 'gbdt', 'device': 'GPU', 'random_state': 42} n_splits = 5 kf = KFold(n_splits=n_splits, shuffle=True, random_state=42) lgb_model = Pipeline([('scaler', StandardScaler()), ('lgbm', lgb.LGBMRegressor(**params_lgb))]) mae_scores = [] for train_index, test_index in kf.split(X): X_train, y_train = (X.iloc[train_index], y.iloc[train_index]) X_test, y_test = (X.iloc[test_index], y.iloc[test_index]) lgb_model.fit(X_train, y_train) y_pred_lgb = lgb_model.predict(X_test) mae_lgb = mean_absolute_error(y_test, y_pred_lgb) mae_scores.append(mae_lgb) print('Mean absolute error:', np.mean(mae_scores)) print('Scores:', mae_scores)
code
129018697/cell_1
[ "text_plain_output_1.png" ]
import os import warnings import warnings warnings.filterwarnings('ignore') import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
129018697/cell_8
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv') sample = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv') train.describe().T train.isna().mean() plt.figure(figsize=(16, 6)) heatmap = sns.heatmap(train.corr(), vmin=-1, vmax=1, annot=True) heatmap.set_title('Correlation Heatmap', fontdict={'fontsize': 12}, pad=12)
code
129018697/cell_16
[ "text_html_output_1.png" ]
from sklearn.decomposition import PCA import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv') sample = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv') train.describe().T train.isna().mean() plt.figure(figsize=(16, 6)) heatmap = sns.heatmap(train.corr(), vmin=-1, vmax=1, annot=True) heatmap.set_title('Correlation Heatmap', fontdict={'fontsize':12}, pad=12); from sklearn.decomposition import PCA def perform_feature_engineering(df): df['total_bees'] = df['honeybee'] + df['bumbles'] + df['andrena'] + df['osmia'] df['temperature_range'] = df['MaxOfUpperTRange'] - df['MinOfUpperTRange'] + df['MaxOfLowerTRange'] - df['MinOfLowerTRange'] df['rainfall_intensity'] = df['RainingDays'] * df['AverageRainingDays'] df['fruit_quality'] = df['fruitset'] * df['fruitmass'] df['seed_to_fruit_ratio'] = df['seeds'] / df['fruitmass'] temp_range_features = ['MaxOfUpperTRange', 'MinOfUpperTRange', 'AverageOfUpperTRange', 'MaxOfLowerTRange', 'MinOfLowerTRange', 'AverageOfLowerTRange'] pca = PCA(n_components=1) df[['temperature_pca']] = pca.fit_transform(df[temp_range_features]) df = df.drop(temp_range_features, axis=1) return df train = perform_feature_engineering(train) test = perform_feature_engineering(test) train.columns
code
129018697/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv') sample = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv') train.describe().T train.isna().mean() plt.figure(figsize=(16, 6)) heatmap = sns.heatmap(train.corr(), vmin=-1, vmax=1, annot=True) heatmap.set_title('Correlation Heatmap', fontdict={'fontsize':12}, pad=12); sns.boxplot(x='RainingDays', y='fruitset', data=train) plt.xlabel('Raining Days') plt.ylabel('Fruitset (%)') plt.title('Raining Days vs Fruitset') plt.show()
code
129018697/cell_5
[ "image_output_1.png" ]
import pandas as pd train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv') test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv') sample = pd.read_csv('/kaggle/input/playground-series-s3e14/sample_submission.csv') train.describe().T
code
72081303/cell_9
[ "text_plain_output_1.png" ]
from sklearn.model_selection import GridSearchCV from xgboost import XGBClassifier import numpy as np import pandas as pd train_df = pd.read_csv('../input/titanic/train.csv') test_df = pd.read_csv('../input/titanic/test.csv') train_labels = train_df['Survived'] train_df = train_df.drop(['Survived'], axis=1) df = pd.concat([train_df, test_df]) def substrings_in_string(big_string, substrings): for substring in substrings: if str(big_string).find(substring) != -1: return substring return np.nan title_list = ['Mrs', 'Mr', 'Master', 'Miss', 'Major', 'Rev', 'Dr', 'Ms', 'Mlle', 'Col', 'Capt', 'Mme', 'Countess', 'Don', 'Jonkheer'] df['Title'] = df['Name'].map(lambda x: substrings_in_string(x, title_list)) cabin_list = ['A', 'B', 'C', 'D', 'E', 'F', 'T', 'G', 'Unknown'] df['Deck'] = df['Cabin'].map(lambda x: substrings_in_string(x, cabin_list)) df['Alone'] = (df['SibSp'] + df['Parch'] == 0).astype('object') numeric_columns = ['Age', 'SibSp', 'Parch', 'Fare'] categorical_columns = ['Pclass', 'Sex', 'Embarked', 'Title', 'Deck', 'Alone'] df['Pclass'] = df['Pclass'].astype('object') data = pd.get_dummies(df[numeric_columns + categorical_columns], dummy_na=True) data[numeric_columns] -= data.mean() data[numeric_columns] /= data.std() data[numeric_columns] = data[numeric_columns].fillna(0) n_train = len(train_labels) train_data = data.iloc[:n_train, :].values test_data = data.iloc[n_train:, :].values estimators = {} estimator = XGBClassifier() cv = GridSearchCV(estimator=estimator, param_grid={'n_estimators': list(range(70, 90)), 'max_depth': list(range(2, 7)), 'eval_metric': ['logloss'], 'use_label_encoder': [False]}) cv.fit(train_data, train_labels) print(cv.best_score_) print(cv.best_params_) estimators['xgboost'] = cv.best_estimator_
code
72081303/cell_6
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd train_df = pd.read_csv('../input/titanic/train.csv') test_df = pd.read_csv('../input/titanic/test.csv') train_labels = train_df['Survived'] train_df = train_df.drop(['Survived'], axis=1) df = pd.concat([train_df, test_df]) def substrings_in_string(big_string, substrings): for substring in substrings: if str(big_string).find(substring) != -1: return substring return np.nan title_list = ['Mrs', 'Mr', 'Master', 'Miss', 'Major', 'Rev', 'Dr', 'Ms', 'Mlle', 'Col', 'Capt', 'Mme', 'Countess', 'Don', 'Jonkheer'] df['Title'] = df['Name'].map(lambda x: substrings_in_string(x, title_list)) cabin_list = ['A', 'B', 'C', 'D', 'E', 'F', 'T', 'G', 'Unknown'] df['Deck'] = df['Cabin'].map(lambda x: substrings_in_string(x, cabin_list)) df['Alone'] = (df['SibSp'] + df['Parch'] == 0).astype('object') numeric_columns = ['Age', 'SibSp', 'Parch', 'Fare'] categorical_columns = ['Pclass', 'Sex', 'Embarked', 'Title', 'Deck', 'Alone'] df['Pclass'] = df['Pclass'].astype('object') df[categorical_columns].describe()
code
72081303/cell_3
[ "text_html_output_1.png" ]
import pandas as pd train_df = pd.read_csv('../input/titanic/train.csv') test_df = pd.read_csv('../input/titanic/test.csv') train_labels = train_df['Survived'] train_df = train_df.drop(['Survived'], axis=1) df = pd.concat([train_df, test_df]) df.head()
code
72081303/cell_10
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import GridSearchCV from xgboost import XGBClassifier import numpy as np import pandas as pd train_df = pd.read_csv('../input/titanic/train.csv') test_df = pd.read_csv('../input/titanic/test.csv') train_labels = train_df['Survived'] train_df = train_df.drop(['Survived'], axis=1) df = pd.concat([train_df, test_df]) def substrings_in_string(big_string, substrings): for substring in substrings: if str(big_string).find(substring) != -1: return substring return np.nan title_list = ['Mrs', 'Mr', 'Master', 'Miss', 'Major', 'Rev', 'Dr', 'Ms', 'Mlle', 'Col', 'Capt', 'Mme', 'Countess', 'Don', 'Jonkheer'] df['Title'] = df['Name'].map(lambda x: substrings_in_string(x, title_list)) cabin_list = ['A', 'B', 'C', 'D', 'E', 'F', 'T', 'G', 'Unknown'] df['Deck'] = df['Cabin'].map(lambda x: substrings_in_string(x, cabin_list)) df['Alone'] = (df['SibSp'] + df['Parch'] == 0).astype('object') numeric_columns = ['Age', 'SibSp', 'Parch', 'Fare'] categorical_columns = ['Pclass', 'Sex', 'Embarked', 'Title', 'Deck', 'Alone'] df['Pclass'] = df['Pclass'].astype('object') data = pd.get_dummies(df[numeric_columns + categorical_columns], dummy_na=True) data[numeric_columns] -= data.mean() data[numeric_columns] /= data.std() data[numeric_columns] = data[numeric_columns].fillna(0) n_train = len(train_labels) train_data = data.iloc[:n_train, :].values test_data = data.iloc[n_train:, :].values estimators = {} estimator = XGBClassifier() cv = GridSearchCV(estimator=estimator, param_grid={'n_estimators': list(range(70, 90)), 'max_depth': list(range(2, 7)), 'eval_metric': ['logloss'], 'use_label_encoder': [False]}) cv.fit(train_data, train_labels) estimators['xgboost'] = cv.best_estimator_ estimator = RandomForestClassifier() cv = GridSearchCV(estimator=estimator, param_grid={'n_estimators': list(range(200, 240))}) cv.fit(train_data, train_labels) print(cv.best_score_) print(cv.best_params_) estimators['random_forest'] = cv.best_estimator_
code
72081303/cell_5
[ "text_html_output_1.png" ]
import numpy as np import pandas as pd train_df = pd.read_csv('../input/titanic/train.csv') test_df = pd.read_csv('../input/titanic/test.csv') train_labels = train_df['Survived'] train_df = train_df.drop(['Survived'], axis=1) df = pd.concat([train_df, test_df]) def substrings_in_string(big_string, substrings): for substring in substrings: if str(big_string).find(substring) != -1: return substring return np.nan title_list = ['Mrs', 'Mr', 'Master', 'Miss', 'Major', 'Rev', 'Dr', 'Ms', 'Mlle', 'Col', 'Capt', 'Mme', 'Countess', 'Don', 'Jonkheer'] df['Title'] = df['Name'].map(lambda x: substrings_in_string(x, title_list)) cabin_list = ['A', 'B', 'C', 'D', 'E', 'F', 'T', 'G', 'Unknown'] df['Deck'] = df['Cabin'].map(lambda x: substrings_in_string(x, cabin_list)) df['Alone'] = (df['SibSp'] + df['Parch'] == 0).astype('object') numeric_columns = ['Age', 'SibSp', 'Parch', 'Fare'] categorical_columns = ['Pclass', 'Sex', 'Embarked', 'Title', 'Deck', 'Alone'] df['Pclass'] = df['Pclass'].astype('object') df[numeric_columns].describe()
code
88087713/cell_42
[ "text_plain_output_1.png" ]
se
code
88087713/cell_21
[ "text_html_output_1.png" ]
import pandas as pd articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv') customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv') submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv') trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv') trans_data.dtypes
code
88087713/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv') customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv') submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv') trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv') customers_data_new = customers_data[customers_data['club_member_status'] == 'ACTIVE'] customers_data_new.drop(labels=['FN', 'Active', 'club_member_status', 'fashion_news_frequency'], axis=1, inplace=True) customers_data_new.reset_index(drop=True, inplace=True) customers_data_new.head(5)
code
88087713/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv') customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv') submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv') trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv') customers_data['club_member_status'].value_counts()
code
88087713/cell_30
[ "text_plain_output_1.png" ]
import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
trans_data.dtypes
trans_data['t_dat'] = trans_data['t_dat'].astype('datetime64')
trans_data.dtypes
# NOTE: 'year_trans' is filtered on below but its derivation is missing from this cell;
# it is assumed here to be the transaction year extracted from 't_dat'
trans_data['year_trans'] = trans_data['t_dat'].dt.year
sample_trans_data = trans_data[trans_data['year_trans'] == 2019]
sample_trans_data.isna().sum()
sample_trans_data.drop(labels=['t_dat', 'sales_channel_id'], axis=1, inplace=True)
sample_trans_data.reset_index(drop=True, inplace=True)
sample_trans_data.isna().sum()
sample_trans_data.info()
code
88087713/cell_33
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd import seaborn as sns articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv') customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv') submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv') trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv') customers_data_new = customers_data[customers_data['club_member_status'] == 'ACTIVE'] customers_data_new.drop(labels=['FN', 'Active', 'club_member_status', 'fashion_news_frequency'], axis=1, inplace=True) customers_data_new.reset_index(drop=True, inplace=True) customers_data_new.drop(labels=['postal_code'], axis=1, inplace=True) customers_data_new.reset_index(drop=True, inplace=True) sns.set_style('whitegrid') customers_data_new['age'].plot(kind='hist')
code
88087713/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv') customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv') submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv') trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv') customers_data_new = customers_data[customers_data['club_member_status'] == 'ACTIVE'] customers_data_new.head(5)
code
88087713/cell_29
[ "text_plain_output_1.png" ]
import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
trans_data.dtypes
trans_data['t_dat'] = trans_data['t_dat'].astype('datetime64')
trans_data.dtypes
# NOTE: 'year_trans' is filtered on below but its derivation is missing from this cell;
# it is assumed here to be the transaction year extracted from 't_dat'
trans_data['year_trans'] = trans_data['t_dat'].dt.year
sample_trans_data = trans_data[trans_data['year_trans'] == 2019]
sample_trans_data.isna().sum()
sample_trans_data.drop(labels=['t_dat', 'sales_channel_id'], axis=1, inplace=True)
sample_trans_data.reset_index(drop=True, inplace=True)
sample_trans_data.isna().sum()
code
88087713/cell_39
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customers_data_new = customers_data[customers_data['club_member_status'] == 'ACTIVE']
customers_data_new.drop(labels=['FN', 'Active', 'club_member_status', 'fashion_news_frequency'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
customers_data_new.drop(labels=['postal_code'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
trans_data.dtypes
trans_data['t_dat'] = trans_data['t_dat'].astype('datetime64')
trans_data.dtypes
# NOTE: 'year_trans' is filtered on below but its derivation is missing from this cell;
# it is assumed here to be the transaction year extracted from 't_dat'
trans_data['year_trans'] = trans_data['t_dat'].dt.year
sample_trans_data = trans_data[trans_data['year_trans'] == 2019]
sample_trans_data.isna().sum()
sample_trans_data.drop(labels=['t_dat', 'sales_channel_id'], axis=1, inplace=True)
sample_trans_data.reset_index(drop=True, inplace=True)
sample_trans_data.isna().sum()
sns.set_style('whitegrid')
interval_range_age = pd.interval_range(start=0, freq=10, end=100)
customers_data_new['age_group'] = pd.cut(customers_data_new['age'], bins=interval_range_age)
customers_data_new.isna().sum()
purchases_2019 = sample_trans_data.merge(customers_data_new, how='left', on='customer_id')
customers_temp = purchases_2019.groupby(['age_group'])['customer_id'].count()
data_temp_customer = pd.DataFrame({'Group Age': customers_temp.index, 'Customers': customers_temp.values})
data_temp_customer = data_temp_customer.sort_values(['Group Age'], ascending=False)
plt.figure(figsize=(7, 7))
plt.title('Group Age')
sns.set_color_codes('pastel')
s = sns.barplot(x='Group Age', y='Customers', data=data_temp_customer)
s.set_xticklabels(s.get_xticklabels(), rotation=45)
locs, labels = plt.xticks()
plt.show()
code
88087713/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
trans_data.dtypes
trans_data['t_dat'] = trans_data['t_dat'].astype('datetime64')
trans_data.dtypes
# NOTE: 'year_trans' is filtered on below but its derivation is missing from this cell;
# it is assumed here to be the transaction year extracted from 't_dat'
trans_data['year_trans'] = trans_data['t_dat'].dt.year
sample_trans_data = trans_data[trans_data['year_trans'] == 2019]
sample_trans_data.isna().sum()
code
88087713/cell_41
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv')
customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv')
submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv')
trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv')
customers_data_new = customers_data[customers_data['club_member_status'] == 'ACTIVE']
customers_data_new.drop(labels=['FN', 'Active', 'club_member_status', 'fashion_news_frequency'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
customers_data_new.drop(labels=['postal_code'], axis=1, inplace=True)
customers_data_new.reset_index(drop=True, inplace=True)
trans_data.dtypes
trans_data['t_dat'] = trans_data['t_dat'].astype('datetime64')
trans_data.dtypes
# NOTE: 'year_trans' and 'day_trans' are used below but their derivation is missing from this cell;
# they are assumed here to be the transaction year and the weekday name taken from 't_dat'
trans_data['year_trans'] = trans_data['t_dat'].dt.year
trans_data['day_trans'] = trans_data['t_dat'].dt.day_name()
sample_trans_data = trans_data[trans_data['year_trans'] == 2019]
sample_trans_data.isna().sum()
sample_trans_data.drop(labels=['t_dat', 'sales_channel_id'], axis=1, inplace=True)
sample_trans_data.reset_index(drop=True, inplace=True)
sample_trans_data.isna().sum()
sns.set_style('whitegrid')
interval_range_age = pd.interval_range(start=0, freq=10, end=100)
customers_data_new['age_group'] = pd.cut(customers_data_new['age'], bins=interval_range_age)
customers_data_new.isna().sum()
purchases_2019 = sample_trans_data.merge(customers_data_new, how='left', on='customer_id')
customers_temp = purchases_2019.groupby(['age_group'])['customer_id'].count()
data_temp_customer = pd.DataFrame({'Group Age': customers_temp.index, 'Customers': customers_temp.values})
data_temp_customer = data_temp_customer.sort_values(['Group Age'], ascending=False)
plt.figure(figsize=(7, 7))
plt.title('Group Age')
sns.set_color_codes('pastel')
s = sns.barplot(x='Group Age', y='Customers', data=data_temp_customer)
s.set_xticklabels(s.get_xticklabels(), rotation=45)
locs, labels = plt.xticks()
plt.show()
most_age_group_transaction = purchases_2019[purchases_2019['age_group'] == purchases_2019['age_group'].mode()[0]]
customers_temp_most = most_age_group_transaction.groupby(['day_trans'])['customer_id'].count()
data_temp_customer_most = pd.DataFrame({'Day Transaction': customers_temp_most.index, 'Customers': customers_temp_most.values})
data_temp_customer_most = data_temp_customer_most.sort_values(['Customers'], ascending=False)
plt.figure(figsize=(7, 7))
plt.title('Day Transaction of Most Age Group Purchases')
sns.set_color_codes('pastel')
s = sns.barplot(x='Day Transaction', y='Customers', data=data_temp_customer_most)
s.set_xticklabels(s.get_xticklabels())
locs, labels = plt.xticks()
plt.show()
code
88087713/cell_19
[ "text_html_output_1.png" ]
import pandas as pd articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv') customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv') submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv') trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv') articles_data_new = articles_data[['prod_name', 'product_type_name', 'product_group_name']].copy() articles_data_new.isna().sum() articles_data_new.info()
code
88087713/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd articles_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/articles.csv') customers_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/customers.csv') submission_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/sample_submission.csv') trans_data = pd.read_csv('/kaggle/input/h-and-m-personalized-fashion-recommendations/transactions_train.csv') articles_data_new = articles_data[['prod_name', 'product_type_name', 'product_group_name']].copy() articles_data_new.isna().sum()
code