Dataset columns:
  path              string, 13 to 17 chars (e.g. "17139154/cell_31")
  screenshot_names  sequence of strings, 1 to 873 items
  code              string, 0 to 40.4k chars
  cell_type         string, 1 distinct value ("code")
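For orientation, a minimal sketch of loading and inspecting one row of a dataset with this schema via the `datasets` library; the repository id below is a placeholder (an assumption), since the dataset's actual Hub name is not stated here.

from datasets import load_dataset

# 'user/kaggle-notebook-cells' is a hypothetical repo id, not the real name.
ds = load_dataset('user/kaggle-notebook-cells', split='train')
row = ds[0]
print(row['path'])              # e.g. '17139154/cell_31'
print(row['screenshot_names'])  # names of the cell's rendered outputs
print(row['cell_type'])         # always 'code' in this dataset
print(row['code'][:200])        # start of the cell's source code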
17139154/cell_31
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt  # plotting
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/BR_eleitorado_2016_municipio.csv', delimiter=';')
df.shape
corr = df.corr()
plt.figure(num=None, dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum=1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title('Correlation Matrix')
plt.show()
df = df.drop(columns=['cod_municipio_tse'])
x = sns.PairGrid(df)
x.map(plt.scatter)
uf = pd.DataFrame(df['uf'].value_counts())
eleitores = df[['uf', 'total_eleitores']].sort_values(by='uf')
eleitores_grpd_by_uf = eleitores.groupby(['uf']).sum()
norte = ['AM', 'RR', 'AP', 'PA', 'TO', 'RO', 'AC']
centroeste = ['MT', 'MS', 'GO']
sudeste = ['SP', 'ES', 'MG', 'RJ']
sul = ['PR', 'RS', 'SC']
nordeste = ['MA', 'PI', 'CE', 'RN', 'PE', 'PB', 'SE', 'AL', 'BA']
df_region = eleitores
df_region['regiao'] = ''
for i, r in df_region.iterrows():
    if r['uf'] in norte:
        df_region.at[i, 'regiao'] = 'Norte'
    elif r['uf'] in centroeste:
        df_region.at[i, 'regiao'] = 'Centro-Oeste'
    elif r['uf'] in sudeste:
        df_region.at[i, 'regiao'] = 'Sudeste'
    elif r['uf'] in sul:
        df_region.at[i, 'regiao'] = 'Sul'
    else:
        df_region.at[i, 'regiao'] = 'Nordeste'
df_ufs = pd.DataFrame(norte + centroeste + sudeste + sul + nordeste)
reg = pd.DataFrame(df_region['regiao'].value_counts())
elec = pd.DataFrame(df_region.drop(columns=['uf']).groupby(['regiao']).sum())
plt.figure(figsize=(10, 15))
sns.violinplot(y='total_eleitores', x='regiao', data=df_region)
code
17139154/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt  # plotting
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/BR_eleitorado_2016_municipio.csv', delimiter=';')
df.shape
corr = df.corr()
plt.figure(num=None, dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum=1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title('Correlation Matrix')
plt.show()
df = df.drop(columns=['cod_municipio_tse'])
df['uf'].value_counts().count()
code
17139154/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt  # plotting
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/BR_eleitorado_2016_municipio.csv', delimiter=';')
df.shape
corr = df.corr()
plt.figure(num=None, dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum=1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title('Correlation Matrix')
plt.show()
df = df.drop(columns=['cod_municipio_tse'])
x = sns.PairGrid(df)
x.map(plt.scatter)
uf = pd.DataFrame(df['uf'].value_counts())
eleitores = df[['uf', 'total_eleitores']].sort_values(by='uf')
eleitores_grpd_by_uf = eleitores.groupby(['uf']).sum()
plt.figure(figsize=(15, 5))
plt.title('Total de eleitores em cada UF')
sns.barplot(x=eleitores_grpd_by_uf.index, y=eleitores_grpd_by_uf.total_eleitores)
code
17139154/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/BR_eleitorado_2016_municipio.csv', delimiter=';')
df.shape
code
129016252/cell_13
[ "text_plain_output_1.png" ]
from benetech_annotation_parser.annotation_api import AnnotationParser, Axis
train_dataset_path = '/kaggle/input/benetech-making-graphs-accessible/train'
annotation_parser = AnnotationParser(train_dataset_path)
p = annotation_parser.get_annotation(0)
print(p.axes)
print('-' * 30)
print(p.axis(axis=Axis.X))
print('-' * 30)
print(p.axis(axis=Axis.Y))
print('-' * 30)
print(p.ticks(axis=Axis.X))
print('-' * 30)
print(p.ticks(axis=Axis.X, filter='id'))
print('-' * 30)
print(p.ticks(axis=Axis.X, filter='tick_pt'))
print('-' * 30)
print(p.tick_type(axis=Axis.X))
print('-' * 30)
print(p.values_type(axis=Axis.X))
code
129016252/cell_9
[ "image_output_5.png", "image_output_4.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from benetech_annotation_parser.annotation_api import AnnotationParser, Axis
train_dataset_path = '/kaggle/input/benetech-making-graphs-accessible/train'
annotation_parser = AnnotationParser(train_dataset_path)
p = annotation_parser.get_annotation(0)
print(p.chart_type)
code
129016252/cell_11
[ "text_plain_output_1.png" ]
from benetech_annotation_parser.annotation_api import AnnotationParser, Axis
train_dataset_path = '/kaggle/input/benetech-making-graphs-accessible/train'
annotation_parser = AnnotationParser(train_dataset_path)
p = annotation_parser.get_annotation(0)
print(p.text())
code
129016252/cell_7
[ "text_plain_output_1.png" ]
from benetech_annotation_parser.annotation_api import AnnotationParser, Axis
train_dataset_path = '/kaggle/input/benetech-making-graphs-accessible/train'
annotation_parser = AnnotationParser(train_dataset_path)
p = annotation_parser.get_annotation(0)
print(p.name)
print(p.json_path)
print(p.image_path)
code
129016252/cell_18
[ "text_plain_output_1.png" ]
from PIL import Image, ImageDraw
from benetech_annotation_parser.annotation_api import AnnotationParser, Axis
from typing import Dict
import matplotlib.pyplot as plt
import random
train_dataset_path = '/kaggle/input/benetech-making-graphs-accessible/train'
annotation_parser = AnnotationParser(train_dataset_path)
p = annotation_parser.get_annotation(0)
from typing import Dict
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw

def draw_rect_angle(img: Image.Image, rectangle_coord: Dict[str, int]):
    # Draw an axis-aligned bounding box given by x0/y0/width/height.
    x0 = rectangle_coord['x0']
    y0 = rectangle_coord['y0']
    h = rectangle_coord['height']
    w = rectangle_coord['width']
    draw = ImageDraw.Draw(img)
    draw.rectangle([(x0, y0), (x0 + w, y0 + h)], outline='blue', width=0)

def draw_rect_angle_rotate(img: Image.Image, rectangle_coord: Dict[str, int], annotation_type: str):
    # Draw a (possibly rotated) quadrilateral given by its four corners,
    # color-coded by annotation type.
    x0 = rectangle_coord['x0']
    x1 = rectangle_coord['x1']
    x2 = rectangle_coord['x2']
    x3 = rectangle_coord['x3']
    y0 = rectangle_coord['y0']
    y1 = rectangle_coord['y1']
    y2 = rectangle_coord['y2']
    y3 = rectangle_coord['y3']
    draw = ImageDraw.Draw(img)
    color = {'tick_label': (255, 0, 0), 'chart_title': (0, 192, 192), 'axis_title': (255, 255, 0)}[annotation_type]
    draw.line([x0, y0, x1, y1], fill=color, width=2)
    draw.line([x1, y1, x2, y2], fill=color, width=2)
    draw.line([x2, y2, x3, y3], fill=color, width=2)
    draw.line([x3, y3, x0, y0], fill=color, width=2)

def draw_point(img: Image.Image, coord: Dict[str, int]):
    draw = ImageDraw.Draw(img)
    draw.ellipse([(coord['x'] - 1, coord['y'] - 1), (coord['x'] + 1, coord['y'] + 1)], fill='lime', outline='lime', width=10)

def visualization_show(annotation_parser: AnnotationParser, index: int):
    ap = annotation_parser.get_annotation(index=index)
    img = Image.open(ap.image_path)
    polygon = ap.text(filter='polygon')
    role = ap.text(filter='role')
    ticks_x = ap.ticks(axis=Axis.X, filter='tick_pt')
    ticks_y = ap.ticks(axis=Axis.Y, filter='tick_pt')
    plt.axis('off')

import random
for i in range(5):
    # randint is inclusive on both ends, so cap at len - 1 to avoid an index error
    random_index = random.randint(0, len(annotation_parser) - 1)
    visualization_show(annotation_parser, random_index)
code
129016252/cell_8
[ "text_plain_output_1.png" ]
from benetech_annotation_parser.annotation_api import AnnotationParser, Axis
train_dataset_path = '/kaggle/input/benetech-making-graphs-accessible/train'
annotation_parser = AnnotationParser(train_dataset_path)
p = annotation_parser.get_annotation(0)
print(p.source)
code
129016252/cell_3
[ "text_plain_output_1.png" ]
# api install
!pip install benetech-annotation-parser
code
129016252/cell_14
[ "text_plain_output_1.png" ]
from benetech_annotation_parser.annotation_api import AnnotationParser, Axis
train_dataset_path = '/kaggle/input/benetech-making-graphs-accessible/train'
annotation_parser = AnnotationParser(train_dataset_path)
p = annotation_parser.get_annotation(0)
print(p.data_series())
print('-' * 30)
print(p.data_series(filter='x'))
print('-' * 30)
print(p.data_series(filter='y'))
code
129016252/cell_10
[ "text_plain_output_1.png" ]
from benetech_annotation_parser.annotation_api import AnnotationParser, Axis
train_dataset_path = '/kaggle/input/benetech-making-graphs-accessible/train'
annotation_parser = AnnotationParser(train_dataset_path)
p = annotation_parser.get_annotation(0)
print(p.plot_bb)
code
129016252/cell_12
[ "text_plain_output_1.png" ]
from benetech_annotation_parser.annotation_api import AnnotationParser, Axis
train_dataset_path = '/kaggle/input/benetech-making-graphs-accessible/train'
annotation_parser = AnnotationParser(train_dataset_path)
p = annotation_parser.get_annotation(0)
print(p.text(filter='id'))
print('-' * 30)
print(p.text(filter='polygon'))
print('-' * 30)
print(p.text(filter='text'))
print('-' * 30)
print(p.text(filter='role'))
code
328841/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
for d in ['date_x', 'date_y']:
    print('Start of ' + d + ': ' + str(df_train[d].min().date()))
    print(' End of ' + d + ': ' + str(df_train[d].max().date()))
    print('Range of ' + d + ': ' + str(df_train[d].max() - df_train[d].min()) + '\n')
code
106208028/cell_13
[ "text_html_output_1.png" ]
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv')
train.isna().any()
g = sns.catplot(x='blue', y='price_range', data=train, kind='bar', height=6, palette='muted')
g.despine(left=True)
g = g.set_ylabels('price_range')
g = sns.catplot(x='wifi', y='price_range', data=train, kind='bar', height=6, palette='muted')
g.despine(left=True)
g = g.set_ylabels('price_range')
g = sns.catplot(x='n_cores', y='price_range', data=train, kind='bar', height=6, palette='muted')
g.despine(left=True)
g = g.set_ylabels('price_range')
plt.figure(figsize=(15, 12))
g = sns.heatmap(train.corr(), cmap='BrBG', annot=True, linewidths=2.0)
scaler = StandardScaler()
X = scaler.fit_transform(train.drop(['price_range'], axis=1))
y = np.ravel(train[['price_range']])
print(X.shape)
print(y.shape)
code
106208028/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv')
train.isna().any()
g = sns.catplot(x='blue', y='price_range', data=train, kind='bar', height=6, palette='muted')
g.despine(left=True)
g = g.set_ylabels('price_range')
g = sns.catplot(x='wifi', y='price_range', data=train, kind='bar', height=6, palette='muted')
g.despine(left=True)
g = g.set_ylabels('price_range')
code
106208028/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv')
train.head()
code
106208028/cell_23
[ "text_plain_output_1.png" ]
from sklearn.model_selection import KFold, train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv')
train.isna().any()
g = sns.catplot(x='blue', y='price_range', data=train, kind='bar', height=6, palette='muted')
g.despine(left=True)
g = g.set_ylabels('price_range')
g = sns.catplot(x='wifi', y='price_range', data=train, kind='bar', height=6, palette='muted')
g.despine(left=True)
g = g.set_ylabels('price_range')
g = sns.catplot(x='n_cores', y='price_range', data=train, kind='bar', height=6, palette='muted')
g.despine(left=True)
g = g.set_ylabels('price_range')
plt.figure(figsize=(15, 12))
g = sns.heatmap(train.corr(), cmap='BrBG', annot=True, linewidths=2.0)
scaler = StandardScaler()
X = scaler.fit_transform(train.drop(['price_range'], axis=1))
y = np.ravel(train[['price_range']])
kf = KFold(n_splits=5)
kf.get_n_splits(X)
code
106208028/cell_20
[ "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, roc_auc_score, accuracy_score
clf = LogisticRegression(random_state=0).fit(X_train, y_train)
clf.score(X_train, y_train)
clf.score(X_test, y_test)
confusion_matrix(y_test, clf.predict(X_test))
code
106208028/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv')
train.info()
code
106208028/cell_2
[ "image_output_1.png" ]
!pip install pydotplus
code
106208028/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv')
train.isna().any()
g = sns.catplot(x='blue', y='price_range', data=train, kind='bar', height=6, palette='muted')
g.despine(left=True)
g = g.set_ylabels('price_range')
g = sns.catplot(x='wifi', y='price_range', data=train, kind='bar', height=6, palette='muted')
g.despine(left=True)
g = g.set_ylabels('price_range')
g = sns.catplot(x='n_cores', y='price_range', data=train, kind='bar', height=6, palette='muted')
g.despine(left=True)
g = g.set_ylabels('price_range')
plt.figure(figsize=(15, 12))
g = sns.heatmap(train.corr(), cmap='BrBG', annot=True, linewidths=2.0)
code
106208028/cell_19
[ "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression(random_state=0).fit(X_train, y_train)
clf.score(X_train, y_train)
clf.score(X_test, y_test)
code
106208028/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold, train_test_split, cross_val_score
from sklearn.metrics import confusion_matrix, roc_auc_score, accuracy_score
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
106208028/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv')
train.isna().any()
code
106208028/cell_18
[ "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression(random_state=0).fit(X_train, y_train)
clf.score(X_train, y_train)
code
106208028/cell_28
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold, train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv')
train.isna().any()
g = sns.catplot(x='blue', y='price_range', data=train, kind='bar', height=6, palette='muted')
g.despine(left=True)
g = g.set_ylabels('price_range')
g = sns.catplot(x='wifi', y='price_range', data=train, kind='bar', height=6, palette='muted')
g.despine(left=True)
g = g.set_ylabels('price_range')
g = sns.catplot(x='n_cores', y='price_range', data=train, kind='bar', height=6, palette='muted')
g.despine(left=True)
g = g.set_ylabels('price_range')
plt.figure(figsize=(15, 12))
g = sns.heatmap(train.corr(), cmap='BrBG', annot=True, linewidths=2.0)
scaler = StandardScaler()
X = scaler.fit_transform(train.drop(['price_range'], axis=1))
y = np.ravel(train[['price_range']])
kf = KFold(n_splits=5)
kf.get_n_splits(X)
training_scores_log = []
testing_scores_log = []
for fold, (train_index, test_index) in enumerate(kf.split(X)):
    X_train = X[train_index]
    y_train = y[train_index]
    X_test = X[test_index]
    y_test = y[test_index]
    clf_log = LogisticRegression(random_state=0).fit(X_train, y_train)
    training_scores_log.append(clf_log.score(X_train, y_train))
    testing_scores_log.append(clf_log.score(X_test, y_test))
training_scores_svm_lin = []
testing_scores_svm_lin = []
for fold, (train_index, test_index) in enumerate(kf.split(X)):
    X_train = X[train_index]
    y_train = y[train_index]
    X_test = X[test_index]
    y_test = y[test_index]
    clf_svm_lin = SVC(kernel='linear').fit(X_train, y_train)
    print(f'Fold {fold + 1} -> The score of the training data set is: ', clf_svm_lin.score(X_train, y_train))
    print(f'Fold {fold + 1} -> The score of the testing (out of fold) data set is: ', clf_svm_lin.score(X_test, y_test))
    training_scores_svm_lin.append(clf_svm_lin.score(X_train, y_train))
    testing_scores_svm_lin.append(clf_svm_lin.score(X_test, y_test))
print('\n')
print(f'The average training set accuracy is: {sum(training_scores_svm_lin) / len(training_scores_svm_lin)}')
print(f'The average testing set accuracy is: {sum(testing_scores_svm_lin) / len(testing_scores_svm_lin)}')
code
106208028/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv')
train.isna().any()
g = sns.catplot(x='blue', y='price_range', data=train, kind='bar', height=6, palette='muted')
g.despine(left=True)
g = g.set_ylabels('price_range')
code
106208028/cell_15
[ "text_plain_output_1.png" ]
X_train
code
106208028/cell_16
[ "text_plain_output_1.png" ]
X_test
code
106208028/cell_24
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold, train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv')
train.isna().any()
g = sns.catplot(x='blue', y='price_range', data=train, kind='bar', height=6, palette='muted')
g.despine(left=True)
g = g.set_ylabels('price_range')
g = sns.catplot(x='wifi', y='price_range', data=train, kind='bar', height=6, palette='muted')
g.despine(left=True)
g = g.set_ylabels('price_range')
g = sns.catplot(x='n_cores', y='price_range', data=train, kind='bar', height=6, palette='muted')
g.despine(left=True)
g = g.set_ylabels('price_range')
plt.figure(figsize=(15, 12))
g = sns.heatmap(train.corr(), cmap='BrBG', annot=True, linewidths=2.0)
scaler = StandardScaler()
X = scaler.fit_transform(train.drop(['price_range'], axis=1))
y = np.ravel(train[['price_range']])
kf = KFold(n_splits=5)
kf.get_n_splits(X)
training_scores_log = []
testing_scores_log = []
for fold, (train_index, test_index) in enumerate(kf.split(X)):
    X_train = X[train_index]
    y_train = y[train_index]
    X_test = X[test_index]
    y_test = y[test_index]
    clf_log = LogisticRegression(random_state=0).fit(X_train, y_train)
    print(f'Fold {fold + 1} -> The score of the training data set is: ', clf_log.score(X_train, y_train))
    print(f'Fold {fold + 1} -> The score of the testing (out of fold) data set is: ', clf_log.score(X_test, y_test))
    training_scores_log.append(clf_log.score(X_train, y_train))
    testing_scores_log.append(clf_log.score(X_test, y_test))
print('\n')
print(f'The average training set accuracy is: {sum(training_scores_log) / len(training_scores_log)}')
print(f'The average testing set accuracy is: {sum(testing_scores_log) / len(testing_scores_log)}')
code
106208028/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv')
train.isna().any()
g = sns.catplot(x='blue', y='price_range', data=train, kind='bar', height=6, palette='muted')
g.despine(left=True)
g = g.set_ylabels('price_range')
g = sns.catplot(x='wifi', y='price_range', data=train, kind='bar', height=6, palette='muted')
g.despine(left=True)
g = g.set_ylabels('price_range')
g = sns.catplot(x='n_cores', y='price_range', data=train, kind='bar', height=6, palette='muted')
g.despine(left=True)
g = g.set_ylabels('price_range')
code
106208028/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('/kaggle/input/mobile-price-classification/train.csv')
train.describe()
code
72068164/cell_9
[ "image_output_1.png" ]
import pandas as pd
daily_activities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyActivity_merged.csv')
daily_calories = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyCalories_merged.csv')
daily_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyIntensities_merged.csv')
daily_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailySteps_merged.csv')
hourly_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlySteps_merged.csv')
hourly_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlyIntensities_merged.csv')
sleep_day = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/sleepDay_merged.csv')
weight_index = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/weightLogInfo_merged.csv')
daily_activities.info()
daily_calories.info()
daily_intensities.info()
daily_steps.info()
code
72068164/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd
daily_activities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyActivity_merged.csv')
daily_calories = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyCalories_merged.csv')
daily_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyIntensities_merged.csv')
daily_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailySteps_merged.csv')
hourly_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlySteps_merged.csv')
hourly_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlyIntensities_merged.csv')
sleep_day = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/sleepDay_merged.csv')
weight_index = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/weightLogInfo_merged.csv')
sleep_day.info()
weight_index.info()
code
72068164/cell_28
[ "text_html_output_1.png" ]
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
daily_activities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyActivity_merged.csv')
daily_calories = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyCalories_merged.csv')
daily_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyIntensities_merged.csv')
daily_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailySteps_merged.csv')
hourly_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlySteps_merged.csv')
hourly_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlyIntensities_merged.csv')
sleep_day = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/sleepDay_merged.csv')
weight_index = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/weightLogInfo_merged.csv')
len(weight_index.Id.unique())
len(daily_activities.Id.unique())
len(sleep_day.Id.unique())
sleep_day.columns = ['Id', 'Date', 'TotalSleepRecords', 'TotalMinutesAsleep', 'TotalTimeInBed']
daily_activities.columns = ['Id', 'Date', 'TotalSteps', 'TotalDistance', 'TrackerDistance', 'LoggedActivitiesDistance', 'VeryActiveDistance', 'ModeratelyActiveDistance', 'LightActiveDistance', 'SedentaryActiveDistance', 'VeryActiveMinutes', 'FairlyActiveMinutes', 'LightlyActiveMinutes', 'SedentaryMinutes', 'Calories']
sleep_day.Date = pd.to_datetime(sleep_day['Date'], format='%m/%d/%Y %I:%M:%S %p').dt.strftime('%m/%d/%Y')
daily_activities.Date = pd.to_datetime(daily_activities['Date'], format='%m/%d/%Y').dt.strftime('%m/%d/%Y')
weight_index.Date = pd.to_datetime(weight_index['Date'], format='%m/%d/%Y %I:%M:%S %p').dt.strftime('%m/%d/%Y')
# Change date into day of week
index_day = []
week_day = []
for i in range(len(daily_activities.Id)):
    day = dt.datetime.strptime(daily_activities.Date[i], '%m/%d/%Y')
    week_day.append(day.strftime('%A'))
    index_day.append(day.weekday())
    i = i + 1
daily_activities['DayofWeek'] = week_day
daily_activities['IndexofWeek'] = index_day
gb_daily_activities = daily_activities.groupby(by=['DayofWeek']).mean().reset_index()
gb_daily_activities.sort_values('IndexofWeek')
sns.set_theme(style='darkgrid')
plt.figure(figsize=(10, 8))
ax = sns.barplot(x='DayofWeek', y='TotalSteps', data=gb_daily_activities.sort_values('IndexofWeek'), palette='Accent_r')
ax.set_ylabel('Average Steps', fontsize=20)
ax.set_xlabel('Day of Week ', fontsize=20)
ax.set_title('Average Steps During Week ', fontsize=25)
plt.show()
hour = []
for i in range(len(hourly_steps.Id)):
    day = dt.datetime.strptime(hourly_steps.ActivityHour[i], '%m/%d/%Y %I:%M:%S %p')
    hour.append(day.strftime('%H'))
    i = i + 1
hourly_steps['Hour'] = hour
gb_hourly_steps = hourly_steps.groupby('Hour').mean().reset_index()
sns.set_theme(style='darkgrid')
plt.figure(figsize=(10, 8))
hourly_chart = sns.barplot(data=gb_hourly_steps, x='Hour', y='StepTotal')
hourly_chart.set_ylabel('Average Steps', fontsize=20)
hourly_chart.set_xlabel('Hour', fontsize=20)
hourly_chart.set_title('Hourly distribution of user steps', fontsize=25)
plt.show()
code
72068164/cell_15
[ "text_html_output_1.png" ]
import pandas as pd
daily_activities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyActivity_merged.csv')
daily_calories = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyCalories_merged.csv')
daily_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyIntensities_merged.csv')
daily_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailySteps_merged.csv')
hourly_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlySteps_merged.csv')
hourly_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlyIntensities_merged.csv')
sleep_day = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/sleepDay_merged.csv')
weight_index = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/weightLogInfo_merged.csv')
len(weight_index.Id.unique())
code
72068164/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd
daily_activities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyActivity_merged.csv')
daily_calories = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyCalories_merged.csv')
daily_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyIntensities_merged.csv')
daily_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailySteps_merged.csv')
hourly_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlySteps_merged.csv')
hourly_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlyIntensities_merged.csv')
sleep_day = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/sleepDay_merged.csv')
weight_index = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/weightLogInfo_merged.csv')
len(weight_index.Id.unique())
len(daily_activities.Id.unique())
code
72068164/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd
daily_activities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyActivity_merged.csv')
daily_calories = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyCalories_merged.csv')
daily_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyIntensities_merged.csv')
daily_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailySteps_merged.csv')
hourly_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlySteps_merged.csv')
hourly_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlyIntensities_merged.csv')
sleep_day = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/sleepDay_merged.csv')
weight_index = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/weightLogInfo_merged.csv')
len(weight_index.Id.unique())
len(daily_activities.Id.unique())
len(sleep_day.Id.unique())
code
72068164/cell_31
[ "image_output_1.png" ]
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
daily_activities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyActivity_merged.csv')
daily_calories = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyCalories_merged.csv')
daily_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyIntensities_merged.csv')
daily_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailySteps_merged.csv')
hourly_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlySteps_merged.csv')
hourly_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlyIntensities_merged.csv')
sleep_day = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/sleepDay_merged.csv')
weight_index = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/weightLogInfo_merged.csv')
len(weight_index.Id.unique())
len(daily_activities.Id.unique())
len(sleep_day.Id.unique())
sleep_day.columns = ['Id', 'Date', 'TotalSleepRecords', 'TotalMinutesAsleep', 'TotalTimeInBed']
daily_activities.columns = ['Id', 'Date', 'TotalSteps', 'TotalDistance', 'TrackerDistance', 'LoggedActivitiesDistance', 'VeryActiveDistance', 'ModeratelyActiveDistance', 'LightActiveDistance', 'SedentaryActiveDistance', 'VeryActiveMinutes', 'FairlyActiveMinutes', 'LightlyActiveMinutes', 'SedentaryMinutes', 'Calories']
sleep_day.Date = pd.to_datetime(sleep_day['Date'], format='%m/%d/%Y %I:%M:%S %p').dt.strftime('%m/%d/%Y')
daily_activities.Date = pd.to_datetime(daily_activities['Date'], format='%m/%d/%Y').dt.strftime('%m/%d/%Y')
weight_index.Date = pd.to_datetime(weight_index['Date'], format='%m/%d/%Y %I:%M:%S %p').dt.strftime('%m/%d/%Y')
# Change date into day of week
index_day = []
week_day = []
for i in range(len(daily_activities.Id)):
    day = dt.datetime.strptime(daily_activities.Date[i], '%m/%d/%Y')
    week_day.append(day.strftime('%A'))
    index_day.append(day.weekday())
    i = i + 1
daily_activities['DayofWeek'] = week_day
daily_activities['IndexofWeek'] = index_day
gb_daily_activities = daily_activities.groupby(by=['DayofWeek']).mean().reset_index()
gb_daily_activities.sort_values('IndexofWeek')
sns.set_theme(style='darkgrid')
plt.figure(figsize=(10, 8))
ax = sns.barplot(x='DayofWeek', y='TotalSteps', data=gb_daily_activities.sort_values('IndexofWeek'), palette='Accent_r')
ax.set_ylabel('Average Steps', fontsize=20)
ax.set_xlabel('Day of Week ', fontsize=20)
ax.set_title('Average Steps During Week ', fontsize=25)
plt.show()
hour = []
for i in range(len(hourly_steps.Id)):
    day = dt.datetime.strptime(hourly_steps.ActivityHour[i], '%m/%d/%Y %I:%M:%S %p')
    hour.append(day.strftime('%H'))
    i = i + 1
hourly_steps['Hour'] = hour
gb_hourly_steps = hourly_steps.groupby('Hour').mean().reset_index()
sns.set_theme(style='darkgrid')
plt.figure(figsize=(10, 8))
hourly_chart = sns.barplot(data=gb_hourly_steps, x='Hour', y='StepTotal')
hourly_chart.set_ylabel('Average Steps', fontsize=20)
hourly_chart.set_xlabel('Hour', fontsize=20)
hourly_chart.set_title('Hourly distribution of user steps', fontsize=25)
plt.show()
sleep_day['TimeTakeToSleep'] = sleep_day['TotalTimeInBed'] - sleep_day['TotalMinutesAsleep']
hourly_intensities.columns = ['Id', 'Date', 'TotalIntensity', 'AverageIntensity']
hourly_intensities.Date = pd.to_datetime(hourly_intensities['Date'], format='%m/%d/%Y %I:%M:%S %p').dt.strftime('%m/%d/%Y')
gb_hourly_intensities = hourly_intensities.groupby(['Date', 'Id']).sum().reset_index()
sleep_and_intensities = pd.merge(sleep_day, gb_hourly_intensities, on=['Date', 'Id'], how='inner')
f, axes = plt.subplots(1, 3, figsize=(12, 6))
k1 = sns.regplot(data=sleep_and_intensities, x='TimeTakeToSleep', y='TotalIntensity', ax=axes[1])
k2 = sns.regplot(data=sleep_and_intensities, x='TotalMinutesAsleep', y='TotalIntensity', ax=axes[0])
k2 = sns.regplot(data=sleep_and_intensities, x='TotalTimeInBed', y='TotalIntensity', ax=axes[2])
code
72068164/cell_24
[ "text_html_output_1.png" ]
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
daily_activities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyActivity_merged.csv')
daily_calories = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyCalories_merged.csv')
daily_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyIntensities_merged.csv')
daily_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailySteps_merged.csv')
hourly_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlySteps_merged.csv')
hourly_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlyIntensities_merged.csv')
sleep_day = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/sleepDay_merged.csv')
weight_index = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/weightLogInfo_merged.csv')
len(weight_index.Id.unique())
len(daily_activities.Id.unique())
len(sleep_day.Id.unique())
sleep_day.columns = ['Id', 'Date', 'TotalSleepRecords', 'TotalMinutesAsleep', 'TotalTimeInBed']
daily_activities.columns = ['Id', 'Date', 'TotalSteps', 'TotalDistance', 'TrackerDistance', 'LoggedActivitiesDistance', 'VeryActiveDistance', 'ModeratelyActiveDistance', 'LightActiveDistance', 'SedentaryActiveDistance', 'VeryActiveMinutes', 'FairlyActiveMinutes', 'LightlyActiveMinutes', 'SedentaryMinutes', 'Calories']
sleep_day.Date = pd.to_datetime(sleep_day['Date'], format='%m/%d/%Y %I:%M:%S %p').dt.strftime('%m/%d/%Y')
daily_activities.Date = pd.to_datetime(daily_activities['Date'], format='%m/%d/%Y').dt.strftime('%m/%d/%Y')
weight_index.Date = pd.to_datetime(weight_index['Date'], format='%m/%d/%Y %I:%M:%S %p').dt.strftime('%m/%d/%Y')
index_day = []
week_day = []
for i in range(len(daily_activities.Id)):
    day = dt.datetime.strptime(daily_activities.Date[i], '%m/%d/%Y')
    week_day.append(day.strftime('%A'))
    index_day.append(day.weekday())
    i = i + 1
daily_activities['DayofWeek'] = week_day
daily_activities['IndexofWeek'] = index_day
gb_daily_activities = daily_activities.groupby(by=['DayofWeek']).mean().reset_index()
gb_daily_activities.sort_values('IndexofWeek')
sns.set_theme(style='darkgrid')
plt.figure(figsize=(10, 8))
ax = sns.barplot(x='DayofWeek', y='TotalSteps', data=gb_daily_activities.sort_values('IndexofWeek'), palette='Accent_r')
ax.set_ylabel('Average Steps', fontsize=20)
ax.set_xlabel('Day of Week ', fontsize=20)
ax.set_title('Average Steps During Week ', fontsize=25)
plt.show()
code
72068164/cell_14
[ "text_html_output_1.png" ]
import pandas as pd
daily_activities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyActivity_merged.csv')
daily_calories = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyCalories_merged.csv')
daily_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyIntensities_merged.csv')
daily_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailySteps_merged.csv')
hourly_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlySteps_merged.csv')
hourly_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlyIntensities_merged.csv')
sleep_day = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/sleepDay_merged.csv')
weight_index = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/weightLogInfo_merged.csv')
weight_index.head()
code
72068164/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd
daily_activities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyActivity_merged.csv')
daily_calories = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyCalories_merged.csv')
daily_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyIntensities_merged.csv')
daily_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailySteps_merged.csv')
hourly_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlySteps_merged.csv')
hourly_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlyIntensities_merged.csv')
sleep_day = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/sleepDay_merged.csv')
weight_index = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/weightLogInfo_merged.csv')
len(weight_index.Id.unique())
len(daily_activities.Id.unique())
len(sleep_day.Id.unique())
sleep_day.columns = ['Id', 'Date', 'TotalSleepRecords', 'TotalMinutesAsleep', 'TotalTimeInBed']
daily_activities.columns = ['Id', 'Date', 'TotalSteps', 'TotalDistance', 'TrackerDistance', 'LoggedActivitiesDistance', 'VeryActiveDistance', 'ModeratelyActiveDistance', 'LightActiveDistance', 'SedentaryActiveDistance', 'VeryActiveMinutes', 'FairlyActiveMinutes', 'LightlyActiveMinutes', 'SedentaryMinutes', 'Calories']
sleep_day.Date = pd.to_datetime(sleep_day['Date'], format='%m/%d/%Y %I:%M:%S %p').dt.strftime('%m/%d/%Y')
daily_activities.Date = pd.to_datetime(daily_activities['Date'], format='%m/%d/%Y').dt.strftime('%m/%d/%Y')
weight_index.Date = pd.to_datetime(weight_index['Date'], format='%m/%d/%Y %I:%M:%S %p').dt.strftime('%m/%d/%Y')
daily_activities.describe()
code
72068164/cell_27
[ "image_output_1.png" ]
import pandas as pd
daily_activities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyActivity_merged.csv')
daily_calories = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyCalories_merged.csv')
daily_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyIntensities_merged.csv')
daily_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailySteps_merged.csv')
hourly_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlySteps_merged.csv')
hourly_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlyIntensities_merged.csv')
sleep_day = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/sleepDay_merged.csv')
weight_index = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/weightLogInfo_merged.csv')
hourly_steps.head()
code
72068164/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
daily_activities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyActivity_merged.csv')
daily_calories = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyCalories_merged.csv')
daily_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailyIntensities_merged.csv')
daily_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/dailySteps_merged.csv')
hourly_steps = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlySteps_merged.csv')
hourly_intensities = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/hourlyIntensities_merged.csv')
sleep_day = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/sleepDay_merged.csv')
weight_index = pd.read_csv('../input/fitbit/Fitabase Data 4.12.16-5.12.16/weightLogInfo_merged.csv')
sleep_day.head()
code
89141749/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
89141749/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
prices = pd.read_csv('../input/avocado/avocado.csv', index_col=0)
prices_2018 = prices.query("Date >= '2018-01-01' & Date <= '2018-12-31'")
prices_2018
grouped_2018 = prices_2018.groupby('region')['AveragePrice'].mean()
grouped_2018 = grouped_2018.reset_index()
grouped_2018
plt.figure(figsize=(14, 12))
sns.barplot(x=grouped_2018.AveragePrice, y=grouped_2018['region'])
plt.xlabel('')
plt.title('Average Price for Avocado, by Regions')
code
89141749/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
prices = pd.read_csv('../input/avocado/avocado.csv', index_col=0)
prices_2018 = prices.query("Date >= '2018-01-01' & Date <= '2018-12-31'")
prices_2018
code
89141749/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
prices = pd.read_csv('../input/avocado/avocado.csv', index_col=0)
prices_2018 = prices.query("Date >= '2018-01-01' & Date <= '2018-12-31'")
prices_2018
grouped_2018 = prices_2018.groupby('region')['AveragePrice'].mean()
grouped_2018 = grouped_2018.reset_index()
grouped_2018
code
17133772/cell_30
[ "text_html_output_1.png" ]
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
import pandas as pd
ks = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', parse_dates=['deadline', 'launched'])
pd.unique(ks.state)
ks.groupby('state')['ID'].count()
ks = ks.query('state != "live"')
ks = ks.assign(outcome=(ks['state'] == 'successful').astype(int))
ks = ks.assign(hour=ks.launched.dt.hour, day=ks.launched.dt.day, month=ks.launched.dt.month, year=ks.launched.dt.year)
cat_features = ['category', 'currency', 'country']
encoder = LabelEncoder()
encoded = ks[cat_features].apply(encoder.fit_transform)
data = ks[['goal', 'hour', 'day', 'month', 'year', 'outcome']].join(encoded)
valid_fraction = 0.1
valid_size = int(len(data) * valid_fraction)
train = data[:-2 * valid_size]
valid = data[-2 * valid_size:-valid_size]
test = data[-valid_size:]
import lightgbm as lgb
feature_cols = train.columns.drop('outcome')
dtrain = lgb.Dataset(train[feature_cols], label=train['outcome'])
dvalid = lgb.Dataset(valid[feature_cols], label=valid['outcome'])
param = {'num_leaves': 64, 'objective': 'binary'}
param['metric'] = 'auc'
num_round = 1000
bst = lgb.train(param, dtrain, num_round, valid_sets=[dvalid], early_stopping_rounds=10, verbose_eval=False)
ypred = bst.predict(test[feature_cols])
score = metrics.roc_auc_score(test['outcome'], ypred)
print(f'Test AUC score: {score}')
code
17133772/cell_20
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd
ks = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', parse_dates=['deadline', 'launched'])
pd.unique(ks.state)
ks.groupby('state')['ID'].count()
ks = ks.query('state != "live"')
ks = ks.assign(outcome=(ks['state'] == 'successful').astype(int))
ks = ks.assign(hour=ks.launched.dt.hour, day=ks.launched.dt.day, month=ks.launched.dt.month, year=ks.launched.dt.year)
cat_features = ['category', 'currency', 'country']
encoder = LabelEncoder()
encoded = ks[cat_features].apply(encoder.fit_transform)
data = ks[['goal', 'hour', 'day', 'month', 'year', 'outcome']].join(encoded)
data.head()
code
17133772/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
ks = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', parse_dates=['deadline', 'launched'])
ks.head(10)
code
17133772/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
ks = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', parse_dates=['deadline', 'launched'])
pd.unique(ks.state)
code
17133772/cell_17
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd
ks = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', parse_dates=['deadline', 'launched'])
pd.unique(ks.state)
ks.groupby('state')['ID'].count()
ks = ks.query('state != "live"')
ks = ks.assign(outcome=(ks['state'] == 'successful').astype(int))
ks = ks.assign(hour=ks.launched.dt.hour, day=ks.launched.dt.day, month=ks.launched.dt.month, year=ks.launched.dt.year)
cat_features = ['category', 'currency', 'country']
encoder = LabelEncoder()
encoded = ks[cat_features].apply(encoder.fit_transform)
encoded.head(10)
code
17133772/cell_24
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd
ks = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', parse_dates=['deadline', 'launched'])
pd.unique(ks.state)
ks.groupby('state')['ID'].count()
ks = ks.query('state != "live"')
ks = ks.assign(outcome=(ks['state'] == 'successful').astype(int))
ks = ks.assign(hour=ks.launched.dt.hour, day=ks.launched.dt.day, month=ks.launched.dt.month, year=ks.launched.dt.year)
cat_features = ['category', 'currency', 'country']
encoder = LabelEncoder()
encoded = ks[cat_features].apply(encoder.fit_transform)
data = ks[['goal', 'hour', 'day', 'month', 'year', 'outcome']].join(encoded)
valid_fraction = 0.1
valid_size = int(len(data) * valid_fraction)
train = data[:-2 * valid_size]
valid = data[-2 * valid_size:-valid_size]
test = data[-valid_size:]
for each in [train, valid, test]:
    print(f'Outcome fraction = {each.outcome.mean():.4f}')
code
17133772/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
ks = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', parse_dates=['deadline', 'launched'])
pd.unique(ks.state)
ks.groupby('state')['ID'].count()
ks = ks.query('state != "live"')
ks = ks.assign(outcome=(ks['state'] == 'successful').astype(int))
ks = ks.assign(hour=ks.launched.dt.hour, day=ks.launched.dt.day, month=ks.launched.dt.month, year=ks.launched.dt.year)
ks.head()
code
17133772/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
ks = pd.read_csv('../input/kickstarter-projects/ks-projects-201801.csv', parse_dates=['deadline', 'launched'])
pd.unique(ks.state)
ks.groupby('state')['ID'].count()
code
89141713/cell_9
[ "image_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
submission = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
train.head()
code
89141713/cell_25
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
submission = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
train.isnull().sum()
sns.set_style('whitegrid')
fig = plt.figure(figsize=(12, 8))
# x distribution
ax1 = fig.add_subplot(3, 2, 1)
sns.countplot(x='x', data=train, palette='Set2')
plt.xticks()
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.set_xlabel('x value', fontsize=14, labelpad=10)
ax1.set_ylabel('Count', fontsize=14, labelpad=10)
ax1.set_title('Distribution of x - Train Set', loc='center', fontsize=14, fontweight='bold')
ax2 = fig.add_subplot(3, 2, 2)
sns.countplot(x='x', data=test, palette='Set2')
plt.xticks()
ax2.spines['right'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax2.set_xlabel('x value', fontsize=14, labelpad=10)
ax2.set_ylabel('Count', fontsize=14, labelpad=10)
ax2.set_title('Distribution of x - Test Set', loc='center', fontsize=14, fontweight='bold')
# y distribution
ax3 = fig.add_subplot(3, 2, 3)
sns.countplot(x='y', data=train, palette='Set2')
plt.xticks()
ax3.spines['right'].set_visible(False)
ax3.spines['top'].set_visible(False)
ax3.set_xlabel('y value', fontsize=14, labelpad=10)
ax3.set_ylabel('Count', fontsize=14, labelpad=10)
ax3.set_title('Distribution of y - Train Set', loc='center', fontsize=14, fontweight='bold')
ax4 = fig.add_subplot(3, 2, 4)
sns.countplot(x='y', data=test, palette='Set2')
plt.xticks()
ax4.spines['right'].set_visible(False)
ax4.spines['top'].set_visible(False)
ax4.set_xlabel('y value', fontsize=14, labelpad=10)
ax4.set_ylabel('Count', fontsize=14, labelpad=10)
ax4.set_title('Distribution of y - Test Set', loc='center', fontsize=14, fontweight='bold')
# direction distribution
ax5 = fig.add_subplot(3, 2, 5)
sns.countplot(x='direction', data=train, palette='Set2')
plt.xticks()
ax5.spines['right'].set_visible(False)
ax5.spines['top'].set_visible(False)
ax5.set_xlabel('direction', fontsize=14, labelpad=10)
ax5.set_ylabel('Count', fontsize=14, labelpad=10)
ax5.set_title('Distribution of direction - Train Set', loc='center', fontsize=14, fontweight='bold')
ax6 = fig.add_subplot(3, 2, 6)
sns.countplot(x='direction', data=test, palette='Set2')
plt.xticks()
ax6.spines['right'].set_visible(False)
ax6.spines['top'].set_visible(False)
ax6.set_xlabel('direction', fontsize=14, labelpad=10)
ax6.set_ylabel('Count', fontsize=14, labelpad=10)
ax6.set_title('Distribution of direction - Test Set', loc='center', fontsize=14, fontweight='bold')
fig.tight_layout()
categorical_columns = train[['x', 'y', 'direction']].columns.to_numpy()
fig = plt.figure(figsize=(10, 9))
rows = 3
cols = 1
for idx, categorical_column in enumerate(categorical_columns):
    ax = fig.add_subplot(rows, cols, idx + 1)
    sns.barplot(x=categorical_column, y='congestion', data=train.groupby(categorical_column).mean('congestion')['congestion'].reset_index(), palette='Set2')
    ax.xaxis.set_tick_params(labelsize=10, size=0, pad=5)
    ax.yaxis.set_tick_params(labelsize=10, size=0, pad=5)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.set_ylabel('congestion')
    ax.set_xlabel(categorical_column + ' value')
    ax.bar_label(ax.containers[0])
    ax.set_title('Average congestion by ' + categorical_column, loc='center', fontsize=14, fontweight='bold', pad=20)
    ax.legend()
fig.tight_layout()
fig.show()
code
89141713/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
submission = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
train.isnull().sum()
code
89141713/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
89141713/cell_28
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
submission = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
train.isnull().sum()
sns.set_style('whitegrid')
fig = plt.figure(figsize=(12, 8))
# x distribution
ax1 = fig.add_subplot(3, 2, 1)
sns.countplot(x='x', data=train, palette='Set2')
plt.xticks()
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.set_xlabel('x value', fontsize=14, labelpad=10)
ax1.set_ylabel('Count', fontsize=14, labelpad=10)
ax1.set_title('Distribution of x - Train Set', loc='center', fontsize=14, fontweight='bold')
ax2 = fig.add_subplot(3, 2, 2)
sns.countplot(x='x', data=test, palette='Set2')
plt.xticks()
ax2.spines['right'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax2.set_xlabel('x value', fontsize=14, labelpad=10)
ax2.set_ylabel('Count', fontsize=14, labelpad=10)
ax2.set_title('Distribution of x - Test Set', loc='center', fontsize=14, fontweight='bold')
# y distribution
ax3 = fig.add_subplot(3, 2, 3)
sns.countplot(x='y', data=train, palette='Set2')
plt.xticks()
ax3.spines['right'].set_visible(False)
ax3.spines['top'].set_visible(False)
ax3.set_xlabel('y value', fontsize=14, labelpad=10)
ax3.set_ylabel('Count', fontsize=14, labelpad=10)
ax3.set_title('Distribution of y - Train Set', loc='center', fontsize=14, fontweight='bold')
ax4 = fig.add_subplot(3, 2, 4)
sns.countplot(x='y', data=test, palette='Set2')
plt.xticks()
ax4.spines['right'].set_visible(False)
ax4.spines['top'].set_visible(False)
ax4.set_xlabel('y value', fontsize=14, labelpad=10)
ax4.set_ylabel('Count', fontsize=14, labelpad=10)
ax4.set_title('Distribution of y - Test Set', loc='center', fontsize=14, fontweight='bold')
# direction distribution
ax5 = fig.add_subplot(3, 2, 5)
sns.countplot(x='direction', data=train, palette='Set2')
plt.xticks()
ax5.spines['right'].set_visible(False)
ax5.spines['top'].set_visible(False)
ax5.set_xlabel('direction', fontsize=14, labelpad=10)
ax5.set_ylabel('Count', fontsize=14, labelpad=10)
ax5.set_title('Distribution of direction - Train Set', loc='center', fontsize=14, fontweight='bold')
ax6 = fig.add_subplot(3, 2, 6)
sns.countplot(x='direction', data=test, palette='Set2')
plt.xticks()
ax6.spines['right'].set_visible(False)
ax6.spines['top'].set_visible(False)
ax6.set_xlabel('direction', fontsize=14, labelpad=10)
ax6.set_ylabel('Count', fontsize=14, labelpad=10)
ax6.set_title('Distribution of direction - Test Set', loc='center', fontsize=14, fontweight='bold')
fig.tight_layout()
categorical_columns = train[['x', 'y', 'direction']].columns.to_numpy()
fig = plt.figure(figsize=(10, 9))
rows = 3
cols = 1
for idx, categorical_column in enumerate(categorical_columns):
    ax = fig.add_subplot(rows, cols, idx + 1)
    sns.barplot(x=categorical_column, y='congestion', data=train.groupby(categorical_column).mean('congestion')['congestion'].reset_index(), palette='Set2')
    ax.xaxis.set_tick_params(labelsize=10, size=0, pad=5)
    ax.yaxis.set_tick_params(labelsize=10, size=0, pad=5)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.set_ylabel('congestion')
    ax.set_xlabel(categorical_column + ' value')
    ax.bar_label(ax.containers[0])
    ax.set_title('Average congestion by ' + categorical_column, loc='center', fontsize=14, fontweight='bold', pad=20)
    ax.legend()
fig.tight_layout()
fig.show()
train.dtypes
fig = plt.figure(figsize=(12, 3))
ax = fig.add_subplot(1, 1, 1)
sns.lineplot(x=train['time'].dt.date, y='congestion', data=train)
ax.xaxis.set_tick_params(labelsize=10, size=0, pad=5)
ax.yaxis.set_tick_params(labelsize=10, size=0, pad=5)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_ylabel('congestion')
ax.set_xlabel('date')
ax.set_title('Average congestion by date', loc='center', fontsize=14, fontweight='bold', pad=20)
fig.tight_layout()
fig.show()
code
89141713/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
submission = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
test.head()
code
89141713/cell_16
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
submission = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
print(f'\x1b[92mNumber of rows in the table: {test.shape[0]}')
print(f'\x1b[94mNumber of columns in the table: {test.shape[1]}')
print(f'\x1b[91mNumber of observations in the table: {test.count().sum()}')
print(f'\x1b[91mNumber of missing values in the table: {sum(test.isnull().sum())}')
print(f'\x1b[91mNumber of duplicated records: {test.duplicated().sum()}')
print()
print(f'\x1b[95mData types')
print(f'\x1b[90m{test.dtypes}')
print()
print(f'\x1b[95mData type counts')
print(f'\x1b[90m{test.dtypes.value_counts()}')
print()
print(f'\x1b[95mUnique value in each column')
print(f'\x1b[90m{test.nunique()}')
code
89141713/cell_17
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
submission = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
train.isnull().sum()
print('Test set rows / Train set rows = ' + str(round(test.count()[0] / train.count()[0] * 100, 2)))
code
89141713/cell_22
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
submission = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
train.isnull().sum()
sns.set_style('whitegrid')
fig = plt.figure(figsize=(12, 8))
ax1 = fig.add_subplot(3, 2, 1)
sns.countplot(x='x', data=train, palette='Set2')
plt.xticks()
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.set_xlabel('x value', fontsize=14, labelpad=10)
ax1.set_ylabel('Count', fontsize=14, labelpad=10)
ax1.set_title('Distribution of x - Train Set', loc='center', fontsize=14, fontweight='bold')
ax2 = fig.add_subplot(3, 2, 2)
sns.countplot(x='x', data=test, palette='Set2')
plt.xticks()
ax2.spines['right'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax2.set_xlabel('x value', fontsize=14, labelpad=10)
ax2.set_ylabel('Count', fontsize=14, labelpad=10)
ax2.set_title('Distribution of x - Test Set', loc='center', fontsize=14, fontweight='bold')
ax3 = fig.add_subplot(3, 2, 3)
sns.countplot(x='y', data=train, palette='Set2')
plt.xticks()
ax3.spines['right'].set_visible(False)
ax3.spines['top'].set_visible(False)
ax3.set_xlabel('y value', fontsize=14, labelpad=10)
ax3.set_ylabel('Count', fontsize=14, labelpad=10)
ax3.set_title('Distribution of y - Train Set', loc='center', fontsize=14, fontweight='bold')
ax4 = fig.add_subplot(3, 2, 4)
sns.countplot(x='y', data=test, palette='Set2')
plt.xticks()
ax4.spines['right'].set_visible(False)
ax4.spines['top'].set_visible(False)
ax4.set_xlabel('y value', fontsize=14, labelpad=10)
ax4.set_ylabel('Count', fontsize=14, labelpad=10)
ax4.set_title('Distribution of y - Test Set', loc='center', fontsize=14, fontweight='bold')
ax5 = fig.add_subplot(3, 2, 5)
sns.countplot(x='direction', data=train, palette='Set2')
plt.xticks()
ax5.spines['right'].set_visible(False)
ax5.spines['top'].set_visible(False)
ax5.set_xlabel('direction', fontsize=14, labelpad=10)
ax5.set_ylabel('Count', fontsize=14, labelpad=10)
ax5.set_title('Distribution of direction - Train Set', loc='center', fontsize=14, fontweight='bold')
ax6 = fig.add_subplot(3, 2, 6)
sns.countplot(x='direction', data=test, palette='Set2')
plt.xticks()
ax6.spines['right'].set_visible(False)
ax6.spines['top'].set_visible(False)
ax6.set_xlabel('direction', fontsize=14, labelpad=10)
ax6.set_ylabel('Count', fontsize=14, labelpad=10)
ax6.set_title('Distribution of direction - Test Set', loc='center', fontsize=14, fontweight='bold')
fig.tight_layout()
code
89141713/cell_27
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
submission = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
train.isnull().sum()
sns.set_style('whitegrid')
fig = plt.figure(figsize = (12, 8))
# x distribution
ax1 = fig.add_subplot(3, 2, 1)
sns.countplot(x = 'x', data = train, palette="Set2")
plt.xticks()
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.set_xlabel("x value", fontsize=14, labelpad=10)
ax1.set_ylabel("Count", fontsize=14, labelpad=10)
ax1.set_title('Distribution of x - Train Set', loc = 'center', fontsize = 14, fontweight = 'bold')
ax2 = fig.add_subplot(3, 2, 2)
sns.countplot(x = 'x', data = test, palette="Set2")
plt.xticks()
ax2.spines['right'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax2.set_xlabel("x value", fontsize=14, labelpad=10)
ax2.set_ylabel("Count", fontsize=14, labelpad=10)
ax2.set_title('Distribution of x - Test Set', loc = 'center', fontsize = 14, fontweight = 'bold')
# y distribution
ax3 = fig.add_subplot(3, 2, 3)
sns.countplot(x = 'y', data = train, palette="Set2")
plt.xticks()
ax3.spines['right'].set_visible(False)
ax3.spines['top'].set_visible(False)
ax3.set_xlabel("y value", fontsize=14, labelpad=10)
ax3.set_ylabel("Count", fontsize=14, labelpad=10)
ax3.set_title('Distribution of y - Train Set', loc = 'center', fontsize = 14, fontweight = 'bold')
ax4 = fig.add_subplot(3, 2, 4)
sns.countplot(x = 'y', data = test, palette="Set2")
plt.xticks()
ax4.spines['right'].set_visible(False)
ax4.spines['top'].set_visible(False)
ax4.set_xlabel("y value", fontsize=14, labelpad=10)
ax4.set_ylabel("Count", fontsize=14, labelpad=10)
ax4.set_title('Distribution of y - Test Set', loc = 'center', fontsize = 14, fontweight = 'bold')
# direction distribution
ax5 = fig.add_subplot(3, 2, 5)
sns.countplot(x = 'direction', data = train, palette="Set2")
plt.xticks()
ax5.spines['right'].set_visible(False)
ax5.spines['top'].set_visible(False)
ax5.set_xlabel("direction", fontsize=14, labelpad=10)
ax5.set_ylabel("Count", fontsize=14, labelpad=10)
ax5.set_title('Distribution of direction - Train Set', loc = 'center', fontsize = 14, fontweight = 'bold')
ax6 = fig.add_subplot(3, 2, 6)
sns.countplot(x = 'direction', data = test, palette="Set2")
plt.xticks()
ax6.spines['right'].set_visible(False)
ax6.spines['top'].set_visible(False)
ax6.set_xlabel("direction", fontsize=14, labelpad=10)
ax6.set_ylabel("Count", fontsize=14, labelpad=10)
ax6.set_title('Distribution of direction - Test Set', loc = 'center', fontsize = 14, fontweight = 'bold')
fig.tight_layout()
categorical_columns = train[['x', 'y', 'direction']].columns.to_numpy()
fig = plt.figure(figsize = (10, 9))
rows = 3
cols = 1
for idx, categorical_column in enumerate(categorical_columns):
    ax = fig.add_subplot(rows, cols, idx + 1)
    sns.barplot(x = categorical_column, y = 'congestion', data = train.groupby(categorical_column).mean('congestion')['congestion'].reset_index(), palette = 'Set2')
    ax.xaxis.set_tick_params(labelsize=10, size=0, pad=5)
    ax.yaxis.set_tick_params(labelsize=10, size=0, pad=5)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.set_ylabel('congestion')
    ax.set_xlabel(categorical_column + " value")
    ax.bar_label(ax.containers[0])
    ax.set_title('Average congestion by ' + categorical_column, loc = 'center', fontsize = 14, fontweight = 'bold', pad = 20)
    ax.legend()
fig.tight_layout()
fig.show()
train.dtypes
code
89141713/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/train.csv')
test = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
submission = pd.read_csv('/kaggle/input/tabular-playground-series-mar-2022/test.csv')
train.isnull().sum()
train.describe()
code
89141713/cell_5
[ "image_output_1.png" ]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from scipy.stats import mode
from xgboost import XGBClassifier
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import VotingClassifier
from datetime import datetime
import warnings
import time
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('float_format', '{:f}'.format)
warnings.filterwarnings('ignore')
RANDOM_STATE = 14
FOLDS = 5
code
106212246/cell_9
[ "image_output_1.png" ]
y_train
code
106212246/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/adl-classification/dataset.csv', names=['MQ1', 'MQ2', 'MQ3', 'MQ4', 'MQ5', 'MQ6', 'CO2'])
data.info()
code
106212246/cell_11
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(random_state=1)
model.fit(X_train, y_train)
code
106212246/cell_8
[ "text_plain_output_1.png" ]
X_train
code
106212246/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/adl-classification/dataset.csv', names=['MQ1', 'MQ2', 'MQ3', 'MQ4', 'MQ5', 'MQ6', 'CO2'])
data
code
106212246/cell_14
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
import shap
model = RandomForestClassifier(random_state=1)
model.fit(X_train, y_train)
acc = model.score(X_test, y_test)
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(X_test)
shap.summary_plot(shap_values, X_test, class_names=model.classes_)
code
106212246/cell_12
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(random_state=1)
model.fit(X_train, y_train)
acc = model.score(X_test, y_test)
print('Accuracy {:.2f}%'.format(acc * 100))
code
1005815/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import preprocessing
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import log_loss
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb
import numpy as np
import pandas as pd
import scipy
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from subprocess import check_output
df = pd.read_json('../input/train.json')
df['priceperbed'] = df['price'].clip(upper=7000) / df['bedrooms'].clip(lower=1)
df['created'] = df['created'].astype(np.datetime64)
df['created_day'] = np.array(df.created.values, dtype='datetime64[D]').astype(np.float32) % 7
df['created_week'] = np.array(df.created.values, dtype='datetime64[W]').astype(np.float32)
df['created_hour'] = np.array(df.created.values, dtype='datetime64[h]').astype(np.float32) % 24
df['desc_count'] = df.description.apply(lambda x: len(x.split())).clip(upper=150)
df['features_count'] = df.features.apply(lambda x: len(x))
df['photos_count'] = df.photos.apply(lambda x: len(x))
lbl = preprocessing.LabelEncoder()
lbl.fit(list(df['manager_id'].values))
df['manager_id'] = lbl.transform(list(df['manager_id'].values))
feature_list = ['no fee', 'hardwood floors', 'laundry in building']
df['features'] = df['features'].apply(lambda x: list(map(str.lower, x)))
for feature in feature_list:
    df[feature] = df['features'].apply(lambda x: feature in x)
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')
vectorizer.fit(df.description.values)
temp = pd.concat([df_train.manager_id, pd.get_dummies(df_train.interest_level)], axis=1).groupby('manager_id').mean()
temp.columns = ['high_frac', 'low_frac', 'medium_frac']
temp['count'] = df_train.groupby('manager_id').count().iloc[:, 1]
temp['manager_skill'] = temp['high_frac'] * 2 + temp['medium_frac']
unranked_managers_ixes = temp['count'] < 8
ranked_managers_ixes = ~unranked_managers_ixes
mean_values = temp.loc[ranked_managers_ixes, ['high_frac', 'low_frac', 'medium_frac', 'manager_skill']].mean()
temp.loc[unranked_managers_ixes, ['high_frac', 'low_frac', 'medium_frac', 'manager_skill']] = mean_values.values
df_train = df_train.merge(temp.reset_index(), how='left', on='manager_id')
df_val = df_val.merge(temp.reset_index(), how='left', on='manager_id')
new_manager_ixes = df_val['high_frac'].isnull()
df_val.loc[new_manager_ixes, ['high_frac', 'low_frac', 'medium_frac', 'manager_skill']] = mean_values.values
df_test = df_test.merge(temp.reset_index(), how='left', on='manager_id')
new_manager_ixes = df_test['high_frac'].isnull()
df_test.loc[new_manager_ixes, ['high_frac', 'low_frac', 'medium_frac', 'manager_skill']] = mean_values.values
derived_cols = ['derived_' + str(i) for i in range(5)]
cols = ['price', 'bathrooms', 'bedrooms', 'latitude', 'longitude', 'priceperbed', 'created_hour', 'desc_count', 'photos_count', 'features_count', 'no fee', 'hardwood floors', 'laundry in building', 'manager_skill']
svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
X_train = svd.fit_transform(vectorizer.transform(df_train.description))
X_train = np.hstack([X_train, df_train[cols].values])
X_val = svd.transform(vectorizer.transform(df_val.description))
X_val = np.hstack([X_val, df_val[cols].values])
X_test = svd.transform(vectorizer.transform(df_test.description))
X_test = np.hstack([X_test, df_test[cols].values])
target_num_map = {'high': 0, 'low': 1, 'medium': 2}
y_train = np.array(df_train['interest_level'].apply(lambda x: target_num_map[x]))
y_test = np.array(df_test['interest_level'].apply(lambda x: target_num_map[x]))
y_val = np.array(df_val['interest_level'].apply(lambda x: target_num_map[x]))
import xgboost as xgb
SEED = 0
params = {'eta': 0.01, 'colsample_bytree': 0.8, 'subsample': 0.8, 'seed': 0, 'nthread': 16, 'objective': 'multi:softprob', 'eval_metric': 'mlogloss', 'num_class': 3}
dtrain = xgb.DMatrix(data=X_train, label=y_train)
bst = xgb.train(params, dtrain, 1000, verbose_eval=25)
y_pred = bst.predict(dtrain)
score = log_loss(df_train['interest_level'].values, y_pred)
xgval = xgb.DMatrix(X_val)
y_pred = bst.predict(xgval)
score2 = log_loss(df_val['interest_level'].values, y_pred)
xgtest = xgb.DMatrix(X_test)
y_pred = bst.predict(xgtest)
score3 = log_loss(df_test['interest_level'].values, y_pred)
imps = bst.get_fscore()
imps_sorted = [imps[i] for i in sorted(imps.keys(), key=lambda x: int(x[1:]))]
pd.Series(index=derived_cols + cols, data=imps_sorted).sort_values().plot(kind='bar')
code
1005815/cell_1
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn import preprocessing
from sklearn.feature_extraction.text import TfidfVectorizer
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import scipy
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
df = pd.read_json('../input/train.json')
df['priceperbed'] = df['price'].clip(upper=7000) / df['bedrooms'].clip(lower=1)
df['created'] = df['created'].astype(np.datetime64)
df['created_day'] = np.array(df.created.values, dtype='datetime64[D]').astype(np.float32) % 7
df['created_week'] = np.array(df.created.values, dtype='datetime64[W]').astype(np.float32)
df['created_hour'] = np.array(df.created.values, dtype='datetime64[h]').astype(np.float32) % 24
df['desc_count'] = df.description.apply(lambda x: len(x.split())).clip(upper=150)
df['features_count'] = df.features.apply(lambda x: len(x))
df['photos_count'] = df.photos.apply(lambda x: len(x))
lbl = preprocessing.LabelEncoder()
lbl.fit(list(df['manager_id'].values))
df['manager_id'] = lbl.transform(list(df['manager_id'].values))
feature_list = ['no fee', 'hardwood floors', 'laundry in building']
df['features'] = df['features'].apply(lambda x: list(map(str.lower, x)))
for feature in feature_list:
    df[feature] = df['features'].apply(lambda x: feature in x)
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')
vectorizer.fit(df.description.values)
code
1005815/cell_7
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import log_loss
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb
import numpy as np
import pandas as pd
import scipy
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from subprocess import check_output
df = pd.read_json('../input/train.json')
df['priceperbed'] = df['price'].clip(upper=7000) / df['bedrooms'].clip(lower=1)
df['created'] = df['created'].astype(np.datetime64)
df['created_day'] = np.array(df.created.values, dtype='datetime64[D]').astype(np.float32) % 7
df['created_week'] = np.array(df.created.values, dtype='datetime64[W]').astype(np.float32)
df['created_hour'] = np.array(df.created.values, dtype='datetime64[h]').astype(np.float32) % 24
df['desc_count'] = df.description.apply(lambda x: len(x.split())).clip(upper=150)
df['features_count'] = df.features.apply(lambda x: len(x))
df['photos_count'] = df.photos.apply(lambda x: len(x))
lbl = preprocessing.LabelEncoder()
lbl.fit(list(df['manager_id'].values))
df['manager_id'] = lbl.transform(list(df['manager_id'].values))
feature_list = ['no fee', 'hardwood floors', 'laundry in building']
df['features'] = df['features'].apply(lambda x: list(map(str.lower, x)))
for feature in feature_list:
    df[feature] = df['features'].apply(lambda x: feature in x)
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')
vectorizer.fit(df.description.values)
temp = pd.concat([df_train.manager_id, pd.get_dummies(df_train.interest_level)], axis=1).groupby('manager_id').mean()
temp.columns = ['high_frac', 'low_frac', 'medium_frac']
temp['count'] = df_train.groupby('manager_id').count().iloc[:, 1]
temp['manager_skill'] = temp['high_frac'] * 2 + temp['medium_frac']
unranked_managers_ixes = temp['count'] < 8
ranked_managers_ixes = ~unranked_managers_ixes
mean_values = temp.loc[ranked_managers_ixes, ['high_frac', 'low_frac', 'medium_frac', 'manager_skill']].mean()
temp.loc[unranked_managers_ixes, ['high_frac', 'low_frac', 'medium_frac', 'manager_skill']] = mean_values.values
df_train = df_train.merge(temp.reset_index(), how='left', on='manager_id')
df_val = df_val.merge(temp.reset_index(), how='left', on='manager_id')
new_manager_ixes = df_val['high_frac'].isnull()
df_val.loc[new_manager_ixes, ['high_frac', 'low_frac', 'medium_frac', 'manager_skill']] = mean_values.values
df_test = df_test.merge(temp.reset_index(), how='left', on='manager_id')
new_manager_ixes = df_test['high_frac'].isnull()
df_test.loc[new_manager_ixes, ['high_frac', 'low_frac', 'medium_frac', 'manager_skill']] = mean_values.values
derived_cols = ['derived_' + str(i) for i in range(5)]
cols = ['price', 'bathrooms', 'bedrooms', 'latitude', 'longitude', 'priceperbed', 'created_hour', 'desc_count', 'photos_count', 'features_count', 'no fee', 'hardwood floors', 'laundry in building', 'manager_skill']
svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
X_train = svd.fit_transform(vectorizer.transform(df_train.description))
X_train = np.hstack([X_train, df_train[cols].values])
X_val = svd.transform(vectorizer.transform(df_val.description))
X_val = np.hstack([X_val, df_val[cols].values])
X_test = svd.transform(vectorizer.transform(df_test.description))
X_test = np.hstack([X_test, df_test[cols].values])
target_num_map = {'high': 0, 'low': 1, 'medium': 2}
y_train = np.array(df_train['interest_level'].apply(lambda x: target_num_map[x]))
y_test = np.array(df_test['interest_level'].apply(lambda x: target_num_map[x]))
y_val = np.array(df_val['interest_level'].apply(lambda x: target_num_map[x]))
import xgboost as xgb
SEED = 0
params = {'eta': 0.01, 'colsample_bytree': 0.8, 'subsample': 0.8, 'seed': 0, 'nthread': 16, 'objective': 'multi:softprob', 'eval_metric': 'mlogloss', 'num_class': 3}
dtrain = xgb.DMatrix(data=X_train, label=y_train)
bst = xgb.train(params, dtrain, 1000, verbose_eval=25)
y_pred = bst.predict(dtrain)
score = log_loss(df_train['interest_level'].values, y_pred)
xgval = xgb.DMatrix(X_val)
y_pred = bst.predict(xgval)
score2 = log_loss(df_val['interest_level'].values, y_pred)
xgtest = xgb.DMatrix(X_test)
y_pred = bst.predict(xgtest)
score3 = log_loss(df_test['interest_level'].values, y_pred)
imps = bst.get_fscore()
imps_sorted = [imps[i] for i in sorted(imps.keys(), key=lambda x: int(x[1:]))]
pd.Series(index=derived_cols + cols, data=imps_sorted).sort_values().plot(kind='bar')
imps_sorted
code
1005815/cell_8
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import log_loss
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb
import numpy as np
import pandas as pd
import scipy
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from subprocess import check_output
df = pd.read_json('../input/train.json')
df['priceperbed'] = df['price'].clip(upper=7000) / df['bedrooms'].clip(lower=1)
df['created'] = df['created'].astype(np.datetime64)
df['created_day'] = np.array(df.created.values, dtype='datetime64[D]').astype(np.float32) % 7
df['created_week'] = np.array(df.created.values, dtype='datetime64[W]').astype(np.float32)
df['created_hour'] = np.array(df.created.values, dtype='datetime64[h]').astype(np.float32) % 24
df['desc_count'] = df.description.apply(lambda x: len(x.split())).clip(upper=150)
df['features_count'] = df.features.apply(lambda x: len(x))
df['photos_count'] = df.photos.apply(lambda x: len(x))
lbl = preprocessing.LabelEncoder()
lbl.fit(list(df['manager_id'].values))
df['manager_id'] = lbl.transform(list(df['manager_id'].values))
feature_list = ['no fee', 'hardwood floors', 'laundry in building']
df['features'] = df['features'].apply(lambda x: list(map(str.lower, x)))
for feature in feature_list:
    df[feature] = df['features'].apply(lambda x: feature in x)
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')
vectorizer.fit(df.description.values)
temp = pd.concat([df_train.manager_id, pd.get_dummies(df_train.interest_level)], axis=1).groupby('manager_id').mean()
temp.columns = ['high_frac', 'low_frac', 'medium_frac']
temp['count'] = df_train.groupby('manager_id').count().iloc[:, 1]
temp['manager_skill'] = temp['high_frac'] * 2 + temp['medium_frac']
unranked_managers_ixes = temp['count'] < 8
ranked_managers_ixes = ~unranked_managers_ixes
mean_values = temp.loc[ranked_managers_ixes, ['high_frac', 'low_frac', 'medium_frac', 'manager_skill']].mean()
temp.loc[unranked_managers_ixes, ['high_frac', 'low_frac', 'medium_frac', 'manager_skill']] = mean_values.values
df_train = df_train.merge(temp.reset_index(), how='left', on='manager_id')
df_val = df_val.merge(temp.reset_index(), how='left', on='manager_id')
new_manager_ixes = df_val['high_frac'].isnull()
df_val.loc[new_manager_ixes, ['high_frac', 'low_frac', 'medium_frac', 'manager_skill']] = mean_values.values
df_test = df_test.merge(temp.reset_index(), how='left', on='manager_id')
new_manager_ixes = df_test['high_frac'].isnull()
df_test.loc[new_manager_ixes, ['high_frac', 'low_frac', 'medium_frac', 'manager_skill']] = mean_values.values
derived_cols = ['derived_' + str(i) for i in range(5)]
cols = ['price', 'bathrooms', 'bedrooms', 'latitude', 'longitude', 'priceperbed', 'created_hour', 'desc_count', 'photos_count', 'features_count', 'no fee', 'hardwood floors', 'laundry in building', 'manager_skill']
svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
X_train = svd.fit_transform(vectorizer.transform(df_train.description))
X_train = np.hstack([X_train, df_train[cols].values])
X_val = svd.transform(vectorizer.transform(df_val.description))
X_val = np.hstack([X_val, df_val[cols].values])
X_test = svd.transform(vectorizer.transform(df_test.description))
X_test = np.hstack([X_test, df_test[cols].values])
target_num_map = {'high': 0, 'low': 1, 'medium': 2}
y_train = np.array(df_train['interest_level'].apply(lambda x: target_num_map[x]))
y_test = np.array(df_test['interest_level'].apply(lambda x: target_num_map[x]))
y_val = np.array(df_val['interest_level'].apply(lambda x: target_num_map[x]))
import xgboost as xgb
SEED = 0
params = {'eta': 0.01, 'colsample_bytree': 0.8, 'subsample': 0.8, 'seed': 0, 'nthread': 16, 'objective': 'multi:softprob', 'eval_metric': 'mlogloss', 'num_class': 3}
dtrain = xgb.DMatrix(data=X_train, label=y_train)
bst = xgb.train(params, dtrain, 1000, verbose_eval=25)
y_pred = bst.predict(dtrain)
score = log_loss(df_train['interest_level'].values, y_pred)
xgval = xgb.DMatrix(X_val)
y_pred = bst.predict(xgval)
score2 = log_loss(df_val['interest_level'].values, y_pred)
xgtest = xgb.DMatrix(X_test)
y_pred = bst.predict(xgtest)
score3 = log_loss(df_test['interest_level'].values, y_pred)
imps = bst.get_fscore()
imps_sorted = [imps[i] for i in sorted(imps.keys(), key=lambda x: int(x[1:]))]
pd.Series(index=derived_cols + cols, data=imps_sorted).sort_values().plot(kind='bar')
imps
code
1005815/cell_5
[ "text_plain_output_1.png" ]
from sklearn import preprocessing
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import log_loss
from subprocess import check_output
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import xgboost as xgb
import numpy as np
import pandas as pd
import scipy
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from subprocess import check_output
df = pd.read_json('../input/train.json')
df['priceperbed'] = df['price'].clip(upper=7000) / df['bedrooms'].clip(lower=1)
df['created'] = df['created'].astype(np.datetime64)
df['created_day'] = np.array(df.created.values, dtype='datetime64[D]').astype(np.float32) % 7
df['created_week'] = np.array(df.created.values, dtype='datetime64[W]').astype(np.float32)
df['created_hour'] = np.array(df.created.values, dtype='datetime64[h]').astype(np.float32) % 24
df['desc_count'] = df.description.apply(lambda x: len(x.split())).clip(upper=150)
df['features_count'] = df.features.apply(lambda x: len(x))
df['photos_count'] = df.photos.apply(lambda x: len(x))
lbl = preprocessing.LabelEncoder()
lbl.fit(list(df['manager_id'].values))
df['manager_id'] = lbl.transform(list(df['manager_id'].values))
feature_list = ['no fee', 'hardwood floors', 'laundry in building']
df['features'] = df['features'].apply(lambda x: list(map(str.lower, x)))
for feature in feature_list:
    df[feature] = df['features'].apply(lambda x: feature in x)
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')
vectorizer.fit(df.description.values)
temp = pd.concat([df_train.manager_id, pd.get_dummies(df_train.interest_level)], axis=1).groupby('manager_id').mean()
temp.columns = ['high_frac', 'low_frac', 'medium_frac']
temp['count'] = df_train.groupby('manager_id').count().iloc[:, 1]
temp['manager_skill'] = temp['high_frac'] * 2 + temp['medium_frac']
unranked_managers_ixes = temp['count'] < 8
ranked_managers_ixes = ~unranked_managers_ixes
mean_values = temp.loc[ranked_managers_ixes, ['high_frac', 'low_frac', 'medium_frac', 'manager_skill']].mean()
temp.loc[unranked_managers_ixes, ['high_frac', 'low_frac', 'medium_frac', 'manager_skill']] = mean_values.values
df_train = df_train.merge(temp.reset_index(), how='left', on='manager_id')
df_val = df_val.merge(temp.reset_index(), how='left', on='manager_id')
new_manager_ixes = df_val['high_frac'].isnull()
df_val.loc[new_manager_ixes, ['high_frac', 'low_frac', 'medium_frac', 'manager_skill']] = mean_values.values
df_test = df_test.merge(temp.reset_index(), how='left', on='manager_id')
new_manager_ixes = df_test['high_frac'].isnull()
df_test.loc[new_manager_ixes, ['high_frac', 'low_frac', 'medium_frac', 'manager_skill']] = mean_values.values
derived_cols = ['derived_' + str(i) for i in range(5)]
cols = ['price', 'bathrooms', 'bedrooms', 'latitude', 'longitude', 'priceperbed', 'created_hour', 'desc_count', 'photos_count', 'features_count', 'no fee', 'hardwood floors', 'laundry in building', 'manager_skill']
svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
X_train = svd.fit_transform(vectorizer.transform(df_train.description))
X_train = np.hstack([X_train, df_train[cols].values])
X_val = svd.transform(vectorizer.transform(df_val.description))
X_val = np.hstack([X_val, df_val[cols].values])
X_test = svd.transform(vectorizer.transform(df_test.description))
X_test = np.hstack([X_test, df_test[cols].values])
target_num_map = {'high': 0, 'low': 1, 'medium': 2}
y_train = np.array(df_train['interest_level'].apply(lambda x: target_num_map[x]))
y_test = np.array(df_test['interest_level'].apply(lambda x: target_num_map[x]))
y_val = np.array(df_val['interest_level'].apply(lambda x: target_num_map[x]))
import xgboost as xgb
SEED = 0
params = {'eta': 0.01, 'colsample_bytree': 0.8, 'subsample': 0.8, 'seed': 0, 'nthread': 16, 'objective': 'multi:softprob', 'eval_metric': 'mlogloss', 'num_class': 3}
dtrain = xgb.DMatrix(data=X_train, label=y_train)
bst = xgb.train(params, dtrain, 1000, verbose_eval=25)
y_pred = bst.predict(dtrain)
score = log_loss(df_train['interest_level'].values, y_pred)
xgval = xgb.DMatrix(X_val)
y_pred = bst.predict(xgval)
score2 = log_loss(df_val['interest_level'].values, y_pred)
xgtest = xgb.DMatrix(X_test)
y_pred = bst.predict(xgtest)
score3 = log_loss(df_test['interest_level'].values, y_pred)
print('%.6f %.6f %.6f' % (score, score2, score3))
code
105204314/cell_4
[ "text_plain_output_1.png" ]
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sys
dftr = pd.read_csv('/kaggle/input/feedback-prize-english-language-learning//train.csv')
dftr['src'] = 'train'
dfte = pd.read_csv('/kaggle/input/feedback-prize-english-language-learning//test.csv')
dfte['src'] = 'test'
df = pd.concat([dftr, dfte], ignore_index=True)
target_cols = ['cohesion', 'syntax', 'vocabulary', 'phraseology', 'grammar', 'conventions']
import sys
sys.path.append('../input/iterativestratification')
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
FOLDS = 5
skf = MultilabelStratifiedKFold(n_splits=FOLDS, shuffle=True, random_state=2022)
for i, (train_index, val_index) in enumerate(skf.split(dftr, dftr[target_cols])):
    dftr.loc[val_index, 'FOLD'] = i
dftr.FOLD.value_counts()
code
105204314/cell_2
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dftr = pd.read_csv('/kaggle/input/feedback-prize-english-language-learning//train.csv')
dftr['src'] = 'train'
dfte = pd.read_csv('/kaggle/input/feedback-prize-english-language-learning//test.csv')
dfte['src'] = 'test'
print(dftr.shape, dfte.shape, dfte.columns)
df = pd.concat([dftr, dfte], ignore_index=True)
dftr.head()
code
105204314/cell_1
[ "text_plain_output_1.png" ]
import os
import warnings
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
import re
import warnings

def fxn():
    warnings.warn('deprecated', DeprecationWarning)

with warnings.catch_warnings():
    warnings.simplefilter('ignore')
    fxn()
code
105204314/cell_5
[ "application_vnd.jupyter.stderr_output_9.png", "application_vnd.jupyter.stderr_output_7.png", "text_plain_output_4.png", "text_plain_output_10.png", "text_plain_output_6.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_5.png", "text_plain_output_8.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LinearRegression,SGDRegressor
from sklearn.metrics import mean_squared_error
from sklearn.multioutput import MultiOutputRegressor
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import sys
dftr = pd.read_csv('/kaggle/input/feedback-prize-english-language-learning//train.csv')
dftr['src'] = 'train'
dfte = pd.read_csv('/kaggle/input/feedback-prize-english-language-learning//test.csv')
dfte['src'] = 'test'
df = pd.concat([dftr, dfte], ignore_index=True)
target_cols = ['cohesion', 'syntax', 'vocabulary', 'phraseology', 'grammar', 'conventions']
import sys
sys.path.append('../input/iterativestratification')
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
FOLDS = 5
skf = MultilabelStratifiedKFold(n_splits=FOLDS, shuffle=True, random_state=2022)
for i, (train_index, val_index) in enumerate(skf.split(dftr, dftr[target_cols])):
    dftr.loc[val_index, 'FOLD'] = i
dftr.FOLD.value_counts()
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import roc_auc_score
from sklearn.linear_model import LinearRegression, SGDRegressor
from scipy import sparse
from sklearn.multioutput import MultiOutputRegressor
from scipy.stats import pearsonr
from sklearn.metrics import mean_squared_error
preds = []
scores = []

def comp_score(y_true, y_pred):
    rmse_scores = []
    for i in range(len(target_cols)):
        rmse_scores.append(np.sqrt(mean_squared_error(y_true[:, i], y_pred[:, i])))
    return np.mean(rmse_scores)

for fold in range(FOLDS):
    dftr_ = dftr[dftr['FOLD'] != fold]
    dfev_ = dftr[dftr['FOLD'] == fold]
    tf = TfidfVectorizer(ngram_range=(1, 2))
    tf = tf.fit(dftr_['full_text'])
    tr_text_feats = tf.transform(dftr_['full_text'])
    ev_text_feats = tf.transform(dfev_['full_text'])
    te_text_feats = tf.transform(dfte['full_text'])
    clf = MultiOutputRegressor(LinearRegression(n_jobs=-1, normalize=True))
    clf.fit(tr_text_feats, dftr_[target_cols].values)
    ev_preds = clf.predict(ev_text_feats)
    score = comp_score(dfev_[target_cols].values, ev_preds)
    scores.append(score)
    print('Fold : {} EV score: {}'.format(fold, score))
    preds.append(clf.predict(te_text_feats))
code
327528/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import math
import numpy as np
from keras.layers import Input
from keras import backend as K
from keras.engine.topology import Layer
from skimage.util.montage import montage2d
code
327528/cell_5
[ "image_output_1.png" ]
from IPython.display import display, Image
from PIL.Image import fromarray
from io import BytesIO
from keras.engine.topology import Layer
from keras.layers import Input
from keras import backend as K  # added: the K.* calls below require this import (present in cell_2 of the same notebook)
from numpy import asarray, uint8, clip
from skimage.util.montage import montage2d
import math
import numpy as np

def nbimage(data, vmin=None, vmax=None, vsym=False, saveas=None):
    """
    Display raw data as a notebook inline image.
    Parameters:
    data: array-like object, two or three dimensions. If three dimensional,
          first or last dimension must have length 3 or 4 and will be
          interpreted as color (RGB or RGBA).
    vmin, vmax, vsym: refer to rerange()
    saveas: Save image file to disk (optional). Proper file name extension
            will be appended to the pathname given. [ None ]
    """
    from IPython.display import display, Image
    from PIL.Image import fromarray
    from io import BytesIO
    data = rerange(data, vmin, vmax, vsym)
    data = data.squeeze()
    if data.ndim == 3 and 3 <= data.shape[0] <= 4:
        data = data.transpose((1, 2, 0))
    s = BytesIO()
    fromarray(data).save(s, 'png')
    if saveas is not None:
        open(saveas + '.png', 'wb').write(s)

def rerange(data, vmin=None, vmax=None, vsym=False):
    """
    Rescale values of data array to fit the range 0 ... 255 and convert to uint8.
    Parameters:
    data: array-like object. if data.dtype == uint8, no scaling will occur.
    vmin: original array value that will map to 0 in the output. [ data.min() ]
    vmax: original array value that will map to 255 in the output. [ data.max() ]
    vsym: ensure that 0 will map to gray (if True, may override either vmin
          or vmax to accommodate all values.) [ False ]
    """
    from numpy import asarray, uint8, clip
    data = asarray(data)
    if data.dtype != uint8:
        if vmin is None:
            vmin = data.min()
        if vmax is None:
            vmax = data.max()
        if vsym:
            vmax = max(abs(vmin), abs(vmax))
            vmin = -vmax
        data = (data - vmin) * (256 / (vmax - vmin))
        data = clip(data, 0, 255).astype(uint8)
    return data

class Gaussian2D(Layer):

    def __init__(self, output_shape, **kwargs):
        self.output_shape_ = output_shape
        self.height = output_shape[2]
        self.width = output_shape[3]
        self.grid = np.dstack(np.mgrid[-1:1:2.0 / self.height, -1:1:2.0 / self.width])[None, ...]
        super(Gaussian2D, self).__init__(**kwargs)

    def call(self, inputs, mask=None):
        mu, sigma, corr, scale = inputs
        mu = K.tanh(mu) * 0.95
        sigma = K.exp(sigma) + 1e-05
        corr = K.tanh(corr[:, 0]) * 0.95
        scale = K.exp(scale[:, 0])
        mu0 = K.permute_dimensions(mu[:, 0], (0, 'x', 'x', 'x'))
        mu1 = K.permute_dimensions(mu[:, 1], (0, 'x', 'x', 'x'))
        sigma0 = K.permute_dimensions(sigma[:, 0], (0, 'x', 'x', 'x'))
        sigma1 = K.permute_dimensions(sigma[:, 1], (0, 'x', 'x', 'x'))
        grid0 = self.grid[..., 0]
        grid1 = self.grid[..., 1]
        corr = K.permute_dimensions(corr, (0, 'x', 'x', 'x'))
        scale = K.permute_dimensions(scale, (0, 'x', 'x', 'x'))
        return K.tanh(scale / (2.0 * math.pi * sigma0 * sigma1 * K.sqrt(1.0 - corr * corr)) * K.exp(-(1.0 / (2.0 * (1.0 - corr * corr)) * ((grid0 - mu0) * (grid0 - mu0) / (sigma0 * sigma0) + (grid1 - mu1) * (grid1 - mu1) / (sigma1 * sigma1) - 2.0 * corr * (grid0 - mu0) * (grid1 - mu1) / sigma0 / sigma1))))

    def get_output_shape_for(self, input_shape):
        return self.output_shape_

mu_input = Input((2,))
sigma_input = Input((2,))
corr_input = Input((1,))
scale_input = Input((1,))
g = Gaussian2D(output_shape=(None, 1, 100, 100))([mu_input, sigma_input, corr_input, scale_input])
n = 3 * 3
mu = np.random.normal(size=(n, 2)) / 3
sigma = np.random.uniform(-3, -2, size=(n, 2))
corr = np.random.normal(size=(n, 1)) / 5
scale = np.random.normal(size=(n, 1))
gaussians = g.eval({mu_input: mu.astype('float32'), sigma_input: sigma.astype('float32'), corr_input: corr.astype('float32'), scale_input: scale.astype('float32')})
nbimage(montage2d(gaussians.squeeze().clip(0, 1)))
code
122249691/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.python.keras.layers import Dense, Flatten
import PIL
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pathlib
import tensorflow as tf
import pathlib
dataset_url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
roses = list(data_dir.glob('roses/*'))
PIL.Image.open(str(roses[0]))
img_height, img_width = (180, 180)
batch_size = 32
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='validation', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
class_names = train_ds.class_names
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
    for i in range(6):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[labels[i]])
        plt.axis("off")
resnet_model = Sequential()
pretrained_model = tf.keras.applications.ResNet50(include_top=False, input_shape=(180, 180, 3), pooling='avg', classes=5, weights='imagenet')
for layer in pretrained_model.layers:
    layer.trainable = False
resnet_model.add(pretrained_model)
resnet_model.add(Flatten())
resnet_model.add(Dense(512, activation='relu'))
resnet_model.add(Dense(5, activation='softmax'))
resnet_model.summary()
resnet_model.compile(optimizer=Adam(lr=0.001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
epochs = 10
history = resnet_model.fit(train_ds, validation_data=val_ds, epochs=epochs)
code
122249691/cell_9
[ "text_plain_output_1.png" ]
import PIL
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pathlib
import tensorflow as tf
import pathlib
dataset_url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
roses = list(data_dir.glob('roses/*'))
PIL.Image.open(str(roses[0]))
img_height, img_width = (180, 180)
batch_size = 32
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
class_names = train_ds.class_names
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
    for i in range(6):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype('uint8'))
        plt.title(class_names[labels[i]])
        plt.axis('off')
code
122249691/cell_4
[ "text_plain_output_1.png" ]
import pathlib
import tensorflow as tf
import pathlib
dataset_url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
print(data_dir)
code
122249691/cell_6
[ "image_output_1.png" ]
import PIL
import pathlib
import tensorflow as tf
import pathlib
dataset_url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
roses = list(data_dir.glob('roses/*'))
PIL.Image.open(str(roses[0]))
img_height, img_width = (180, 180)
batch_size = 32
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
code
122249691/cell_11
[ "text_plain_output_1.png" ]
from tensorflow.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Flatten
import PIL
import pathlib
import tensorflow as tf
import pathlib
dataset_url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
roses = list(data_dir.glob('roses/*'))
PIL.Image.open(str(roses[0]))
img_height, img_width = (180, 180)
batch_size = 32
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='validation', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
resnet_model = Sequential()
pretrained_model = tf.keras.applications.ResNet50(include_top=False, input_shape=(180, 180, 3), pooling='avg', classes=5, weights='imagenet')
for layer in pretrained_model.layers:
    layer.trainable = False
resnet_model.add(pretrained_model)
resnet_model.add(Flatten())
resnet_model.add(Dense(512, activation='relu'))
resnet_model.add(Dense(5, activation='softmax'))
resnet_model.summary()
code
122249691/cell_7
[ "text_plain_output_1.png" ]
import PIL
import pathlib
import tensorflow as tf
import pathlib
dataset_url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
roses = list(data_dir.glob('roses/*'))
PIL.Image.open(str(roses[0]))
img_height, img_width = (180, 180)
batch_size = 32
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='validation', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
code
122249691/cell_18
[ "text_plain_output_1.png" ]
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.python.keras.layers import Dense, Flatten
import PIL
import cv2
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pathlib
import tensorflow as tf
import pathlib
dataset_url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
roses = list(data_dir.glob('roses/*'))
PIL.Image.open(str(roses[0]))
img_height, img_width = (180, 180)
batch_size = 32
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='validation', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
class_names = train_ds.class_names
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
    for i in range(6):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[labels[i]])
        plt.axis("off")
resnet_model = Sequential()
pretrained_model = tf.keras.applications.ResNet50(include_top=False, input_shape=(180, 180, 3), pooling='avg', classes=5, weights='imagenet')
for layer in pretrained_model.layers:
    layer.trainable = False
resnet_model.add(pretrained_model)
resnet_model.add(Flatten())
resnet_model.add(Dense(512, activation='relu'))
resnet_model.add(Dense(5, activation='softmax'))
resnet_model.summary()
resnet_model.compile(optimizer=Adam(lr=0.001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
epochs = 10
history = resnet_model.fit(train_ds, validation_data=val_ds, epochs=epochs)
import cv2
image = cv2.imread(str(roses[0]))
image_resized = cv2.resize(image, (img_height, img_width))
image = np.expand_dims(image_resized, axis=0)
pred = resnet_model.predict(image)
output_class = class_names[np.argmax(pred)]
print('The predicted class is', output_class)
code
122249691/cell_8
[ "text_plain_output_1.png" ]
import PIL
import pathlib
import tensorflow as tf
import pathlib
dataset_url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
roses = list(data_dir.glob('roses/*'))
PIL.Image.open(str(roses[0]))
img_height, img_width = (180, 180)
batch_size = 32
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
class_names = train_ds.class_names
print(class_names)
code
122249691/cell_15
[ "text_plain_output_1.png" ]
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.python.keras.layers import Dense, Flatten
import PIL
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pathlib
import tensorflow as tf
import pathlib
dataset_url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
roses = list(data_dir.glob('roses/*'))
PIL.Image.open(str(roses[0]))
img_height, img_width = (180, 180)
batch_size = 32
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='validation', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
class_names = train_ds.class_names
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
    for i in range(6):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[labels[i]])
        plt.axis("off")
resnet_model = Sequential()
pretrained_model = tf.keras.applications.ResNet50(include_top=False, input_shape=(180, 180, 3), pooling='avg', classes=5, weights='imagenet')
for layer in pretrained_model.layers:
    layer.trainable = False
resnet_model.add(pretrained_model)
resnet_model.add(Flatten())
resnet_model.add(Dense(512, activation='relu'))
resnet_model.add(Dense(5, activation='softmax'))
resnet_model.summary()
resnet_model.compile(optimizer=Adam(lr=0.001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
epochs = 10
history = resnet_model.fit(train_ds, validation_data=val_ds, epochs=epochs)
fig1 = plt.gcf()
plt.axis(ymin=0.4, ymax=1)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.grid()
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epochs')
plt.legend(['train', 'validation'])
plt.show()
code
122249691/cell_16
[ "text_plain_output_1.png" ]
import PIL
import cv2
import numpy as np # linear algebra
import pathlib
import tensorflow as tf
import pathlib
dataset_url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
roses = list(data_dir.glob('roses/*'))
PIL.Image.open(str(roses[0]))
import cv2
image = cv2.imread(str(roses[0]))
image_resized = cv2.resize(image, (img_height, img_width))
image = np.expand_dims(image_resized, axis=0)
print(image.shape)
code
122249691/cell_3
[ "text_plain_output_1.png" ]
import pathlib
import tensorflow as tf
import pathlib
dataset_url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
code
122249691/cell_17
[ "image_output_1.png" ]
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.python.keras.layers import Dense, Flatten
import PIL
import cv2
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pathlib
import tensorflow as tf
import pathlib
dataset_url = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
roses = list(data_dir.glob('roses/*'))
PIL.Image.open(str(roses[0]))
img_height, img_width = (180, 180)
batch_size = 32
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='validation', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
class_names = train_ds.class_names
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
    for i in range(6):
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(class_names[labels[i]])
        plt.axis("off")
resnet_model = Sequential()
pretrained_model = tf.keras.applications.ResNet50(include_top=False, input_shape=(180, 180, 3), pooling='avg', classes=5, weights='imagenet')
for layer in pretrained_model.layers:
    layer.trainable = False
resnet_model.add(pretrained_model)
resnet_model.add(Flatten())
resnet_model.add(Dense(512, activation='relu'))
resnet_model.add(Dense(5, activation='softmax'))
resnet_model.summary()
resnet_model.compile(optimizer=Adam(lr=0.001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
epochs = 10
history = resnet_model.fit(train_ds, validation_data=val_ds, epochs=epochs)
import cv2
image = cv2.imread(str(roses[0]))
image_resized = cv2.resize(image, (img_height, img_width))
image = np.expand_dims(image_resized, axis=0)
pred = resnet_model.predict(image)
print(pred)
code