column            type     lengths / values
path              string   13 to 17 chars
screenshot_names  list     1 to 873 items
code              string   0 to 40.4k chars
cell_type         string   1 class ("code")
121150522/cell_20
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/spaceship-titanic/train.csv')
df.pivot_table(index='CryoSleep', columns='Transported', aggfunc={'Transported': 'count'})
sns.countplot(x='Destination', data=df)
code
121150522/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/spaceship-titanic/train.csv')
df.pivot_table(index='CryoSleep', columns='Transported', aggfunc={'Transported': 'count'})
df_count = df[['Age']].apply(pd.value_counts)
plt.xticks(rotation=85)
healthy = df[df['Age'] <= 80]
age_s = sns.catplot(x='Age', kind='count', hue='Transported', height=20, aspect=11.7 / 8.27, data=healthy)
plt.legend(title_fontsize='400')
plt.show()
code
121150522/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/spaceship-titanic/train.csv')
sns.catplot(x='HomePlanet', kind='count', hue='Transported', data=df)
plt.show()
code
121150522/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
121150522/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/spaceship-titanic/train.csv')
sns.countplot(x='HomePlanet', data=df)
code
121150522/cell_28
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/spaceship-titanic/train.csv')
df.pivot_table(index='CryoSleep', columns='Transported', aggfunc={'Transported': 'count'})
df.head(3)
code
121150522/cell_16
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/spaceship-titanic/train.csv')
sns.catplot(x='CryoSleep', kind='count', hue='Transported', data=df)
code
121150522/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/spaceship-titanic/train.csv')
df.head(100)
code
121150522/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/spaceship-titanic/train.csv')
df.pivot_table(index='CryoSleep', columns='Transported', aggfunc={'Transported': 'count'})
code
121150522/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/spaceship-titanic/train.csv')
df.CryoSleep.value_counts().plot(kind='pie', figsize=(12, 5), autopct='%0.1f%%')
plt.xlabel('Percentage of Passengers CryoSleeping')
plt.ylabel('')
plt.show()
code
121150522/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/spaceship-titanic/train.csv')
df.describe()
code
16122912/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(train_X, train_Y)
model.intercept_
model.coef_
train_predict = model.predict(train_X)
code
16122912/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
df.head()
code
16122912/cell_23
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(train_X, train_Y)
model.intercept_
code
16122912/cell_20
[ "text_plain_output_1.png" ]
df_final.Value.plot(kind='box')
code
16122912/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
df['Year']
code
16122912/cell_26
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(train_X, train_Y)
model.intercept_
model.coef_
train_predict = model.predict(train_X)
test_predict = model.predict(test_X)
code
16122912/cell_2
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
print(os.listdir('../input'))
code
16122912/cell_19
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
df.columns
cols_to_Encode = ['Gender', 'Race/ Ethnicity', 'Indicator Category']
continuous_cols = ['Value']
encoded_cols = pd.get_dummies(df[cols_to_Encode])
df_final = pd.concat([encoded_cols, df[continuous_cols]], axis=1)
y = df_final['Value']
x = df_final.drop(columns='Value')
df_final.shape
train_X, test_X, train_Y, test_Y = train_test_split(x, y, test_size=0.3)
df_final.columns.dtype
df_column_category = df_final.select_dtypes(exclude=np.number).columns
df_column_category
df_final.isna().sum()
df_final['Value'].value_counts()
code
16122912/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
print(os.listdir('../input'))
code
16122912/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
df.columns
code
16122912/cell_18
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
df.columns
cols_to_Encode = ['Gender', 'Race/ Ethnicity', 'Indicator Category']
continuous_cols = ['Value']
encoded_cols = pd.get_dummies(df[cols_to_Encode])
df_final = pd.concat([encoded_cols, df[continuous_cols]], axis=1)
y = df_final['Value']
x = df_final.drop(columns='Value')
df_final.shape
train_X, test_X, train_Y, test_Y = train_test_split(x, y, test_size=0.3)
df_final.columns.dtype
df_column_category = df_final.select_dtypes(exclude=np.number).columns
df_column_category
df_final.isna().sum()
code
16122912/cell_28
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error
model = LinearRegression()
model.fit(train_X, train_Y)
model.intercept_
model.coef_
train_predict = model.predict(train_X)
test_predict = model.predict(test_X)
print(mean_squared_error(train_Y, train_predict))
print(mean_squared_error(test_Y, test_predict))
code
16122912/cell_15
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
df.columns
cols_to_Encode = ['Gender', 'Race/ Ethnicity', 'Indicator Category']
continuous_cols = ['Value']
encoded_cols = pd.get_dummies(df[cols_to_Encode])
df_final = pd.concat([encoded_cols, df[continuous_cols]], axis=1)
y = df_final['Value']
x = df_final.drop(columns='Value')
df_final.shape
train_X, test_X, train_Y, test_Y = train_test_split(x, y, test_size=0.3)
df_final.columns.dtype
code
16122912/cell_16
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
df.columns
cols_to_Encode = ['Gender', 'Race/ Ethnicity', 'Indicator Category']
continuous_cols = ['Value']
encoded_cols = pd.get_dummies(df[cols_to_Encode])
df_final = pd.concat([encoded_cols, df[continuous_cols]], axis=1)
y = df_final['Value']
x = df_final.drop(columns='Value')
df_final.shape
train_X, test_X, train_Y, test_Y = train_test_split(x, y, test_size=0.3)
df_final.columns.dtype
df_column_category = df_final.select_dtypes(exclude=np.number).columns
df_column_category
code
16122912/cell_17
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
df.columns
cols_to_Encode = ['Gender', 'Race/ Ethnicity', 'Indicator Category']
continuous_cols = ['Value']
encoded_cols = pd.get_dummies(df[cols_to_Encode])
df_final = pd.concat([encoded_cols, df[continuous_cols]], axis=1)
y = df_final['Value']
x = df_final.drop(columns='Value')
df_final.shape
train_X, test_X, train_Y, test_Y = train_test_split(x, y, test_size=0.3)
df_final.columns.dtype
df_column_category = df_final.select_dtypes(exclude=np.number).columns
df_column_category
df_final['Year'].value_counts()
code
16122912/cell_24
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(train_X, train_Y)
model.intercept_
model.coef_
code
16122912/cell_22
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(train_X, train_Y)
code
16122912/cell_27
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error
model = LinearRegression()
model.fit(train_X, train_Y)
model.intercept_
model.coef_
train_predict = model.predict(train_X)
test_predict = model.predict(test_X)
print(mean_absolute_error(train_Y, train_predict))
print(mean_absolute_error(test_Y, test_predict))
code
16122912/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
df.columns
cols_to_Encode = ['Gender', 'Race/ Ethnicity', 'Indicator Category']
continuous_cols = ['Value']
encoded_cols = pd.get_dummies(df[cols_to_Encode])
df_final = pd.concat([encoded_cols, df[continuous_cols]], axis=1)
y = df_final['Value']
x = df_final.drop(columns='Value')
df_final.shape
code
16122912/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
df['Notes'].value_counts()
code
303338/cell_2
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import seaborn as sns
sns.set_style('whitegrid')
zika = pd.read_csv('../input/cdc_zika.csv')
zika.groupby('location').size().reset_index().rename(columns={0: 'count'})
code
104114996/cell_13
[ "text_html_output_1.png" ]
df_embds.head()
code
104114996/cell_9
[ "text_html_output_1.png" ]
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.layers import GlobalMaxPooling2D
import tensorflow as tf
import keras
from keras import Model
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg19 import preprocess_input, decode_predictions
tf.__version__
img_width, img_height, _ = (224, 224, 3)
model_1 = ResNet50(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
model_1.trainable = False
model_1 = tf.keras.models.Sequential([model_1, GlobalMaxPooling2D()])
model_1.summary()
code
104114996/cell_4
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
df = pd.read_csv(DATASET_PATH + '/styles.csv', nrows=5000, error_bad_lines=False)
df.head()
code
104114996/cell_23
[ "text_html_output_1.png" ]
from tensorflow.keras.applications.resnet50 import ResNet50 from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions from tensorflow.keras.applications.vgg16 import VGG16 from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions from tensorflow.keras.applications.vgg19 import preprocess_input, decode_predictions from tensorflow.keras.layers import GlobalMaxPooling2D from tensorflow.keras.preprocessing import image import cv2 import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import os import os import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tensorflow as tf import numpy as np import pandas as pd import os DATASET_PATH = '../input/fashion-product-images-small/myntradataset' DATASET_PATH = '../input/fashion-product-images-small/myntradataset' df = pd.read_csv(DATASET_PATH + '/styles.csv', nrows=5000, error_bad_lines=False) df['image'] = df.apply(lambda row: str(row['id']) + '.jpg', axis=1).reset_index(drop=True) def plot_figures(figures,nrows = 1,ncols = 1,figsize = (8,8)): fig,axeslist = plt.subplots(ncols = ncols,nrows = nrows,figsize = figsize) for index,title in enumerate(figures): axeslist.ravel()[index].imshow(cv2.cvtColor(figures[title], cv2.COLOR_BGR2RGB)) axeslist.ravel()[index].set_title(title) axeslist.ravel()[index].set_axis_off() plt.tight_layout() def img_path(img): DATASET_PATH = '../input/fashion-product-images-small/myntradataset' return DATASET_PATH+"/images/"+img def load_image(img, resized_fac = 0.1): img = cv2.imread(img_path(img)) w, h, _ = img.shape resized = cv2.resize(img, (int(h*resized_fac), int(w*resized_fac)), interpolation = cv2.INTER_AREA) return resized import tensorflow as tf import keras from keras import Model from tensorflow.keras.applications.resnet50 import ResNet50 from tensorflow.keras.applications.vgg16 import VGG16 from tensorflow.keras.applications.vgg19 import VGG19 from tensorflow.keras.preprocessing import image from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions from tensorflow.keras.applications.vgg19 import preprocess_input, decode_predictions from tensorflow.keras.layers import GlobalMaxPooling2D tf.__version__ img_width, img_height, _ = (224, 224, 3) model_1 = ResNet50(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3)) model_1.trainable = False model_1 = tf.keras.models.Sequential([model_1, GlobalMaxPooling2D()]) model_1.summary() def get_embedding(model, img_name): img = image.load_img(img_path(img_name), target_size=(224, 224)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = preprocess_input(x) return model.predict(x).reshape(-1) emb = get_embedding(model_1, df.iloc[0].image) emb.shape a = [list(i) for i in complete_indices] sub_cat = [df['subCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])] mast_cat = [df['masterCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])] resnet_50 = pd.DataFrame() resnet_50['id'] = df['id'] resnet_50['recommended_index'] = [list(i) for i in complete_indices] resnet_50['recommended_scores'] = complete_scores resnet_50['masterCategory'] = df['masterCategory'] resnet_50['subCategory'] = df['subCategory'] resnet_50['Recommended_master_category'] = mast_cat resnet_50['Recommended_sub_category'] = sub_cat resnet_50['mean_recommended_score'] = mean_sim_scores img_width, img_height, 
_ = (224, 224, 3) model_2 = VGG16(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3)) model_2.trainable = False model_2 = tf.keras.models.Sequential([model_2, GlobalMaxPooling2D()]) model_2.summary() vgg_emb = get_embedding(model_2, df.iloc[0].image) vgg_emb.shape
code
104114996/cell_33
[ "text_html_output_1.png" ]
df_sample = df.copy()
map_embeddings = df_sample['image'].apply(lambda img: get_embedding(model_3, img))
df_embds_vgg19 = map_embeddings.apply(pd.Series)
code
104114996/cell_20
[ "text_plain_output_1.png" ]
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg19 import preprocess_input, decode_predictions
from tensorflow.keras.layers import GlobalMaxPooling2D
from tensorflow.keras.preprocessing import image
import cv2
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
import tensorflow as tf
DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
df = pd.read_csv(DATASET_PATH + '/styles.csv', nrows=5000, error_bad_lines=False)
df['image'] = df.apply(lambda row: str(row['id']) + '.jpg', axis=1).reset_index(drop=True)

def plot_figures(figures, nrows=1, ncols=1, figsize=(8, 8)):
    fig, axeslist = plt.subplots(ncols=ncols, nrows=nrows, figsize=figsize)
    for index, title in enumerate(figures):
        axeslist.ravel()[index].imshow(cv2.cvtColor(figures[title], cv2.COLOR_BGR2RGB))
        axeslist.ravel()[index].set_title(title)
        axeslist.ravel()[index].set_axis_off()
    plt.tight_layout()

def img_path(img):
    DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
    return DATASET_PATH + '/images/' + img

def load_image(img, resized_fac=0.1):
    img = cv2.imread(img_path(img))
    w, h, _ = img.shape
    resized = cv2.resize(img, (int(h * resized_fac), int(w * resized_fac)), interpolation=cv2.INTER_AREA)
    return resized

import keras
from keras import Model
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
tf.__version__
img_width, img_height, _ = (224, 224, 3)
model_1 = ResNet50(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
model_1.trainable = False
model_1 = tf.keras.models.Sequential([model_1, GlobalMaxPooling2D()])
model_1.summary()

def get_embedding(model, img_name):
    img = image.load_img(img_path(img_name), target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return model.predict(x).reshape(-1)

emb = get_embedding(model_1, df.iloc[0].image)
emb.shape
a = [list(i) for i in complete_indices]
sub_cat = [df['subCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
mast_cat = [df['masterCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
resnet_50 = pd.DataFrame()
resnet_50['id'] = df['id']
resnet_50['recommended_index'] = [list(i) for i in complete_indices]
resnet_50['recommended_scores'] = complete_scores
resnet_50['masterCategory'] = df['masterCategory']
resnet_50['subCategory'] = df['subCategory']
resnet_50['Recommended_master_category'] = mast_cat
resnet_50['Recommended_sub_category'] = sub_cat
resnet_50['mean_recommended_score'] = mean_sim_scores

def getmatch_score(resnet_50, col, col1):
    match_list = []
    for index in range(resnet_50.shape[0]):
        try:
            actual_match = resnet_50[col].iloc[index]
            predicted_match = resnet_50[col1].iloc[index]
            count = 0
            for item in predicted_match:
                if item == actual_match:
                    count += 1
            match_list.append(count / 5 * 100)
        except Exception as ex:
            pass  # handler body elided in the source dump
    return match_list

resnet_50['Match_Master_Score'] = getmatch_score(resnet_50, 'masterCategory', 'Recommended_master_category')
resnet_50['Match_Sub_Score'] = getmatch_score(resnet_50, 'subCategory', 'Recommended_sub_category')
sn.distplot(resnet_50['mean_recommended_score'])
code
104114996/cell_29
[ "text_plain_output_1.png" ]
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg19 import preprocess_input, decode_predictions
from tensorflow.keras.layers import GlobalMaxPooling2D
from tensorflow.keras.preprocessing import image
import cv2
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sn
import tensorflow as tf
DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
df = pd.read_csv(DATASET_PATH + '/styles.csv', nrows=5000, error_bad_lines=False)
df['image'] = df.apply(lambda row: str(row['id']) + '.jpg', axis=1).reset_index(drop=True)

def plot_figures(figures, nrows=1, ncols=1, figsize=(8, 8)):
    fig, axeslist = plt.subplots(ncols=ncols, nrows=nrows, figsize=figsize)
    for index, title in enumerate(figures):
        axeslist.ravel()[index].imshow(cv2.cvtColor(figures[title], cv2.COLOR_BGR2RGB))
        axeslist.ravel()[index].set_title(title)
        axeslist.ravel()[index].set_axis_off()
    plt.tight_layout()

def img_path(img):
    DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
    return DATASET_PATH + '/images/' + img

def load_image(img, resized_fac=0.1):
    img = cv2.imread(img_path(img))
    w, h, _ = img.shape
    resized = cv2.resize(img, (int(h * resized_fac), int(w * resized_fac)), interpolation=cv2.INTER_AREA)
    return resized

import keras
from keras import Model
from tensorflow.keras.applications.vgg19 import VGG19
tf.__version__
img_width, img_height, _ = (224, 224, 3)
model_1 = ResNet50(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
model_1.trainable = False
model_1 = tf.keras.models.Sequential([model_1, GlobalMaxPooling2D()])
model_1.summary()

def get_embedding(model, img_name):
    img = image.load_img(img_path(img_name), target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return model.predict(x).reshape(-1)

emb = get_embedding(model_1, df.iloc[0].image)
emb.shape
a = [list(i) for i in complete_indices]
sub_cat = [df['subCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
mast_cat = [df['masterCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
resnet_50 = pd.DataFrame()
resnet_50['id'] = df['id']
resnet_50['recommended_index'] = [list(i) for i in complete_indices]
resnet_50['recommended_scores'] = complete_scores
resnet_50['masterCategory'] = df['masterCategory']
resnet_50['subCategory'] = df['subCategory']
resnet_50['Recommended_master_category'] = mast_cat
resnet_50['Recommended_sub_category'] = sub_cat
resnet_50['mean_recommended_score'] = mean_sim_scores

def getmatch_score(resnet_50, col, col1):
    match_list = []
    for index in range(resnet_50.shape[0]):
        try:
            actual_match = resnet_50[col].iloc[index]
            predicted_match = resnet_50[col1].iloc[index]
            count = 0
            for item in predicted_match:
                if item == actual_match:
                    count += 1
            match_list.append(count / 5 * 100)
        except Exception as ex:
            pass  # handler body elided in the source dump
    return match_list

resnet_50['Match_Master_Score'] = getmatch_score(resnet_50, 'masterCategory', 'Recommended_master_category')
resnet_50['Match_Sub_Score'] = getmatch_score(resnet_50, 'subCategory', 'Recommended_sub_category')
img_width, img_height, _ = (224, 224, 3)
model_2 = VGG16(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
model_2.trainable = False
model_2 = tf.keras.models.Sequential([model_2, GlobalMaxPooling2D()])
model_2.summary()
vgg_emb = get_embedding(model_2, df.iloc[0].image)
vgg_emb.shape
a = [list(i) for i in complete_indices_vgg]
sub_cat = [df['subCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
mast_cat = [df['masterCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
vgg_16 = pd.DataFrame()
vgg_16['id'] = df['id']
vgg_16['recommended_index'] = [list(i) for i in complete_indices_vgg]
vgg_16['recommended_scores'] = complete_scores_vgg
vgg_16['masterCategory'] = df['masterCategory']
vgg_16['subCategory'] = df['subCategory']
vgg_16['Recommended_master_category'] = mast_cat
vgg_16['Recommended_sub_category'] = sub_cat
vgg_16['mean_recommended_score'] = mean_sim_scores_vgg

def getmatch_score(data, col, col1):
    match_list = []
    for index in range(data.shape[0]):
        try:
            actual_match = data[col].iloc[index]
            predicted_match = data[col1].iloc[index]
            count = 0
            for item in predicted_match:
                if item == actual_match:
                    count += 1
            match_list.append(count / 5 * 100)
        except Exception as ex:
            pass  # handler body elided in the source dump
    return match_list

vgg_16['Match_Master_Score'] = getmatch_score(vgg_16, 'masterCategory', 'Recommended_master_category')
vgg_16['Match_Sub_Score'] = getmatch_score(vgg_16, 'subCategory', 'Recommended_sub_category')
sn.distplot(vgg_16['mean_recommended_score'])
code
104114996/cell_26
[ "text_html_output_1.png" ]
def get_recommendations(indices, cosine_sim, index, df, top_n=5):
    sim_index = indices[index]
    sim_scores = list(enumerate(cosine_sim[sim_index]))
    sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
    sim_scores = sim_scores[1:top_n + 1]
    index_rec = [i[0] for i in sim_scores]
    index_sim = [i[1] for i in sim_scores]
    return (indices.iloc[index_rec].index, index_sim)

cosine_sim = 1 - pairwise_distances(df_embds_vgg16, metric='cosine')
indices = pd.Series(range(len(df)), index=df.index)
mean_sim_scores_vgg = []
complete_indices_vgg = []
complete_scores_vgg = []
for index in range(df.shape[0]):
    try:
        index, sim_array = get_recommendations(indices, cosine_sim, index, df, top_n=5)
        mean_score = np.mean(sim_array)
        mean_sim_scores_vgg.append(mean_score)
        complete_indices_vgg.append(index)
        complete_scores_vgg.append(sim_array)
    except Exception as ex:
        print(f'Following exception : {ex} occurred at the index : {index}')
code
104114996/cell_11
[ "text_plain_output_1.png" ]
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg19 import preprocess_input, decode_predictions
from tensorflow.keras.layers import GlobalMaxPooling2D
from tensorflow.keras.preprocessing import image
import cv2
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
df = pd.read_csv(DATASET_PATH + '/styles.csv', nrows=5000, error_bad_lines=False)
df['image'] = df.apply(lambda row: str(row['id']) + '.jpg', axis=1).reset_index(drop=True)

def plot_figures(figures, nrows=1, ncols=1, figsize=(8, 8)):
    fig, axeslist = plt.subplots(ncols=ncols, nrows=nrows, figsize=figsize)
    for index, title in enumerate(figures):
        axeslist.ravel()[index].imshow(cv2.cvtColor(figures[title], cv2.COLOR_BGR2RGB))
        axeslist.ravel()[index].set_title(title)
        axeslist.ravel()[index].set_axis_off()
    plt.tight_layout()

def img_path(img):
    DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
    return DATASET_PATH + '/images/' + img

def load_image(img, resized_fac=0.1):
    img = cv2.imread(img_path(img))
    w, h, _ = img.shape
    resized = cv2.resize(img, (int(h * resized_fac), int(w * resized_fac)), interpolation=cv2.INTER_AREA)
    return resized

import keras
from keras import Model
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
tf.__version__
img_width, img_height, _ = (224, 224, 3)
model_1 = ResNet50(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
model_1.trainable = False
model_1 = tf.keras.models.Sequential([model_1, GlobalMaxPooling2D()])
model_1.summary()

def get_embedding(model, img_name):
    img = image.load_img(img_path(img_name), target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return model.predict(x).reshape(-1)

emb = get_embedding(model_1, df.iloc[0].image)
emb.shape
code
104114996/cell_19
[ "text_html_output_1.png" ]
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg19 import preprocess_input, decode_predictions
from tensorflow.keras.layers import GlobalMaxPooling2D
from tensorflow.keras.preprocessing import image
import cv2
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
df = pd.read_csv(DATASET_PATH + '/styles.csv', nrows=5000, error_bad_lines=False)
df['image'] = df.apply(lambda row: str(row['id']) + '.jpg', axis=1).reset_index(drop=True)

def plot_figures(figures, nrows=1, ncols=1, figsize=(8, 8)):
    fig, axeslist = plt.subplots(ncols=ncols, nrows=nrows, figsize=figsize)
    for index, title in enumerate(figures):
        axeslist.ravel()[index].imshow(cv2.cvtColor(figures[title], cv2.COLOR_BGR2RGB))
        axeslist.ravel()[index].set_title(title)
        axeslist.ravel()[index].set_axis_off()
    plt.tight_layout()

def img_path(img):
    DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
    return DATASET_PATH + '/images/' + img

def load_image(img, resized_fac=0.1):
    img = cv2.imread(img_path(img))
    w, h, _ = img.shape
    resized = cv2.resize(img, (int(h * resized_fac), int(w * resized_fac)), interpolation=cv2.INTER_AREA)
    return resized

import keras
from keras import Model
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
tf.__version__
img_width, img_height, _ = (224, 224, 3)
model_1 = ResNet50(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
model_1.trainable = False
model_1 = tf.keras.models.Sequential([model_1, GlobalMaxPooling2D()])
model_1.summary()

def get_embedding(model, img_name):
    img = image.load_img(img_path(img_name), target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return model.predict(x).reshape(-1)

emb = get_embedding(model_1, df.iloc[0].image)
emb.shape
a = [list(i) for i in complete_indices]
sub_cat = [df['subCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
mast_cat = [df['masterCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
resnet_50 = pd.DataFrame()
resnet_50['id'] = df['id']
resnet_50['recommended_index'] = [list(i) for i in complete_indices]
resnet_50['recommended_scores'] = complete_scores
resnet_50['masterCategory'] = df['masterCategory']
resnet_50['subCategory'] = df['subCategory']
resnet_50['Recommended_master_category'] = mast_cat
resnet_50['Recommended_sub_category'] = sub_cat
resnet_50['mean_recommended_score'] = mean_sim_scores

def getmatch_score(resnet_50, col, col1):
    match_list = []
    for index in range(resnet_50.shape[0]):
        try:
            actual_match = resnet_50[col].iloc[index]
            predicted_match = resnet_50[col1].iloc[index]
            count = 0
            for item in predicted_match:
                if item == actual_match:
                    count += 1
            match_list.append(count / 5 * 100)
        except Exception as ex:
            pass  # handler body elided in the source dump
    return match_list

resnet_50['Match_Master_Score'] = getmatch_score(resnet_50, 'masterCategory', 'Recommended_master_category')
resnet_50['Match_Sub_Score'] = getmatch_score(resnet_50, 'subCategory', 'Recommended_sub_category')
resnet_50.head(2)
code
104114996/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
104114996/cell_18
[ "text_plain_output_1.png" ]
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg19 import preprocess_input, decode_predictions
from tensorflow.keras.layers import GlobalMaxPooling2D
from tensorflow.keras.preprocessing import image
import cv2
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
df = pd.read_csv(DATASET_PATH + '/styles.csv', nrows=5000, error_bad_lines=False)
df['image'] = df.apply(lambda row: str(row['id']) + '.jpg', axis=1).reset_index(drop=True)

def plot_figures(figures, nrows=1, ncols=1, figsize=(8, 8)):
    fig, axeslist = plt.subplots(ncols=ncols, nrows=nrows, figsize=figsize)
    for index, title in enumerate(figures):
        axeslist.ravel()[index].imshow(cv2.cvtColor(figures[title], cv2.COLOR_BGR2RGB))
        axeslist.ravel()[index].set_title(title)
        axeslist.ravel()[index].set_axis_off()
    plt.tight_layout()

def img_path(img):
    DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
    return DATASET_PATH + '/images/' + img

def load_image(img, resized_fac=0.1):
    img = cv2.imread(img_path(img))
    w, h, _ = img.shape
    resized = cv2.resize(img, (int(h * resized_fac), int(w * resized_fac)), interpolation=cv2.INTER_AREA)
    return resized

import keras
from keras import Model
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
tf.__version__
img_width, img_height, _ = (224, 224, 3)
model_1 = ResNet50(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
model_1.trainable = False
model_1 = tf.keras.models.Sequential([model_1, GlobalMaxPooling2D()])
model_1.summary()

def get_embedding(model, img_name):
    img = image.load_img(img_path(img_name), target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return model.predict(x).reshape(-1)

emb = get_embedding(model_1, df.iloc[0].image)
emb.shape
a = [list(i) for i in complete_indices]
sub_cat = [df['subCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
mast_cat = [df['masterCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
resnet_50 = pd.DataFrame()
resnet_50['id'] = df['id']
resnet_50['recommended_index'] = [list(i) for i in complete_indices]
resnet_50['recommended_scores'] = complete_scores
resnet_50['masterCategory'] = df['masterCategory']
resnet_50['subCategory'] = df['subCategory']
resnet_50['Recommended_master_category'] = mast_cat
resnet_50['Recommended_sub_category'] = sub_cat
resnet_50['mean_recommended_score'] = mean_sim_scores

def getmatch_score(resnet_50, col, col1):
    match_list = []
    for index in range(resnet_50.shape[0]):
        try:
            actual_match = resnet_50[col].iloc[index]
            predicted_match = resnet_50[col1].iloc[index]
            count = 0
            for item in predicted_match:
                if item == actual_match:
                    count += 1
            match_list.append(count / 5 * 100)
        except Exception as ex:
            print(f'Following exception : {ex} occurred at index : {index}')
    return match_list

resnet_50['Match_Master_Score'] = getmatch_score(resnet_50, 'masterCategory', 'Recommended_master_category')
resnet_50['Match_Sub_Score'] = getmatch_score(resnet_50, 'subCategory', 'Recommended_sub_category')
resnet_50.head(2)
code
104114996/cell_32
[ "text_html_output_1.png" ]
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.applications.vgg19 import preprocess_input, decode_predictions
from tensorflow.keras.layers import GlobalMaxPooling2D
from tensorflow.keras.preprocessing import image
import cv2
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
df = pd.read_csv(DATASET_PATH + '/styles.csv', nrows=5000, error_bad_lines=False)
df['image'] = df.apply(lambda row: str(row['id']) + '.jpg', axis=1).reset_index(drop=True)

def plot_figures(figures, nrows=1, ncols=1, figsize=(8, 8)):
    fig, axeslist = plt.subplots(ncols=ncols, nrows=nrows, figsize=figsize)
    for index, title in enumerate(figures):
        axeslist.ravel()[index].imshow(cv2.cvtColor(figures[title], cv2.COLOR_BGR2RGB))
        axeslist.ravel()[index].set_title(title)
        axeslist.ravel()[index].set_axis_off()
    plt.tight_layout()

def img_path(img):
    DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
    return DATASET_PATH + '/images/' + img

def load_image(img, resized_fac=0.1):
    img = cv2.imread(img_path(img))
    w, h, _ = img.shape
    resized = cv2.resize(img, (int(h * resized_fac), int(w * resized_fac)), interpolation=cv2.INTER_AREA)
    return resized

import keras
from keras import Model
tf.__version__
img_width, img_height, _ = (224, 224, 3)
model_1 = ResNet50(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
model_1.trainable = False
model_1 = tf.keras.models.Sequential([model_1, GlobalMaxPooling2D()])
model_1.summary()

def get_embedding(model, img_name):
    img = image.load_img(img_path(img_name), target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return model.predict(x).reshape(-1)

emb = get_embedding(model_1, df.iloc[0].image)
emb.shape
a = [list(i) for i in complete_indices]
sub_cat = [df['subCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
mast_cat = [df['masterCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
resnet_50 = pd.DataFrame()
resnet_50['id'] = df['id']
resnet_50['recommended_index'] = [list(i) for i in complete_indices]
resnet_50['recommended_scores'] = complete_scores
resnet_50['masterCategory'] = df['masterCategory']
resnet_50['subCategory'] = df['subCategory']
resnet_50['Recommended_master_category'] = mast_cat
resnet_50['Recommended_sub_category'] = sub_cat
resnet_50['mean_recommended_score'] = mean_sim_scores
img_width, img_height, _ = (224, 224, 3)
model_2 = VGG16(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
model_2.trainable = False
model_2 = tf.keras.models.Sequential([model_2, GlobalMaxPooling2D()])
model_2.summary()
vgg_emb = get_embedding(model_2, df.iloc[0].image)
vgg_emb.shape
a = [list(i) for i in complete_indices_vgg]
sub_cat = [df['subCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
mast_cat = [df['masterCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
vgg_16 = pd.DataFrame()
vgg_16['id'] = df['id']
vgg_16['recommended_index'] = [list(i) for i in complete_indices_vgg]
vgg_16['recommended_scores'] = complete_scores_vgg
vgg_16['masterCategory'] = df['masterCategory']
vgg_16['subCategory'] = df['subCategory']
vgg_16['Recommended_master_category'] = mast_cat
vgg_16['Recommended_sub_category'] = sub_cat
vgg_16['mean_recommended_score'] = mean_sim_scores_vgg
img_width, img_height, _ = (224, 224, 3)
model_3 = VGG19(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
model_3.trainable = False
model_3 = tf.keras.models.Sequential([model_3, GlobalMaxPooling2D()])
model_3.summary()
vgg_emb_19 = get_embedding(model_3, df.iloc[0].image)
vgg_emb_19.shape
code
104114996/cell_28
[ "text_plain_output_1.png" ]
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg19 import preprocess_input, decode_predictions
from tensorflow.keras.layers import GlobalMaxPooling2D
from tensorflow.keras.preprocessing import image
import cv2
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
df = pd.read_csv(DATASET_PATH + '/styles.csv', nrows=5000, error_bad_lines=False)
df['image'] = df.apply(lambda row: str(row['id']) + '.jpg', axis=1).reset_index(drop=True)

def plot_figures(figures, nrows=1, ncols=1, figsize=(8, 8)):
    fig, axeslist = plt.subplots(ncols=ncols, nrows=nrows, figsize=figsize)
    for index, title in enumerate(figures):
        axeslist.ravel()[index].imshow(cv2.cvtColor(figures[title], cv2.COLOR_BGR2RGB))
        axeslist.ravel()[index].set_title(title)
        axeslist.ravel()[index].set_axis_off()
    plt.tight_layout()

def img_path(img):
    DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
    return DATASET_PATH + '/images/' + img

def load_image(img, resized_fac=0.1):
    img = cv2.imread(img_path(img))
    w, h, _ = img.shape
    resized = cv2.resize(img, (int(h * resized_fac), int(w * resized_fac)), interpolation=cv2.INTER_AREA)
    return resized

import keras
from keras import Model
from tensorflow.keras.applications.vgg19 import VGG19
tf.__version__
img_width, img_height, _ = (224, 224, 3)
model_1 = ResNet50(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
model_1.trainable = False
model_1 = tf.keras.models.Sequential([model_1, GlobalMaxPooling2D()])
model_1.summary()

def get_embedding(model, img_name):
    img = image.load_img(img_path(img_name), target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return model.predict(x).reshape(-1)

emb = get_embedding(model_1, df.iloc[0].image)
emb.shape
a = [list(i) for i in complete_indices]
sub_cat = [df['subCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
mast_cat = [df['masterCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
resnet_50 = pd.DataFrame()
resnet_50['id'] = df['id']
resnet_50['recommended_index'] = [list(i) for i in complete_indices]
resnet_50['recommended_scores'] = complete_scores
resnet_50['masterCategory'] = df['masterCategory']
resnet_50['subCategory'] = df['subCategory']
resnet_50['Recommended_master_category'] = mast_cat
resnet_50['Recommended_sub_category'] = sub_cat
resnet_50['mean_recommended_score'] = mean_sim_scores

def getmatch_score(resnet_50, col, col1):
    match_list = []
    for index in range(resnet_50.shape[0]):
        try:
            actual_match = resnet_50[col].iloc[index]
            predicted_match = resnet_50[col1].iloc[index]
            count = 0
            for item in predicted_match:
                if item == actual_match:
                    count += 1
            match_list.append(count / 5 * 100)
        except Exception as ex:
            pass  # handler body elided in the source dump
    return match_list

resnet_50['Match_Master_Score'] = getmatch_score(resnet_50, 'masterCategory', 'Recommended_master_category')
resnet_50['Match_Sub_Score'] = getmatch_score(resnet_50, 'subCategory', 'Recommended_sub_category')
img_width, img_height, _ = (224, 224, 3)
model_2 = VGG16(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
model_2.trainable = False
model_2 = tf.keras.models.Sequential([model_2, GlobalMaxPooling2D()])
model_2.summary()
vgg_emb = get_embedding(model_2, df.iloc[0].image)
vgg_emb.shape
a = [list(i) for i in complete_indices_vgg]
sub_cat = [df['subCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
mast_cat = [df['masterCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
vgg_16 = pd.DataFrame()
vgg_16['id'] = df['id']
vgg_16['recommended_index'] = [list(i) for i in complete_indices_vgg]
vgg_16['recommended_scores'] = complete_scores_vgg
vgg_16['masterCategory'] = df['masterCategory']
vgg_16['subCategory'] = df['subCategory']
vgg_16['Recommended_master_category'] = mast_cat
vgg_16['Recommended_sub_category'] = sub_cat
vgg_16['mean_recommended_score'] = mean_sim_scores_vgg

def getmatch_score(data, col, col1):
    match_list = []
    for index in range(data.shape[0]):
        try:
            actual_match = data[col].iloc[index]
            predicted_match = data[col1].iloc[index]
            count = 0
            for item in predicted_match:
                if item == actual_match:
                    count += 1
            match_list.append(count / 5 * 100)
        except Exception as ex:
            print(f'Following exception : {ex} occurred at index : {index}')
    return match_list

vgg_16['Match_Master_Score'] = getmatch_score(vgg_16, 'masterCategory', 'Recommended_master_category')
vgg_16['Match_Sub_Score'] = getmatch_score(vgg_16, 'subCategory', 'Recommended_sub_category')
vgg_16.head(2)
code
104114996/cell_8
[ "text_plain_output_1.png" ]
import tensorflow as tf
import keras
from keras import Model
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg19 import preprocess_input, decode_predictions
from tensorflow.keras.layers import GlobalMaxPooling2D
tf.__version__
code
104114996/cell_15
[ "text_plain_output_1.png" ]
from sklearn.metrics.pairwise import pairwise_distances
cosine_sim = 1 - pairwise_distances(df_embds, metric='cosine')
cosine_sim[:4, :4]
code
104114996/cell_16
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
def get_recommendations(indices, cosine_sim, index, df, top_n=5):
    sim_index = indices[index]
    sim_scores = list(enumerate(cosine_sim[sim_index]))
    sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
    sim_scores = sim_scores[1:top_n + 1]
    index_rec = [i[0] for i in sim_scores]
    index_sim = [i[1] for i in sim_scores]
    return (indices.iloc[index_rec].index, index_sim)

cosine_sim = 1 - pairwise_distances(df_embds, metric='cosine')
indices = pd.Series(range(len(df)), index=df.index)
mean_sim_scores = []
complete_indices = []
complete_scores = []
for index in range(df.shape[0]):
    try:
        index, sim_array = get_recommendations(indices, cosine_sim, index, df, top_n=5)
        mean_score = np.mean(sim_array)
        mean_sim_scores.append(mean_score)
        complete_indices.append(index)
        complete_scores.append(sim_array)
    except Exception as ex:
        print(f'Following exception : {ex} occurred at the index : {index}')
code
104114996/cell_3
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
print(os.listdir(DATASET_PATH))
code
104114996/cell_17
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg19 import preprocess_input, decode_predictions
from tensorflow.keras.layers import GlobalMaxPooling2D
from tensorflow.keras.preprocessing import image
import cv2
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
df = pd.read_csv(DATASET_PATH + '/styles.csv', nrows=5000, error_bad_lines=False)
df['image'] = df.apply(lambda row: str(row['id']) + '.jpg', axis=1).reset_index(drop=True)

def plot_figures(figures, nrows=1, ncols=1, figsize=(8, 8)):
    fig, axeslist = plt.subplots(ncols=ncols, nrows=nrows, figsize=figsize)
    for index, title in enumerate(figures):
        axeslist.ravel()[index].imshow(cv2.cvtColor(figures[title], cv2.COLOR_BGR2RGB))
        axeslist.ravel()[index].set_title(title)
        axeslist.ravel()[index].set_axis_off()
    plt.tight_layout()

def img_path(img):
    DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
    return DATASET_PATH + '/images/' + img

def load_image(img, resized_fac=0.1):
    img = cv2.imread(img_path(img))
    w, h, _ = img.shape
    resized = cv2.resize(img, (int(h * resized_fac), int(w * resized_fac)), interpolation=cv2.INTER_AREA)
    return resized

import keras
from keras import Model
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
tf.__version__
img_width, img_height, _ = (224, 224, 3)
model_1 = ResNet50(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
model_1.trainable = False
model_1 = tf.keras.models.Sequential([model_1, GlobalMaxPooling2D()])
model_1.summary()

def get_embedding(model, img_name):
    img = image.load_img(img_path(img_name), target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return model.predict(x).reshape(-1)

emb = get_embedding(model_1, df.iloc[0].image)
emb.shape
a = [list(i) for i in complete_indices]
sub_cat = [df['subCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
mast_cat = [df['masterCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
resnet_50 = pd.DataFrame()
resnet_50['id'] = df['id']
resnet_50['recommended_index'] = [list(i) for i in complete_indices]
resnet_50['recommended_scores'] = complete_scores
resnet_50['masterCategory'] = df['masterCategory']
resnet_50['subCategory'] = df['subCategory']
resnet_50['Recommended_master_category'] = mast_cat
resnet_50['Recommended_sub_category'] = sub_cat
resnet_50['mean_recommended_score'] = mean_sim_scores
resnet_50.head(2)
code
104114996/cell_31
[ "text_plain_output_1.png" ]
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.layers import GlobalMaxPooling2D
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg19 import preprocess_input, decode_predictions
import tensorflow as tf
import keras
from keras import Model
tf.__version__
img_width, img_height, _ = (224, 224, 3)
model_1 = ResNet50(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
model_1.trainable = False
model_1 = tf.keras.models.Sequential([model_1, GlobalMaxPooling2D()])
model_1.summary()
img_width, img_height, _ = (224, 224, 3)
model_2 = VGG16(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
model_2.trainable = False
model_2 = tf.keras.models.Sequential([model_2, GlobalMaxPooling2D()])
model_2.summary()
img_width, img_height, _ = (224, 224, 3)
model_3 = VGG19(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
model_3.trainable = False
model_3 = tf.keras.models.Sequential([model_3, GlobalMaxPooling2D()])
model_3.summary()
code
104114996/cell_24
[ "text_html_output_1.png" ]
df_sample = df.copy()
map_embeddings = df_sample['image'].apply(lambda img: get_embedding(model_2, img))
df_embds_vgg16 = map_embeddings.apply(pd.Series)
code
104114996/cell_22
[ "text_plain_output_1.png" ]
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.layers import GlobalMaxPooling2D
import tensorflow as tf
import tensorflow as tf
import keras
from keras import Model
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg19 import preprocess_input, decode_predictions
from tensorflow.keras.layers import GlobalMaxPooling2D
tf.__version__
img_width, img_height, _ = (224, 224, 3)
model_1 = ResNet50(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
model_1.trainable = False
model_1 = tf.keras.models.Sequential([model_1, GlobalMaxPooling2D()])
model_1.summary()
img_width, img_height, _ = (224, 224, 3)
model_2 = VGG16(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
model_2.trainable = False
model_2 = tf.keras.models.Sequential([model_2, GlobalMaxPooling2D()])
model_2.summary()
code
104114996/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg19 import preprocess_input, decode_predictions
from tensorflow.keras.layers import GlobalMaxPooling2D
from tensorflow.keras.preprocessing import image
import cv2
import matplotlib.pyplot as plt
import numpy as np
import numpy as np # linear algebra
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import numpy as np
import pandas as pd
import os
DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
df = pd.read_csv(DATASET_PATH + '/styles.csv', nrows=5000, error_bad_lines=False)
df['image'] = df.apply(lambda row: str(row['id']) + '.jpg', axis=1).reset_index(drop=True)

def plot_figures(figures, nrows=1, ncols=1, figsize=(8, 8)):
    fig, axeslist = plt.subplots(ncols=ncols, nrows=nrows, figsize=figsize)
    for index, title in enumerate(figures):
        axeslist.ravel()[index].imshow(cv2.cvtColor(figures[title], cv2.COLOR_BGR2RGB))
        axeslist.ravel()[index].set_title(title)
        axeslist.ravel()[index].set_axis_off()
    plt.tight_layout()

def img_path(img):
    DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
    return DATASET_PATH + "/images/" + img

def load_image(img, resized_fac=0.1):
    img = cv2.imread(img_path(img))
    w, h, _ = img.shape
    resized = cv2.resize(img, (int(h * resized_fac), int(w * resized_fac)), interpolation=cv2.INTER_AREA)
    return resized

import tensorflow as tf
import keras
from keras import Model
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions
from tensorflow.keras.applications.vgg19 import preprocess_input, decode_predictions
from tensorflow.keras.layers import GlobalMaxPooling2D
tf.__version__
img_width, img_height, _ = (224, 224, 3)
model_1 = ResNet50(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
model_1.trainable = False
model_1 = tf.keras.models.Sequential([model_1, GlobalMaxPooling2D()])
model_1.summary()

def get_embedding(model, img_name):
    img = image.load_img(img_path(img_name), target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    return model.predict(x).reshape(-1)

emb = get_embedding(model_1, df.iloc[0].image)
emb.shape
a = [list(i) for i in complete_indices]
sub_cat = [df['subCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
mast_cat = [df['masterCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
resnet_50 = pd.DataFrame()
resnet_50['id'] = df['id']
resnet_50['recommended_index'] = [list(i) for i in complete_indices]
resnet_50['recommended_scores'] = complete_scores
resnet_50['masterCategory'] = df['masterCategory']
resnet_50['subCategory'] = df['subCategory']
resnet_50['Recommended_master_category'] = mast_cat
resnet_50['Recommended_sub_category'] = sub_cat
resnet_50['mean_recommended_score'] = mean_sim_scores
img_width, img_height, _ = (224, 224, 3)
model_2 = VGG16(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))
model_2.trainable = False
model_2 = tf.keras.models.Sequential([model_2, GlobalMaxPooling2D()])
model_2.summary()
vgg_emb = get_embedding(model_2, df.iloc[0].image)
vgg_emb.shape
a = [list(i) for i in complete_indices_vgg]
sub_cat = [df['subCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
mast_cat = [df['masterCategory'].iloc[a[index]].to_list() for index in range(df.shape[0])]
vgg_16 = pd.DataFrame()
vgg_16['id'] = df['id']
vgg_16['recommended_index'] = [list(i) for i in complete_indices_vgg]
vgg_16['recommended_scores'] = complete_scores_vgg
vgg_16['masterCategory'] = df['masterCategory']
vgg_16['subCategory'] = df['subCategory']
vgg_16['Recommended_master_category'] = mast_cat
vgg_16['Recommended_sub_category'] = sub_cat
vgg_16['mean_recommended_score'] = mean_sim_scores_vgg
vgg_16.head(2)
code
104114996/cell_12
[ "text_html_output_1.png" ]
df_sample = df.copy()
map_embeddings = df_sample['image'].apply(lambda img: get_embedding(model_1, img))
df_embds = map_embeddings.apply(pd.Series)
code
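The two embedding cells above call model.predict once per image via apply, which works but is slow. A sketch of a batched alternative under the same assumptions (img_path and the models as defined in the cells above; the batch size is illustrative):

import numpy as np
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input

def batched_embeddings(model, img_names, batch_size=64):
    # Load and preprocess a batch of images, then run one predict() per batch.
    embs = []
    for start in range(0, len(img_names), batch_size):
        batch = img_names[start:start + batch_size]
        arrays = [image.img_to_array(image.load_img(img_path(n), target_size=(224, 224))) for n in batch]
        embs.append(model.predict(preprocess_input(np.stack(arrays))))
    return np.concatenate(embs)

# e.g. emb_matrix = batched_embeddings(model_1, df['image'].tolist())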
104114996/cell_5
[ "text_plain_output_1.png" ]
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
DATASET_PATH = '../input/fashion-product-images-small/myntradataset'
df = pd.read_csv(DATASET_PATH + '/styles.csv', nrows=5000, error_bad_lines=False)
df['image'] = df.apply(lambda row: str(row['id']) + '.jpg', axis=1).reset_index(drop=True)
df.head()
code
49120120/cell_9
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import keras
import tensorflow as tf
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu)
img_height, img_width = (256, 256)
checkpoint_filepath = './weights.best.hdf5'
with tpu_strategy.scope():
    train_datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, validation_split=0.2)
    # NOTE: train_data_dir and batch_size are referenced here but only assigned further down in this cell.
    train_generator = train_datagen.flow_from_directory(train_data_dir, target_size=(img_height, img_width), batch_size=batch_size, class_mode='sparse', subset='training')
    validation_generator = train_datagen.flow_from_directory(train_data_dir, target_size=(img_height, img_width), batch_size=batch_size, class_mode='sparse', subset='validation')
    test_datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    es = keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=200)
    mc = keras.callbacks.ModelCheckpoint(checkpoint_filepath, monitor='val_accuracy', mode='max', verbose=1, save_best_only=True)
    input_tensor = keras.layers.Input(shape=(img_height, img_width, 3))
    v3 = keras.applications.InceptionV3(include_top=False, weights='imagenet', input_tensor=input_tensor, input_shape=None, pooling=None, classes=2, classifier_activation='softmax')
    model = keras.models.Sequential()
    model.add(v3)
    model.add(keras.layers.GlobalAveragePooling2D())
    model.add(keras.layers.Dropout(0.2))
    model.add(keras.layers.Dense(4096, activation='relu'))
    model.add(keras.layers.Dropout(0.2))
    model.add(keras.layers.Dense(64, activation='relu'))
    model.add(keras.layers.Dropout(0.2))
    model.add(keras.layers.Dense(10, activation='softmax'))
train_data_dir = '/kaggle/input/burning/train/'
test_data_dir = '/kaggle/input/burning/test/'
batch_size = 64
epochs = 128
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit_generator(train_generator, epochs=epochs, validation_data=validation_generator, callbacks=[mc, es])
EPOCHS = 128
checkpoint_filepath = './weights.best.hdf5'
es = keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=200)
mc = keras.callbacks.ModelCheckpoint(checkpoint_filepath, monitor='val_accuracy', mode='max', verbose=1, save_best_only=True)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
datagen = keras.preprocessing.image.ImageDataGenerator(rotation_range=10, zoom_range=0.1, shear_range=0.1, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True)
# NOTE: X_train, y_train, X_valid and y_valid are not defined in any cell shown for this notebook.
history = model.fit_generator(datagen.flow(X_train.reshape(-1, 28, 28, 1), y_train), epochs=EPOCHS, validation_data=(X_valid, y_valid), callbacks=[mc, es])
model.evaluate(test_datagen.flow_from_directory(test_data_dir))
code
49120120/cell_4
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import os # accessing directory structure
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
49120120/cell_6
[ "text_plain_output_1.png" ]
import keras
import tensorflow as tf
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu)
img_height, img_width = (256, 256)
checkpoint_filepath = './weights.best.hdf5'
with tpu_strategy.scope():
    train_datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, validation_split=0.2)
    train_generator = train_datagen.flow_from_directory(train_data_dir, target_size=(img_height, img_width), batch_size=batch_size, class_mode='sparse', subset='training')
    validation_generator = train_datagen.flow_from_directory(train_data_dir, target_size=(img_height, img_width), batch_size=batch_size, class_mode='sparse', subset='validation')
    test_datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    es = keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=200)
    mc = keras.callbacks.ModelCheckpoint(checkpoint_filepath, monitor='val_accuracy', mode='max', verbose=1, save_best_only=True)
    input_tensor = keras.layers.Input(shape=(img_height, img_width, 3))
    v3 = keras.applications.InceptionV3(include_top=False, weights='imagenet', input_tensor=input_tensor, input_shape=None, pooling=None, classes=2, classifier_activation='softmax')
    model = keras.models.Sequential()
    model.add(v3)
    model.add(keras.layers.GlobalAveragePooling2D())
    model.add(keras.layers.Dropout(0.2))
    model.add(keras.layers.Dense(4096, activation='relu'))
    model.add(keras.layers.Dropout(0.2))
    model.add(keras.layers.Dense(64, activation='relu'))
    model.add(keras.layers.Dropout(0.2))
    model.add(keras.layers.Dense(10, activation='softmax'))
train_data_dir = '/kaggle/input/burning/train/'
test_data_dir = '/kaggle/input/burning/test/'
batch_size = 64
epochs = 128
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
code
49120120/cell_7
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import keras
import tensorflow as tf
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
tpu_strategy = tf.distribute.experimental.TPUStrategy(tpu)
img_height, img_width = (256, 256)
checkpoint_filepath = './weights.best.hdf5'
with tpu_strategy.scope():
    train_datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, width_shift_range=0.1, height_shift_range=0.1, horizontal_flip=True, validation_split=0.2)
    train_generator = train_datagen.flow_from_directory(train_data_dir, target_size=(img_height, img_width), batch_size=batch_size, class_mode='sparse', subset='training')
    validation_generator = train_datagen.flow_from_directory(train_data_dir, target_size=(img_height, img_width), batch_size=batch_size, class_mode='sparse', subset='validation')
    test_datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    es = keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=200)
    mc = keras.callbacks.ModelCheckpoint(checkpoint_filepath, monitor='val_accuracy', mode='max', verbose=1, save_best_only=True)
    input_tensor = keras.layers.Input(shape=(img_height, img_width, 3))
    v3 = keras.applications.InceptionV3(include_top=False, weights='imagenet', input_tensor=input_tensor, input_shape=None, pooling=None, classes=2, classifier_activation='softmax')
    model = keras.models.Sequential()
    model.add(v3)
    model.add(keras.layers.GlobalAveragePooling2D())
    model.add(keras.layers.Dropout(0.2))
    model.add(keras.layers.Dense(4096, activation='relu'))
    model.add(keras.layers.Dropout(0.2))
    model.add(keras.layers.Dense(64, activation='relu'))
    model.add(keras.layers.Dropout(0.2))
    model.add(keras.layers.Dense(10, activation='softmax'))
train_data_dir = '/kaggle/input/burning/train/'
test_data_dir = '/kaggle/input/burning/test/'
batch_size = 64
epochs = 128
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit_generator(train_generator, epochs=epochs, validation_data=validation_generator, callbacks=[mc, es])
code
129032747/cell_9
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
from catboost import CatBoostRegressor
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
sns.set_style('darkgrid')
pd.options.display.float_format = '{:.4f}'.format
df = pd.read_csv('/kaggle/input/wikihow-features/wikihow.csv')
df.info()
code
129032747/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
from catboost import CatBoostRegressor
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
sns.set_style('darkgrid')
pd.options.display.float_format = '{:.4f}'.format
df = pd.read_csv('/kaggle/input/wikihow-features/wikihow.csv')
df[['references_count', 'references_count_per_text_length', 'references_count_per_method']] = df[['references_count', 'references_count_per_text_length', 'references_count_per_method']].fillna(0)
df = df.dropna(subset=['views', 'co_authors'])
df.describe().T
plt.axis('equal')
expert_articles = df[df['is_expert'] == 1]
non_expert_articles = df[df['is_expert'] == 0]
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(20, 7))
axes[0].hist(non_expert_articles['percent_helpful'], bins=50, alpha=0.4, label='Non-Expert', density=True)
axes[0].hist(expert_articles['percent_helpful'], bins=50, alpha=0.5, label='Expert', density=True)
axes[0].set_xlabel('Percent Helpful')
axes[0].set_xlim(70, 100)
axes[0].legend(loc='upper left')
sns.stripplot(x='is_expert', y='percent_helpful', data=df, jitter=0.35, ax=axes[1])
sns.barplot(x='is_expert', y='percent_helpful', data=df, errorbar=None, width=0.5, ax=axes[2])
axes[2].set_xlabel('Is Expert')
axes[2].set_ylabel('Percent Helpful')
axes[2].axhline(y=88.3, color='green', linestyle='--')
axes[2].set_ylim(50, 90)
plt.tight_layout()
plt.show()
code
129032747/cell_30
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
from catboost import CatBoostRegressor
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
sns.set_style('darkgrid')
pd.options.display.float_format = '{:.4f}'.format
df = pd.read_csv('/kaggle/input/wikihow-features/wikihow.csv')
df[['references_count', 'references_count_per_text_length', 'references_count_per_method']] = df[['references_count', 'references_count_per_text_length', 'references_count_per_method']].fillna(0)
df = df.dropna(subset=['views', 'co_authors'])
df.describe().T
plt.axis('equal')
expert_articles = df[df['is_expert'] == 1]
non_expert_articles = df[df['is_expert'] == 0]
# Create a grid of plots with 1 row and 3 columns
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(20, 7))
# Plot the histogram of percent helpful for non-expert and expert articles
axes[0].hist(non_expert_articles['percent_helpful'], bins=50, alpha=0.4, label='Non-Expert', density=True)
axes[0].hist(expert_articles['percent_helpful'], bins=50, alpha=0.5, label='Expert', density=True)
axes[0].set_xlabel('Percent Helpful')
axes[0].set_xlim(70, 100)
axes[0].legend(loc='upper left')
# Plot the strip plot of percent helpful by expert
sns.stripplot(x='is_expert', y='percent_helpful', data=df, jitter=0.35, ax=axes[1])  # type: ignore
# Plot the bar plot of percent helpful by expert
sns.barplot(x='is_expert', y='percent_helpful', data=df, errorbar=None, width=0.5, ax=axes[2])
axes[2].set_xlabel('Is Expert')
axes[2].set_ylabel('Percent Helpful')
axes[2].axhline(y=88.3, color='green', linestyle='--')
axes[2].set_ylim(50, 90)
# Adjust the layout
plt.tight_layout()
# Show the plot
plt.show()
# Create a grid of plots with 1 row and 2 columns
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(20, 7))
# Plot the histogram of character count for non-expert articles
axes[0].hist(non_expert_articles['character_count'], bins=50, alpha=0.4, label='Non-Expert', density=True)
axes[0].hist(expert_articles['character_count'], bins=50, alpha=0.5, label='Expert', density=True)
axes[0].set_xlabel('Number of Characters')
axes[0].set_ylabel('Count')
axes[0].set_xlim(0, 18000)
axes[0].set_yticks([])
# Plot the histogram of character count for expert articles
axes[1].hist(non_expert_articles['word_count'], bins=50, alpha=0.4, label='Non-Expert', density=True)
axes[1].hist(expert_articles['word_count'], bins=50, alpha=0.5, label='Expert', density=True)
axes[1].set_xlabel('Number of Words')
axes[1].set_xlim(0, 4000)
axes[1].set_yticks([])
sns.regplot(x='character_count', y='word_count', data=df, line_kws={'color': 'red'}, ax=axes[2])
# Add legend
axes[0].legend(loc='upper right')
axes[1].legend(loc='upper right')
# Display the plots
plt.tight_layout()
plt.show()
fig, ax = plt.subplots(figsize=(10, 6))
sns.kdeplot(non_expert_articles['mean_paragraph_size'], fill=True, alpha=0.4, label='Non-Expert', ax=ax)
sns.kdeplot(expert_articles['mean_paragraph_size'], fill=True, alpha=0.4, label='Expert', ax=ax)
ax.set_xlabel('Mean Paragraph Size')
ax.set_ylabel('Density')
ax.set_title('Distribution of Mean Paragraph Size in Expert and Non-Expert Articles')
ax.legend(loc='upper right')
plt.xlim(0, 1000)
plt.show()
code
129032747/cell_33
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
from catboost import CatBoostRegressor
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
sns.set_style('darkgrid')
pd.options.display.float_format = '{:.4f}'.format
df = pd.read_csv('/kaggle/input/wikihow-features/wikihow.csv')
df[['references_count', 'references_count_per_text_length', 'references_count_per_method']] = df[['references_count', 'references_count_per_text_length', 'references_count_per_method']].fillna(0)
df = df.dropna(subset=['views', 'co_authors'])
df.describe().T
plt.axis('equal')
expert_articles = df[df['is_expert'] == 1]
non_expert_articles = df[df['is_expert'] == 0]
# Create a grid of plots with 1 row and 3 columns
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(20, 7))
# Plot the histogram of percent helpful for non-expert and expert articles
axes[0].hist(non_expert_articles['percent_helpful'], bins=50, alpha=0.4, label='Non-Expert', density=True)
axes[0].hist(expert_articles['percent_helpful'], bins=50, alpha=0.5, label='Expert', density=True)
axes[0].set_xlabel('Percent Helpful')
axes[0].set_xlim(70, 100)
axes[0].legend(loc='upper left')
# Plot the strip plot of percent helpful by expert
sns.stripplot(x='is_expert', y='percent_helpful', data=df, jitter=0.35, ax=axes[1])  # type: ignore
# Plot the bar plot of percent helpful by expert
sns.barplot(x='is_expert', y='percent_helpful', data=df, errorbar=None, width=0.5, ax=axes[2])
axes[2].set_xlabel('Is Expert')
axes[2].set_ylabel('Percent Helpful')
axes[2].axhline(y=88.3, color='green', linestyle='--')
axes[2].set_ylim(50, 90)
# Adjust the layout
plt.tight_layout()
# Show the plot
plt.show()
# Create a grid of plots with 1 row and 2 columns
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(20, 7))
# Plot the histogram of character count for non-expert articles
axes[0].hist(non_expert_articles['character_count'], bins=50, alpha=0.4, label='Non-Expert', density=True)
axes[0].hist(expert_articles['character_count'], bins=50, alpha=0.5, label='Expert', density=True)
axes[0].set_xlabel('Number of Characters')
axes[0].set_ylabel('Count')
axes[0].set_xlim(0, 18000)
axes[0].set_yticks([])
# Plot the histogram of character count for expert articles
axes[1].hist(non_expert_articles['word_count'], bins=50, alpha=0.4, label='Non-Expert', density=True)
axes[1].hist(expert_articles['word_count'], bins=50, alpha=0.5, label='Expert', density=True)
axes[1].set_xlabel('Number of Words')
axes[1].set_xlim(0, 4000)
axes[1].set_yticks([])
sns.regplot(x='character_count', y='word_count', data=df, line_kws={'color': 'red'}, ax=axes[2])
# Add legend
axes[0].legend(loc='upper right')
axes[1].legend(loc='upper right')
# Display the plots
plt.tight_layout()
plt.show()
fig, ax = plt.subplots(figsize=(10, 6))
# Plot the kernel density estimate for non-expert and expert articles
sns.kdeplot(non_expert_articles['mean_paragraph_size'], fill=True, alpha=0.4, label='Non-Expert', ax=ax)
sns.kdeplot(expert_articles['mean_paragraph_size'], fill=True, alpha=0.4, label='Expert', ax=ax)
# Set the labels for the x and y axes
ax.set_xlabel('Mean Paragraph Size')
ax.set_ylabel('Density')
# Set the title of the plot
ax.set_title('Distribution of Mean Paragraph Size in Expert and Non-Expert Articles')
# Add a legend to the plot
ax.legend(loc='upper right')
plt.xlim(0, 1000)
# Show the plot
plt.show()
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(18, 7))
axes[0].hist(non_expert_articles['introduction_size'], bins=90, alpha=0.4, label='Non-Expert', density=True)
axes[0].hist(expert_articles['introduction_size'], bins=90, alpha=0.5, label='Expert', density=True)
axes[0].set_xlabel('introduction_size')
axes[0].set_ylabel('Count')
axes[0].set_xlim(0, 750)
axes[0].set_yticks([])
axes[0].legend(loc='upper right')
axes[1].hist(non_expert_articles['summary_size'], bins=90, alpha=0.4, label='Non-Expert', density=True)
axes[1].hist(expert_articles['summary_size'], bins=90, alpha=0.5, label='Expert', density=True)
axes[1].set_xlabel('summary_size')
axes[1].set_xlim(0, 900)
axes[1].set_yticks([])
axes[1].legend(loc='upper right')
plt.show()
code
129032747/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
from catboost import CatBoostRegressor
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
sns.set_style('darkgrid')
pd.options.display.float_format = '{:.4f}'.format
df = pd.read_csv('/kaggle/input/wikihow-features/wikihow.csv')
print(df.isnull().sum())
code
129032747/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
from catboost import CatBoostRegressor
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
sns.set_style('darkgrid')
pd.options.display.float_format = '{:.4f}'.format
df = pd.read_csv('/kaggle/input/wikihow-features/wikihow.csv')
df[['references_count', 'references_count_per_text_length', 'references_count_per_method']] = df[['references_count', 'references_count_per_text_length', 'references_count_per_method']].fillna(0)
df = df.dropna(subset=['views', 'co_authors'])
df.describe().T
plt.pie(df['is_expert'].value_counts(), labels=['Expert', 'Non-Expert'], autopct='%1.1f%%')
plt.axis('equal')
plt.title('Expert vs Non-Expert Articles')
plt.show()
code
129032747/cell_7
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
from catboost import CatBoostRegressor
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
sns.set_style('darkgrid')
pd.options.display.float_format = '{:.4f}'.format
code
129032747/cell_8
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
from catboost import CatBoostRegressor
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
sns.set_style('darkgrid')
pd.options.display.float_format = '{:.4f}'.format
df = pd.read_csv('/kaggle/input/wikihow-features/wikihow.csv')
df.head()
code
129032747/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
from catboost import CatBoostRegressor
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
sns.set_style('darkgrid')
pd.options.display.float_format = '{:.4f}'.format
df = pd.read_csv('/kaggle/input/wikihow-features/wikihow.csv')
df[['references_count', 'references_count_per_text_length', 'references_count_per_method']] = df[['references_count', 'references_count_per_text_length', 'references_count_per_method']].fillna(0)
df = df.dropna(subset=['views', 'co_authors'])
df.info()
code
129032747/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
from catboost import CatBoostRegressor
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
sns.set_style('darkgrid')
pd.options.display.float_format = '{:.4f}'.format
df = pd.read_csv('/kaggle/input/wikihow-features/wikihow.csv')
df[['references_count', 'references_count_per_text_length', 'references_count_per_method']] = df[['references_count', 'references_count_per_text_length', 'references_count_per_method']].fillna(0)
df = df.dropna(subset=['views', 'co_authors'])
df.describe().T
code
129032747/cell_27
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
from catboost import CatBoostRegressor
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
sns.set_style('darkgrid')
pd.options.display.float_format = '{:.4f}'.format
df = pd.read_csv('/kaggle/input/wikihow-features/wikihow.csv')
df[['references_count', 'references_count_per_text_length', 'references_count_per_method']] = df[['references_count', 'references_count_per_text_length', 'references_count_per_method']].fillna(0)
df = df.dropna(subset=['views', 'co_authors'])
df.describe().T
plt.axis('equal')
expert_articles = df[df['is_expert'] == 1]
non_expert_articles = df[df['is_expert'] == 0]
# Create a grid of plots with 1 row and 3 columns
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(20, 7))
# Plot the histogram of percent helpful for non-expert and expert articles
axes[0].hist(non_expert_articles['percent_helpful'], bins=50, alpha=0.4, label='Non-Expert', density=True)
axes[0].hist(expert_articles['percent_helpful'], bins=50, alpha=0.5, label='Expert', density=True)
axes[0].set_xlabel('Percent Helpful')
axes[0].set_xlim(70, 100)
axes[0].legend(loc='upper left')
# Plot the strip plot of percent helpful by expert
sns.stripplot(x='is_expert', y='percent_helpful', data=df, jitter=0.35, ax=axes[1])  # type: ignore
# Plot the bar plot of percent helpful by expert
sns.barplot(x='is_expert', y='percent_helpful', data=df, errorbar=None, width=0.5, ax=axes[2])
axes[2].set_xlabel('Is Expert')
axes[2].set_ylabel('Percent Helpful')
axes[2].axhline(y=88.3, color='green', linestyle='--')
axes[2].set_ylim(50, 90)
# Adjust the layout
plt.tight_layout()
# Show the plot
plt.show()
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(20, 7))
axes[0].hist(non_expert_articles['character_count'], bins=50, alpha=0.4, label='Non-Expert', density=True)
axes[0].hist(expert_articles['character_count'], bins=50, alpha=0.5, label='Expert', density=True)
axes[0].set_xlabel('Number of Characters')
axes[0].set_ylabel('Count')
axes[0].set_xlim(0, 18000)
axes[0].set_yticks([])
axes[1].hist(non_expert_articles['word_count'], bins=50, alpha=0.4, label='Non-Expert', density=True)
axes[1].hist(expert_articles['word_count'], bins=50, alpha=0.5, label='Expert', density=True)
axes[1].set_xlabel('Number of Words')
axes[1].set_xlim(0, 4000)
axes[1].set_yticks([])
sns.regplot(x='character_count', y='word_count', data=df, line_kws={'color': 'red'}, ax=axes[2])
axes[0].legend(loc='upper right')
axes[1].legend(loc='upper right')
plt.tight_layout()
plt.show()
code
129032747/cell_12
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
import lightgbm as lgb
from catboost import CatBoostRegressor
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
sns.set_style('darkgrid')
pd.options.display.float_format = '{:.4f}'.format
df = pd.read_csv('/kaggle/input/wikihow-features/wikihow.csv')
print(df[df['views'].isnull() | df['co_authors'].isnull()].iloc[:5, [0, 1, 19, 20, -1]])
code
128006019/cell_6
[ "image_output_1.png" ]
import pandas as pd
toxic_comment_processed_seqlen = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train-processed-seqlen128.csv')
toxic_comment = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train.csv')
unintended_bias_preprocessed = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train-processed-seqlen128.csv')
unintended_bias = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train.csv')
toxic_comment.head()
code
128006019/cell_11
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import pandas as pd
toxic_comment_processed_seqlen = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train-processed-seqlen128.csv')
toxic_comment = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train.csv')
unintended_bias_preprocessed = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train-processed-seqlen128.csv')
unintended_bias = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train.csv')
toxic_percentage = toxic_comment['toxic'].value_counts(normalize=True) * 100
unintended_bias_toxic_percentage = (unintended_bias['toxic'] > 0.5).value_counts(normalize=True) * 100
print(toxic_percentage)
code
128006019/cell_19
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
toxic_comment_processed_seqlen = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train-processed-seqlen128.csv')
toxic_comment = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train.csv')
unintended_bias_preprocessed = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train-processed-seqlen128.csv')
unintended_bias = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train.csv')
toxic_percentage = toxic_comment['toxic'].value_counts(normalize=True) * 100
unintended_bias_toxic_percentage = (unintended_bias['toxic'] > 0.5).value_counts(normalize=True) * 100
plt.xticks([0, 1], ['Non-toxic', 'Toxic'], rotation=0)
plt.xticks([0, 1], ['Non-toxic', 'Toxic'], rotation=0)
plt.tight_layout()
plt.figure(figsize=(12, 6))
sns.histplot(data=toxic_comment, x='word_length', hue='toxic', bins=100, common_norm=False)
plt.title('Word Length Distribution by Toxicity')
plt.xlabel('Word Length')
plt.ylabel('Frequency')
plt.show()
code
128006019/cell_7
[ "image_output_1.png" ]
import pandas as pd
toxic_comment_processed_seqlen = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train-processed-seqlen128.csv')
toxic_comment = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train.csv')
unintended_bias_preprocessed = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train-processed-seqlen128.csv')
unintended_bias = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train.csv')
unintended_bias_preprocessed.head()
code
128006019/cell_18
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
toxic_comment_processed_seqlen = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train-processed-seqlen128.csv')
toxic_comment = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train.csv')
unintended_bias_preprocessed = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train-processed-seqlen128.csv')
unintended_bias = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train.csv')
toxic_percentage = toxic_comment['toxic'].value_counts(normalize=True) * 100
unintended_bias_toxic_percentage = (unintended_bias['toxic'] > 0.5).value_counts(normalize=True) * 100
plt.xticks([0, 1], ['Non-toxic', 'Toxic'], rotation=0)
plt.xticks([0, 1], ['Non-toxic', 'Toxic'], rotation=0)
plt.tight_layout()
plt.figure(figsize=(12, 6))
sns.histplot(data=toxic_comment, x='char_length', hue='toxic', bins=100, common_norm=False)
plt.title('Character Length Distribution by Toxicity')
plt.xlabel('Character Length')
plt.ylabel('Frequency')
plt.show()
code
128006019/cell_32
[ "image_output_1.png" ]
from nltk import FreqDist
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
toxic_comment_processed_seqlen = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train-processed-seqlen128.csv')
toxic_comment = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train.csv')
unintended_bias_preprocessed = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train-processed-seqlen128.csv')
unintended_bias = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train.csv')
toxic_percentage = toxic_comment['toxic'].value_counts(normalize=True) * 100
unintended_bias_toxic_percentage = (unintended_bias['toxic'] > 0.5).value_counts(normalize=True) * 100
plt.xticks([0, 1], ['Non-toxic', 'Toxic'], rotation=0)
plt.xticks([0, 1], ['Non-toxic', 'Toxic'], rotation=0)
plt.tight_layout()
all_words = []
for words in toxic_comment['tokenized_words']:
    all_words.extend(words)
freq_dist = FreqDist(all_words)
plt.figure(figsize=(12, 6))
freq_dist.plot(20, cumulative=False)
plt.show()
code
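The FreqDist cell above relies on a tokenized_words column that is created in a cell outside this slice. A plausible sketch of how it could be built with NLTK (the exact tokenization and stopword handling are assumptions, not confirmed by the notebook):

import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

nltk.download('punkt')
nltk.download('stopwords')
stop_words = set(stopwords.words('english'))

def tokenize(text):
    # Lowercase, keep alphabetic tokens only, drop English stopwords.
    return [w for w in word_tokenize(str(text).lower()) if w.isalpha() and w not in stop_words]

toxic_comment['tokenized_words'] = toxic_comment['comment_text'].apply(tokenize)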
128006019/cell_8
[ "image_output_2.png", "image_output_1.png" ]
import pandas as pd
toxic_comment_processed_seqlen = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train-processed-seqlen128.csv')
toxic_comment = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train.csv')
unintended_bias_preprocessed = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train-processed-seqlen128.csv')
unintended_bias = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train.csv')
unintended_bias.head()
code
128006019/cell_3
[ "image_output_1.png" ]
import nltk
nltk.download('vader_lexicon')
from nltk.sentiment import SentimentIntensityAnalyzer
nltk.download('stopwords')
nltk.download('wordnet')
code
128006019/cell_24
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
toxic_comment_processed_seqlen = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train-processed-seqlen128.csv')
toxic_comment = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train.csv')
unintended_bias_preprocessed = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train-processed-seqlen128.csv')
unintended_bias = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train.csv')
toxic_percentage = toxic_comment['toxic'].value_counts(normalize=True) * 100
unintended_bias_toxic_percentage = (unintended_bias['toxic'] > 0.5).value_counts(normalize=True) * 100
plt.xticks([0, 1], ['Non-toxic', 'Toxic'], rotation=0)
plt.xticks([0, 1], ['Non-toxic', 'Toxic'], rotation=0)
plt.tight_layout()
plt.figure(figsize=(12, 6))
sns.histplot(data=toxic_comment[toxic_comment['word_length'] > 900], x='word_length', hue='is_toxic', bins=50, kde=True)
plt.title('Distribution of Word Length for Toxic and Non-Toxic Comments (from 900 words)')
plt.show()
code
128006019/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
toxic_comment_processed_seqlen = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train-processed-seqlen128.csv')
toxic_comment = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train.csv')
unintended_bias_preprocessed = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train-processed-seqlen128.csv')
unintended_bias = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train.csv')
toxic_percentage = toxic_comment['toxic'].value_counts(normalize=True) * 100
unintended_bias_toxic_percentage = (unintended_bias['toxic'] > 0.5).value_counts(normalize=True) * 100
plt.figure(figsize=(16, 5))
plt.subplot(1, 2, 1)
toxic_percentage.plot(kind='bar', color=['blue', 'orange'])
plt.title('Toxic Comment Distribution (jigsaw-toxic-comment-train.csv)')
plt.xlabel('Toxic')
plt.ylabel('Percentage')
plt.xticks([0, 1], ['Non-toxic', 'Toxic'], rotation=0)
plt.subplot(1, 2, 2)
unintended_bias_toxic_percentage.plot(kind='bar', color=['blue', 'orange'])
plt.title('Toxic Comment Distribution (jigsaw-unintended-bias-train.csv)')
plt.xlabel('Toxic')
plt.ylabel('Percentage')
plt.xticks([0, 1], ['Non-toxic', 'Toxic'], rotation=0)
plt.tight_layout()
plt.show()
code
128006019/cell_22
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
toxic_comment_processed_seqlen = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train-processed-seqlen128.csv')
toxic_comment = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train.csv')
unintended_bias_preprocessed = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train-processed-seqlen128.csv')
unintended_bias = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train.csv')
toxic_percentage = toxic_comment['toxic'].value_counts(normalize=True) * 100
unintended_bias_toxic_percentage = (unintended_bias['toxic'] > 0.5).value_counts(normalize=True) * 100
plt.xticks([0, 1], ['Non-toxic', 'Toxic'], rotation=0)
plt.xticks([0, 1], ['Non-toxic', 'Toxic'], rotation=0)
plt.tight_layout()
plt.figure(figsize=(12, 6))
sns.histplot(data=toxic_comment[toxic_comment['char_length'] > 2000], x='char_length', hue='is_toxic', bins=50, kde=True)
plt.title('Distribution of Character Length for Toxic and Non-Toxic Comments (from 2000 characters)')
plt.show()
code
128006019/cell_27
[ "image_output_1.png" ]
import pandas as pd
import random
toxic_comment_processed_seqlen = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train-processed-seqlen128.csv')
toxic_comment = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train.csv')
unintended_bias_preprocessed = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train-processed-seqlen128.csv')
unintended_bias = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train.csv')
char_length_threshold = 4500
outliers = toxic_comment[toxic_comment['char_length'] > char_length_threshold]
print(f'Number of outliers: {len(outliers)}')
num_random_outliers = 1
random_indices = random.sample(range(len(outliers)), num_random_outliers)
random_outliers = outliers.iloc[random_indices]
for idx, outlier in random_outliers.iterrows():
    print(f"\nIndex: {idx}\nComment: {outlier['comment_text']}")
code
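The random.sample/iloc combination in the cell above can be expressed with pandas' built-in sampler, which also makes the draw reproducible (the random_state value is illustrative):

random_outliers = outliers.sample(n=num_random_outliers, random_state=42)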
128006019/cell_37
[ "image_output_1.png" ]
from nltk import FreqDist
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
toxic_comment_processed_seqlen = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train-processed-seqlen128.csv')
toxic_comment = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train.csv')
unintended_bias_preprocessed = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train-processed-seqlen128.csv')
unintended_bias = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train.csv')
toxic_percentage = toxic_comment['toxic'].value_counts(normalize=True) * 100
unintended_bias_toxic_percentage = (unintended_bias['toxic'] > 0.5).value_counts(normalize=True) * 100
plt.xticks([0, 1], ['Non-toxic', 'Toxic'], rotation=0)
plt.xticks([0, 1], ['Non-toxic', 'Toxic'], rotation=0)
plt.tight_layout()
all_words = []
for words in toxic_comment['tokenized_words']:
    all_words.extend(words)
freq_dist = FreqDist(all_words)
all_words = []
for words in toxic_comment['tokenized_words']:
    all_words.extend(words)
freq_dist = FreqDist(all_words)
plt.figure(figsize=(12, 6))
freq_dist.plot(20, cumulative=False)
plt.show()
code
128006019/cell_12
[ "text_html_output_1.png" ]
import pandas as pd
toxic_comment_processed_seqlen = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train-processed-seqlen128.csv')
toxic_comment = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train.csv')
unintended_bias_preprocessed = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train-processed-seqlen128.csv')
unintended_bias = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train.csv')
toxic_percentage = toxic_comment['toxic'].value_counts(normalize=True) * 100
unintended_bias_toxic_percentage = (unintended_bias['toxic'] > 0.5).value_counts(normalize=True) * 100
print(unintended_bias_toxic_percentage)
code
128006019/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
toxic_comment_processed_seqlen = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train-processed-seqlen128.csv')
toxic_comment = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-toxic-comment-train.csv')
unintended_bias_preprocessed = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train-processed-seqlen128.csv')
unintended_bias = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/jigsaw-unintended-bias-train.csv')
toxic_comment_processed_seqlen.head()
code
73101177/cell_21
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
ss = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train_df = train.drop('id', axis=1)
num_data = train_df.select_dtypes('number')
cat_data = train_df.select_dtypes('object')
fig = plt.figure(figsize=(20, 15))
for i, j in enumerate(num_data.columns):
    fig.add_subplot(4, 4, i + 1)
    sns.boxplot(x=j, data=num_data)
plt.show()
code
73101177/cell_13
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
ss = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
sns.pairplot(train)
code
73101177/cell_9
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
ss = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
print(train.isnull().sum())
sns.heatmap(train.isnull(), cbar=False, yticklabels=False)
code
73101177/cell_34
[ "image_output_1.png" ]
from sklearn.compose import make_column_transformer
from sklearn.preprocessing import StandardScaler, RobustScaler, OrdinalEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
ss = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train_df = train.drop('id', axis=1)

def get_unique_sum(cat_list):
    pass

cat_lists = list(train.select_dtypes('object').columns)
get_unique_sum(cat_lists)
X = train.drop(['id', 'target'], axis=1)
y = train['target']
test_df = test.drop('id', axis=1)
ct = make_column_transformer((OrdinalEncoder(), cat_lists), (StandardScaler(), ['cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont7', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13']), (RobustScaler(), ['cont0', 'cont6', 'cont8']), remainder='passthrough')
X_train = pd.DataFrame(ct.fit_transform(X))
test = pd.DataFrame(ct.fit_transform(test_df))
X_train.head()
code
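Note that the cell above calls ct.fit_transform on both X and test_df, so the encoders and scalers are re-fit on the test data. A sketch of the usual train-only fit (handle_unknown is an assumption added to guard against categories that appear only in the test set):

ct = make_column_transformer(
    (OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1), cat_lists),
    (StandardScaler(), ['cont1', 'cont2', 'cont3', 'cont4', 'cont5', 'cont7', 'cont9', 'cont10', 'cont11', 'cont12', 'cont13']),
    (RobustScaler(), ['cont0', 'cont6', 'cont8']),
    remainder='passthrough')
ct.fit(X)
X_train = pd.DataFrame(ct.transform(X))
test = pd.DataFrame(ct.transform(test_df))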
73101177/cell_23
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
ss = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train_df = train.drop('id', axis=1)
num_data = train_df.select_dtypes('number')
cat_data = train_df.select_dtypes('object')
fig = plt.figure(figsize=(20, 15))
for i, j in enumerate(num_data.columns):
    fig.add_subplot(4, 4, i + 1)
    sns.boxplot(x=j, data=num_data)
plt.show()
fig = plt.figure(figsize=(20, 15))
for i, j in enumerate(num_data.columns):
    fig.add_subplot(4, 4, i + 1)
    ax = sns.kdeplot(x=j, data=num_data)
    ax = sns.histplot(x=j, data=num_data)
plt.show()
code
73101177/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
ss = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
print(test.shape)
test.head()
code
73101177/cell_39
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
ss = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train_df = train.drop('id', axis=1)

def get_unique_sum(cat_list):
    pass

cat_lists = list(train.select_dtypes('object').columns)
get_unique_sum(cat_lists)
X = train.drop(['id', 'target'], axis=1)
y = train['target']
test_df = test.drop('id', axis=1)
train['kfold'] = -1
train.head()
code
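The kfold column above is only initialised to -1; the notebook presumably fills it in a later cell. A minimal sketch of the standard assignment with scikit-learn (5 splits and the random_state are assumptions):

from sklearn.model_selection import KFold

kf = KFold(n_splits=5, shuffle=True, random_state=42)
for fold, (_, valid_idx) in enumerate(kf.split(train)):
    train.loc[valid_idx, 'kfold'] = fold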
73101177/cell_26
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
ss = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train_df = train.drop('id', axis=1)
num_data = train_df.select_dtypes('number')
cat_data = train_df.select_dtypes('object')
fig = plt.figure(figsize=(20, 15))
for i, j in enumerate(num_data.columns):
    fig.add_subplot(4, 4, i + 1)
    sns.boxplot(x=j, data=num_data)
plt.show()
fig = plt.figure(figsize=(20, 15))
for i, j in enumerate(num_data.columns):
    fig.add_subplot(4, 4, i + 1)
    ax = sns.kdeplot(x=j, data=num_data)
    ax = sns.histplot(x=j, data=num_data)
plt.show()
fig = plt.figure(figsize=(20, 15))
for i, j in enumerate(cat_data.columns):
    fig.add_subplot(4, 4, i + 1)
    ax = sns.barplot(x=j, y=train['target'], data=cat_data)
plt.show()
code
73101177/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
73101177/cell_7
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
ss = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train.info()
code
73101177/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
ss = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train_df = train.drop('id', axis=1)
num_data = train_df.select_dtypes('number')
cat_data = train_df.select_dtypes('object')
plt.figure(figsize=(10, 6))
sns.heatmap(num_data.corr(), annot=True, cbar=True)
plt.show()
code
73101177/cell_28
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
ss = pd.read_csv('../input/30-days-of-ml/sample_submission.csv')
train_df = train.drop('id', axis=1)
num_data = train_df.select_dtypes('number')
cat_data = train_df.select_dtypes('object')
fig = plt.figure(figsize=(20, 15))
for i, j in enumerate(num_data.columns):
    fig.add_subplot(4, 4, i + 1)
    sns.boxplot(x=j, data=num_data)
plt.show()
fig = plt.figure(figsize=(20, 15))
for i, j in enumerate(num_data.columns):
    fig.add_subplot(4, 4, i + 1)
    ax = sns.kdeplot(x=j, data=num_data)
    ax = sns.histplot(x=j, data=num_data)
plt.show()
fig = plt.figure(figsize=(20, 15))
for i, j in enumerate(cat_data.columns):
    fig.add_subplot(4, 4, i + 1)
    ax = sns.barplot(x=j, y=train['target'], data=cat_data)
plt.show()
fig = plt.figure(figsize=(20, 15))
for i, j in enumerate(cat_data.columns):
    fig.add_subplot(4, 3, i + 1)
    ax = sns.boxplot(x=j, y='target', data=train)
plt.show()
code