path: string (lengths 13 to 17)
screenshot_names: sequence (lengths 1 to 873)
code: string (lengths 0 to 40.4k)
cell_type: string (1 distinct value: "code")
34127932/cell_54
[ "text_html_output_1.png" ]
import pandas as pd  # for structuring the data

train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
passengerid = test_data['passenger_ID']
dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
dataTrain = pd.get_dummies(dataTrain, columns=['sex'])
dataTest = pd.get_dummies(dataTest, columns=['sex'])
dataTrain = pd.get_dummies(dataTrain, columns=['embarked'])
dataTest = pd.get_dummies(dataTest, columns=['embarked'])
y = dataTrain['survived']
x = dataTrain.drop('survived', axis=1)
(x.shape, y.shape)
code
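One caveat about the encoding in the cell above: calling pd.get_dummies on train and test separately can yield mismatched dummy columns whenever a category is missing from one split. A minimal sketch of realigning the two frames (toy data for illustration):

import pandas as pd

# Hypothetical frames: 'embarked' has levels in train that test lacks.
train = pd.get_dummies(pd.DataFrame({'embarked': ['S', 'C', 'Q']}), columns=['embarked'])
test = pd.get_dummies(pd.DataFrame({'embarked': ['S', 'S']}), columns=['embarked'])
# Reindex test to the train columns, filling the missing dummies with 0.
test = test.reindex(columns=train.columns, fill_value=0)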
34127932/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # for structuring the data

train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
train_data.describe()
code
34127932/cell_52
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # for structuring the data

train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
passengerid = test_data['passenger_ID']
dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
dataTrain = pd.get_dummies(dataTrain, columns=['sex'])
dataTest = pd.get_dummies(dataTest, columns=['sex'])
dataTrain = pd.get_dummies(dataTrain, columns=['embarked'])
dataTest = pd.get_dummies(dataTest, columns=['embarked'])
dataTrain.head()
code
34127932/cell_49
[ "text_plain_output_1.png" ]
import pandas as pd  # for structuring the data
import seaborn as sns  # for visualization

train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
passengerid = test_data['passenger_ID']
dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
dataTrain = pd.get_dummies(dataTrain, columns=['sex'])
sns.countplot(x='embarked', hue='survived', data=dataTrain)
code
34127932/cell_18
[ "text_html_output_1.png" ]
import pandas as pd  # for structuring the data
import seaborn as sns  # for visualization

train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
passengerid = test_data['passenger_ID']
dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
sns.countplot(x='pclass', hue='survived', data=dataTrain)
code
34127932/cell_62
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler
import numpy as np  # for mathematical manipulation of the data
import pandas as pd  # for structuring the data

train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
passengerid = test_data['passenger_ID']
dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
dataTrain = pd.get_dummies(dataTrain, columns=['sex'])
dataTest = pd.get_dummies(dataTest, columns=['sex'])
dataTrain = pd.get_dummies(dataTrain, columns=['embarked'])
dataTest = pd.get_dummies(dataTest, columns=['embarked'])
y = dataTrain['survived']
x = dataTrain.drop('survived', axis=1)
(x.shape, y.shape)
stdscale = MinMaxScaler()
x_new = stdscale.fit_transform(x)
testd = stdscale.transform(dataTest)
(x_new.shape, testd.shape)
X = pd.DataFrame(x_new, columns=x.columns)
testData = pd.DataFrame(testd, columns=dataTest.columns)
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
(x_train.shape, y_train.shape)
np.unique(y_train)
code
34127932/cell_58
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler
import pandas as pd  # for structuring the data

train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
passengerid = test_data['passenger_ID']
dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
dataTrain = pd.get_dummies(dataTrain, columns=['sex'])
dataTest = pd.get_dummies(dataTest, columns=['sex'])
dataTrain = pd.get_dummies(dataTrain, columns=['embarked'])
dataTest = pd.get_dummies(dataTest, columns=['embarked'])
y = dataTrain['survived']
x = dataTrain.drop('survived', axis=1)
(x.shape, y.shape)
stdscale = MinMaxScaler()
x_new = stdscale.fit_transform(x)
testd = stdscale.transform(dataTest)
(x_new.shape, testd.shape)
X = pd.DataFrame(x_new, columns=x.columns)
testData = pd.DataFrame(testd, columns=dataTest.columns)
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
(x_train.shape, y_train.shape)
code
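Worth noting about the cell above: the MinMaxScaler is fitted on the full feature matrix before train_test_split, so the held-out rows influence the scaling statistics. A leakage-free variant of the same pattern, sketched on synthetic data:

from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import numpy as np

X = np.random.rand(100, 4)  # stand-in feature matrix
y = np.random.randint(0, 2, 100)
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
scaler = MinMaxScaler()
x_train = scaler.fit_transform(x_train)  # fit the min/max on training data only
x_test = scaler.transform(x_test)        # reuse the same statistics for the held-out split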
34127932/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # for structuring the data

train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
train_data.info()
code
34127932/cell_15
[ "text_html_output_1.png" ]
import pandas as pd  # for structuring the data

train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
passengerid = test_data['passenger_ID']
dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
dataTest.head()
code
34127932/cell_3
[ "text_plain_output_1.png" ]
import os

os.getcwd()
os.chdir('/kaggle/input')
os.listdir()
code
34127932/cell_17
[ "text_html_output_1.png" ]
import pandas as pd  # for structuring the data

train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
passengerid = test_data['passenger_ID']
dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
dataTrain['pclass'].value_counts()
code
34127932/cell_46
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # for structuring the data
import seaborn as sns  # for visualization

train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
passengerid = test_data['passenger_ID']
dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
dataTrain = pd.get_dummies(dataTrain, columns=['sex'])
sns.distplot(dataTrain['fare'])
code
34127932/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # for structuring the data

train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
passengerid = test_data['passenger_ID']
dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
dataTrain.head()
code
34127932/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd  # for structuring the data

train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
passengerid = test_data['passenger_ID']
dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
dataTrain = pd.get_dummies(dataTrain, columns=['sex'])
dataTrain.head()
code
34127932/cell_53
[ "text_html_output_1.png" ]
import pandas as pd  # for structuring the data

train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
passengerid = test_data['passenger_ID']
dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
dataTrain = pd.get_dummies(dataTrain, columns=['sex'])
dataTest = pd.get_dummies(dataTest, columns=['sex'])
dataTrain = pd.get_dummies(dataTrain, columns=['embarked'])
dataTest = pd.get_dummies(dataTest, columns=['embarked'])
dataTest.head()
code
34127932/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # for structuring the data

train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
train_data.head()
code
34127932/cell_37
[ "text_plain_output_1.png" ]
import pandas as pd  # for structuring the data
import seaborn as sns  # for visualization

train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
passengerid = test_data['passenger_ID']
dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
dataTrain = pd.get_dummies(dataTrain, columns=['sex'])
sns.countplot(x='sibsp', hue='survived', data=dataTrain)
code
34127932/cell_36
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # for structuring the data

train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
dataTrain = train_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
passengerid = test_data['passenger_ID']
dataTest = test_data.drop(['passenger_ID', 'name', 'ticket', 'cabin'], axis=1)
dataTrain = pd.get_dummies(dataTrain, columns=['sex'])
dataTrain['sibsp'].value_counts()
code
16114195/cell_9
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.reset_index(drop=True, inplace=True)
train['SalePrice'] = np.log1p(train['SalePrice'])
train['SalePrice'].hist(bins=50)
y = train['SalePrice'].reset_index(drop=True)
code
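Because the target above is modelled in log space, predictions have to be mapped back with the exact inverse of np.log1p, which is np.expm1:

import numpy as np

price = np.array([208500.0, 181500.0])
logged = np.log1p(price)       # log(1 + x), the transform used above
recovered = np.expm1(logged)   # exact inverse, needed when converting predictions back
assert np.allclose(recovered, price)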
16114195/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.describe()
code
16114195/cell_11
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.reset_index(drop=True, inplace=True)
train['SalePrice'] = np.log1p(train['SalePrice'])
y = train['SalePrice'].reset_index(drop=True)
train = train.drop(['Id', 'SalePrice'], axis=1)
test = test.drop(['Id'], axis=1)
x = pd.concat([train, test]).reset_index(drop=True)
x.info()
code
16114195/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train['SalePrice'].hist(bins=50)
code
16114195/cell_10
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.reset_index(drop=True, inplace=True)
train['SalePrice'] = np.log1p(train['SalePrice'])
y = train['SalePrice'].reset_index(drop=True)
train = train.drop(['Id', 'SalePrice'], axis=1)
test = test.drop(['Id'], axis=1)
x = pd.concat([train, test]).reset_index(drop=True)
x.describe()
code
16114195/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.reset_index(drop=True, inplace=True)
train['SalePrice'] = np.log1p(train['SalePrice'])
y = train['SalePrice'].reset_index(drop=True)
train = train.drop(['Id', 'SalePrice'], axis=1)
test = test.drop(['Id'], axis=1)
x = pd.concat([train, test]).reset_index(drop=True)
x['MSSubClass'] = x['MSSubClass'].apply(str)
x['YrSold'] = x['YrSold'].astype(str)
x['MoSold'] = x['MoSold'].astype(str)
x['Functional'] = x['Functional'].fillna('Typ')
x['Electrical'] = x['Electrical'].fillna('SBrkr')
x['KitchenQual'] = x['KitchenQual'].fillna('TA')
x['Exterior1st'] = x['Exterior1st'].fillna(x['Exterior1st'].mode()[0])
x['Exterior2nd'] = x['Exterior2nd'].fillna(x['Exterior2nd'].mode()[0])
x['SaleType'] = x['SaleType'].fillna(x['SaleType'].mode()[0])
for col in ('GarageYrBlt', 'GarageArea', 'GarageCars'):
    x[col] = x[col].fillna(0)
for col in ['GarageType', 'GarageFinish', 'GarageQual', 'GarageCond']:
    x[col] = x[col].fillna('None')
for col in ('BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath'):
    x[col] = x[col].fillna(0)
for col in ('BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2'):
    x[col] = x[col].fillna('None')
objects = []
for i in x.columns:
    if x[i].dtype == object:
        objects.append(i)
x.update(x[objects].fillna('None'))
numeric_dtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
numerics = []
for i in x.columns:
    if x[i].dtype in numeric_dtypes:
        numerics.append(i)
x.update(x[numerics].fillna(0))
x.info()
code
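The mode()[0] idiom in the cell above works because Series.mode returns a Series of the most frequent values (there can be ties), so [0] picks the first one:

import pandas as pd

s = pd.Series(['VinylSd', 'VinylSd', 'HdBoard', None])
# Series.mode() skips NaN by default and returns a Series; [0] selects one mode.
filled = s.fillna(s.mode()[0])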
16114195/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.describe()
code
129020867/cell_2
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from tensorflow.keras.models import Model
import numpy as np
import tensorflow as tf

def seed_everything(SEED):
    np.random.seed(SEED)
    tf.random.set_seed(SEED)

seed = 42
seed_everything(seed)

'\nResUNet++ architecture in Keras TensorFlow\n'
import os
import cv2
from tensorflow.keras.layers import *

def squeeze_excite_block(inputs, ratio=8):
    init = inputs
    channel_axis = -1
    filters = init.shape[channel_axis]
    se_shape = (1, 1, filters)
    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)
    x = Multiply()([init, se])
    return x

def stem_block(x, n_filter, strides):
    x_init = x
    x = Conv2D(n_filter, (3, 3), padding='same', strides=strides)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(n_filter, (3, 3), padding='same')(x)
    s = Conv2D(n_filter, (1, 1), padding='same', strides=strides)(x_init)
    s = BatchNormalization()(s)
    x = Add()([x, s])
    x = squeeze_excite_block(x)
    return x

def resnet_block(x, n_filter, strides=1):
    x_init = x
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(n_filter, (3, 3), padding='same', strides=strides)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(n_filter, (3, 3), padding='same', strides=1)(x)
    s = Conv2D(n_filter, (1, 1), padding='same', strides=strides)(x_init)
    s = BatchNormalization()(s)
    x = Add()([x, s])
    x = squeeze_excite_block(x)
    return x

def aspp_block(x, num_filters, rate_scale=1):
    x1 = Conv2D(num_filters, (3, 3), dilation_rate=(6 * rate_scale, 6 * rate_scale), padding='same')(x)
    x1 = BatchNormalization()(x1)
    x2 = Conv2D(num_filters, (3, 3), dilation_rate=(12 * rate_scale, 12 * rate_scale), padding='same')(x)
    x2 = BatchNormalization()(x2)
    x3 = Conv2D(num_filters, (3, 3), dilation_rate=(18 * rate_scale, 18 * rate_scale), padding='same')(x)
    x3 = BatchNormalization()(x3)
    x4 = Conv2D(num_filters, (3, 3), padding='same')(x)
    x4 = BatchNormalization()(x4)
    y = Add()([x1, x2, x3, x4])
    y = Conv2D(num_filters, (1, 1), padding='same')(y)
    return y

def attention_block(g, x):
    """
    g: Output of Parallel Encoder block
    x: Output of Previous Decoder block
    """
    filters = x.shape[-1]
    g_conv = BatchNormalization()(g)
    g_conv = Activation('relu')(g_conv)
    g_conv = Conv2D(filters, (3, 3), padding='same')(g_conv)
    g_pool = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(g_conv)
    x_conv = BatchNormalization()(x)
    x_conv = Activation('relu')(x_conv)
    x_conv = Conv2D(filters, (3, 3), padding='same')(x_conv)
    gc_sum = Add()([g_pool, x_conv])
    gc_conv = BatchNormalization()(gc_sum)
    gc_conv = Activation('relu')(gc_conv)
    gc_conv = Conv2D(filters, (3, 3), padding='same')(gc_conv)
    gc_mul = Multiply()([gc_conv, x])
    return gc_mul

def build_model(input_size=512):
    n_filters = [16, 32, 64, 128, 256]
    inputs = Input((input_size, input_size, 3))
    c0 = inputs
    c1 = stem_block(c0, n_filters[0], strides=1)
    c2 = resnet_block(c1, n_filters[1], strides=2)
    c3 = resnet_block(c2, n_filters[2], strides=2)
    c4 = resnet_block(c3, n_filters[3], strides=2)
    b1 = aspp_block(c4, n_filters[4])
    d1 = attention_block(c3, b1)
    d1 = UpSampling2D((2, 2))(d1)
    d1 = Concatenate()([d1, c3])
    d1 = resnet_block(d1, n_filters[3])
    d2 = attention_block(c2, d1)
    d2 = UpSampling2D((2, 2))(d2)
    d2 = Concatenate()([d2, c2])
    d2 = resnet_block(d2, n_filters[2])
    d3 = attention_block(c1, d2)
    d3 = UpSampling2D((2, 2))(d3)
    d3 = Concatenate()([d3, c1])
    d3 = resnet_block(d3, n_filters[1])
    outputs = aspp_block(d3, n_filters[0])
    outputs = Conv2D(1, (1, 1), padding='same')(outputs)
    outputs = Activation('sigmoid')(outputs)
    model = Model(inputs, outputs)
    return model

unet = build_model()
unet.summary()
code
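A quick shape sanity check on the architecture above: the three stride-2 encoder stages are undone by the decoder's three 2x upsamplings, so the predicted mask matches the input resolution. A sketch, assuming build_model from the cell above is in scope:

import numpy as np
import tensorflow as tf

model = build_model(input_size=128)                       # smaller size keeps the check cheap
out = model(np.zeros((1, 128, 128, 3), dtype=np.float32))
print(out.shape)                                          # expected: (1, 128, 128, 1)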
129020867/cell_7
[ "text_plain_output_1.png" ]
import numpy as np  # np is used below but was not imported in the original cell
from skimage.io import imshow
from matplotlib import pyplot as plt

imshow(x_train.next()[0].astype(np.float32))
plt.show()
imshow(np.squeeze(y_train.next()[0].astype(np.float32)))
plt.show()
imshow(x_val.next()[0].astype(np.float32))
plt.show()
imshow(np.squeeze(y_val.next()[0].astype(np.float32)))
plt.show()
code
129020867/cell_15
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np  # np is used below but was not imported in the original cell
from skimage.io import imshow
from matplotlib import pyplot as plt

imshow(x_test.next()[0].astype(np.float32))
plt.show()
imshow(np.squeeze(y_pred[0].astype(np.float32)))
plt.show()
imshow(y_test.next()[0].astype(np.float32))
plt.show()
code
129020867/cell_17
[ "text_plain_output_1.png" ]
from keras.preprocessing import image
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.metrics import Precision, Recall, MeanIoU
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam, Nadam, SGD
import csv
import numpy as np
import tensorflow as tf

def seed_everything(SEED):
    np.random.seed(SEED)
    tf.random.set_seed(SEED)

seed = 42
seed_everything(seed)

'\nResUNet++ architecture in Keras TensorFlow\n'
import os
import cv2
from tensorflow.keras.layers import *

def squeeze_excite_block(inputs, ratio=8):
    init = inputs
    channel_axis = -1
    filters = init.shape[channel_axis]
    se_shape = (1, 1, filters)
    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)
    x = Multiply()([init, se])
    return x

def stem_block(x, n_filter, strides):
    x_init = x
    x = Conv2D(n_filter, (3, 3), padding='same', strides=strides)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(n_filter, (3, 3), padding='same')(x)
    s = Conv2D(n_filter, (1, 1), padding='same', strides=strides)(x_init)
    s = BatchNormalization()(s)
    x = Add()([x, s])
    x = squeeze_excite_block(x)
    return x

def resnet_block(x, n_filter, strides=1):
    x_init = x
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(n_filter, (3, 3), padding='same', strides=strides)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(n_filter, (3, 3), padding='same', strides=1)(x)
    s = Conv2D(n_filter, (1, 1), padding='same', strides=strides)(x_init)
    s = BatchNormalization()(s)
    x = Add()([x, s])
    x = squeeze_excite_block(x)
    return x

def aspp_block(x, num_filters, rate_scale=1):
    x1 = Conv2D(num_filters, (3, 3), dilation_rate=(6 * rate_scale, 6 * rate_scale), padding='same')(x)
    x1 = BatchNormalization()(x1)
    x2 = Conv2D(num_filters, (3, 3), dilation_rate=(12 * rate_scale, 12 * rate_scale), padding='same')(x)
    x2 = BatchNormalization()(x2)
    x3 = Conv2D(num_filters, (3, 3), dilation_rate=(18 * rate_scale, 18 * rate_scale), padding='same')(x)
    x3 = BatchNormalization()(x3)
    x4 = Conv2D(num_filters, (3, 3), padding='same')(x)
    x4 = BatchNormalization()(x4)
    y = Add()([x1, x2, x3, x4])
    y = Conv2D(num_filters, (1, 1), padding='same')(y)
    return y

def attention_block(g, x):
    """
    g: Output of Parallel Encoder block
    x: Output of Previous Decoder block
    """
    filters = x.shape[-1]
    g_conv = BatchNormalization()(g)
    g_conv = Activation('relu')(g_conv)
    g_conv = Conv2D(filters, (3, 3), padding='same')(g_conv)
    g_pool = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(g_conv)
    x_conv = BatchNormalization()(x)
    x_conv = Activation('relu')(x_conv)
    x_conv = Conv2D(filters, (3, 3), padding='same')(x_conv)
    gc_sum = Add()([g_pool, x_conv])
    gc_conv = BatchNormalization()(gc_sum)
    gc_conv = Activation('relu')(gc_conv)
    gc_conv = Conv2D(filters, (3, 3), padding='same')(gc_conv)
    gc_mul = Multiply()([gc_conv, x])
    return gc_mul

def build_model(input_size=512):
    n_filters = [16, 32, 64, 128, 256]
    inputs = Input((input_size, input_size, 3))
    c0 = inputs
    c1 = stem_block(c0, n_filters[0], strides=1)
    c2 = resnet_block(c1, n_filters[1], strides=2)
    c3 = resnet_block(c2, n_filters[2], strides=2)
    c4 = resnet_block(c3, n_filters[3], strides=2)
    b1 = aspp_block(c4, n_filters[4])
    d1 = attention_block(c3, b1)
    d1 = UpSampling2D((2, 2))(d1)
    d1 = Concatenate()([d1, c3])
    d1 = resnet_block(d1, n_filters[3])
    d2 = attention_block(c2, d1)
    d2 = UpSampling2D((2, 2))(d2)
    d2 = Concatenate()([d2, c2])
    d2 = resnet_block(d2, n_filters[2])
    d3 = attention_block(c1, d2)
    d3 = UpSampling2D((2, 2))(d3)
    d3 = Concatenate()([d3, c1])
    d3 = resnet_block(d3, n_filters[1])
    outputs = aspp_block(d3, n_filters[0])
    outputs = Conv2D(1, (1, 1), padding='same')(outputs)
    outputs = Activation('sigmoid')(outputs)
    model = Model(inputs, outputs)
    return model

unet = build_model()
unet.summary()

smooth = 1.0

def dice_coef(y_true, y_pred):
    y_true_f = tf.keras.layers.Flatten()(y_true)
    y_pred_f = tf.keras.layers.Flatten()(y_pred)
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)

def dice_loss(y_true, y_pred):
    y_true_f = tf.keras.layers.Flatten()(y_true)
    y_pred_f = tf.keras.layers.Flatten()(y_pred)
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    return 1.0 - (2.0 * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)

def unet_loss(y_true, y_pred):
    bce = tf.keras.losses.BinaryCrossentropy()(y_true, y_pred)
    dice = dice_loss(y_true, y_pred)
    loss = bce + dice
    return loss

trainImages = np.load('/kaggle/input/isib2016-allnpdata-noclahe-noaug/train_images.npy')
trainMasks = np.load('/kaggle/input/isib2016-allnpdata-noclahe-noaug/train_masks.npy')
valImages = np.load('/kaggle/input/isib2016-allnpdata-noclahe-noaug/val_images.npy')
valMasks = np.load('/kaggle/input/isib2016-allnpdata-noclahe-noaug/val_masks.npy')

BATCH_SIZE = 8
image_datagen_train = image.ImageDataGenerator()
mask_datagen_train = image.ImageDataGenerator()
image_datagen_train.fit(trainImages, augment=False, seed=seed)
mask_datagen_train.fit(trainMasks, augment=False, seed=seed)
x_train = image_datagen_train.flow(trainImages, batch_size=BATCH_SIZE, shuffle=True, seed=seed)
y_train = mask_datagen_train.flow(trainMasks, batch_size=BATCH_SIZE, shuffle=True, seed=seed)

BATCH_SIZE = 8
image_datagen_val = image.ImageDataGenerator()
mask_datagen_val = image.ImageDataGenerator()
image_datagen_val.fit(valImages, augment=False, seed=seed)
mask_datagen_val.fit(valMasks, augment=False, seed=seed)
x_val = image_datagen_val.flow(valImages, batch_size=BATCH_SIZE, shuffle=True, seed=seed)
y_val = mask_datagen_val.flow(valMasks, batch_size=BATCH_SIZE, shuffle=True, seed=seed)

train_generator = zip(x_train, y_train)
val_generator = zip(x_val, y_val)

lr = 0.0001
optimizer = Nadam(lr)
metrics = [Recall(), Precision(), dice_coef, MeanIoU(num_classes=2)]
unet.compile(loss=dice_loss, optimizer=optimizer, metrics=metrics)
checkpoint1 = ModelCheckpoint('/kaggle/working/MDLChkP_Everything/unet.h5', verbose=1, monitor='val_loss', mode='min', save_best_only=True, save_weights_only=False)
checkpoint2 = ModelCheckpoint('/kaggle/working/MDLChkP_WeightsOnly/', verbose=1, monitor='val_loss', mode='min', save_best_only=True, save_weights_only=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, min_lr=1e-06, verbose=1)
early_stopping = EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=True)
callbacks = [checkpoint1, checkpoint2, reduce_lr, early_stopping]
train_steps = len(x_train)
val_steps = len(x_val)
history = unet.fit_generator(train_generator, validation_data=val_generator, validation_steps=val_steps, steps_per_epoch=train_steps, epochs=120, callbacks=callbacks)

testImages = np.load('/kaggle/input/isib2016-allnpdata-noclahe-noaug/test_images.npy')
testMasks = np.load('/kaggle/input/isib2016-allnpdata-noclahe-noaug/test_masks.npy')

BATCH_SIZE = 8
image_datagen_test = image.ImageDataGenerator()
mask_datagen_test = image.ImageDataGenerator()
image_datagen_test.fit(testImages, augment=False, seed=seed)
mask_datagen_test.fit(testMasks, augment=False, seed=seed)
x_test = image_datagen_test.flow(testImages, batch_size=BATCH_SIZE, shuffle=True, seed=seed)
y_test = mask_datagen_test.flow(testMasks, batch_size=1, shuffle=True, seed=seed)

preds_test = unet.predict(x_test, verbose=1)
y_pred = (preds_test > 0.5).astype(np.float32)
dice_scores = []
for i in range(len(y_pred)):
    k = y_test.next()
    arr = np.squeeze(k, axis=0)
    dice = dice_coef(arr, y_pred[i])
    dice_scores.append(dice)
average_dice = np.mean(dice_scores)

my_values = [tensor.numpy() for tensor in dice_scores]
with open('dice_scores.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(my_values)

unet.save('lastunetsave.h5')
tf.saved_model.save(unet, 'unet_SMF')

unet = tf.keras.models.load_model('/kaggle/working/lastunetsave.h5', compile=False)
preds_test = unet.predict(x_test, verbose=1)
y_pred = (preds_test > 0.5).astype(np.float32)
dice_scores = []
for i in range(len(y_pred)):
    k = y_test.next()
    arr = np.squeeze(k, axis=0)
    dice = dice_coef(arr, y_pred[i])
    dice_scores.append(dice)
    print('\n Dice score: \t \n', dice)
average_dice = np.mean(dice_scores)
print('Average dice coefficient: ', average_dice)

my_values = [tensor.numpy() for tensor in dice_scores]
with open('dice_scores2.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(my_values)
code
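For reference, the smoothed Dice coefficient computed above is Dice = (2 * intersection + smooth) / (sum(y_true) + sum(y_pred) + smooth) with smooth = 1.0; a tiny NumPy check of the formula on a toy binary mask:

import numpy as np

smooth = 1.0
y_true = np.array([1.0, 1.0, 0.0, 0.0])
y_pred = np.array([1.0, 0.0, 0.0, 0.0])
intersection = np.sum(y_true * y_pred)
dice = (2.0 * intersection + smooth) / (np.sum(y_true) + np.sum(y_pred) + smooth)
print(dice)  # (2*1 + 1) / (2 + 1 + 1) = 0.75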
129020867/cell_14
[ "image_output_4.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from keras.preprocessing import image
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.metrics import Precision, Recall, MeanIoU
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam, Nadam, SGD
import csv
import numpy as np
import tensorflow as tf

def seed_everything(SEED):
    np.random.seed(SEED)
    tf.random.set_seed(SEED)

seed = 42
seed_everything(seed)

'\nResUNet++ architecture in Keras TensorFlow\n'
import os
import cv2
from tensorflow.keras.layers import *

def squeeze_excite_block(inputs, ratio=8):
    init = inputs
    channel_axis = -1
    filters = init.shape[channel_axis]
    se_shape = (1, 1, filters)
    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)
    x = Multiply()([init, se])
    return x

def stem_block(x, n_filter, strides):
    x_init = x
    x = Conv2D(n_filter, (3, 3), padding='same', strides=strides)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(n_filter, (3, 3), padding='same')(x)
    s = Conv2D(n_filter, (1, 1), padding='same', strides=strides)(x_init)
    s = BatchNormalization()(s)
    x = Add()([x, s])
    x = squeeze_excite_block(x)
    return x

def resnet_block(x, n_filter, strides=1):
    x_init = x
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(n_filter, (3, 3), padding='same', strides=strides)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(n_filter, (3, 3), padding='same', strides=1)(x)
    s = Conv2D(n_filter, (1, 1), padding='same', strides=strides)(x_init)
    s = BatchNormalization()(s)
    x = Add()([x, s])
    x = squeeze_excite_block(x)
    return x

def aspp_block(x, num_filters, rate_scale=1):
    x1 = Conv2D(num_filters, (3, 3), dilation_rate=(6 * rate_scale, 6 * rate_scale), padding='same')(x)
    x1 = BatchNormalization()(x1)
    x2 = Conv2D(num_filters, (3, 3), dilation_rate=(12 * rate_scale, 12 * rate_scale), padding='same')(x)
    x2 = BatchNormalization()(x2)
    x3 = Conv2D(num_filters, (3, 3), dilation_rate=(18 * rate_scale, 18 * rate_scale), padding='same')(x)
    x3 = BatchNormalization()(x3)
    x4 = Conv2D(num_filters, (3, 3), padding='same')(x)
    x4 = BatchNormalization()(x4)
    y = Add()([x1, x2, x3, x4])
    y = Conv2D(num_filters, (1, 1), padding='same')(y)
    return y

def attention_block(g, x):
    """
    g: Output of Parallel Encoder block
    x: Output of Previous Decoder block
    """
    filters = x.shape[-1]
    g_conv = BatchNormalization()(g)
    g_conv = Activation('relu')(g_conv)
    g_conv = Conv2D(filters, (3, 3), padding='same')(g_conv)
    g_pool = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(g_conv)
    x_conv = BatchNormalization()(x)
    x_conv = Activation('relu')(x_conv)
    x_conv = Conv2D(filters, (3, 3), padding='same')(x_conv)
    gc_sum = Add()([g_pool, x_conv])
    gc_conv = BatchNormalization()(gc_sum)
    gc_conv = Activation('relu')(gc_conv)
    gc_conv = Conv2D(filters, (3, 3), padding='same')(gc_conv)
    gc_mul = Multiply()([gc_conv, x])
    return gc_mul

def build_model(input_size=512):
    n_filters = [16, 32, 64, 128, 256]
    inputs = Input((input_size, input_size, 3))
    c0 = inputs
    c1 = stem_block(c0, n_filters[0], strides=1)
    c2 = resnet_block(c1, n_filters[1], strides=2)
    c3 = resnet_block(c2, n_filters[2], strides=2)
    c4 = resnet_block(c3, n_filters[3], strides=2)
    b1 = aspp_block(c4, n_filters[4])
    d1 = attention_block(c3, b1)
    d1 = UpSampling2D((2, 2))(d1)
    d1 = Concatenate()([d1, c3])
    d1 = resnet_block(d1, n_filters[3])
    d2 = attention_block(c2, d1)
    d2 = UpSampling2D((2, 2))(d2)
    d2 = Concatenate()([d2, c2])
    d2 = resnet_block(d2, n_filters[2])
    d3 = attention_block(c1, d2)
    d3 = UpSampling2D((2, 2))(d3)
    d3 = Concatenate()([d3, c1])
    d3 = resnet_block(d3, n_filters[1])
    outputs = aspp_block(d3, n_filters[0])
    outputs = Conv2D(1, (1, 1), padding='same')(outputs)
    outputs = Activation('sigmoid')(outputs)
    model = Model(inputs, outputs)
    return model

unet = build_model()
unet.summary()

smooth = 1.0

def dice_coef(y_true, y_pred):
    y_true_f = tf.keras.layers.Flatten()(y_true)
    y_pred_f = tf.keras.layers.Flatten()(y_pred)
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)

def dice_loss(y_true, y_pred):
    y_true_f = tf.keras.layers.Flatten()(y_true)
    y_pred_f = tf.keras.layers.Flatten()(y_pred)
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    return 1.0 - (2.0 * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)

def unet_loss(y_true, y_pred):
    bce = tf.keras.losses.BinaryCrossentropy()(y_true, y_pred)
    dice = dice_loss(y_true, y_pred)
    loss = bce + dice
    return loss

trainImages = np.load('/kaggle/input/isib2016-allnpdata-noclahe-noaug/train_images.npy')
trainMasks = np.load('/kaggle/input/isib2016-allnpdata-noclahe-noaug/train_masks.npy')
valImages = np.load('/kaggle/input/isib2016-allnpdata-noclahe-noaug/val_images.npy')
valMasks = np.load('/kaggle/input/isib2016-allnpdata-noclahe-noaug/val_masks.npy')

BATCH_SIZE = 8
image_datagen_train = image.ImageDataGenerator()
mask_datagen_train = image.ImageDataGenerator()
image_datagen_train.fit(trainImages, augment=False, seed=seed)
mask_datagen_train.fit(trainMasks, augment=False, seed=seed)
x_train = image_datagen_train.flow(trainImages, batch_size=BATCH_SIZE, shuffle=True, seed=seed)
y_train = mask_datagen_train.flow(trainMasks, batch_size=BATCH_SIZE, shuffle=True, seed=seed)

BATCH_SIZE = 8
image_datagen_val = image.ImageDataGenerator()
mask_datagen_val = image.ImageDataGenerator()
image_datagen_val.fit(valImages, augment=False, seed=seed)
mask_datagen_val.fit(valMasks, augment=False, seed=seed)
x_val = image_datagen_val.flow(valImages, batch_size=BATCH_SIZE, shuffle=True, seed=seed)
y_val = mask_datagen_val.flow(valMasks, batch_size=BATCH_SIZE, shuffle=True, seed=seed)

train_generator = zip(x_train, y_train)
val_generator = zip(x_val, y_val)

lr = 0.0001
optimizer = Nadam(lr)
metrics = [Recall(), Precision(), dice_coef, MeanIoU(num_classes=2)]
unet.compile(loss=dice_loss, optimizer=optimizer, metrics=metrics)
checkpoint1 = ModelCheckpoint('/kaggle/working/MDLChkP_Everything/unet.h5', verbose=1, monitor='val_loss', mode='min', save_best_only=True, save_weights_only=False)
checkpoint2 = ModelCheckpoint('/kaggle/working/MDLChkP_WeightsOnly/', verbose=1, monitor='val_loss', mode='min', save_best_only=True, save_weights_only=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, min_lr=1e-06, verbose=1)
early_stopping = EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=True)
callbacks = [checkpoint1, checkpoint2, reduce_lr, early_stopping]
train_steps = len(x_train)
val_steps = len(x_val)
history = unet.fit_generator(train_generator, validation_data=val_generator, validation_steps=val_steps, steps_per_epoch=train_steps, epochs=120, callbacks=callbacks)

testImages = np.load('/kaggle/input/isib2016-allnpdata-noclahe-noaug/test_images.npy')
testMasks = np.load('/kaggle/input/isib2016-allnpdata-noclahe-noaug/test_masks.npy')

BATCH_SIZE = 8
image_datagen_test = image.ImageDataGenerator()
mask_datagen_test = image.ImageDataGenerator()
image_datagen_test.fit(testImages, augment=False, seed=seed)
mask_datagen_test.fit(testMasks, augment=False, seed=seed)
x_test = image_datagen_test.flow(testImages, batch_size=BATCH_SIZE, shuffle=True, seed=seed)
y_test = mask_datagen_test.flow(testMasks, batch_size=1, shuffle=True, seed=seed)

preds_test = unet.predict(x_test, verbose=1)
y_pred = (preds_test > 0.5).astype(np.float32)
dice_scores = []
for i in range(len(y_pred)):
    k = y_test.next()
    arr = np.squeeze(k, axis=0)
    dice = dice_coef(arr, y_pred[i])
    dice_scores.append(dice)
    print('\n Dice score: \t \n', dice)
average_dice = np.mean(dice_scores)
print('Average dice coefficient: ', average_dice)

my_values = [tensor.numpy() for tensor in dice_scores]
with open('dice_scores.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(my_values)
code
129020867/cell_10
[ "text_plain_output_1.png" ]
from keras.preprocessing import image
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.metrics import Precision, Recall, MeanIoU
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam, Nadam, SGD
import numpy as np
import tensorflow as tf

def seed_everything(SEED):
    np.random.seed(SEED)
    tf.random.set_seed(SEED)

seed = 42
seed_everything(seed)

'\nResUNet++ architecture in Keras TensorFlow\n'
import os
import cv2
from tensorflow.keras.layers import *

def squeeze_excite_block(inputs, ratio=8):
    init = inputs
    channel_axis = -1
    filters = init.shape[channel_axis]
    se_shape = (1, 1, filters)
    se = GlobalAveragePooling2D()(init)
    se = Reshape(se_shape)(se)
    se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)
    x = Multiply()([init, se])
    return x

def stem_block(x, n_filter, strides):
    x_init = x
    x = Conv2D(n_filter, (3, 3), padding='same', strides=strides)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(n_filter, (3, 3), padding='same')(x)
    s = Conv2D(n_filter, (1, 1), padding='same', strides=strides)(x_init)
    s = BatchNormalization()(s)
    x = Add()([x, s])
    x = squeeze_excite_block(x)
    return x

def resnet_block(x, n_filter, strides=1):
    x_init = x
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(n_filter, (3, 3), padding='same', strides=strides)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(n_filter, (3, 3), padding='same', strides=1)(x)
    s = Conv2D(n_filter, (1, 1), padding='same', strides=strides)(x_init)
    s = BatchNormalization()(s)
    x = Add()([x, s])
    x = squeeze_excite_block(x)
    return x

def aspp_block(x, num_filters, rate_scale=1):
    x1 = Conv2D(num_filters, (3, 3), dilation_rate=(6 * rate_scale, 6 * rate_scale), padding='same')(x)
    x1 = BatchNormalization()(x1)
    x2 = Conv2D(num_filters, (3, 3), dilation_rate=(12 * rate_scale, 12 * rate_scale), padding='same')(x)
    x2 = BatchNormalization()(x2)
    x3 = Conv2D(num_filters, (3, 3), dilation_rate=(18 * rate_scale, 18 * rate_scale), padding='same')(x)
    x3 = BatchNormalization()(x3)
    x4 = Conv2D(num_filters, (3, 3), padding='same')(x)
    x4 = BatchNormalization()(x4)
    y = Add()([x1, x2, x3, x4])
    y = Conv2D(num_filters, (1, 1), padding='same')(y)
    return y

def attention_block(g, x):
    """
    g: Output of Parallel Encoder block
    x: Output of Previous Decoder block
    """
    filters = x.shape[-1]
    g_conv = BatchNormalization()(g)
    g_conv = Activation('relu')(g_conv)
    g_conv = Conv2D(filters, (3, 3), padding='same')(g_conv)
    g_pool = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(g_conv)
    x_conv = BatchNormalization()(x)
    x_conv = Activation('relu')(x_conv)
    x_conv = Conv2D(filters, (3, 3), padding='same')(x_conv)
    gc_sum = Add()([g_pool, x_conv])
    gc_conv = BatchNormalization()(gc_sum)
    gc_conv = Activation('relu')(gc_conv)
    gc_conv = Conv2D(filters, (3, 3), padding='same')(gc_conv)
    gc_mul = Multiply()([gc_conv, x])
    return gc_mul

def build_model(input_size=512):
    n_filters = [16, 32, 64, 128, 256]
    inputs = Input((input_size, input_size, 3))
    c0 = inputs
    c1 = stem_block(c0, n_filters[0], strides=1)
    c2 = resnet_block(c1, n_filters[1], strides=2)
    c3 = resnet_block(c2, n_filters[2], strides=2)
    c4 = resnet_block(c3, n_filters[3], strides=2)
    b1 = aspp_block(c4, n_filters[4])
    d1 = attention_block(c3, b1)
    d1 = UpSampling2D((2, 2))(d1)
    d1 = Concatenate()([d1, c3])
    d1 = resnet_block(d1, n_filters[3])
    d2 = attention_block(c2, d1)
    d2 = UpSampling2D((2, 2))(d2)
    d2 = Concatenate()([d2, c2])
    d2 = resnet_block(d2, n_filters[2])
    d3 = attention_block(c1, d2)
    d3 = UpSampling2D((2, 2))(d3)
    d3 = Concatenate()([d3, c1])
    d3 = resnet_block(d3, n_filters[1])
    outputs = aspp_block(d3, n_filters[0])
    outputs = Conv2D(1, (1, 1), padding='same')(outputs)
    outputs = Activation('sigmoid')(outputs)
    model = Model(inputs, outputs)
    return model

unet = build_model()
unet.summary()

smooth = 1.0

def dice_coef(y_true, y_pred):
    y_true_f = tf.keras.layers.Flatten()(y_true)
    y_pred_f = tf.keras.layers.Flatten()(y_pred)
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)

def dice_loss(y_true, y_pred):
    y_true_f = tf.keras.layers.Flatten()(y_true)
    y_pred_f = tf.keras.layers.Flatten()(y_pred)
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    return 1.0 - (2.0 * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)

def unet_loss(y_true, y_pred):
    bce = tf.keras.losses.BinaryCrossentropy()(y_true, y_pred)
    dice = dice_loss(y_true, y_pred)
    loss = bce + dice
    return loss

trainImages = np.load('/kaggle/input/isib2016-allnpdata-noclahe-noaug/train_images.npy')
trainMasks = np.load('/kaggle/input/isib2016-allnpdata-noclahe-noaug/train_masks.npy')
valImages = np.load('/kaggle/input/isib2016-allnpdata-noclahe-noaug/val_images.npy')
valMasks = np.load('/kaggle/input/isib2016-allnpdata-noclahe-noaug/val_masks.npy')

BATCH_SIZE = 8
image_datagen_train = image.ImageDataGenerator()
mask_datagen_train = image.ImageDataGenerator()
image_datagen_train.fit(trainImages, augment=False, seed=seed)
mask_datagen_train.fit(trainMasks, augment=False, seed=seed)
x_train = image_datagen_train.flow(trainImages, batch_size=BATCH_SIZE, shuffle=True, seed=seed)
y_train = mask_datagen_train.flow(trainMasks, batch_size=BATCH_SIZE, shuffle=True, seed=seed)

BATCH_SIZE = 8
image_datagen_val = image.ImageDataGenerator()
mask_datagen_val = image.ImageDataGenerator()
image_datagen_val.fit(valImages, augment=False, seed=seed)
mask_datagen_val.fit(valMasks, augment=False, seed=seed)
x_val = image_datagen_val.flow(valImages, batch_size=BATCH_SIZE, shuffle=True, seed=seed)
y_val = mask_datagen_val.flow(valMasks, batch_size=BATCH_SIZE, shuffle=True, seed=seed)

train_generator = zip(x_train, y_train)
val_generator = zip(x_val, y_val)

lr = 0.0001
optimizer = Nadam(lr)
metrics = [Recall(), Precision(), dice_coef, MeanIoU(num_classes=2)]
unet.compile(loss=dice_loss, optimizer=optimizer, metrics=metrics)
checkpoint1 = ModelCheckpoint('/kaggle/working/MDLChkP_Everything/unet.h5', verbose=1, monitor='val_loss', mode='min', save_best_only=True, save_weights_only=False)
checkpoint2 = ModelCheckpoint('/kaggle/working/MDLChkP_WeightsOnly/', verbose=1, monitor='val_loss', mode='min', save_best_only=True, save_weights_only=True)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, min_lr=1e-06, verbose=1)
early_stopping = EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=True)
callbacks = [checkpoint1, checkpoint2, reduce_lr, early_stopping]
train_steps = len(x_train)
val_steps = len(x_val)
history = unet.fit_generator(train_generator, validation_data=val_generator, validation_steps=val_steps, steps_per_epoch=train_steps, epochs=120, callbacks=callbacks)
code
90147502/cell_21
[ "text_plain_output_1.png" ]
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
train_df.isnull().sum()
train_df.isnull().any().any()
train_df['Sentiment'].value_counts()
code
90147502/cell_13
[ "text_plain_output_1.png" ]
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
test_df.shape
code
90147502/cell_9
[ "text_plain_output_1.png" ]
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
code
90147502/cell_57
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import tensorflow as tf

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
test_df.shape
train_df.isnull().sum()
test_df.isnull().sum()
train_df.isnull().any().any()
test_df.isnull().any().any()
train = train_df.to_pandas()
test = test_df.to_pandas()
X_train = train['Phrase']
y_train = train['Sentiment']
tokenize = Tokenizer()
tokenize.fit_on_texts(X_train.values)
X_test = test['Phrase']
X_train = tokenize.texts_to_sequences(X_train)
X_test = tokenize.texts_to_sequences(X_test)
max_sequence_len = max([len(s.split()) for s in train['Phrase']])
X_train = pad_sequences(X_train, max_sequence_len, padding='pre')
X_test = pad_sequences(X_test, max_sequence_len, padding='pre')
embedding_dimension = 100
input_val = len(tokenize.word_index) + 1
model_ANN = tf.keras.Sequential([
    tf.keras.layers.Embedding(input_val, embedding_dimension, input_length=max_sequence_len),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dense(5, activation='softmax'),
])
code
90147502/cell_23
[ "text_plain_output_1.png" ]
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
train_df.isnull().sum()
train_df.isnull().any().any()
train = train_df.to_pandas()
sns.countplot(x='Sentiment', data=train)
code
90147502/cell_6
[ "text_plain_output_1.png" ]
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.head()
code
90147502/cell_48
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
test_df.shape
train_df.isnull().sum()
test_df.isnull().sum()
train_df.isnull().any().any()
test_df.isnull().any().any()
train = train_df.to_pandas()
test = test_df.to_pandas()
X_train = train['Phrase']
y_train = train['Sentiment']
tokenize = Tokenizer()
tokenize.fit_on_texts(X_train.values)
X_test = test['Phrase']
X_train = tokenize.texts_to_sequences(X_train)
X_test = tokenize.texts_to_sequences(X_test)
max_sequence_len = max([len(s.split()) for s in train['Phrase']])
X_train = pad_sequences(X_train, max_sequence_len, padding='pre')
X_test = pad_sequences(X_test, max_sequence_len, padding='pre')
print(X_train.shape)
print(X_test.shape)
code
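pad_sequences with padding='pre', as used above, left-pads each sequence with zeros up to maxlen; a toy example:

from tensorflow.keras.preprocessing.sequence import pad_sequences

seqs = [[5, 3], [7, 1, 9, 2]]
print(pad_sequences(seqs, maxlen=4, padding='pre'))
# [[0 0 5 3]
#  [7 1 9 2]]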
90147502/cell_11
[ "text_plain_output_1.png" ]
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
test_df.info()
code
90147502/cell_60
[ "text_plain_output_1.png" ]
from keras.callbacks import EarlyStopping
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import tensorflow as tf

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
test_df.shape
train_df.isnull().sum()
test_df.isnull().sum()
train_df.isnull().any().any()
test_df.isnull().any().any()
train = train_df.to_pandas()
test = test_df.to_pandas()
X_train = train['Phrase']
y_train = train['Sentiment']
tokenize = Tokenizer()
tokenize.fit_on_texts(X_train.values)
X_test = test['Phrase']
X_train = tokenize.texts_to_sequences(X_train)
X_test = tokenize.texts_to_sequences(X_test)
max_sequence_len = max([len(s.split()) for s in train['Phrase']])
X_train = pad_sequences(X_train, max_sequence_len, padding='pre')
X_test = pad_sequences(X_test, max_sequence_len, padding='pre')
early_stopping = EarlyStopping(min_delta=0.001, mode='max', monitor='val_acc', patience=2)
callback = [early_stopping]
embedding_dimension = 100
input_val = len(tokenize.word_index) + 1
model_ANN = tf.keras.Sequential([
    tf.keras.layers.Embedding(input_val, embedding_dimension, input_length=max_sequence_len),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dense(5, activation='softmax'),
])
model_ANN.summary()
model_ANN.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model_ANN.fit(X_train, y_train, batch_size=512, epochs=50, verbose=1, callbacks=callback)
code
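One thing to flag in the cell above: fit is called without any validation data, yet the EarlyStopping callback monitors 'val_acc', so the monitored metric never exists and the callback can never fire (recent TF2 versions also log it as 'val_accuracy' rather than 'val_acc'). A sketch of a fit call that actually produces the metric; the 10% validation_split is an assumption for illustration:

# Variant of the fit call above that creates the monitored validation metric.
model_ANN.fit(
    X_train, y_train,
    batch_size=512,
    epochs=50,
    verbose=1,
    validation_split=0.1,   # assumed split; produces val_loss / val_accuracy each epoch
    callbacks=callback,
)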
90147502/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90147502/cell_7
[ "text_plain_output_1.png" ]
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.info()
code
90147502/cell_18
[ "text_plain_output_1.png" ]
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
test_df.shape
test_df.isnull().sum()
test_df.isnull().any().any()
code
90147502/cell_62
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.callbacks import EarlyStopping
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import cudf as pd
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import tensorflow as tf

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
test_df.shape
train_df.isnull().sum()
test_df.isnull().sum()
train_df.isnull().any().any()
test_df.isnull().any().any()
train = train_df.to_pandas()
test = test_df.to_pandas()
X_train = train['Phrase']
y_train = train['Sentiment']
tokenize = Tokenizer()
tokenize.fit_on_texts(X_train.values)
X_test = test['Phrase']
X_train = tokenize.texts_to_sequences(X_train)
X_test = tokenize.texts_to_sequences(X_test)
max_sequence_len = max([len(s.split()) for s in train['Phrase']])
X_train = pad_sequences(X_train, max_sequence_len, padding='pre')
X_test = pad_sequences(X_test, max_sequence_len, padding='pre')
early_stopping = EarlyStopping(min_delta=0.001, mode='max', monitor='val_acc', patience=2)
callback = [early_stopping]
embedding_dimension = 100
input_val = len(tokenize.word_index) + 1
model_ANN = tf.keras.Sequential([
    tf.keras.layers.Embedding(input_val, embedding_dimension, input_length=max_sequence_len),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dense(5, activation='softmax'),
])
model_ANN.summary()
model_ANN.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model_ANN.fit(X_train, y_train, batch_size=512, epochs=50, verbose=1, callbacks=callback)
predict_x = model_ANN.predict(X_test)
classes_x_ANN = np.argmax(predict_x, axis=1)
classes_x_ANN
code
90147502/cell_58
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import tensorflow as tf

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
test_df.shape
train_df.isnull().sum()
test_df.isnull().sum()
train_df.isnull().any().any()
test_df.isnull().any().any()
train = train_df.to_pandas()
test = test_df.to_pandas()
X_train = train['Phrase']
y_train = train['Sentiment']
tokenize = Tokenizer()
tokenize.fit_on_texts(X_train.values)
X_test = test['Phrase']
X_train = tokenize.texts_to_sequences(X_train)
X_test = tokenize.texts_to_sequences(X_test)
max_sequence_len = max([len(s.split()) for s in train['Phrase']])
X_train = pad_sequences(X_train, max_sequence_len, padding='pre')
X_test = pad_sequences(X_test, max_sequence_len, padding='pre')
embedding_dimension = 100
input_val = len(tokenize.word_index) + 1
model_ANN = tf.keras.Sequential([
    tf.keras.layers.Embedding(input_val, embedding_dimension, input_length=max_sequence_len),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(256, activation='relu'),
    tf.keras.layers.Dense(5, activation='softmax'),
])
model_ANN.summary()
code
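Almost all of the parameters reported by the summary above sit in the embedding layer, which stores input_val x embedding_dimension weights (one 100-dimensional row per vocabulary entry plus the padding index). With hypothetical numbers:

# Hypothetical figures: a 15,000-word vocabulary with 100-d embeddings.
input_val = 15000 + 1
embedding_dimension = 100
print(input_val * embedding_dimension)  # 1,500,100 trainable embedding weights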
90147502/cell_28
[ "text_plain_output_1.png" ]
import string
import re

string.punctuation
code
90147502/cell_8
[ "text_plain_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.describe()
code
90147502/cell_15
[ "text_html_output_1.png" ]
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
train_df.isnull().sum()
code
90147502/cell_16
[ "text_plain_output_1.png" ]
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
test_df.shape
test_df.isnull().sum()
code
90147502/cell_47
[ "text_plain_output_1.png", "image_output_1.png" ]
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf imported last so pd is the GPU library; .to_pandas() below relies on this
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
test_df.shape
train_df.isnull().sum()
test_df.isnull().sum()
train_df.isnull().any().any()
test_df.isnull().any().any()
train = train_df.to_pandas()
test = test_df.to_pandas()
X_train = train['Phrase']
y_train = train['Sentiment']
tokenize = Tokenizer()
tokenize.fit_on_texts(X_train.values)
X_test = test['Phrase']
X_train = tokenize.texts_to_sequences(X_train)
X_test = tokenize.texts_to_sequences(X_test)
max_sequence_len = max(len(s.split()) for s in train['Phrase'])
X_train = pad_sequences(X_train, max_sequence_len, padding='pre')
X_test = pad_sequences(X_test, max_sequence_len, padding='pre')
X_train
code
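The tokenize-then-pad pattern in the cell above turns ragged phrase lists into a fixed-width integer matrix. A toy sketch of the same API on two made-up phrases (illustrative only; not data from the competition):

from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences

tok = Tokenizer()
tok.fit_on_texts(['a good movie', 'a bad movie'])
seqs = tok.texts_to_sequences(['a good movie', 'bad'])
print(pad_sequences(seqs, maxlen=3, padding='pre'))
# e.g. [[1 3 2]
#       [0 0 4]]  -- exact indices depend on word frequency; 0 is reserved for padding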
90147502/cell_17
[ "text_html_output_1.png" ]
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
train_df.isnull().sum()
train_df.isnull().any().any()
code
90147502/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import cudf as pd  # cudf imported last so pd is the GPU library; .to_pandas() below relies on this
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
train_df.isnull().sum()
train_df.isnull().any().any()
train = train_df.to_pandas()
train_df['Phrase'][0]
code
90147502/cell_10
[ "text_html_output_1.png" ]
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
test_df.head()
code
90147502/cell_12
[ "text_html_output_1.png" ]
import cudf as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
test_df.describe()
code
90147502/cell_36
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords
stopwords.words('english')
code
72105010/cell_13
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import OrdinalEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
features = train.drop(columns=['target', 'id'], axis=1)
test = test.drop(columns=['id'])
y = train['target']

# ordinal-encode the categorical columns: fit on train, transform both splits
ordinal_enc = OrdinalEncoder()
categorical = list(features.select_dtypes(include=[object]))
X = features.copy()
X_test = test.copy()
X[categorical] = ordinal_enc.fit_transform(features[categorical])
X_test[categorical] = ordinal_enc.transform(test[categorical])

# second copy with the numerical columns min-max scaled as well
ordinal_enc = OrdinalEncoder()
categorical = list(features.select_dtypes(include=[object]))
X_scale = features.copy()
X_test_scale = test.copy()
X_scale[categorical] = ordinal_enc.fit_transform(features[categorical])
X_test_scale[categorical] = ordinal_enc.transform(test[categorical])
numerical = list(features.select_dtypes(exclude=[object]))
scaler = MinMaxScaler()
X_scale[numerical] = scaler.fit_transform(features[numerical])
X_test_scale[numerical] = scaler.transform(test[numerical])
X_scale
code
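One caveat with the encoding above: a plain OrdinalEncoder raises on categories that appear in test but never in the training features. A hedged sketch of a more forgiving setup (the -1 sentinel is an arbitrary choice, and handle_unknown='use_encoded_value' requires scikit-learn >= 0.24):

from sklearn.preprocessing import OrdinalEncoder

ordinal_enc = OrdinalEncoder(handle_unknown='use_encoded_value', unknown_value=-1)
X[categorical] = ordinal_enc.fit_transform(features[categorical])
X_test[categorical] = ordinal_enc.transform(test[categorical])  # unseen categories become -1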
72105010/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
print(train.shape)
print(test.shape)
code
72105010/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
sns.heatmap(train.isnull())
code
72105010/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import OrdinalEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from sklearn.metrics import mean_squared_error, r2_score
code
72105010/cell_11
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import OrdinalEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
features = train.drop(columns=['target', 'id'], axis=1)
test = test.drop(columns=['id'])
y = train['target']
ordinal_enc = OrdinalEncoder()
categorical = list(features.select_dtypes(include=[object]))
X = features.copy()
X_test = test.copy()
X[categorical] = ordinal_enc.fit_transform(features[categorical])
X_test[categorical] = ordinal_enc.transform(test[categorical])
X
code
72105010/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72105010/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
sns.pairplot(train)
code
72105010/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score

# X_train/X_valid/y_train/y_valid come from a train_test_split cell not included in this dump
model_base = LinearRegression()
model_base.fit(X_train, y_train)
preds_valid_base = model_base.predict(X_valid)
print('RMSE', mean_squared_error(y_valid, preds_valid_base, squared=False))  # squared=False yields RMSE, not MAE as originally labeled
print('r2', r2_score(y_valid, preds_valid_base))
code
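The cell above assumes X_train, X_valid, y_train, and y_valid already exist; the split cell itself is not part of this dump. A plausible version, consistent with the train_test_split import elsewhere in the notebook (the test_size and random_state values are assumptions):

from sklearn.model_selection import train_test_split

X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.2, random_state=0)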
72105010/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
train.info()
code
33110459/cell_2
[ "text_plain_output_1.png", "image_output_1.png" ]
!curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py
!python pytorch-xla-env-setup.py --apt-packages libomp5 libopenblas-dev
code
33110459/cell_7
[ "text_plain_output_1.png" ]
import os
import pandas as pd

root = '../input/104-flowers-garden-of-eden'

# build the train index; DataFrame.append is deprecated in newer pandas but kept as in the original notebook
train_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'train'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'train', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'train'
        train_df = train_df.append(tmp_df, ignore_index=True)

val_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'val'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'val', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'val'
        val_df = val_df.append(tmp_df, ignore_index=True)

CLASSES = ['pink primrose', 'hard-leaved pocket orchid', 'canterbury bells', 'sweet pea', 'wild geranium',
           'tiger lily', 'moon orchid', 'bird of paradise', 'monkshood', 'globe thistle', 'snapdragon',
           "colt's foot", 'king protea', 'spear thistle', 'yellow iris', 'globe-flower', 'purple coneflower',
           'peruvian lily', 'balloon flower', 'giant white arum lily', 'fire lily', 'pincushion flower',
           'fritillary', 'red ginger', 'grape hyacinth', 'corn poppy', 'prince of wales feathers',
           'stemless gentian', 'artichoke', 'sweet william', 'carnation', 'garden phlox', 'love in the mist',
           'cosmos', 'alpine sea holly', 'ruby-lipped cattleya', 'cape flower', 'great masterwort',
           'siam tulip', 'lenten rose', 'barberton daisy', 'daffodil', 'sword lily', 'poinsettia',
           'bolero deep blue', 'wallflower', 'marigold', 'buttercup', 'daisy', 'common dandelion', 'petunia',
           'wild pansy', 'primula', 'sunflower', 'lilac hibiscus', 'bishop of llandaff', 'gaura', 'geranium',
           'orange dahlia', 'pink-yellow dahlia', 'cautleya spicata', 'japanese anemone', 'black-eyed susan',
           'silverbush', 'californian poppy', 'osteospermum', 'spring crocus', 'iris', 'windflower',
           'tree poppy', 'gazania', 'azalea', 'water lily', 'rose', 'thorn apple', 'morning glory',
           'passion flower', 'lotus', 'toad lily', 'anthurium', 'frangipani', 'clematis', 'hibiscus',
           'columbine', 'desert-rose', 'tree mallow', 'magnolia', 'cyclamen ', 'watercress', 'canna lily',
           'hippeastrum ', 'bee balm', 'pink quill', 'foxglove', 'bougainvillea', 'camellia', 'mallow',
           'mexican petunia', 'bromelia', 'blanket flower', 'trumpet creeper', 'blackberry lily',
           'common tulip', 'wild rose']
print('num class:', len(CLASSES))
train_df['label'] = train_df['class'].apply(lambda x: CLASSES.index(x))
val_df['label'] = val_df['class'].apply(lambda x: CLASSES.index(x))
code
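CLASSES.index(x) in the cell above performs a linear scan for every row; with 104 classes and thousands of images a dictionary lookup is the usual idiom. An equivalent sketch:

class_to_label = {name: i for i, name in enumerate(CLASSES)}  # one-time lookup table
train_df['label'] = train_df['class'].map(class_to_label)
val_df['label'] = val_df['class'].map(class_to_label)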
33110459/cell_18
[ "text_plain_output_1.png" ]
from PIL import Image
from collections import deque
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch_xla.core.xla_model as xm
import torch_xla.distributed.parallel_loader as pl
import torchvision.models as models
import torchvision.transforms as T

root = '../input/104-flowers-garden-of-eden'
train_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'train'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'train', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'train'
        train_df = train_df.append(tmp_df, ignore_index=True)
val_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'val'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'val', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'val'
        val_df = val_df.append(tmp_df, ignore_index=True)
test_df = pd.DataFrame()
f = 'jpeg-224x224'
images = os.listdir(os.path.join(root, f, 'test'))
tmp_df = pd.DataFrame(images, columns=['image_name'])
tmp_df['class'] = 'unknown'
tmp_df['folder'] = f
tmp_df['type'] = 'test'
test_df = test_df.append(tmp_df, ignore_index=True)

class flowerDataset(Dataset):
    def __init__(self, df, root='../input/104-flowers-garden-of-eden'):
        self.df = df
        self.root = root
        self.transforms = T.Compose([T.Resize((224, 224)), T.ToTensor()])

    def __getitem__(self, idx):
        img_path = os.path.join(self.root, self.df.iloc[idx]['folder'], self.df.iloc[idx]['type'],
                                self.df.iloc[idx]['class'], self.df.iloc[idx]['image_name'])
        img = Image.open(img_path)
        img_tensor = self.transforms(img)
        target_tensor = torch.tensor(self.df.iloc[idx]['label'], dtype=torch.long)
        return (img_tensor, target_tensor)

    def __len__(self):
        return len(self.df)

class testDataset(Dataset):
    def __init__(self, df, root='../input/104-flowers-garden-of-eden'):
        self.df = df
        self.root = root
        self.transforms = T.Compose([T.ToTensor()])

    def __getitem__(self, idx):
        img_path = os.path.join(self.root, self.df.iloc[idx]['folder'], self.df.iloc[idx]['type'],
                                self.df.iloc[idx]['image_name'])
        img = Image.open(img_path)
        img_tensor = self.transforms(img)
        return (img_tensor, self.df.iloc[idx]['image_name'][:-5])

    def __len__(self):
        return len(self.df)

train_dataset = flowerDataset(train_df)
print(train_dataset.__len__())
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, drop_last=True)
train_iter = iter(train_loader)
images, labels = next(train_iter)
print(images.size())
print(labels.size())

plot_size = 32
fig = plt.figure(figsize=(25, 10))
for idx in np.arange(plot_size):
    ax = fig.add_subplot(4, plot_size // 4, idx + 1, xticks=[], yticks=[])  # integer division: add_subplot needs ints
    ax.imshow(np.transpose(images[idx], (1, 2, 0)))
    ax.set_title(classes[labels[idx].item()])  # note: 'classes' is the leftover loop variable from the listing above

def train_net():
    torch.manual_seed(FLAGS['seed'])
    device = xm.xla_device()
    world_size = xm.xrt_world_size()
    train_dataset = flowerDataset(train_df)
    train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=xm.get_ordinal(), shuffle=True)
    train_loader = DataLoader(train_dataset, batch_size=FLAGS['batch_size'], sampler=train_sampler,
                              num_workers=FLAGS['num_workers'], drop_last=True)
    val_dataset = flowerDataset(val_df)
    val_sampler = DistributedSampler(val_dataset, num_replicas=world_size, rank=xm.get_ordinal(), shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=FLAGS['batch_size'], sampler=val_sampler,
                            num_workers=FLAGS['num_workers'], drop_last=True)
    model = models.resnet18()
    model.load_state_dict(torch.load('/kaggle/input/resnet18/resnet18.pth'))
    model.fc = nn.Linear(512, 104)
    model.to(device)
    optimizer = optim.SGD(model.parameters(), lr=FLAGS['learning_rate'] * world_size,
                          momentum=FLAGS['momentum'], weight_decay=0.0005)
    loss_fn = torch.nn.CrossEntropyLoss()

    def train_loop_fn(loader):
        tracker = xm.RateTracker()
        model.train()
        loss_window = deque(maxlen=FLAGS['log_steps'])
        for x, (data, target) in enumerate(loader):
            optimizer.zero_grad()
            output = model(data)
            loss = loss_fn(output, target)
            loss_window.append(loss.item())
            loss.backward()
            xm.optimizer_step(optimizer)
            tracker.add(FLAGS['batch_size'])

    def val_loop_fn(loader):
        total_samples, correct = (0, 0)
        model.eval()
        for data, target in loader:
            with torch.no_grad():
                output = model(data)
                pred = output.max(1, keepdim=True)[1]
                correct += pred.eq(target.view_as(pred)).sum().item()
                total_samples += data.size()[0]
        accuracy = 100.0 * correct / total_samples
        return accuracy

    for epoch in range(1, FLAGS['num_epochs'] + 1):
        para_loader = pl.ParallelLoader(train_loader, [device])
        train_loop_fn(para_loader.per_device_loader(device))
        para_loader = pl.ParallelLoader(val_loader, [device])
        accuracy = val_loop_fn(para_loader.per_device_loader(device))
        best_accuracy = 0.0  # note: reset every epoch, so the checkpoint below is always overwritten (see sketch after this cell)
        if accuracy > best_accuracy:
            xm.save(model.state_dict(), 'trained_resnet18_model.pth')
            best_accuracy = accuracy

def _mp_fn(rank, flags):
    global FLAGS
    FLAGS = flags
    torch.set_default_tensor_type('torch.FloatTensor')
    train_start = time.time()
    train_net()
    elapsed_train_time = time.time() - train_start

model = models.resnet18()
model.fc = nn.Linear(512, 104)
model.load_state_dict(torch.load('trained_resnet18_model.pth'))
device = xm.xla_device()
model.to(device)
model.eval()

batch_size = 128
test_dataset = testDataset(test_df)
test_loader = DataLoader(test_dataset, batch_size=batch_size)
n = test_dataset.__len__()
label = []
id = []  # note: shadows the builtin id()
for x, (images, names) in enumerate(test_loader):
    images = images.to(device)
    with torch.no_grad():
        output = model(images)
    preds = list(output.max(1)[1].cpu().numpy())
    label.extend(preds)
    id.extend(names)
    print('\rProcess {} %'.format(round(100 * x * batch_size / n)), end='')
print('\rProcess 100 %')
predictions = pd.DataFrame(data={'id': id, 'label': label})
code
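One behavioral quirk in the epoch loop above: best_accuracy is re-initialized to 0.0 inside the loop, so the checkpoint is rewritten every epoch whether or not validation accuracy improved. A minimal sketch of the presumably intended bookkeeping:

best_accuracy = 0.0  # hoisted out of the loop so it persists across epochs
for epoch in range(1, FLAGS['num_epochs'] + 1):
    para_loader = pl.ParallelLoader(train_loader, [device])
    train_loop_fn(para_loader.per_device_loader(device))
    para_loader = pl.ParallelLoader(val_loader, [device])
    accuracy = val_loop_fn(para_loader.per_device_loader(device))
    if accuracy > best_accuracy:  # save only on genuine improvement
        xm.save(model.state_dict(), 'trained_resnet18_model.pth')
        best_accuracy = accuracy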
33110459/cell_8
[ "text_plain_output_1.png" ]
import os
import pandas as pd

root = '../input/104-flowers-garden-of-eden'
train_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'train'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'train', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'train'
        train_df = train_df.append(tmp_df, ignore_index=True)
val_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'val'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'val', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'val'
        val_df = val_df.append(tmp_df, ignore_index=True)
test_df = pd.DataFrame()
f = 'jpeg-224x224'
images = os.listdir(os.path.join(root, f, 'test'))
tmp_df = pd.DataFrame(images, columns=['image_name'])
tmp_df['class'] = 'unknown'
tmp_df['folder'] = f
tmp_df['type'] = 'test'
test_df = test_df.append(tmp_df, ignore_index=True)
print('test:', test_df.shape)
code
33110459/cell_16
[ "text_plain_output_1.png" ]
from PIL import Image
from collections import deque
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch_xla.core.xla_model as xm
import torch_xla.distributed.parallel_loader as pl
import torchvision.models as models
import torchvision.transforms as T

root = '../input/104-flowers-garden-of-eden'
train_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'train'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'train', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'train'
        train_df = train_df.append(tmp_df, ignore_index=True)
val_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'val'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'val', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'val'
        val_df = val_df.append(tmp_df, ignore_index=True)
test_df = pd.DataFrame()
f = 'jpeg-224x224'
images = os.listdir(os.path.join(root, f, 'test'))
tmp_df = pd.DataFrame(images, columns=['image_name'])
tmp_df['class'] = 'unknown'
tmp_df['folder'] = f
tmp_df['type'] = 'test'
test_df = test_df.append(tmp_df, ignore_index=True)

class flowerDataset(Dataset):
    def __init__(self, df, root='../input/104-flowers-garden-of-eden'):
        self.df = df
        self.root = root
        self.transforms = T.Compose([T.Resize((224, 224)), T.ToTensor()])

    def __getitem__(self, idx):
        img_path = os.path.join(self.root, self.df.iloc[idx]['folder'], self.df.iloc[idx]['type'],
                                self.df.iloc[idx]['class'], self.df.iloc[idx]['image_name'])
        img = Image.open(img_path)
        img_tensor = self.transforms(img)
        target_tensor = torch.tensor(self.df.iloc[idx]['label'], dtype=torch.long)
        return (img_tensor, target_tensor)

    def __len__(self):
        return len(self.df)

class testDataset(Dataset):
    def __init__(self, df, root='../input/104-flowers-garden-of-eden'):
        self.df = df
        self.root = root
        self.transforms = T.Compose([T.ToTensor()])

    def __getitem__(self, idx):
        img_path = os.path.join(self.root, self.df.iloc[idx]['folder'], self.df.iloc[idx]['type'],
                                self.df.iloc[idx]['image_name'])
        img = Image.open(img_path)
        img_tensor = self.transforms(img)
        return (img_tensor, self.df.iloc[idx]['image_name'][:-5])

    def __len__(self):
        return len(self.df)

train_dataset = flowerDataset(train_df)
print(train_dataset.__len__())
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, drop_last=True)
train_iter = iter(train_loader)
images, labels = next(train_iter)
print(images.size())
print(labels.size())

plot_size = 32
fig = plt.figure(figsize=(25, 10))
for idx in np.arange(plot_size):
    ax = fig.add_subplot(4, plot_size // 4, idx + 1, xticks=[], yticks=[])  # integer division: add_subplot needs ints
    ax.imshow(np.transpose(images[idx], (1, 2, 0)))
    ax.set_title(classes[labels[idx].item()])  # note: 'classes' is the leftover loop variable from the listing above

def train_net():
    torch.manual_seed(FLAGS['seed'])
    device = xm.xla_device()
    world_size = xm.xrt_world_size()
    train_dataset = flowerDataset(train_df)
    train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=xm.get_ordinal(), shuffle=True)
    train_loader = DataLoader(train_dataset, batch_size=FLAGS['batch_size'], sampler=train_sampler,
                              num_workers=FLAGS['num_workers'], drop_last=True)
    val_dataset = flowerDataset(val_df)
    val_sampler = DistributedSampler(val_dataset, num_replicas=world_size, rank=xm.get_ordinal(), shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=FLAGS['batch_size'], sampler=val_sampler,
                            num_workers=FLAGS['num_workers'], drop_last=True)
    model = models.resnet18()
    model.load_state_dict(torch.load('/kaggle/input/resnet18/resnet18.pth'))
    model.fc = nn.Linear(512, 104)
    model.to(device)
    optimizer = optim.SGD(model.parameters(), lr=FLAGS['learning_rate'] * world_size,
                          momentum=FLAGS['momentum'], weight_decay=0.0005)
    loss_fn = torch.nn.CrossEntropyLoss()

    def train_loop_fn(loader):
        tracker = xm.RateTracker()
        model.train()
        loss_window = deque(maxlen=FLAGS['log_steps'])
        for x, (data, target) in enumerate(loader):
            optimizer.zero_grad()
            output = model(data)
            loss = loss_fn(output, target)
            loss_window.append(loss.item())
            loss.backward()
            xm.optimizer_step(optimizer)
            tracker.add(FLAGS['batch_size'])

    def val_loop_fn(loader):
        total_samples, correct = (0, 0)
        model.eval()
        for data, target in loader:
            with torch.no_grad():
                output = model(data)
                pred = output.max(1, keepdim=True)[1]
                correct += pred.eq(target.view_as(pred)).sum().item()
                total_samples += data.size()[0]
        accuracy = 100.0 * correct / total_samples
        return accuracy

    for epoch in range(1, FLAGS['num_epochs'] + 1):
        para_loader = pl.ParallelLoader(train_loader, [device])
        train_loop_fn(para_loader.per_device_loader(device))
        para_loader = pl.ParallelLoader(val_loader, [device])
        accuracy = val_loop_fn(para_loader.per_device_loader(device))
        best_accuracy = 0.0  # note: reset every epoch, so the checkpoint below is always overwritten
        if accuracy > best_accuracy:
            xm.save(model.state_dict(), 'trained_resnet18_model.pth')
            best_accuracy = accuracy

def _mp_fn(rank, flags):
    global FLAGS
    FLAGS = flags
    torch.set_default_tensor_type('torch.FloatTensor')
    train_start = time.time()
    train_net()
    elapsed_train_time = time.time() - train_start

model = models.resnet18()
model.fc = nn.Linear(512, 104)
model.load_state_dict(torch.load('trained_resnet18_model.pth'))
device = xm.xla_device()
model.to(device)
model.eval()
print(device)
code
33110459/cell_14
[ "text_plain_output_1.png" ]
from PIL import Image
from collections import deque
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import time
import torch
import torch.nn as nn
import torch.optim as optim
import torch_xla.core.xla_model as xm
import torch_xla.distributed.parallel_loader as pl
import torch_xla.distributed.xla_multiprocessing as xmp
import torchvision.models as models
import torchvision.transforms as T

root = '../input/104-flowers-garden-of-eden'
train_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'train'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'train', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'train'
        train_df = train_df.append(tmp_df, ignore_index=True)
val_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'val'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'val', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'val'
        val_df = val_df.append(tmp_df, ignore_index=True)
test_df = pd.DataFrame()
f = 'jpeg-224x224'
images = os.listdir(os.path.join(root, f, 'test'))
tmp_df = pd.DataFrame(images, columns=['image_name'])
tmp_df['class'] = 'unknown'
tmp_df['folder'] = f
tmp_df['type'] = 'test'
test_df = test_df.append(tmp_df, ignore_index=True)

class flowerDataset(Dataset):
    def __init__(self, df, root='../input/104-flowers-garden-of-eden'):
        self.df = df
        self.root = root
        self.transforms = T.Compose([T.Resize((224, 224)), T.ToTensor()])

    def __getitem__(self, idx):
        img_path = os.path.join(self.root, self.df.iloc[idx]['folder'], self.df.iloc[idx]['type'],
                                self.df.iloc[idx]['class'], self.df.iloc[idx]['image_name'])
        img = Image.open(img_path)
        img_tensor = self.transforms(img)
        target_tensor = torch.tensor(self.df.iloc[idx]['label'], dtype=torch.long)
        return (img_tensor, target_tensor)

    def __len__(self):
        return len(self.df)

class testDataset(Dataset):
    def __init__(self, df, root='../input/104-flowers-garden-of-eden'):
        self.df = df
        self.root = root
        self.transforms = T.Compose([T.ToTensor()])

    def __getitem__(self, idx):
        img_path = os.path.join(self.root, self.df.iloc[idx]['folder'], self.df.iloc[idx]['type'],
                                self.df.iloc[idx]['image_name'])
        img = Image.open(img_path)
        img_tensor = self.transforms(img)
        return (img_tensor, self.df.iloc[idx]['image_name'][:-5])

    def __len__(self):
        return len(self.df)

train_dataset = flowerDataset(train_df)
print(train_dataset.__len__())
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, drop_last=True)
train_iter = iter(train_loader)
images, labels = next(train_iter)
print(images.size())
print(labels.size())

plot_size = 32
fig = plt.figure(figsize=(25, 10))
for idx in np.arange(plot_size):
    ax = fig.add_subplot(4, plot_size // 4, idx + 1, xticks=[], yticks=[])  # integer division: add_subplot needs ints
    ax.imshow(np.transpose(images[idx], (1, 2, 0)))
    ax.set_title(classes[labels[idx].item()])  # note: 'classes' is the leftover loop variable from the listing above

def train_net():
    torch.manual_seed(FLAGS['seed'])
    device = xm.xla_device()
    world_size = xm.xrt_world_size()
    train_dataset = flowerDataset(train_df)
    train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=xm.get_ordinal(), shuffle=True)
    train_loader = DataLoader(train_dataset, batch_size=FLAGS['batch_size'], sampler=train_sampler,
                              num_workers=FLAGS['num_workers'], drop_last=True)
    val_dataset = flowerDataset(val_df)
    val_sampler = DistributedSampler(val_dataset, num_replicas=world_size, rank=xm.get_ordinal(), shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=FLAGS['batch_size'], sampler=val_sampler,
                            num_workers=FLAGS['num_workers'], drop_last=True)
    model = models.resnet18()
    model.load_state_dict(torch.load('/kaggle/input/resnet18/resnet18.pth'))
    model.fc = nn.Linear(512, 104)
    model.to(device)
    optimizer = optim.SGD(model.parameters(), lr=FLAGS['learning_rate'] * world_size,
                          momentum=FLAGS['momentum'], weight_decay=0.0005)
    loss_fn = torch.nn.CrossEntropyLoss()

    def train_loop_fn(loader):
        tracker = xm.RateTracker()
        model.train()
        loss_window = deque(maxlen=FLAGS['log_steps'])
        for x, (data, target) in enumerate(loader):
            optimizer.zero_grad()
            output = model(data)
            loss = loss_fn(output, target)
            loss_window.append(loss.item())
            loss.backward()
            xm.optimizer_step(optimizer)
            tracker.add(FLAGS['batch_size'])

    def val_loop_fn(loader):
        total_samples, correct = (0, 0)
        model.eval()
        for data, target in loader:
            with torch.no_grad():
                output = model(data)
                pred = output.max(1, keepdim=True)[1]
                correct += pred.eq(target.view_as(pred)).sum().item()
                total_samples += data.size()[0]
        accuracy = 100.0 * correct / total_samples
        return accuracy

    for epoch in range(1, FLAGS['num_epochs'] + 1):
        para_loader = pl.ParallelLoader(train_loader, [device])
        train_loop_fn(para_loader.per_device_loader(device))
        para_loader = pl.ParallelLoader(val_loader, [device])
        accuracy = val_loop_fn(para_loader.per_device_loader(device))
        best_accuracy = 0.0  # note: reset every epoch, so the checkpoint below is always overwritten
        if accuracy > best_accuracy:
            xm.save(model.state_dict(), 'trained_resnet18_model.pth')
            best_accuracy = accuracy

def _mp_fn(rank, flags):
    global FLAGS
    FLAGS = flags
    torch.set_default_tensor_type('torch.FloatTensor')
    train_start = time.time()
    train_net()
    elapsed_train_time = time.time() - train_start

FLAGS = {}
FLAGS['seed'] = 1
FLAGS['num_workers'] = 4
FLAGS['num_cores'] = 8
FLAGS['num_epochs'] = 10
FLAGS['log_steps'] = 50
FLAGS['batch_size'] = 16
FLAGS['learning_rate'] = 0.0001
FLAGS['momentum'] = 0.9
xmp.spawn(_mp_fn, args=(FLAGS,), nprocs=FLAGS['num_cores'], start_method='fork')
code
33110459/cell_10
[ "text_plain_output_1.png" ]
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd

root = '../input/104-flowers-garden-of-eden'
train_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'train'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'train', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'train'
        train_df = train_df.append(tmp_df, ignore_index=True)
val_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'val'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'val', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'val'
        val_df = val_df.append(tmp_df, ignore_index=True)
test_df = pd.DataFrame()
f = 'jpeg-224x224'
images = os.listdir(os.path.join(root, f, 'test'))
tmp_df = pd.DataFrame(images, columns=['image_name'])
tmp_df['class'] = 'unknown'
tmp_df['folder'] = f
tmp_df['type'] = 'test'
test_df = test_df.append(tmp_df, ignore_index=True)

# flowerDataset is defined in an earlier cell of this notebook, not in this dump
train_dataset = flowerDataset(train_df)
print(train_dataset.__len__())
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, drop_last=True)
train_iter = iter(train_loader)
images, labels = next(train_iter)
print(images.size())
print(labels.size())

plot_size = 32
fig = plt.figure(figsize=(25, 10))
for idx in np.arange(plot_size):
    ax = fig.add_subplot(4, plot_size // 4, idx + 1, xticks=[], yticks=[])  # integer division: add_subplot needs ints
    ax.imshow(np.transpose(images[idx], (1, 2, 0)))
    ax.set_title(classes[labels[idx].item()])  # note: 'classes' is the leftover loop variable from the listing above
code
33110459/cell_5
[ "text_plain_output_1.png" ]
import os
import pandas as pd

root = '../input/104-flowers-garden-of-eden'
train_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'train'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'train', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'train'
        train_df = train_df.append(tmp_df, ignore_index=True)
print('train:', train_df.shape)
val_df = pd.DataFrame()
folder = os.listdir(root)
for f in folder:
    classes = os.listdir(os.path.join(root, f, 'val'))
    for c in classes:
        images = os.listdir(os.path.join(root, f, 'val', c))
        tmp_df = pd.DataFrame(images, columns=['image_name'])
        tmp_df['class'] = c
        tmp_df['folder'] = f
        tmp_df['type'] = 'val'
        val_df = val_df.append(tmp_df, ignore_index=True)
print('val:', val_df.shape)
code
105188182/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
df.shape
df.isnull().sum()
df.isnull().sum()
df.plot(kind='box', subplots=True, figsize=(18, 15), layout=(5, 5))
plt.show()
code
105188182/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
df.shape
df.isnull().sum()
df.isnull().sum()
df = df.drop(df.loc[df['Flight Distance'] > 4200].index)
df.isnull().sum()
df = df.drop(df.loc[df['Departure Delay'] > 800].index)
plt.figure(figsize=(15, 8))
sns.scatterplot(x='Arrival Delay', y='Satisfaction', data=df)
code
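The outlier cuts in this notebook's cells use hand-picked thresholds (4200, 800, 700) read off the plots. A hedged alternative that derives the cutoff from the data instead (the 99.9th percentile is an arbitrary assumed choice; the negated comparison keeps rows where the column is NaN, which matters for 'Arrival Delay'):

for col in ['Flight Distance', 'Departure Delay', 'Arrival Delay']:
    cutoff = df[col].quantile(0.999)
    df = df[~(df[col] > cutoff)]  # NaN comparisons are False, so NaN rows survive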
105188182/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
code
105188182/cell_23
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
df.shape
df.isnull().sum()
df.isnull().sum()
df = df.drop(df.loc[df['Flight Distance'] > 4200].index)
df.isnull().sum()
df = df.drop(df.loc[df['Departure Delay'] > 800].index)
plt.figure(figsize=(15, 8))
sns.scatterplot(x='Departure Delay', y='Satisfaction', data=df)
code
105188182/cell_33
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
df.shape
df.isnull().sum()
df.isnull().sum()
df = df.drop(df.loc[df['Flight Distance'] > 4200].index)
df.isnull().sum()
df = df.drop(df.loc[df['Departure Delay'] > 800].index)
df = df.drop(df.loc[df['Arrival Delay'] > 700].index)
df.Satisfaction.value_counts()
df.Gender.value_counts()
code
105188182/cell_20
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
df.shape
df.isnull().sum()
df.isnull().sum()
df = df.drop(df.loc[df['Flight Distance'] > 4200].index)
df.isnull().sum()
plt.figure(figsize=(15, 8))
sns.scatterplot(x='Departure Delay', y='Satisfaction', data=df)
code
105188182/cell_40
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
df.shape
df.isnull().sum()
df.isnull().sum()
df = df.drop(df.loc[df['Flight Distance'] > 4200].index)
df.isnull().sum()
df = df.drop(df.loc[df['Departure Delay'] > 800].index)
df = df.drop(df.loc[df['Arrival Delay'] > 700].index)
df.Satisfaction.value_counts()
df.Gender.value_counts()
df.info()
code
105188182/cell_39
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
df.shape
df.isnull().sum()
df.isnull().sum()
df = df.drop(df.loc[df['Flight Distance'] > 4200].index)
df.isnull().sum()
df = df.drop(df.loc[df['Departure Delay'] > 800].index)
df = df.drop(df.loc[df['Arrival Delay'] > 700].index)
df.Satisfaction.value_counts()
df.Gender.value_counts()
df['Class'].value_counts()
code
105188182/cell_41
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
df.shape
df.isnull().sum()
df.isnull().sum()
df = df.drop(df.loc[df['Flight Distance'] > 4200].index)
df.isnull().sum()
df = df.drop(df.loc[df['Departure Delay'] > 800].index)
df = df.drop(df.loc[df['Arrival Delay'] > 700].index)
df.Satisfaction.value_counts()
df.Gender.value_counts()
df.plot(kind='box', subplots=True, figsize=(18, 15), layout=(5, 5))
plt.show()
code
105188182/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
df.shape
df.isnull().sum()
df.isnull().sum()
plt.figure(figsize=(20, 10))
sns.heatmap(df.isnull())
code
105188182/cell_7
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
df.shape
df.isnull().sum()
code
105188182/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
df.shape
df.isnull().sum()
df.isnull().sum()
df = df.drop(df.loc[df['Flight Distance'] > 4200].index)
df.isnull().sum()
plt.figure(figsize=(15, 8))
sns.scatterplot(x='Flight Distance', y='Satisfaction', data=df)
code
105188182/cell_8
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
df.shape
df.isnull().sum()
plt.figure(figsize=(20, 10))
sns.heatmap(df.isnull())
code
105188182/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
df.shape
df.isnull().sum()
df.isnull().sum()
plt.figure(figsize=(15, 8))
sns.scatterplot(x='Flight Distance', y='Satisfaction', data=df)
code
105188182/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
df.shape
df.isnull().sum()
df.isnull().sum()
df = df.drop(df.loc[df['Flight Distance'] > 4200].index)
df.isnull().sum()
code
105188182/cell_35
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
df.shape
df.isnull().sum()
df.isnull().sum()
df = df.drop(df.loc[df['Flight Distance'] > 4200].index)
df.isnull().sum()
df = df.drop(df.loc[df['Departure Delay'] > 800].index)
df = df.drop(df.loc[df['Arrival Delay'] > 700].index)
df.Satisfaction.value_counts()
df.Gender.value_counts()
df['Customer Type'].value_counts()
code
105188182/cell_31
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
df.shape
df.isnull().sum()
df.isnull().sum()
df = df.drop(df.loc[df['Flight Distance'] > 4200].index)
df.isnull().sum()
df = df.drop(df.loc[df['Departure Delay'] > 800].index)
df = df.drop(df.loc[df['Arrival Delay'] > 700].index)
df.Satisfaction.value_counts()
code
105188182/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
df.shape
df.isnull().sum()
df.isnull().sum()
df = df.drop(df.loc[df['Flight Distance'] > 4200].index)
df.isnull().sum()
df = df.drop(df.loc[df['Departure Delay'] > 800].index)
df[df['Departure Delay'] > 600].shape
code
105188182/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
df.shape
df.isnull().sum()
df.isnull().sum()
code
105188182/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
df.shape
df.isnull().sum()
df.isnull().sum()
df = df.drop(df.loc[df['Flight Distance'] > 4200].index)
df.isnull().sum()
df = df.drop(df.loc[df['Departure Delay'] > 800].index)
df = df.drop(df.loc[df['Arrival Delay'] > 700].index)
plt.figure(figsize=(15, 8))
sns.scatterplot(x='Arrival Delay', y='Satisfaction', data=df)
code
105188182/cell_37
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
df.shape
df.isnull().sum()
df.isnull().sum()
df = df.drop(df.loc[df['Flight Distance'] > 4200].index)
df.isnull().sum()
df = df.drop(df.loc[df['Departure Delay'] > 800].index)
df = df.drop(df.loc[df['Arrival Delay'] > 700].index)
df.Satisfaction.value_counts()
df.Gender.value_counts()
df['Type of Travel'].value_counts()
code
105188182/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/airline-passenger-satisfaction/airline_passenger_satisfaction.csv')
df.columns
df.shape
code
74048227/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import time
train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col='id')
train_df = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
training_missing_val_count_by_column = train.isnull().values.sum()
test_missing_val_count_by_column = test.isnull().values.sum()
pd.set_option('display.max_rows', None)
train.describe().T
train2 = train.dropna(axis='rows')
train3 = train.dropna(axis='columns')
training_missing_val_count_by_column = train.isnull().sum()
plt.bar(np.arange(0, len(training_missing_val_count_by_column), 1), training_missing_val_count_by_column)
plt.xlabel('column')
plt.ylabel('Number of missing values')
plt.show()
print(training_missing_val_count_by_column.describe())
print('\n', time.time())
code
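Raw missing-value counts get hard to read with 100+ features. A small complementary sketch showing per-column missing rates in percent, built only on names defined in the cell above:

missing_rate = train.isnull().mean().sort_values(ascending=False) * 100
print(missing_rate.head(10).round(2))  # ten most incomplete columns, in percent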
74048227/cell_9
[ "image_output_1.png" ]
import pandas as pd
import time
train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col='id')
train_df = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
training_missing_val_count_by_column = train.isnull().values.sum()
test_missing_val_count_by_column = test.isnull().values.sum()
train.head()
code
74048227/cell_20
[ "text_html_output_1.png" ]
from sklearn.impute import SimpleImputer
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import time
train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col='id')
train_df = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
training_missing_val_count_by_column = train.isnull().values.sum()
test_missing_val_count_by_column = test.isnull().values.sum()
pd.set_option('display.max_rows', None)
train.describe().T
train2 = train.dropna(axis='rows')
train3 = train.dropna(axis='columns')
training_missing_val_count_by_column = train.isnull().sum()
imputer = SimpleImputer(strategy='mean')
train_imputed = pd.DataFrame(imputer.fit_transform(train))
train_imputed.columns = train.columns
train = train_imputed
corr = train.corr()
mask = np.triu(np.ones_like(corr, dtype=bool))
corr_matrix = train.corr().abs()
high_corr = np.where(corr_matrix > 0.02)
high_corr = [(corr_matrix.columns[x], corr_matrix.columns[y]) for x, y in zip(*high_corr) if x != y and x < y]
print('high correlation', high_corr)
featuresofinterest = ['f6', 'f15', 'f32', 'f34', 'f36', 'f45', 'f46', 'f51', 'f57', 'f86', 'f90', 'f97', 'f111']
print('\n', time.time())
code
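The pair-extraction idiom above scans the full symmetric matrix and deduplicates with x < y. An equivalent form that reuses the upper-triangle mask idea already computed in the cell (a sketch, not from the notebook):

upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape, dtype=bool), k=1))
pairs = upper.stack()  # MultiIndex Series of (row, col) -> |corr|; stack() drops the NaNs
print(pairs[pairs > 0.02].index.tolist())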
74048227/cell_19
[ "text_html_output_1.png" ]
from sklearn.impute import SimpleImputer
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import time
train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col='id')
train_df = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
training_missing_val_count_by_column = train.isnull().values.sum()
test_missing_val_count_by_column = test.isnull().values.sum()
pd.set_option('display.max_rows', None)
train.describe().T
train2 = train.dropna(axis='rows')
train3 = train.dropna(axis='columns')
training_missing_val_count_by_column = train.isnull().sum()
imputer = SimpleImputer(strategy='mean')
train_imputed = pd.DataFrame(imputer.fit_transform(train))
train_imputed.columns = train.columns
train = train_imputed
corr = train.corr()
mask = np.triu(np.ones_like(corr, dtype=bool))
plt.figure(figsize=(15, 15))
plt.title('Correlation matrix for Train data')
sns.heatmap(corr, mask=mask, annot=False, linewidths=0.5, square=True, cbar_kws={'shrink': 0.6})
plt.show()
print('\n', time.time())
code
74048227/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
import time
train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col='id')
train_df = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
print('How much data was imported?')
print('training data shape ;', train.shape)
print('test data shape ;', test.shape)
print('\nHow much data is missing?')
training_missing_val_count_by_column = train.isnull().values.sum()
test_missing_val_count_by_column = test.isnull().values.sum()
print('\nmissing training data :\n', training_missing_val_count_by_column)
print('\nmissing test data :\n', test_missing_val_count_by_column)
print('\noverview complete : ', time.time())
code
74048227/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
import time
train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv', index_col='id')
train_df = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv', index_col='id')
training_missing_val_count_by_column = train.isnull().values.sum()
test_missing_val_count_by_column = test.isnull().values.sum()
# DataFrame.info() prints directly and returns None, so these prints also emit a trailing 'None'
print('train info ;\n', train.info())
print('test info ;\n', test.info())
print(time.time())
code