Columns:
  path              string    lengths 13 to 17
  screenshot_names  sequence  lengths 1 to 873
  code              string    lengths 0 to 40.4k
  cell_type         string    1 value ("code")
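A minimal sketch of how records with this schema might be consumed, assuming the rows are exported as JSON Lines; the file name cells.jsonl and the storage format are assumptions, not part of this dump:

    import json

    # Each record: {"path": "<kernel_id>/cell_<n>", "screenshot_names": [...],
    #               "code": "<cell source>", "cell_type": "code"}
    with open('cells.jsonl', 'r', encoding='utf-8') as f:
        rows = [json.loads(line) for line in f]

    # Group cells by the kernel id encoded in the "path" column.
    by_kernel = {}
    for row in rows:
        kernel_id, _, cell_name = row['path'].partition('/')
        by_kernel.setdefault(kernel_id, []).append((cell_name, row['code']))

    for kernel_id, cells in by_kernel.items():
        print(kernel_id, len(cells), 'cells')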
17141241/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.isnull().sum()
test.head()
code
17141241/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.isnull().sum()
train['Embarked'].value_counts()
code
74058807/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt

plt.figure(figsize=(24, 18))
for i in range(0, 32):
    plt.subplot(8, 8, 2 * i + 1)
    plt.imshow(images_gray[i], cmap='gray')
    plt.title(f'Input {i + 1}')
    plt.axis('off')
    plt.subplot(8, 8, 2 * i + 2)
    plt.imshow(images_col[i])
    plt.title(f'Output {i + 1}')
    plt.axis('off')
plt.show()
code
74058807/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
img_paths = []
for r, d, f in os.walk(DIR_PATH):
    for file in f:
        img_paths.append(os.path.join(r, file))
np.random.shuffle(img_paths)
code
74058807/cell_25
[ "text_plain_output_1.png" ]
from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization, MaxPooling2D, BatchNormalization, UpSampling2D
from keras.models import Sequential, Model
import tensorflow as tf

SEED = 42
INPUT_DIM = (144, 144, 1)
BATCH_SIZE = 128
EPOCHS = 100
LOSS = 'mse'
METRICS = ['accuracy']
OPTIMIZER = 'adam'

datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255, validation_split=0.2)
train_datagen = datagen.flow(images_gray, images_col, batch_size=BATCH_SIZE, shuffle=True, seed=SEED, subset='training')
test_datagen = datagen.flow(images_gray, images_col, batch_size=BATCH_SIZE, shuffle=True, seed=SEED, subset='validation')
checkpoint = tf.keras.callbacks.ModelCheckpoint(monitor='loss', mode='min', save_best_only=True, save_weights_only=True, filepath='./modelcheck')
model_callbacks = [checkpoint]

def Colorize():
    encoder_input = Input(shape=INPUT_DIM)
    encoder_output = Conv2D(64, (3, 3), activation='relu', padding='same', strides=2)(encoder_input)
    encoder_output = Conv2D(128, (3, 3), activation='relu', padding='same')(encoder_output)
    encoder_output = Conv2D(128, (3, 3), activation='relu', padding='same', strides=2)(encoder_output)
    encoder_output = Conv2D(256, (3, 3), activation='relu', padding='same')(encoder_output)
    encoder_output = Conv2D(256, (3, 3), activation='relu', padding='valid', strides=3)(encoder_output)
    encoder_output = Conv2D(512, (3, 3), activation='relu', padding='same')(encoder_output)
    encoder_output = Conv2D(512, (3, 3), activation='relu', padding='same')(encoder_output)
    encoder_output = Conv2D(256, (3, 3), activation='relu', padding='same')(encoder_output)
    decoder_output = Conv2D(128, (3, 3), activation='relu', padding='same')(encoder_output)
    decoder_output = UpSampling2D((3, 3))(decoder_output)
    decoder_output = Conv2D(64, (3, 3), activation='relu', padding='same')(decoder_output)
    decoder_output = UpSampling2D((2, 2))(decoder_output)
    decoder_output = Conv2D(32, (3, 3), activation='relu', padding='same')(decoder_output)
    decoder_output = Conv2D(16, (3, 3), activation='relu', padding='same')(decoder_output)
    decoder_output = Conv2D(3, (3, 3), activation='tanh', padding='same')(decoder_output)
    decoder_output = UpSampling2D((2, 2))(decoder_output)
    model = Model(inputs=encoder_input, outputs=decoder_output)
    return model

model = Colorize()
model.compile(optimizer=OPTIMIZER, loss=LOSS, metrics=METRICS)
model.summary()
history = model.fit(train_datagen, batch_size=BATCH_SIZE, validation_data=test_datagen, epochs=EPOCHS, callbacks=model_callbacks)
code
74058807/cell_11
[ "text_plain_output_1.png" ]
images_col = []
images_gray = []
for i, img_path in tqdm(enumerate(img_paths)):
    img = np.asarray(Image.open(img_path))
    if img.shape == (150, 150, 3):
        resized_image = tf.image.resize(img, [144, 144])
        images_col.append(resized_image)
        images_gray.append(tf.image.rgb_to_grayscale(resized_image))
images_col = np.asarray(images_col, dtype='int32')
images_gray = np.asarray(images_gray, dtype='int32')
code
74058807/cell_28
[ "text_plain_output_1.png" ]
from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization, MaxPooling2D, BatchNormalization, UpSampling2D
from keras.models import Sequential, Model
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import tensorflow as tf

SEED = 42
for i in range(0, 32):
    plt.axis('off')
    plt.axis('off')
INPUT_DIM = (144, 144, 1)
BATCH_SIZE = 128
EPOCHS = 100
LOSS = 'mse'
METRICS = ['accuracy']
OPTIMIZER = 'adam'

datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255, validation_split=0.2)
train_datagen = datagen.flow(images_gray, images_col, batch_size=BATCH_SIZE, shuffle=True, seed=SEED, subset='training')
test_datagen = datagen.flow(images_gray, images_col, batch_size=BATCH_SIZE, shuffle=True, seed=SEED, subset='validation')
checkpoint = tf.keras.callbacks.ModelCheckpoint(monitor='loss', mode='min', save_best_only=True, save_weights_only=True, filepath='./modelcheck')
model_callbacks = [checkpoint]

def Colorize():
    encoder_input = Input(shape=INPUT_DIM)
    encoder_output = Conv2D(64, (3, 3), activation='relu', padding='same', strides=2)(encoder_input)
    encoder_output = Conv2D(128, (3, 3), activation='relu', padding='same')(encoder_output)
    encoder_output = Conv2D(128, (3, 3), activation='relu', padding='same', strides=2)(encoder_output)
    encoder_output = Conv2D(256, (3, 3), activation='relu', padding='same')(encoder_output)
    encoder_output = Conv2D(256, (3, 3), activation='relu', padding='valid', strides=3)(encoder_output)
    encoder_output = Conv2D(512, (3, 3), activation='relu', padding='same')(encoder_output)
    encoder_output = Conv2D(512, (3, 3), activation='relu', padding='same')(encoder_output)
    encoder_output = Conv2D(256, (3, 3), activation='relu', padding='same')(encoder_output)
    decoder_output = Conv2D(128, (3, 3), activation='relu', padding='same')(encoder_output)
    decoder_output = UpSampling2D((3, 3))(decoder_output)
    decoder_output = Conv2D(64, (3, 3), activation='relu', padding='same')(decoder_output)
    decoder_output = UpSampling2D((2, 2))(decoder_output)
    decoder_output = Conv2D(32, (3, 3), activation='relu', padding='same')(decoder_output)
    decoder_output = Conv2D(16, (3, 3), activation='relu', padding='same')(decoder_output)
    decoder_output = Conv2D(3, (3, 3), activation='tanh', padding='same')(decoder_output)
    decoder_output = UpSampling2D((2, 2))(decoder_output)
    model = Model(inputs=encoder_input, outputs=decoder_output)
    return model

model = Colorize()
model.compile(optimizer=OPTIMIZER, loss=LOSS, metrics=METRICS)
model.summary()
history = model.fit(train_datagen, batch_size=BATCH_SIZE, validation_data=test_datagen, epochs=EPOCHS, callbacks=model_callbacks)
preds = model.predict(test_datagen)
preds.shape
plt.imshow(preds[0])
code
74058807/cell_24
[ "image_output_1.png" ]
from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization, MaxPooling2D, BatchNormalization, UpSampling2D
from keras.models import Sequential, Model

INPUT_DIM = (144, 144, 1)
BATCH_SIZE = 128
EPOCHS = 100
LOSS = 'mse'
METRICS = ['accuracy']
OPTIMIZER = 'adam'

def Colorize():
    encoder_input = Input(shape=INPUT_DIM)
    encoder_output = Conv2D(64, (3, 3), activation='relu', padding='same', strides=2)(encoder_input)
    encoder_output = Conv2D(128, (3, 3), activation='relu', padding='same')(encoder_output)
    encoder_output = Conv2D(128, (3, 3), activation='relu', padding='same', strides=2)(encoder_output)
    encoder_output = Conv2D(256, (3, 3), activation='relu', padding='same')(encoder_output)
    encoder_output = Conv2D(256, (3, 3), activation='relu', padding='valid', strides=3)(encoder_output)
    encoder_output = Conv2D(512, (3, 3), activation='relu', padding='same')(encoder_output)
    encoder_output = Conv2D(512, (3, 3), activation='relu', padding='same')(encoder_output)
    encoder_output = Conv2D(256, (3, 3), activation='relu', padding='same')(encoder_output)
    decoder_output = Conv2D(128, (3, 3), activation='relu', padding='same')(encoder_output)
    decoder_output = UpSampling2D((3, 3))(decoder_output)
    decoder_output = Conv2D(64, (3, 3), activation='relu', padding='same')(decoder_output)
    decoder_output = UpSampling2D((2, 2))(decoder_output)
    decoder_output = Conv2D(32, (3, 3), activation='relu', padding='same')(decoder_output)
    decoder_output = Conv2D(16, (3, 3), activation='relu', padding='same')(decoder_output)
    decoder_output = Conv2D(3, (3, 3), activation='tanh', padding='same')(decoder_output)
    decoder_output = UpSampling2D((2, 2))(decoder_output)
    model = Model(inputs=encoder_input, outputs=decoder_output)
    return model

model = Colorize()
model.compile(optimizer=OPTIMIZER, loss=LOSS, metrics=METRICS)
model.summary()
code
74058807/cell_10
[ "text_plain_output_1.png" ]
len(img_paths)
code
74058807/cell_27
[ "text_plain_output_1.png" ]
from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPool2D, BatchNormalization, MaxPooling2D, BatchNormalization, UpSampling2D
from keras.models import Sequential, Model
import tensorflow as tf

SEED = 42
INPUT_DIM = (144, 144, 1)
BATCH_SIZE = 128
EPOCHS = 100
LOSS = 'mse'
METRICS = ['accuracy']
OPTIMIZER = 'adam'

datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255, validation_split=0.2)
train_datagen = datagen.flow(images_gray, images_col, batch_size=BATCH_SIZE, shuffle=True, seed=SEED, subset='training')
test_datagen = datagen.flow(images_gray, images_col, batch_size=BATCH_SIZE, shuffle=True, seed=SEED, subset='validation')
checkpoint = tf.keras.callbacks.ModelCheckpoint(monitor='loss', mode='min', save_best_only=True, save_weights_only=True, filepath='./modelcheck')
model_callbacks = [checkpoint]

def Colorize():
    encoder_input = Input(shape=INPUT_DIM)
    encoder_output = Conv2D(64, (3, 3), activation='relu', padding='same', strides=2)(encoder_input)
    encoder_output = Conv2D(128, (3, 3), activation='relu', padding='same')(encoder_output)
    encoder_output = Conv2D(128, (3, 3), activation='relu', padding='same', strides=2)(encoder_output)
    encoder_output = Conv2D(256, (3, 3), activation='relu', padding='same')(encoder_output)
    encoder_output = Conv2D(256, (3, 3), activation='relu', padding='valid', strides=3)(encoder_output)
    encoder_output = Conv2D(512, (3, 3), activation='relu', padding='same')(encoder_output)
    encoder_output = Conv2D(512, (3, 3), activation='relu', padding='same')(encoder_output)
    encoder_output = Conv2D(256, (3, 3), activation='relu', padding='same')(encoder_output)
    decoder_output = Conv2D(128, (3, 3), activation='relu', padding='same')(encoder_output)
    decoder_output = UpSampling2D((3, 3))(decoder_output)
    decoder_output = Conv2D(64, (3, 3), activation='relu', padding='same')(decoder_output)
    decoder_output = UpSampling2D((2, 2))(decoder_output)
    decoder_output = Conv2D(32, (3, 3), activation='relu', padding='same')(decoder_output)
    decoder_output = Conv2D(16, (3, 3), activation='relu', padding='same')(decoder_output)
    decoder_output = Conv2D(3, (3, 3), activation='tanh', padding='same')(decoder_output)
    decoder_output = UpSampling2D((2, 2))(decoder_output)
    model = Model(inputs=encoder_input, outputs=decoder_output)
    return model

model = Colorize()
model.compile(optimizer=OPTIMIZER, loss=LOSS, metrics=METRICS)
model.summary()
history = model.fit(train_datagen, batch_size=BATCH_SIZE, validation_data=test_datagen, epochs=EPOCHS, callbacks=model_callbacks)
preds = model.predict(test_datagen)
preds.shape
code
74058807/cell_12
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
print(images_gray.shape)
print(images_col.shape)
code
2038627/cell_42
[ "text_html_output_1.png" ]
test_mean.Item_Weight.plot(kind='hist', color='white', edgecolor='black', facecolor='blue', figsize=(12, 6), title='Item Weight Histogram')
code
2038627/cell_21
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
test = pd.read_csv('../input/testpr-a102/Test.csv', header=0)
test.shape
test.Outlet_Establishment_Year = 2013 - test.Outlet_Establishment_Year
test.Outlet_Establishment_Year.value_counts()
test.isnull().sum()
test[test.Item_Weight.isnull()]
code
2038627/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
test = pd.read_csv('../input/testpr-a102/Test.csv', header=0)
test.shape
test.Outlet_Establishment_Year = 2013 - test.Outlet_Establishment_Year
test.Outlet_Establishment_Year.value_counts()
test.isnull().sum()
test.describe(include='all')
code
2038627/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
train.Outlet_Establishment_Year = 2013 - train.Outlet_Establishment_Year
train['Outlet_Establishment_Year'].value_counts()
train.isnull().sum()
code
2038627/cell_25
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
test = pd.read_csv('../input/testpr-a102/Test.csv', header=0)
test.shape
test.Outlet_Establishment_Year = 2013 - test.Outlet_Establishment_Year
test.Outlet_Establishment_Year.value_counts()
test.isnull().sum()
test[test.Item_Weight.isnull()]
test[test.Outlet_Size.isnull()]
test['Outlet_Size'].value_counts(dropna=False)
code
2038627/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
print(train['Outlet_Establishment_Year'].max())
print('\n')
train.Outlet_Establishment_Year = 2013 - train.Outlet_Establishment_Year
train['Outlet_Establishment_Year'].value_counts()
code
2038627/cell_34
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
train.Outlet_Establishment_Year = 2013 - train.Outlet_Establishment_Year
train['Outlet_Establishment_Year'].value_counts()
test = pd.read_csv('../input/testpr-a102/Test.csv', header=0)
test.shape
test.Outlet_Establishment_Year = 2013 - test.Outlet_Establishment_Year
test.Outlet_Establishment_Year.value_counts()
train.isnull().sum()
test.isnull().sum()
train[train.Item_Weight.isnull()]
train[train.Outlet_Size.isnull()]
train.groupby('Outlet_Type').Outlet_Size.value_counts(dropna=False)
test[test.Item_Weight.isnull()]
test[test.Outlet_Size.isnull()]
train.groupby('Item_Type').Item_Fat_Content.value_counts(dropna=False)
train_1 = train.dropna(subset=['Item_Weight'])
train_mean = train.fillna(value=train_1['Item_Weight'].mean())
test_1 = test.dropna(subset=['Item_Weight'])
test_mean = test.fillna(value=test_1['Item_Weight'].mean())
train_2 = train.dropna(subset=['Item_Weight'])
train_median = train.fillna(value=train_2['Item_Weight'].median())
test_2 = test.dropna(subset=['Item_Weight'])
test_median = train.fillna(value=train_2['Item_Weight'].median())
train_median.head()
code
2038627/cell_44
[ "text_plain_output_1.png", "image_output_1.png" ]
train_median.Item_Weight.plot(kind='hist', color='white', edgecolor='black', facecolor='blue', figsize=(12, 6), title='Item Weight Histogram')
code
2038627/cell_20
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
train.Outlet_Establishment_Year = 2013 - train.Outlet_Establishment_Year
train['Outlet_Establishment_Year'].value_counts()
train.isnull().sum()
train[train.Item_Weight.isnull()]
train[train.Outlet_Size.isnull()]
train.groupby('Outlet_Type').Outlet_Size.value_counts(dropna=False)
code
2038627/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
test = pd.read_csv('../input/testpr-a102/Test.csv', header=0)
test.shape
test.head()
code
2038627/cell_40
[ "text_html_output_1.png" ]
train_mean.Item_Weight.plot(kind='hist', color='white', edgecolor='black', facecolor='blue', figsize=(12, 6), title='Item Weight Histogram')
code
2038627/cell_29
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
train.Outlet_Establishment_Year = 2013 - train.Outlet_Establishment_Year
train['Outlet_Establishment_Year'].value_counts()
train.isnull().sum()
train[train.Item_Weight.isnull()]
train[train.Outlet_Size.isnull()]
train.groupby('Outlet_Type').Outlet_Size.value_counts(dropna=False)
train.groupby('Item_Type').Item_Fat_Content.value_counts(dropna=False)
code
2038627/cell_39
[ "text_html_output_1.png" ]
train.Item_Weight.plot(kind='hist', color='white', edgecolor='black', facecolor='blue', figsize=(12, 6), title='Item Weight Histogram')
code
2038627/cell_41
[ "text_html_output_1.png" ]
test.Item_Weight.plot(kind='hist', color='white', edgecolor='black', facecolor='blue', figsize=(12, 6), title='Item Weight Histogram')
code
2038627/cell_2
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
print(train.shape)
code
2038627/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
test = pd.read_csv('../input/testpr-a102/Test.csv', header=0)
test.shape
test.Outlet_Establishment_Year = 2013 - test.Outlet_Establishment_Year
test.Outlet_Establishment_Year.value_counts()
test.isnull().head()
code
2038627/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
train.Outlet_Establishment_Year = 2013 - train.Outlet_Establishment_Year
train['Outlet_Establishment_Year'].value_counts()
train.isnull().sum()
train[train.Item_Weight.isnull()]
train[train.Outlet_Size.isnull()]
train['Outlet_Size'].value_counts(dropna=False)
code
2038627/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
test = pd.read_csv('../input/testpr-a102/Test.csv', header=0)
test.shape
print(test['Outlet_Establishment_Year'].max())
test.Outlet_Establishment_Year = 2013 - test.Outlet_Establishment_Year
test.Outlet_Establishment_Year.value_counts()
code
2038627/cell_45
[ "text_plain_output_1.png", "image_output_1.png" ]
test_mode.Item_Weight.plot(kind='hist', color='white', edgecolor='black', facecolor='blue', figsize=(12, 6), title='Item Weight Histogram')
code
2038627/cell_18
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
train.Outlet_Establishment_Year = 2013 - train.Outlet_Establishment_Year
train['Outlet_Establishment_Year'].value_counts()
train.isnull().sum()
train[train.Item_Weight.isnull()]
train[train.Outlet_Size.isnull()]
train['Item_Weight'].value_counts(dropna=False)
code
2038627/cell_32
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
train.Outlet_Establishment_Year = 2013 - train.Outlet_Establishment_Year
train['Outlet_Establishment_Year'].value_counts()
test = pd.read_csv('../input/testpr-a102/Test.csv', header=0)
test.shape
test.Outlet_Establishment_Year = 2013 - test.Outlet_Establishment_Year
test.Outlet_Establishment_Year.value_counts()
train.isnull().sum()
test.isnull().sum()
train[train.Item_Weight.isnull()]
train[train.Outlet_Size.isnull()]
train.groupby('Outlet_Type').Outlet_Size.value_counts(dropna=False)
test[test.Item_Weight.isnull()]
test[test.Outlet_Size.isnull()]
train.groupby('Item_Type').Item_Fat_Content.value_counts(dropna=False)
train_1 = train.dropna(subset=['Item_Weight'])
train_mean = train.fillna(value=train_1['Item_Weight'].mean())
test_1 = test.dropna(subset=['Item_Weight'])
test_mean = test.fillna(value=test_1['Item_Weight'].mean())
test_mean.head()
code
2038627/cell_28
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
test = pd.read_csv('../input/testpr-a102/Test.csv', header=0)
test.shape
test.Outlet_Establishment_Year = 2013 - test.Outlet_Establishment_Year
test.Outlet_Establishment_Year.value_counts()
test.isnull().sum()
test[test.Item_Weight.isnull()]
test[test.Outlet_Size.isnull()]
test.head()
code
2038627/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
train.Outlet_Establishment_Year = 2013 - train.Outlet_Establishment_Year
train['Outlet_Establishment_Year'].value_counts()
train.isnull().head()
code
2038627/cell_15
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
train.Outlet_Establishment_Year = 2013 - train.Outlet_Establishment_Year
train['Outlet_Establishment_Year'].value_counts()
train.isnull().sum()
train[train.Item_Weight.isnull()]
train[train.Outlet_Size.isnull()]
code
2038627/cell_38
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
train.Outlet_Establishment_Year = 2013 - train.Outlet_Establishment_Year
train['Outlet_Establishment_Year'].value_counts()
test = pd.read_csv('../input/testpr-a102/Test.csv', header=0)
test.shape
test.Outlet_Establishment_Year = 2013 - test.Outlet_Establishment_Year
test.Outlet_Establishment_Year.value_counts()
train.isnull().sum()
test.isnull().sum()
train[train.Item_Weight.isnull()]
train[train.Outlet_Size.isnull()]
train.groupby('Outlet_Type').Outlet_Size.value_counts(dropna=False)
test[test.Item_Weight.isnull()]
test[test.Outlet_Size.isnull()]
train.groupby('Item_Type').Item_Fat_Content.value_counts(dropna=False)
train_1 = train.dropna(subset=['Item_Weight'])
train_mean = train.fillna(value=train_1['Item_Weight'].mean())
test_1 = test.dropna(subset=['Item_Weight'])
test_mean = test.fillna(value=test_1['Item_Weight'].mean())
train_2 = train.dropna(subset=['Item_Weight'])
train_median = train.fillna(value=train_2['Item_Weight'].median())
test_2 = test.dropna(subset=['Item_Weight'])
test_median = train.fillna(value=train_2['Item_Weight'].median())
train_3 = train.dropna(subset=['Item_Weight'])
train_mode = train.fillna(value=train_3['Item_Weight'].mode())
test_3 = test.dropna(subset=['Item_Weight'])
test_mode = test.fillna(value=test_3['Item_Weight'].mode())
test_mode.head()
code
2038627/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
train.head()
code
2038627/cell_35
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
train.Outlet_Establishment_Year = 2013 - train.Outlet_Establishment_Year
train['Outlet_Establishment_Year'].value_counts()
test = pd.read_csv('../input/testpr-a102/Test.csv', header=0)
test.shape
test.Outlet_Establishment_Year = 2013 - test.Outlet_Establishment_Year
test.Outlet_Establishment_Year.value_counts()
train.isnull().sum()
test.isnull().sum()
train[train.Item_Weight.isnull()]
train[train.Outlet_Size.isnull()]
train.groupby('Outlet_Type').Outlet_Size.value_counts(dropna=False)
test[test.Item_Weight.isnull()]
test[test.Outlet_Size.isnull()]
train.groupby('Item_Type').Item_Fat_Content.value_counts(dropna=False)
train_1 = train.dropna(subset=['Item_Weight'])
train_mean = train.fillna(value=train_1['Item_Weight'].mean())
test_1 = test.dropna(subset=['Item_Weight'])
test_mean = test.fillna(value=test_1['Item_Weight'].mean())
train_2 = train.dropna(subset=['Item_Weight'])
train_median = train.fillna(value=train_2['Item_Weight'].median())
test_2 = test.dropna(subset=['Item_Weight'])
test_median = train.fillna(value=train_2['Item_Weight'].median())
test_median.head()
code
2038627/cell_43
[ "text_html_output_1.png" ]
train_mode.Item_Weight.plot(kind='hist', color='white', edgecolor='black', facecolor='blue', figsize=(12, 6), title='Item Weight Histogram')
code
2038627/cell_31
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
train.Outlet_Establishment_Year = 2013 - train.Outlet_Establishment_Year
train['Outlet_Establishment_Year'].value_counts()
test = pd.read_csv('../input/testpr-a102/Test.csv', header=0)
test.shape
test.Outlet_Establishment_Year = 2013 - test.Outlet_Establishment_Year
test.Outlet_Establishment_Year.value_counts()
train.isnull().sum()
test.isnull().sum()
train[train.Item_Weight.isnull()]
train[train.Outlet_Size.isnull()]
train.groupby('Outlet_Type').Outlet_Size.value_counts(dropna=False)
test[test.Item_Weight.isnull()]
test[test.Outlet_Size.isnull()]
train.groupby('Item_Type').Item_Fat_Content.value_counts(dropna=False)
train_1 = train.dropna(subset=['Item_Weight'])
train_mean = train.fillna(value=train_1['Item_Weight'].mean())
test_1 = test.dropna(subset=['Item_Weight'])
test_mean = test.fillna(value=test_1['Item_Weight'].mean())
train_mean.head()
code
2038627/cell_46
[ "text_plain_output_1.png", "image_output_1.png" ]
test_median.Item_Weight.plot(kind='hist', color='white', edgecolor='black', facecolor='blue', figsize=(12, 6), title='Item Weight Histogram')
code
2038627/cell_24
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
test = pd.read_csv('../input/testpr-a102/Test.csv', header=0)
test.shape
test.Outlet_Establishment_Year = 2013 - test.Outlet_Establishment_Year
test.Outlet_Establishment_Year.value_counts()
test.isnull().sum()
test[test.Item_Weight.isnull()]
test[test.Outlet_Size.isnull()]
test['Item_Weight'].value_counts(dropna=False)
code
2038627/cell_14
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
train.Outlet_Establishment_Year = 2013 - train.Outlet_Establishment_Year
train['Outlet_Establishment_Year'].value_counts()
train.isnull().sum()
train[train.Item_Weight.isnull()]
code
2038627/cell_22
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
test = pd.read_csv('../input/testpr-a102/Test.csv', header=0)
test.shape
test.Outlet_Establishment_Year = 2013 - test.Outlet_Establishment_Year
test.Outlet_Establishment_Year.value_counts()
test.isnull().sum()
test[test.Item_Weight.isnull()]
test[test.Outlet_Size.isnull()]
code
2038627/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
train.Outlet_Establishment_Year = 2013 - train.Outlet_Establishment_Year
train['Outlet_Establishment_Year'].value_counts()
train.isnull().sum()
train.describe(include='all')
code
2038627/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
train.Outlet_Establishment_Year = 2013 - train.Outlet_Establishment_Year
train['Outlet_Establishment_Year'].value_counts()
train.isnull().sum()
train[train.Item_Weight.isnull()]
train[train.Outlet_Size.isnull()]
train.groupby('Outlet_Type').Outlet_Size.value_counts(dropna=False)
train.head()
code
2038627/cell_37
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
train.Outlet_Establishment_Year = 2013 - train.Outlet_Establishment_Year
train['Outlet_Establishment_Year'].value_counts()
test = pd.read_csv('../input/testpr-a102/Test.csv', header=0)
test.shape
test.Outlet_Establishment_Year = 2013 - test.Outlet_Establishment_Year
test.Outlet_Establishment_Year.value_counts()
train.isnull().sum()
test.isnull().sum()
train[train.Item_Weight.isnull()]
train[train.Outlet_Size.isnull()]
train.groupby('Outlet_Type').Outlet_Size.value_counts(dropna=False)
test[test.Item_Weight.isnull()]
test[test.Outlet_Size.isnull()]
train.groupby('Item_Type').Item_Fat_Content.value_counts(dropna=False)
train_1 = train.dropna(subset=['Item_Weight'])
train_mean = train.fillna(value=train_1['Item_Weight'].mean())
test_1 = test.dropna(subset=['Item_Weight'])
test_mean = test.fillna(value=test_1['Item_Weight'].mean())
train_2 = train.dropna(subset=['Item_Weight'])
train_median = train.fillna(value=train_2['Item_Weight'].median())
test_2 = test.dropna(subset=['Item_Weight'])
test_median = train.fillna(value=train_2['Item_Weight'].median())
train_3 = train.dropna(subset=['Item_Weight'])
train_mode = train.fillna(value=train_3['Item_Weight'].mode())
test_3 = test.dropna(subset=['Item_Weight'])
test_mode = test.fillna(value=test_3['Item_Weight'].mode())
train_mode.head()
code
2038627/cell_12
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
test = pd.read_csv('../input/testpr-a102/Test.csv', header=0)
test.shape
test.Outlet_Establishment_Year = 2013 - test.Outlet_Establishment_Year
test.Outlet_Establishment_Year.value_counts()
test.isnull().sum()
code
2038627/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train/Train.csv', header=0)
test = pd.read_csv('../input/testpr-a102/Test.csv', header=0)
test.shape
code
73094936/cell_42
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np

numImages = 16
fig = plt.figure(figsize=(7, 7))
imgData = np.zeros(shape=(numImages, 36963))
for i in range(1, numImages + 1):
    filename = '../input/foodpics/pics/Picture' + str(i) + '.jpg'
    img = mpimg.imread(filename)
    ax = fig.add_subplot(4, 4, i)
    plt.imshow(img)
    plt.axis('off')
    ax.set_title(str(i))
    imgData[i - 1] = np.array(img.flatten()).reshape(1, img.shape[0] * img.shape[1] * img.shape[2])
code
73094936/cell_21
[ "text_html_output_1.png" ]
import io
import pandas as pd
import requests
import pandas as pd

data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data', header=None)
data.columns = ['Sample code', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape', 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses', 'Class']
data = data.drop(['Sample code'], axis=1)

import requests
import io
url = 'https://cgi.luddy.indiana.edu/~yye/b565/data/DTW_prec.csv'
s = requests.get(url).content
daily = pd.read_csv(io.StringIO(s.decode('utf-8')), header='infer')
daily.index = pd.to_datetime(daily['DATE'])
daily = daily['PRCP']
ax = daily.plot(kind='line', figsize=(15, 3))
ax.set_title('Daily Precipitation (variance = %.4f)' % daily.var())
code
73094936/cell_13
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import pandas as pd

data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data', header=None)
data.columns = ['Sample code', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape', 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses', 'Class']
data = data.drop(['Sample code'], axis=1)

import numpy as np
data = data.replace('?', np.NaN)
data2 = data['Bare Nuclei']
data2 = data2.fillna(data2.median())
data2 = data.dropna()
Z = (data2 - data2.mean()) / data2.std()
Z[20:25]
code
73094936/cell_9
[ "image_output_1.png" ]
import numpy as np
import pandas as pd
import pandas as pd

data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data', header=None)
data.columns = ['Sample code', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape', 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses', 'Class']
data = data.drop(['Sample code'], axis=1)

import numpy as np
data = data.replace('?', np.NaN)
data2 = data['Bare Nuclei']
data2 = data2.fillna(data2.median())
print('Number of rows in original data = %d' % data.shape[0])
data2 = data.dropna()
print('Number of rows after discarding missing values = %d' % data2.shape[0])
code
73094936/cell_25
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import io
import pandas as pd
import requests
import pandas as pd

data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data', header=None)
data.columns = ['Sample code', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape', 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses', 'Class']
data = data.drop(['Sample code'], axis=1)

#modified to load from remote URL, Ye 2021
import requests
import io
url = "https://cgi.luddy.indiana.edu/~yye/b565/data/DTW_prec.csv"
s = requests.get(url).content
daily = pd.read_csv(io.StringIO(s.decode('utf-8')), header='infer')
daily.index = pd.to_datetime(daily['DATE'])
daily = daily['PRCP']
ax = daily.plot(kind='line', figsize=(15, 3))
ax.set_title('Daily Precipitation (variance = %.4f)' % daily.var())
monthly = daily.groupby(pd.Grouper(freq='M')).sum()
ax = monthly.plot(kind='line', figsize=(15, 3))
ax.set_title('Monthly Precipitation (variance = %.4f)' % monthly.var())
annual = daily.groupby(pd.Grouper(freq='Y')).sum()
ax = annual.plot(kind='line', figsize=(15, 3))
ax.set_title('Annual Precipitation (variance = %.4f)' % annual.var())
code
73094936/cell_23
[ "text_plain_output_1.png" ]
import io
import pandas as pd
import requests
import pandas as pd

data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data', header=None)
data.columns = ['Sample code', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape', 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses', 'Class']
data = data.drop(['Sample code'], axis=1)

#modified to load from remote URL, Ye 2021
import requests
import io
url = "https://cgi.luddy.indiana.edu/~yye/b565/data/DTW_prec.csv"
s = requests.get(url).content
daily = pd.read_csv(io.StringIO(s.decode('utf-8')), header='infer')
daily.index = pd.to_datetime(daily['DATE'])
daily = daily['PRCP']
ax = daily.plot(kind='line', figsize=(15, 3))
ax.set_title('Daily Precipitation (variance = %.4f)' % daily.var())
monthly = daily.groupby(pd.Grouper(freq='M')).sum()
ax = monthly.plot(kind='line', figsize=(15, 3))
ax.set_title('Monthly Precipitation (variance = %.4f)' % monthly.var())
code
73094936/cell_33
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import pandas as pd

data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data', header=None)
data.columns = ['Sample code', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape', 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses', 'Class']
data = data.drop(['Sample code'], axis=1)

import numpy as np
data = data.replace('?', np.NaN)
data2 = data['Bare Nuclei']
data2 = data2.fillna(data2.median())
data2 = data.dropna()
Z = (data2 - data2.mean()) / data2.std()
Z[20:25]
dups = data.duplicated()
data.loc[[11, 28]]
data2 = data.drop_duplicates()
sample = data.sample(n=3)
sample
sample = data.sample(frac=0.01, random_state=1)
sample
sample = data.sample(frac=0.01, replace=True, random_state=1)
sample
code
73094936/cell_44
[ "text_plain_output_1.png" ]
from sklearn.decomposition import PCA
import io
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
import requests
import pandas as pd

data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data', header=None)
data.columns = ['Sample code', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape', 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses', 'Class']
data = data.drop(['Sample code'], axis=1)

import numpy as np
data = data.replace('?', np.NaN)
data2 = data['Bare Nuclei']
data2 = data2.fillna(data2.median())
data2 = data.dropna()
Z = (data2 - data2.mean()) / data2.std()
Z[20:25]
dups = data.duplicated()
data.loc[[11, 28]]
data2 = data.drop_duplicates()

#modified to load from remote URL, Ye 2021
import requests
import io
url = "https://cgi.luddy.indiana.edu/~yye/b565/data/DTW_prec.csv"
s = requests.get(url).content
daily = pd.read_csv(io.StringIO(s.decode('utf-8')), header='infer')
daily.index = pd.to_datetime(daily['DATE'])
daily = daily['PRCP']
ax = daily.plot(kind='line', figsize=(15, 3))
ax.set_title('Daily Precipitation (variance = %.4f)' % daily.var())
monthly = daily.groupby(pd.Grouper(freq='M')).sum()
ax = monthly.plot(kind='line', figsize=(15, 3))
ax.set_title('Monthly Precipitation (variance = %.4f)' % monthly.var())
annual = daily.groupby(pd.Grouper(freq='Y')).sum()
ax = annual.plot(kind='line', figsize=(15, 3))
ax.set_title('Annual Precipitation (variance = %.4f)' % annual.var())

sample = data.sample(n=3)
sample
sample = data.sample(frac=0.01, random_state=1)
sample
sample = data.sample(frac=0.01, replace=True, random_state=1)
sample
bins = pd.cut(data['Clump Thickness'], 4)
bins.value_counts(sort=False)
bins = pd.qcut(data['Clump Thickness'], 4)
bins.value_counts(sort=False)

import pandas as pd
from sklearn.decomposition import PCA
numComponents = 2
pca = PCA(n_components=numComponents)
pca.fit(imgData)
projected = pca.transform(imgData)
projected = pd.DataFrame(projected, columns=['pc1', 'pc2'], index=range(1, numImages + 1))
projected['food'] = ['burger', 'burger', 'burger', 'burger', 'drink', 'drink', 'drink', 'drink', 'pasta', 'pasta', 'pasta', 'pasta', 'chicken', 'chicken', 'chicken', 'chicken']
projected
code
73094936/cell_40
[ "text_html_output_1.png" ]
import io
import numpy as np
import pandas as pd
import requests
import pandas as pd

data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data', header=None)
data.columns = ['Sample code', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape', 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses', 'Class']
data = data.drop(['Sample code'], axis=1)

import numpy as np
data = data.replace('?', np.NaN)
data2 = data['Bare Nuclei']
data2 = data2.fillna(data2.median())
data2 = data.dropna()
Z = (data2 - data2.mean()) / data2.std()
Z[20:25]
dups = data.duplicated()
data.loc[[11, 28]]
data2 = data.drop_duplicates()

#modified to load from remote URL, Ye 2021
import requests
import io
url = "https://cgi.luddy.indiana.edu/~yye/b565/data/DTW_prec.csv"
s = requests.get(url).content
daily = pd.read_csv(io.StringIO(s.decode('utf-8')), header='infer')
daily.index = pd.to_datetime(daily['DATE'])
daily = daily['PRCP']
ax = daily.plot(kind='line', figsize=(15, 3))
ax.set_title('Daily Precipitation (variance = %.4f)' % daily.var())
monthly = daily.groupby(pd.Grouper(freq='M')).sum()
ax = monthly.plot(kind='line', figsize=(15, 3))
ax.set_title('Monthly Precipitation (variance = %.4f)' % monthly.var())
annual = daily.groupby(pd.Grouper(freq='Y')).sum()
ax = annual.plot(kind='line', figsize=(15, 3))
ax.set_title('Annual Precipitation (variance = %.4f)' % annual.var())

sample = data.sample(n=3)
sample
sample = data.sample(frac=0.01, random_state=1)
sample
sample = data.sample(frac=0.01, replace=True, random_state=1)
sample
bins = pd.cut(data['Clump Thickness'], 4)
bins.value_counts(sort=False)
bins = pd.qcut(data['Clump Thickness'], 4)
bins.value_counts(sort=False)
code
73094936/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd
import pandas as pd

data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data', header=None)
data.columns = ['Sample code', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape', 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses', 'Class']
data = data.drop(['Sample code'], axis=1)

import numpy as np
data = data.replace('?', np.NaN)
data2 = data['Bare Nuclei']
data2 = data2.fillna(data2.median())
data2 = data.dropna()
Z = (data2 - data2.mean()) / data2.std()
Z[20:25]
dups = data.duplicated()
data.loc[[11, 28]]
data2 = data.drop_duplicates()
sample = data.sample(n=3)
sample
code
73094936/cell_11
[ "text_html_output_1.png", "text_plain_output_1.png" ]
data2 = data.drop(['Class'], axis=1)
data2['Bare Nuclei'] = pd.to_numeric(data2['Bare Nuclei'])
data2.boxplot(figsize=(20, 3))
code
73094936/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd
import pandas as pd

data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data', header=None)
data.columns = ['Sample code', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape', 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses', 'Class']
data = data.drop(['Sample code'], axis=1)

import numpy as np
data = data.replace('?', np.NaN)
data2 = data['Bare Nuclei']
data2 = data2.fillna(data2.median())
data2 = data.dropna()
Z = (data2 - data2.mean()) / data2.std()
Z[20:25]
dups = data.duplicated()
data.loc[[11, 28]]
print('Number of rows before discarding duplicates = %d' % data.shape[0])
data2 = data.drop_duplicates()
print('Number of rows after discarding duplicates = %d' % data2.shape[0])
code
73094936/cell_7
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import pandas as pd

data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data', header=None)
data.columns = ['Sample code', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape', 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses', 'Class']
data = data.drop(['Sample code'], axis=1)

import numpy as np
data = data.replace('?', np.NaN)
data2 = data['Bare Nuclei']
print('Before replacing missing values:')
print(data2[20:25])
data2 = data2.fillna(data2.median())
print('\nAfter replacing missing values:')
print(data2[20:25])
code
73094936/cell_15
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import pandas as pd

data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data', header=None)
data.columns = ['Sample code', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape', 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses', 'Class']
data = data.drop(['Sample code'], axis=1)

import numpy as np
data = data.replace('?', np.NaN)
data2 = data['Bare Nuclei']
data2 = data2.fillna(data2.median())
data2 = data.dropna()
Z = (data2 - data2.mean()) / data2.std()
Z[20:25]
print('Number of rows before discarding outliers = %d' % Z.shape[0])
Z2 = Z.loc[((Z > -3).sum(axis=1) == 9) & ((Z <= 3).sum(axis=1) == 9), :]
print('Number of rows after discarding missing values = %d' % Z2.shape[0])
code
73094936/cell_38
[ "text_html_output_1.png" ]
import io
import numpy as np
import pandas as pd
import requests
import pandas as pd

data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data', header=None)
data.columns = ['Sample code', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape', 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses', 'Class']
data = data.drop(['Sample code'], axis=1)

import numpy as np
data = data.replace('?', np.NaN)
data2 = data['Bare Nuclei']
data2 = data2.fillna(data2.median())
data2 = data.dropna()
Z = (data2 - data2.mean()) / data2.std()
Z[20:25]
dups = data.duplicated()
data.loc[[11, 28]]
data2 = data.drop_duplicates()

#modified to load from remote URL, Ye 2021
import requests
import io
url = "https://cgi.luddy.indiana.edu/~yye/b565/data/DTW_prec.csv"
s = requests.get(url).content
daily = pd.read_csv(io.StringIO(s.decode('utf-8')), header='infer')
daily.index = pd.to_datetime(daily['DATE'])
daily = daily['PRCP']
ax = daily.plot(kind='line', figsize=(15, 3))
ax.set_title('Daily Precipitation (variance = %.4f)' % daily.var())
monthly = daily.groupby(pd.Grouper(freq='M')).sum()
ax = monthly.plot(kind='line', figsize=(15, 3))
ax.set_title('Monthly Precipitation (variance = %.4f)' % monthly.var())
annual = daily.groupby(pd.Grouper(freq='Y')).sum()
ax = annual.plot(kind='line', figsize=(15, 3))
ax.set_title('Annual Precipitation (variance = %.4f)' % annual.var())

sample = data.sample(n=3)
sample
sample = data.sample(frac=0.01, random_state=1)
sample
sample = data.sample(frac=0.01, replace=True, random_state=1)
sample
bins = pd.cut(data['Clump Thickness'], 4)
bins.value_counts(sort=False)
code
73094936/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import pandas as pd

data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data', header=None)
data.columns = ['Sample code', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape', 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses', 'Class']
data = data.drop(['Sample code'], axis=1)
print('Number of instances = %d' % data.shape[0])
print('Number of attributes = %d' % data.shape[1])
data.head()
code
73094936/cell_17
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import pandas as pd

data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data', header=None)
data.columns = ['Sample code', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape', 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses', 'Class']
data = data.drop(['Sample code'], axis=1)

import numpy as np
data = data.replace('?', np.NaN)
data2 = data['Bare Nuclei']
data2 = data2.fillna(data2.median())
data2 = data.dropna()
dups = data.duplicated()
print('Number of duplicate rows = %d' % dups.sum())
data.loc[[11, 28]]
code
73094936/cell_31
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import pandas as pd
import pandas as pd

data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data', header=None)
data.columns = ['Sample code', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape', 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses', 'Class']
data = data.drop(['Sample code'], axis=1)

import numpy as np
data = data.replace('?', np.NaN)
data2 = data['Bare Nuclei']
data2 = data2.fillna(data2.median())
data2 = data.dropna()
Z = (data2 - data2.mean()) / data2.std()
Z[20:25]
dups = data.duplicated()
data.loc[[11, 28]]
data2 = data.drop_duplicates()
sample = data.sample(n=3)
sample
sample = data.sample(frac=0.01, random_state=1)
sample
code
73094936/cell_46
[ "text_plain_output_1.png" ]
from sklearn.decomposition import PCA
import io
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
import requests
import pandas as pd

data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data', header=None)
data.columns = ['Sample code', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape', 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses', 'Class']
data = data.drop(['Sample code'], axis=1)

import numpy as np
data = data.replace('?', np.NaN)
data2 = data['Bare Nuclei']
data2 = data2.fillna(data2.median())
data2 = data.dropna()
Z = (data2 - data2.mean()) / data2.std()
Z[20:25]
dups = data.duplicated()
data.loc[[11, 28]]
data2 = data.drop_duplicates()

#modified to load from remote URL, Ye 2021
import requests
import io
url = "https://cgi.luddy.indiana.edu/~yye/b565/data/DTW_prec.csv"
s = requests.get(url).content
daily = pd.read_csv(io.StringIO(s.decode('utf-8')), header='infer')
daily.index = pd.to_datetime(daily['DATE'])
daily = daily['PRCP']
ax = daily.plot(kind='line', figsize=(15, 3))
ax.set_title('Daily Precipitation (variance = %.4f)' % daily.var())
monthly = daily.groupby(pd.Grouper(freq='M')).sum()
ax = monthly.plot(kind='line', figsize=(15, 3))
ax.set_title('Monthly Precipitation (variance = %.4f)' % monthly.var())
annual = daily.groupby(pd.Grouper(freq='Y')).sum()
ax = annual.plot(kind='line', figsize=(15, 3))
ax.set_title('Annual Precipitation (variance = %.4f)' % annual.var())

sample = data.sample(n=3)
sample
sample = data.sample(frac=0.01, random_state=1)
sample
sample = data.sample(frac=0.01, replace=True, random_state=1)
sample
bins = pd.cut(data['Clump Thickness'], 4)
bins.value_counts(sort=False)
bins = pd.qcut(data['Clump Thickness'], 4)
bins.value_counts(sort=False)

import pandas as pd
from sklearn.decomposition import PCA
numComponents = 2
pca = PCA(n_components=numComponents)
pca.fit(imgData)
projected = pca.transform(imgData)
projected = pd.DataFrame(projected, columns=['pc1', 'pc2'], index=range(1, numImages + 1))
projected['food'] = ['burger', 'burger', 'burger', 'burger', 'drink', 'drink', 'drink', 'drink', 'pasta', 'pasta', 'pasta', 'pasta', 'chicken', 'chicken', 'chicken', 'chicken']
projected

import matplotlib.pyplot as plt
colors = {'burger': 'b', 'drink': 'r', 'pasta': 'g', 'chicken': 'k'}
markerTypes = {'burger': '+', 'drink': 'x', 'pasta': 'o', 'chicken': 's'}
for foodType in markerTypes:
    d = projected[projected['food'] == foodType]
    plt.scatter(d['pc1'], d['pc2'], c=colors[foodType], s=60, marker=markerTypes[foodType])
code
73094936/cell_27
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import pandas as pd

data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data', header=None)
data.columns = ['Sample code', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape', 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses', 'Class']
data = data.drop(['Sample code'], axis=1)

import numpy as np
data = data.replace('?', np.NaN)
data2 = data['Bare Nuclei']
data2 = data2.fillna(data2.median())
data2 = data.dropna()
Z = (data2 - data2.mean()) / data2.std()
Z[20:25]
dups = data.duplicated()
data.loc[[11, 28]]
data2 = data.drop_duplicates()
data.head()
code
73094936/cell_5
[ "image_output_1.png" ]
import numpy as np
import pandas as pd
import pandas as pd

data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data', header=None)
data.columns = ['Sample code', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape', 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses', 'Class']
data = data.drop(['Sample code'], axis=1)

import numpy as np
data = data.replace('?', np.NaN)
print('Number of instances = %d' % data.shape[0])
print('Number of attributes = %d' % data.shape[1])
print('Number of missing values:')
for col in data.columns:
    print('\t%s: %d' % (col, data[col].isna().sum()))
code
73094936/cell_36
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import pandas as pd

data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data', header=None)
data.columns = ['Sample code', 'Clump Thickness', 'Uniformity of Cell Size', 'Uniformity of Cell Shape', 'Marginal Adhesion', 'Single Epithelial Cell Size', 'Bare Nuclei', 'Bland Chromatin', 'Normal Nucleoli', 'Mitoses', 'Class']
data = data.drop(['Sample code'], axis=1)

import numpy as np
data = data.replace('?', np.NaN)
data2 = data['Bare Nuclei']
data2 = data2.fillna(data2.median())
data2 = data.dropna()
Z = (data2 - data2.mean()) / data2.std()
Z[20:25]
dups = data.duplicated()
data.loc[[11, 28]]
data2 = data.drop_duplicates()
sample = data.sample(n=3)
sample
sample = data.sample(frac=0.01, random_state=1)
sample
sample = data.sample(frac=0.01, replace=True, random_state=1)
sample
data['Clump Thickness'].hist(bins=10)
data['Clump Thickness'].value_counts(sort=False)
code
89132155/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
2022597/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb

df = pd.read_csv('../input/kc_house_data.csv')

#df correlation matrix
f, ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)

var = 'sqft_living'
data = pd.concat([df['price'], df[var]], axis=1)

#boxplot 'grade'/'price'
var1 = 'grade'
data = pd.concat([df['price'], df[var1]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sb.boxplot(x=var1, y="price", data=data)
fig.axis(ymin=0, ymax=8000000);

var2 = 'sqft_living15'
data = pd.concat([df['price'], df[var2]], axis=1)
var3 = 'sqft_above'
data = pd.concat([df['price'], df[var3]], axis=1)
data.plot.scatter(x=var3, y='price', ylim=(0, 8000000))
code
2022597/cell_6
[ "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/kc_house_data.csv')
df.describe()
code
2022597/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb

df = pd.read_csv('../input/kc_house_data.csv')

#df correlation matrix
f, ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)

var = 'sqft_living'
data = pd.concat([df['price'], df[var]], axis=1)
var1 = 'grade'
data = pd.concat([df['price'], df[var1]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sb.boxplot(x=var1, y='price', data=data)
fig.axis(ymin=0, ymax=8000000)
code
2022597/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/kc_house_data.csv')
df.info()
code
2022597/cell_8
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb

df = pd.read_csv('../input/kc_house_data.csv')
f, ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
code
2022597/cell_16
[ "image_output_1.png" ]
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb

df = pd.read_csv('../input/kc_house_data.csv')

#df correlation matrix
f, ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)

var = 'sqft_living'
data = pd.concat([df['price'], df[var]], axis=1)

#boxplot 'grade'/'price'
var1 = 'grade'
data = pd.concat([df['price'], df[var1]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sb.boxplot(x=var1, y="price", data=data)
fig.axis(ymin=0, ymax=8000000);

var2 = 'sqft_living15'
data = pd.concat([df['price'], df[var2]], axis=1)
var3 = 'sqft_above'
data = pd.concat([df['price'], df[var3]], axis=1)
var4 = 'bathrooms'
data = pd.concat([df['price'], df[var4]], axis=1)

X = df[[var, var1, var2, var3, var4]]
y = df['price']
LinReg = LinearRegression(normalize=True)
LinReg.fit(X, y)
print(LinReg.score(X, y))
code
2022597/cell_3
[ "image_output_1.png" ]
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
import statsmodels.api as sm
from sklearn.preprocessing import StandardScaler
scale = StandardScaler()
code
2022597/cell_17
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb
import statsmodels.api as sm
df = pd.read_csv('../input/kc_house_data.csv')
#df correlation matrix
f,ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
var = 'sqft_living'
data = pd.concat([df['price'], df[var]], axis=1)
#boxplot 'grade'/'price'
var1 = 'grade'
data = pd.concat([df['price'], df[var1]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sb.boxplot(x=var1, y="price", data=data)
fig.axis(ymin=0, ymax=8000000);
var2 = 'sqft_living15'
data = pd.concat([df['price'], df[var2]], axis=1)
var3 = 'sqft_above'
data = pd.concat([df['price'], df[var3]], axis=1)
var4 = 'bathrooms'
data = pd.concat([df['price'], df[var4]], axis=1)
X = df[[var, var1, var2, var3, var4]]
y = df['price']
est = sm.OLS(y, X).fit()
est.summary()
code
2022597/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb
df = pd.read_csv('../input/kc_house_data.csv')
#df correlation matrix
f,ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
var = 'sqft_living'
data = pd.concat([df['price'], df[var]], axis=1)
#boxplot 'grade'/'price'
var1 = 'grade'
data = pd.concat([df['price'], df[var1]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sb.boxplot(x=var1, y="price", data=data)
fig.axis(ymin=0, ymax=8000000);
var2 = 'sqft_living15'
data = pd.concat([df['price'], df[var2]], axis=1)
var3 = 'sqft_above'
data = pd.concat([df['price'], df[var3]], axis=1)
var4 = 'bathrooms'
data = pd.concat([df['price'], df[var4]], axis=1)
data.plot.scatter(x=var4, y='price', ylim=(0, 8000000))
code
2022597/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb
df = pd.read_csv('../input/kc_house_data.csv')
#df correlation matrix
f,ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
var = 'sqft_living'
data = pd.concat([df['price'], df[var]], axis=1)
data.plot.scatter(x=var, y='price', ylim=(0, 8000000))
code
2022597/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sb
df = pd.read_csv('../input/kc_house_data.csv')
#df correlation matrix
f,ax = plt.subplots(figsize=(12, 9))
sb.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
var = 'sqft_living'
data = pd.concat([df['price'], df[var]], axis=1)
#boxplot 'grade'/'price'
var1 = 'grade'
data = pd.concat([df['price'], df[var1]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sb.boxplot(x=var1, y="price", data=data)
fig.axis(ymin=0, ymax=8000000);
var2 = 'sqft_living15'
data = pd.concat([df['price'], df[var2]], axis=1)
data.plot.scatter(x=var2, y='price', ylim=(0, 8000000))
code
2022597/cell_5
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/kc_house_data.csv')
df.head()
code
1009893/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
from glob import glob
import cv2
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import os
from glob import glob
TRAIN_DATA = '../input/train'
type_1_files = glob(os.path.join(TRAIN_DATA, 'Type_1', '*.jpg'))
type_2_files = glob(os.path.join(TRAIN_DATA, 'Type_2', '*.jpg'))
type_3_files = glob(os.path.join(TRAIN_DATA, 'Type_3', '*.jpg'))
type_1_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_1')) + 1:-4] for s in type_1_files])
type_2_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_2')) + 1:-4] for s in type_2_files])
type_3_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_3')) + 1:-4] for s in type_3_files])

def get_filename(image_id, image_type):
    """
    Method to get image file path from its id and type
    """
    try:
        ['Type_1', 'Type_2', 'Type_3'].index(image_type)
    except:
        raise Exception('Image type {} is not recognized'.format(image_type))
    ext = 'jpg'
    data_path = os.path.join(TRAIN_DATA, image_type)
    return os.path.join(data_path, '{}.{}'.format(image_id, ext))
import cv2

def get_image_data(image_id, image_type):
    """
    Method to get image data as np.array specifying image id and type
    """
    fname = get_filename(image_id, image_type)
    img = cv2.imread(fname)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img
l = len(type_1_ids)
np.floor(25.6)
tile_size
tile_size = (256, 256)
n = 10
m = int(np.floor(l / n))
test_zeros = np.zeros((2, 4, 3), dtype=np.uint8)
test_zeros
tile_size = (256, 256)
n = 15
complete_images = []
for k, type_ids in enumerate([type_1_ids, type_2_ids, type_3_ids]):
    m = int(np.floor(len(type_ids) / n))
    complete_image = np.zeros((m * (tile_size[0] + 2), n * (tile_size[1] + 2), 3), dtype=np.uint8)
    train_ids = sorted(type_ids)
    counter = 0
    for i in range(m):
        ys = i * (tile_size[1] + 2)
        ye = ys + tile_size[1]
        for j in range(n):
            xs = j * (tile_size[0] + 2)
            xe = xs + tile_size[0]
            image_id = train_ids[counter]
            counter += 1
            img = get_image_data(image_id, 'Type_%i' % (k + 1))
            img = cv2.resize(img, dsize=tile_size)
            complete_image[ys:ye, xs:xe] = img[:, :, :]
    complete_images.append(complete_image)
plt_st()
plt.title('Training dataset of type 1')
plt.imshow(complete_image)
code
1009893/cell_6
[ "text_plain_output_1.png" ]
from glob import glob
import numpy as np # linear algebra
import os
import os
from glob import glob
TRAIN_DATA = '../input/train'
type_1_files = glob(os.path.join(TRAIN_DATA, 'Type_1', '*.jpg'))
type_2_files = glob(os.path.join(TRAIN_DATA, 'Type_2', '*.jpg'))
type_3_files = glob(os.path.join(TRAIN_DATA, 'Type_3', '*.jpg'))
type_1_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_1')) + 1:-4] for s in type_1_files])
type_2_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_2')) + 1:-4] for s in type_2_files])
type_3_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_3')) + 1:-4] for s in type_3_files])
l = len(type_1_ids)
np.floor(25.6)
tile_size
code
1009893/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
from glob import glob
import numpy as np # linear algebra
import os
import os
from glob import glob
TRAIN_DATA = '../input/train'
type_1_files = glob(os.path.join(TRAIN_DATA, 'Type_1', '*.jpg'))
type_2_files = glob(os.path.join(TRAIN_DATA, 'Type_2', '*.jpg'))
type_3_files = glob(os.path.join(TRAIN_DATA, 'Type_3', '*.jpg'))
type_1_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_1')) + 1:-4] for s in type_1_files])
type_2_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_2')) + 1:-4] for s in type_2_files])
type_3_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_3')) + 1:-4] for s in type_3_files])
print(len(type_1_files), len(type_2_files), len(type_3_files))
print('Type 1', type_1_ids[:10])
print('Type 2', type_2_ids[:10])
print('Type 3', type_3_ids[:10])
code
1009893/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
1009893/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
from glob import glob
import numpy as np # linear algebra
import os
import os
from glob import glob
TRAIN_DATA = '../input/train'
type_1_files = glob(os.path.join(TRAIN_DATA, 'Type_1', '*.jpg'))
type_2_files = glob(os.path.join(TRAIN_DATA, 'Type_2', '*.jpg'))
type_3_files = glob(os.path.join(TRAIN_DATA, 'Type_3', '*.jpg'))
type_1_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_1')) + 1:-4] for s in type_1_files])
type_2_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_2')) + 1:-4] for s in type_2_files])
type_3_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_3')) + 1:-4] for s in type_3_files])
l = len(type_1_ids)
np.floor(25.6)
tile_size
tile_size = (256, 256)
n = 10
m = int(np.floor(l / n))
test_zeros = np.zeros((2, 4, 3), dtype=np.uint8)
test_zeros
code
1009893/cell_17
[ "text_plain_output_1.png" ]
from glob import glob
import cv2
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import os
import os
from glob import glob
TRAIN_DATA = '../input/train'
type_1_files = glob(os.path.join(TRAIN_DATA, 'Type_1', '*.jpg'))
type_2_files = glob(os.path.join(TRAIN_DATA, 'Type_2', '*.jpg'))
type_3_files = glob(os.path.join(TRAIN_DATA, 'Type_3', '*.jpg'))
type_1_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_1')) + 1:-4] for s in type_1_files])
type_2_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_2')) + 1:-4] for s in type_2_files])
type_3_ids = np.array([s[len(os.path.join(TRAIN_DATA, 'Type_3')) + 1:-4] for s in type_3_files])

def get_filename(image_id, image_type):
    """
    Method to get image file path from its id and type
    """
    try:
        ['Type_1', 'Type_2', 'Type_3'].index(image_type)
    except:
        raise Exception('Image type {} is not recognized'.format(image_type))
    ext = 'jpg'
    data_path = os.path.join(TRAIN_DATA, image_type)
    return os.path.join(data_path, '{}.{}'.format(image_id, ext))
import cv2

def get_image_data(image_id, image_type):
    """
    Method to get image data as np.array specifying image id and type
    """
    fname = get_filename(image_id, image_type)
    img = cv2.imread(fname)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img
l = len(type_1_ids)
np.floor(25.6)
tile_size
tile_size = (256, 256)
n = 10
m = int(np.floor(l / n))
test_zeros = np.zeros((2, 4, 3), dtype=np.uint8)
test_zeros
tile_size = (256, 256)
n = 15
complete_images = []
for k, type_ids in enumerate([type_1_ids, type_2_ids, type_3_ids]):
    m = int(np.floor(len(type_ids) / n))
    complete_image = np.zeros((m * (tile_size[0] + 2), n * (tile_size[1] + 2), 3), dtype=np.uint8)
    train_ids = sorted(type_ids)
    counter = 0
    for i in range(m):
        ys = i * (tile_size[1] + 2)
        ye = ys + tile_size[1]
        for j in range(n):
            xs = j * (tile_size[0] + 2)
            xe = xs + tile_size[0]
            image_id = train_ids[counter]
            counter += 1
            img = get_image_data(image_id, 'Type_%i' % (k + 1))
            img = cv2.resize(img, dsize=tile_size)
            complete_image[ys:ye, xs:xe] = img[:, :, :]
    complete_images.append(complete_image)
plt_st()
plt_st(20, 20)
plt.imshow(complete_images[0])
plt.title('Training dataset of type %i' % 1)
code
90154229/cell_13
[ "text_plain_output_1.png" ]
from sklearn import datasets, linear_model
import matplotlib.pyplot as plt
import numpy as np
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-30]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-30]
diabetes_y_test = diabetes.target[-20:]
model = linear_model.LinearRegression()
model.fit(diabetes_X_train, diabetes_y_train)
diabetes_y_predicted = model.predict(diabetes_X_test)
plt.plot(diabetes_X_test, diabetes_y_predicted)
code
90154229/cell_11
[ "text_plain_output_1.png" ]
from sklearn import datasets, linear_model
import numpy as np
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-30]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-30]
diabetes_y_test = diabetes.target[-20:]
model = linear_model.LinearRegression()
model.fit(diabetes_X_train, diabetes_y_train)
diabetes_y_predicted = model.predict(diabetes_X_test)
print('weights:', model.coef_)
print('Intercept:', model.intercept_)
code
90154229/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import datasets, linear_model
import numpy as np
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-30]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-30]
diabetes_y_test = diabetes.target[-20:]
model = linear_model.LinearRegression()
model.fit(diabetes_X_train, diabetes_y_train)
code
90154229/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
print(diabetes.keys())
code
90154229/cell_10
[ "text_plain_output_1.png" ]
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error
import numpy as np
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-30]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-30]
diabetes_y_test = diabetes.target[-20:]
model = linear_model.LinearRegression()
model.fit(diabetes_X_train, diabetes_y_train)
diabetes_y_predicted = model.predict(diabetes_X_test)
print('Mean squared error is:', mean_squared_error(diabetes_y_test, diabetes_y_predicted))
code
90154229/cell_12
[ "text_plain_output_1.png" ]
from sklearn import datasets, linear_model
import matplotlib.pyplot as plt
import numpy as np
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-30]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-30]
diabetes_y_test = diabetes.target[-20:]
plt.scatter(diabetes_X_test, diabetes_y_test)
code
106191525/cell_21
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
from sklearn.model_selection import train_test_split
from sklearn.neighbors import LocalOutlierFactor
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import seaborn as sns
data = pd.read_csv('../input/breast-cancer/breast-cancer - breast-cancer.csv')
C = data['diagnosis'].value_counts()
corr = data.corr()
top_feature = corr.index[abs(corr['diagnosis']) > 0.5]
Important_Data = data[top_feature.values]
top_corr = data[top_feature].corr()
radius = data[['radius_mean', 'radius_se', 'radius_worst', 'diagnosis']]
texture = data[['texture_mean', 'texture_se', 'texture_worst', 'diagnosis']]
perimeter = data[['perimeter_mean', 'perimeter_se', 'perimeter_worst', 'diagnosis']]
area = data[['area_mean', 'area_se', 'area_worst', 'diagnosis']]
smoothness = data[['smoothness_mean', 'smoothness_se', 'smoothness_worst', 'diagnosis']]
compactness = data[['compactness_mean', 'compactness_se', 'compactness_worst', 'diagnosis']]
concavity = data[['concavity_mean', 'concavity_se', 'concavity_worst', 'diagnosis']]
concave_points = data[['concave points_mean', 'concave points_se', 'concave points_worst', 'diagnosis']]
symmetry = data[['symmetry_mean', 'symmetry_se', 'symmetry_worst', 'diagnosis']]
fractal_dimension = data[['fractal_dimension_mean', 'fractal_dimension_se', 'fractal_dimension_worst', 'diagnosis']]
X = Important_Data.drop(['diagnosis'], axis=1)
Y = Important_Data.diagnosis
columns = Important_Data.columns.tolist()
lof = LocalOutlierFactor()
y_pred = lof.fit_predict(X)
y_pred[0:30]
x_score = lof.negative_outlier_factor_
outlier_score = pd.DataFrame()
outlier_score['score'] = x_score
lofthreshold = -2.5
loffilter = outlier_score['score'] < lofthreshold
outlier_index = outlier_score[loffilter].index.tolist()
radius = (x_score.max() - x_score) / (x_score.max() - x_score.min())
outlier_score['radius'] = radius
X = X.drop(outlier_index)
Y = Y.drop(outlier_index).values
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=10)
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
x_train.head()
code
106191525/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import seaborn as sns
data = pd.read_csv('../input/breast-cancer/breast-cancer - breast-cancer.csv')
C = data['diagnosis'].value_counts()
corr = data.corr()
top_feature = corr.index[abs(corr['diagnosis']) > 0.5]
print(top_feature.values)
Important_Data = data[top_feature.values]
plt.subplots(figsize=(20, 10))
top_corr = data[top_feature].corr()
sns.heatmap(top_corr, annot=True)
plt.show()
code
106191525/cell_9
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('../input/breast-cancer/breast-cancer - breast-cancer.csv')
plt.title('Diagnostic Distribution')
C = data['diagnosis'].value_counts()
C.plot(kind='pie')
print(C)
code
106191525/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.neighbors import LocalOutlierFactor
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import seaborn as sns
data = pd.read_csv('../input/breast-cancer/breast-cancer - breast-cancer.csv')
C = data['diagnosis'].value_counts()
corr = data.corr()
top_feature = corr.index[abs(corr['diagnosis']) > 0.5]
Important_Data = data[top_feature.values]
top_corr = data[top_feature].corr()
radius = data[['radius_mean', 'radius_se', 'radius_worst', 'diagnosis']]
texture = data[['texture_mean', 'texture_se', 'texture_worst', 'diagnosis']]
perimeter = data[['perimeter_mean', 'perimeter_se', 'perimeter_worst', 'diagnosis']]
area = data[['area_mean', 'area_se', 'area_worst', 'diagnosis']]
smoothness = data[['smoothness_mean', 'smoothness_se', 'smoothness_worst', 'diagnosis']]
compactness = data[['compactness_mean', 'compactness_se', 'compactness_worst', 'diagnosis']]
concavity = data[['concavity_mean', 'concavity_se', 'concavity_worst', 'diagnosis']]
concave_points = data[['concave points_mean', 'concave points_se', 'concave points_worst', 'diagnosis']]
symmetry = data[['symmetry_mean', 'symmetry_se', 'symmetry_worst', 'diagnosis']]
fractal_dimension = data[['fractal_dimension_mean', 'fractal_dimension_se', 'fractal_dimension_worst', 'diagnosis']]
X = Important_Data.drop(['diagnosis'], axis=1)
Y = Important_Data.diagnosis
columns = Important_Data.columns.tolist()
lof = LocalOutlierFactor()
y_pred = lof.fit_predict(X)
y_pred[0:30]
x_score = lof.negative_outlier_factor_
outlier_score = pd.DataFrame()
outlier_score['score'] = x_score
lofthreshold = -2.5
loffilter = outlier_score['score'] < lofthreshold
outlier_index = outlier_score[loffilter].index.tolist()
plt.figure(figsize=(12, 8.0))
plt.scatter(X.iloc[outlier_index, 0], X.iloc[outlier_index, 4], color='blue', s=50, label='outliers')
plt.scatter(X.iloc[:, 0], X.iloc[:, 4], color='k', s=3, label='Data Points')
radius = (x_score.max() - x_score) / (x_score.max() - x_score.min())
outlier_score['radius'] = radius
plt.scatter(X.iloc[:, 0], X.iloc[:, 4], s=1000 * radius, edgecolors='r', facecolors='none', label='outlier scores')
plt.legend()
X = X.drop(outlier_index)
Y = Y.drop(outlier_index).values
code