path
stringlengths 13
17
| screenshot_names
sequencelengths 1
873
| code
stringlengths 0
40.4k
| cell_type
stringclasses 1
value |
---|---|---|---|
90124932/cell_4 | [
"text_plain_output_1.png"
] | import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_img_csv = '../input/mura-dataset/MURA-v1.1/train_image_paths.csv'
train_images_paths = pd.read_csv(os.path.join(train_img_csv), dtype=str, header=None)
train_images_paths.columns = ['image_path']
train_images_paths['label'] = train_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
train_images_paths['category'] = train_images_paths['image_path'].apply(lambda x: x.split('/')[2])
valid_img_csv = '../input/testdata/abdekho_valid.csv'
valid_images_paths = pd.read_csv(os.path.join(valid_img_csv), dtype=str, header=None)
valid_images_paths.columns = ['image_path']
valid_images_paths['label'] = valid_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
valid_images_paths['category'] = valid_images_paths['image_path'].apply(lambda x: x.split('/')[2])
print(len(train_images_paths))
print(len(valid_images_paths)) | code |
90124932/cell_6 | [
"text_plain_output_1.png"
] | from keras.models import Sequential,Model,load_model,Input
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import pandas as pd
import os
from glob import glob
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
from keras.models import Model
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from glob import glob
import numpy as np
from keras import regularizers
from keras.models import Sequential, Model, load_model, Input
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, GlobalAveragePooling2D
from keras_preprocessing.image import ImageDataGenerator
import keras.layers as Layers
from keras.callbacks import EarlyStopping, ModelCheckpoint
import keras.optimizers as Optimizer
from keras import applications
from tensorflow import keras
import math
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import tensorflow_addons as tfa
train_img_csv = '../input/mura-dataset/MURA-v1.1/train_image_paths.csv'
train_images_paths = pd.read_csv(os.path.join(train_img_csv), dtype=str, header=None)
train_images_paths.columns = ['image_path']
train_images_paths['label'] = train_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
train_images_paths['category'] = train_images_paths['image_path'].apply(lambda x: x.split('/')[2])
valid_img_csv = '../input/testdata/abdekho_valid.csv'
valid_images_paths = pd.read_csv(os.path.join(valid_img_csv), dtype=str, header=None)
valid_images_paths.columns = ['image_path']
valid_images_paths['label'] = valid_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
valid_images_paths['category'] = valid_images_paths['image_path'].apply(lambda x: x.split('/')[2])
input_image = Input(shape=(224, 224, 3), name='original_img')
dense_model_1 = tf.keras.applications.DenseNet169(include_top=False, weights='imagenet')
dense_model_1.trainable = True
for layer in dense_model_1.layers[:350]:
layer.trainable = False
x = dense_model_1(input_image)
print('x1', x.shape)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
print('x2', x.shape)
x = tf.keras.layers.Dense(81, activation='relu')(x)
print('x3', x.shape)
x = tf.keras.layers.Dense(81, activation='relu')(x)
print('x4', x.shape)
x = tf.keras.layers.Dense(42, activation='relu')(x)
print('x5', x.shape)
preds_dense_net = tf.keras.layers.Dense(1, activation='sigmoid')(x)
print('x6', preds_dense_net.shape)
dense_model_2 = tf.keras.applications.Xception(weights='imagenet', include_top=False)
dense_model_2.trainable = True
for layer in dense_model_2.layers[:116]:
layer.trainable = False
y = dense_model_2(input_image)
y = tf.keras.layers.GlobalAveragePooling2D()(y)
y = tf.keras.layers.Dense(81, activation='relu')(y)
y = tf.keras.layers.Dense(81, activation='relu')(y)
y = tf.keras.layers.Dense(42, activation='relu')(y)
preds_resnet_net = tf.keras.layers.Dense(1, activation='sigmoid')(y)
dense_model_3 = tf.keras.applications.MobileNet(include_top=False, weights='imagenet')
dense_model_3.trainable = True
for layer in dense_model_3.layers[:70]:
layer.trainable = False
z = dense_model_3(input_image)
z = tf.keras.layers.GlobalAveragePooling2D()(z)
z = tf.keras.layers.Dense(81, activation='relu')(z)
z = tf.keras.layers.Dense(81, activation='relu')(z)
z = tf.keras.layers.Dense(42, activation='relu')(z)
preds_mobi_net = tf.keras.layers.Dense(1, activation='sigmoid')(z)
mean_nn_only = tf.reduce_mean(tf.stack([preds_mobi_net, preds_resnet_net, preds_dense_net], axis=0), axis=0)
model = tf.keras.models.Model(input_image, mean_nn_only) | code |
90124932/cell_1 | [
"text_plain_output_1.png"
] | import tensorflow as tf
import pandas as pd
import os
from glob import glob
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
from keras.models import Model
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from glob import glob
import numpy as np
from keras import regularizers
from keras.models import Sequential, Model, load_model, Input
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, GlobalAveragePooling2D
from keras_preprocessing.image import ImageDataGenerator
import keras.layers as Layers
from keras.callbacks import EarlyStopping, ModelCheckpoint
import keras.optimizers as Optimizer
print(tf.__version__)
from keras import applications
from tensorflow import keras
import math
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import tensorflow_addons as tfa | code |
90124932/cell_7 | [
"text_plain_output_1.png"
] | from keras.models import Sequential,Model,load_model,Input
from keras_preprocessing.image import ImageDataGenerator
import math
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import tensorflow_addons as tfa
import pandas as pd
import os
from glob import glob
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
from keras.models import Model
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from glob import glob
import numpy as np
from keras import regularizers
from keras.models import Sequential, Model, load_model, Input
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, GlobalAveragePooling2D
from keras_preprocessing.image import ImageDataGenerator
import keras.layers as Layers
from keras.callbacks import EarlyStopping, ModelCheckpoint
import keras.optimizers as Optimizer
from keras import applications
from tensorflow import keras
import math
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import tensorflow_addons as tfa
train_img_csv = '../input/mura-dataset/MURA-v1.1/train_image_paths.csv'
train_images_paths = pd.read_csv(os.path.join(train_img_csv), dtype=str, header=None)
train_images_paths.columns = ['image_path']
train_images_paths['label'] = train_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
train_images_paths['category'] = train_images_paths['image_path'].apply(lambda x: x.split('/')[2])
valid_img_csv = '../input/testdata/abdekho_valid.csv'
valid_images_paths = pd.read_csv(os.path.join(valid_img_csv), dtype=str, header=None)
valid_images_paths.columns = ['image_path']
valid_images_paths['label'] = valid_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
valid_images_paths['category'] = valid_images_paths['image_path'].apply(lambda x: x.split('/')[2])
train_images_paths_XR_ELBOW = train_images_paths[train_images_paths['category'] == 'XR_ELBOW']
valid_images_paths_XR_ELBOW = valid_images_paths[valid_images_paths['category'] == 'XR_ELBOW']
train_images_paths_XR_FINGER = train_images_paths[train_images_paths['category'] == 'XR_FINGER']
valid_images_paths_XR_FINGER = valid_images_paths[valid_images_paths['category'] == 'XR_FINGER']
train_images_paths_XR_FOREARM = train_images_paths[train_images_paths['category'] == 'XR_FOREARM']
valid_images_paths_XR_FOREARM = valid_images_paths[valid_images_paths['category'] == 'XR_FOREARM']
train_images_paths_XR_HAND = train_images_paths[train_images_paths['category'] == 'XR_HAND']
valid_images_paths_XR_HAND = valid_images_paths[valid_images_paths['category'] == 'XR_HAND']
train_images_paths_XR_HUMERUS = train_images_paths[train_images_paths['category'] == 'XR_HUMERUS']
valid_images_paths_XR_HUMERUS = valid_images_paths[valid_images_paths['category'] == 'XR_HUMERUS']
train_images_paths_XR_SHOULDER = train_images_paths[train_images_paths['category'] == 'XR_SHOULDER']
valid_images_paths_XR_SHOULDER = valid_images_paths[valid_images_paths['category'] == 'XR_SHOULDER']
train_images_paths_XR_WRIST = train_images_paths[train_images_paths['category'] == 'XR_WRIST']
valid_images_paths_XR_WRIST = valid_images_paths[valid_images_paths['category'] == 'XR_WRIST']
datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=10, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=10)
images_path_dir = '../input/mura-dataset'
batchsize = 32
targetsize = (224, 224)
classmode = 'binary'
train_generator = datagen.flow_from_dataframe(dataframe=train_images_paths, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator = test_datagen.flow_from_dataframe(dataframe=valid_images_paths, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_ELBOW = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_ELBOW, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_ELBOW = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_ELBOW, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_FINGER = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_FINGER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_FINGER = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_FINGER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_FOREARM = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_FOREARM, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_FOREARM = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_FOREARM, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_HAND = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_HAND, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_HAND = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_HAND, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_HUMERUS = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_HUMERUS, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_HUMERUS = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_HUMERUS, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_SHOULDER = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_SHOULDER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_SHOULDER = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_SHOULDER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_WRIST = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_WRIST, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_WRIST = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_WRIST, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
input_image = Input(shape=(224, 224, 3), name='original_img')
dense_model_1 = tf.keras.applications.DenseNet169(include_top=False, weights='imagenet')
dense_model_1.trainable = True
for layer in dense_model_1.layers[:350]:
layer.trainable = False
x = dense_model_1(input_image)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dense(81, activation='relu')(x)
x = tf.keras.layers.Dense(81, activation='relu')(x)
x = tf.keras.layers.Dense(42, activation='relu')(x)
preds_dense_net = tf.keras.layers.Dense(1, activation='sigmoid')(x)
dense_model_2 = tf.keras.applications.Xception(weights='imagenet', include_top=False)
dense_model_2.trainable = True
for layer in dense_model_2.layers[:116]:
layer.trainable = False
y = dense_model_2(input_image)
y = tf.keras.layers.GlobalAveragePooling2D()(y)
y = tf.keras.layers.Dense(81, activation='relu')(y)
y = tf.keras.layers.Dense(81, activation='relu')(y)
y = tf.keras.layers.Dense(42, activation='relu')(y)
preds_resnet_net = tf.keras.layers.Dense(1, activation='sigmoid')(y)
dense_model_3 = tf.keras.applications.MobileNet(include_top=False, weights='imagenet')
dense_model_3.trainable = True
for layer in dense_model_3.layers[:70]:
layer.trainable = False
z = dense_model_3(input_image)
z = tf.keras.layers.GlobalAveragePooling2D()(z)
z = tf.keras.layers.Dense(81, activation='relu')(z)
z = tf.keras.layers.Dense(81, activation='relu')(z)
z = tf.keras.layers.Dense(42, activation='relu')(z)
preds_mobi_net = tf.keras.layers.Dense(1, activation='sigmoid')(z)
mean_nn_only = tf.reduce_mean(tf.stack([preds_mobi_net, preds_resnet_net, preds_dense_net], axis=0), axis=0)
model = tf.keras.models.Model(input_image, mean_nn_only)
STEP_SIZE_TRAIN = math.ceil(train_generator.n / train_generator.batch_size)
STEP_SIZE_VALID = math.ceil(valid_generator.n / valid_generator.batch_size)
print(STEP_SIZE_TRAIN)
print(STEP_SIZE_VALID)
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.01), loss='binary_crossentropy', metrics=['accuracy', tfa.metrics.CohenKappa(num_classes=2), tf.keras.metrics.Precision(0.6), tf.keras.metrics.Recall(0.3), tf.keras.metrics.AUC()])
history = model.fit_generator(train_generator, epochs=20, verbose=1, steps_per_epoch=STEP_SIZE_TRAIN, validation_data=valid_generator, validation_steps=STEP_SIZE_VALID) | code |
90124932/cell_8 | [
"text_plain_output_1.png"
] | from keras.models import Sequential,Model,load_model,Input
from keras_preprocessing.image import ImageDataGenerator
import math
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt # showing and rendering figures
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import tensorflow_addons as tfa
import pandas as pd
import os
from glob import glob
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
from keras.models import Model
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from glob import glob
import numpy as np
from keras import regularizers
from keras.models import Sequential, Model, load_model, Input
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, GlobalAveragePooling2D
from keras_preprocessing.image import ImageDataGenerator
import keras.layers as Layers
from keras.callbacks import EarlyStopping, ModelCheckpoint
import keras.optimizers as Optimizer
from keras import applications
from tensorflow import keras
import math
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import tensorflow_addons as tfa
train_img_csv = '../input/mura-dataset/MURA-v1.1/train_image_paths.csv'
train_images_paths = pd.read_csv(os.path.join(train_img_csv), dtype=str, header=None)
train_images_paths.columns = ['image_path']
train_images_paths['label'] = train_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
train_images_paths['category'] = train_images_paths['image_path'].apply(lambda x: x.split('/')[2])
valid_img_csv = '../input/testdata/abdekho_valid.csv'
valid_images_paths = pd.read_csv(os.path.join(valid_img_csv), dtype=str, header=None)
valid_images_paths.columns = ['image_path']
valid_images_paths['label'] = valid_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
valid_images_paths['category'] = valid_images_paths['image_path'].apply(lambda x: x.split('/')[2])
train_images_paths_XR_ELBOW = train_images_paths[train_images_paths['category'] == 'XR_ELBOW']
valid_images_paths_XR_ELBOW = valid_images_paths[valid_images_paths['category'] == 'XR_ELBOW']
train_images_paths_XR_FINGER = train_images_paths[train_images_paths['category'] == 'XR_FINGER']
valid_images_paths_XR_FINGER = valid_images_paths[valid_images_paths['category'] == 'XR_FINGER']
train_images_paths_XR_FOREARM = train_images_paths[train_images_paths['category'] == 'XR_FOREARM']
valid_images_paths_XR_FOREARM = valid_images_paths[valid_images_paths['category'] == 'XR_FOREARM']
train_images_paths_XR_HAND = train_images_paths[train_images_paths['category'] == 'XR_HAND']
valid_images_paths_XR_HAND = valid_images_paths[valid_images_paths['category'] == 'XR_HAND']
train_images_paths_XR_HUMERUS = train_images_paths[train_images_paths['category'] == 'XR_HUMERUS']
valid_images_paths_XR_HUMERUS = valid_images_paths[valid_images_paths['category'] == 'XR_HUMERUS']
train_images_paths_XR_SHOULDER = train_images_paths[train_images_paths['category'] == 'XR_SHOULDER']
valid_images_paths_XR_SHOULDER = valid_images_paths[valid_images_paths['category'] == 'XR_SHOULDER']
train_images_paths_XR_WRIST = train_images_paths[train_images_paths['category'] == 'XR_WRIST']
valid_images_paths_XR_WRIST = valid_images_paths[valid_images_paths['category'] == 'XR_WRIST']
datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=10, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=10)
images_path_dir = '../input/mura-dataset'
batchsize = 32
targetsize = (224, 224)
classmode = 'binary'
train_generator = datagen.flow_from_dataframe(dataframe=train_images_paths, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator = test_datagen.flow_from_dataframe(dataframe=valid_images_paths, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_ELBOW = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_ELBOW, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_ELBOW = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_ELBOW, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_FINGER = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_FINGER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_FINGER = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_FINGER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_FOREARM = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_FOREARM, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_FOREARM = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_FOREARM, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_HAND = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_HAND, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_HAND = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_HAND, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_HUMERUS = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_HUMERUS, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_HUMERUS = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_HUMERUS, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_SHOULDER = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_SHOULDER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_SHOULDER = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_SHOULDER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_WRIST = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_WRIST, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_WRIST = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_WRIST, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
input_image = Input(shape=(224, 224, 3), name='original_img')
dense_model_1 = tf.keras.applications.DenseNet169(include_top=False, weights='imagenet')
dense_model_1.trainable = True
for layer in dense_model_1.layers[:350]:
layer.trainable = False
x = dense_model_1(input_image)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dense(81, activation='relu')(x)
x = tf.keras.layers.Dense(81, activation='relu')(x)
x = tf.keras.layers.Dense(42, activation='relu')(x)
preds_dense_net = tf.keras.layers.Dense(1, activation='sigmoid')(x)
dense_model_2 = tf.keras.applications.Xception(weights='imagenet', include_top=False)
dense_model_2.trainable = True
for layer in dense_model_2.layers[:116]:
layer.trainable = False
y = dense_model_2(input_image)
y = tf.keras.layers.GlobalAveragePooling2D()(y)
y = tf.keras.layers.Dense(81, activation='relu')(y)
y = tf.keras.layers.Dense(81, activation='relu')(y)
y = tf.keras.layers.Dense(42, activation='relu')(y)
preds_resnet_net = tf.keras.layers.Dense(1, activation='sigmoid')(y)
dense_model_3 = tf.keras.applications.MobileNet(include_top=False, weights='imagenet')
dense_model_3.trainable = True
for layer in dense_model_3.layers[:70]:
layer.trainable = False
z = dense_model_3(input_image)
z = tf.keras.layers.GlobalAveragePooling2D()(z)
z = tf.keras.layers.Dense(81, activation='relu')(z)
z = tf.keras.layers.Dense(81, activation='relu')(z)
z = tf.keras.layers.Dense(42, activation='relu')(z)
preds_mobi_net = tf.keras.layers.Dense(1, activation='sigmoid')(z)
mean_nn_only = tf.reduce_mean(tf.stack([preds_mobi_net, preds_resnet_net, preds_dense_net], axis=0), axis=0)
model = tf.keras.models.Model(input_image, mean_nn_only)
STEP_SIZE_TRAIN = math.ceil(train_generator.n / train_generator.batch_size)
STEP_SIZE_VALID = math.ceil(valid_generator.n / valid_generator.batch_size)
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.01), loss='binary_crossentropy', metrics=['accuracy', tfa.metrics.CohenKappa(num_classes=2), tf.keras.metrics.Precision(0.6), tf.keras.metrics.Recall(0.3), tf.keras.metrics.AUC()])
history = model.fit_generator(train_generator, epochs=20, verbose=1, steps_per_epoch=STEP_SIZE_TRAIN, validation_data=valid_generator, validation_steps=STEP_SIZE_VALID)
epochs = range(1, 21)
plt.plot(epochs, history.history['accuracy'], 'g', label='training accuracy')
plt.plot(epochs, history.history['val_accuracy'], 'b', label='validation accuracy')
plt.title('Training and Validation accuracy Wrist')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend()
plt.show()
plt.savefig('acc_densenet_forearm.png')
plt.plot(epochs, history.history['loss'], 'r', label='training loss')
plt.plot(epochs, history.history['val_loss'], 'c', label='validation loss')
plt.title('Training and Validation loss Wrist')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend()
plt.show()
plt.savefig('loss_densenet_forearm.png')
plt.plot(epochs, history.history['cohen_kappa'], 'y', label='training cohen_kappa')
plt.plot(epochs, history.history['val_cohen_kappa'], 'm', label='validation cohen_kappa')
plt.title('Training and Validation cohen_kappa Wrist')
plt.ylabel('Accuracy, Loss & cohen_kappa')
plt.xlabel('epoch')
plt.legend()
plt.savefig('kappa_densenet_forearm.png') | code |
90124932/cell_14 | [
"text_plain_output_1.png"
] | from keras.models import Sequential,Model,load_model,Input
from keras_preprocessing.image import ImageDataGenerator
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import pandas as pd
import os
from glob import glob
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
from keras.models import Model
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from glob import glob
import numpy as np
from keras import regularizers
from keras.models import Sequential, Model, load_model, Input
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, GlobalAveragePooling2D
from keras_preprocessing.image import ImageDataGenerator
import keras.layers as Layers
from keras.callbacks import EarlyStopping, ModelCheckpoint
import keras.optimizers as Optimizer
from keras import applications
from tensorflow import keras
import math
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import tensorflow_addons as tfa
train_img_csv = '../input/mura-dataset/MURA-v1.1/train_image_paths.csv'
train_images_paths = pd.read_csv(os.path.join(train_img_csv), dtype=str, header=None)
train_images_paths.columns = ['image_path']
train_images_paths['label'] = train_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
train_images_paths['category'] = train_images_paths['image_path'].apply(lambda x: x.split('/')[2])
valid_img_csv = '../input/testdata/abdekho_valid.csv'
valid_images_paths = pd.read_csv(os.path.join(valid_img_csv), dtype=str, header=None)
valid_images_paths.columns = ['image_path']
valid_images_paths['label'] = valid_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
valid_images_paths['category'] = valid_images_paths['image_path'].apply(lambda x: x.split('/')[2])
train_images_paths_XR_ELBOW = train_images_paths[train_images_paths['category'] == 'XR_ELBOW']
valid_images_paths_XR_ELBOW = valid_images_paths[valid_images_paths['category'] == 'XR_ELBOW']
train_images_paths_XR_FINGER = train_images_paths[train_images_paths['category'] == 'XR_FINGER']
valid_images_paths_XR_FINGER = valid_images_paths[valid_images_paths['category'] == 'XR_FINGER']
train_images_paths_XR_FOREARM = train_images_paths[train_images_paths['category'] == 'XR_FOREARM']
valid_images_paths_XR_FOREARM = valid_images_paths[valid_images_paths['category'] == 'XR_FOREARM']
train_images_paths_XR_HAND = train_images_paths[train_images_paths['category'] == 'XR_HAND']
valid_images_paths_XR_HAND = valid_images_paths[valid_images_paths['category'] == 'XR_HAND']
train_images_paths_XR_HUMERUS = train_images_paths[train_images_paths['category'] == 'XR_HUMERUS']
valid_images_paths_XR_HUMERUS = valid_images_paths[valid_images_paths['category'] == 'XR_HUMERUS']
train_images_paths_XR_SHOULDER = train_images_paths[train_images_paths['category'] == 'XR_SHOULDER']
valid_images_paths_XR_SHOULDER = valid_images_paths[valid_images_paths['category'] == 'XR_SHOULDER']
train_images_paths_XR_WRIST = train_images_paths[train_images_paths['category'] == 'XR_WRIST']
valid_images_paths_XR_WRIST = valid_images_paths[valid_images_paths['category'] == 'XR_WRIST']
datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=10, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=10)
images_path_dir = '../input/mura-dataset'
batchsize = 32
targetsize = (224, 224)
classmode = 'binary'
train_generator = datagen.flow_from_dataframe(dataframe=train_images_paths, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator = test_datagen.flow_from_dataframe(dataframe=valid_images_paths, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_ELBOW = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_ELBOW, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_ELBOW = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_ELBOW, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_FINGER = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_FINGER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_FINGER = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_FINGER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_FOREARM = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_FOREARM, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_FOREARM = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_FOREARM, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_HAND = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_HAND, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_HAND = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_HAND, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_HUMERUS = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_HUMERUS, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_HUMERUS = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_HUMERUS, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_SHOULDER = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_SHOULDER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_SHOULDER = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_SHOULDER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_WRIST = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_WRIST, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_WRIST = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_WRIST, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
input_image = Input(shape=(224, 224, 3), name='original_img')
dense_model_1 = tf.keras.applications.DenseNet169(include_top=False, weights='imagenet')
dense_model_1.trainable = True
for layer in dense_model_1.layers[:350]:
layer.trainable = False
x = dense_model_1(input_image)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dense(81, activation='relu')(x)
x = tf.keras.layers.Dense(81, activation='relu')(x)
x = tf.keras.layers.Dense(42, activation='relu')(x)
preds_dense_net = tf.keras.layers.Dense(1, activation='sigmoid')(x)
dense_model_2 = tf.keras.applications.Xception(weights='imagenet', include_top=False)
dense_model_2.trainable = True
for layer in dense_model_2.layers[:116]:
layer.trainable = False
y = dense_model_2(input_image)
y = tf.keras.layers.GlobalAveragePooling2D()(y)
y = tf.keras.layers.Dense(81, activation='relu')(y)
y = tf.keras.layers.Dense(81, activation='relu')(y)
y = tf.keras.layers.Dense(42, activation='relu')(y)
preds_resnet_net = tf.keras.layers.Dense(1, activation='sigmoid')(y)
dense_model_3 = tf.keras.applications.MobileNet(include_top=False, weights='imagenet')
dense_model_3.trainable = True
for layer in dense_model_3.layers[:70]:
layer.trainable = False
z = dense_model_3(input_image)
z = tf.keras.layers.GlobalAveragePooling2D()(z)
z = tf.keras.layers.Dense(81, activation='relu')(z)
z = tf.keras.layers.Dense(81, activation='relu')(z)
z = tf.keras.layers.Dense(42, activation='relu')(z)
preds_mobi_net = tf.keras.layers.Dense(1, activation='sigmoid')(z)
mean_nn_only = tf.reduce_mean(tf.stack([preds_mobi_net, preds_resnet_net, preds_dense_net], axis=0), axis=0)
model = tf.keras.models.Model(input_image, mean_nn_only)
valid_img_csv = '../input/testdata/abdekho_valid.csv'
valid_images_paths = pd.read_csv(os.path.join(valid_img_csv), dtype=str, header=None)
valid_images_paths.columns = ['image_path']
valid_images_paths['label'] = valid_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
valid_images_paths['category'] = valid_images_paths['image_path'].apply(lambda x: x.split('/')[2])
valid_images_paths_XR_ELBOW = valid_images_paths[valid_images_paths['category'] == 'XR_ELBOW']
valid_images_paths_XR_FINGER = valid_images_paths[valid_images_paths['category'] == 'XR_FINGER']
valid_images_paths_XR_FOREARM = valid_images_paths[valid_images_paths['category'] == 'XR_FOREARM']
valid_images_paths_XR_HAND = valid_images_paths[valid_images_paths['category'] == 'XR_HAND']
valid_images_paths_XR_HUMERUS = valid_images_paths[valid_images_paths['category'] == 'XR_HUMERUS']
valid_images_paths_XR_SHOULDER = valid_images_paths[valid_images_paths['category'] == 'XR_SHOULDER']
valid_images_paths_XR_WRIST = valid_images_paths[valid_images_paths['category'] == 'XR_WRIST']
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
images_path_dir = '../input/mura-dataset'
batchsize = 16
targetsize = (224, 224)
classmode = 'binary'
valid_generator = test_datagen.flow_from_dataframe(dataframe=valid_images_paths, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_ELBOW = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_ELBOW, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_FINGER = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_FINGER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_FOREARM = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_FOREARM, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_HAND = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_HAND, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_HUMERUS = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_HUMERUS, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_SHOULDER = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_SHOULDER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_WRIST = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_WRIST, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True) | code |
90124932/cell_12 | [
"text_plain_output_1.png"
] | from keras.models import Sequential,Model,load_model,Input
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import pandas as pd
import os
from glob import glob
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
from keras.models import Model
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from glob import glob
import numpy as np
from keras import regularizers
from keras.models import Sequential, Model, load_model, Input
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, GlobalAveragePooling2D
from keras_preprocessing.image import ImageDataGenerator
import keras.layers as Layers
from keras.callbacks import EarlyStopping, ModelCheckpoint
import keras.optimizers as Optimizer
from keras import applications
from tensorflow import keras
import math
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import tensorflow_addons as tfa
train_img_csv = '../input/mura-dataset/MURA-v1.1/train_image_paths.csv'
train_images_paths = pd.read_csv(os.path.join(train_img_csv), dtype=str, header=None)
train_images_paths.columns = ['image_path']
train_images_paths['label'] = train_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
train_images_paths['category'] = train_images_paths['image_path'].apply(lambda x: x.split('/')[2])
valid_img_csv = '../input/testdata/abdekho_valid.csv'
valid_images_paths = pd.read_csv(os.path.join(valid_img_csv), dtype=str, header=None)
valid_images_paths.columns = ['image_path']
valid_images_paths['label'] = valid_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
valid_images_paths['category'] = valid_images_paths['image_path'].apply(lambda x: x.split('/')[2])
input_image = Input(shape=(224, 224, 3), name='original_img')
dense_model_1 = tf.keras.applications.DenseNet169(include_top=False, weights='imagenet')
dense_model_1.trainable = True
for layer in dense_model_1.layers[:350]:
layer.trainable = False
x = dense_model_1(input_image)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dense(81, activation='relu')(x)
x = tf.keras.layers.Dense(81, activation='relu')(x)
x = tf.keras.layers.Dense(42, activation='relu')(x)
preds_dense_net = tf.keras.layers.Dense(1, activation='sigmoid')(x)
dense_model_2 = tf.keras.applications.Xception(weights='imagenet', include_top=False)
dense_model_2.trainable = True
for layer in dense_model_2.layers[:116]:
layer.trainable = False
y = dense_model_2(input_image)
y = tf.keras.layers.GlobalAveragePooling2D()(y)
y = tf.keras.layers.Dense(81, activation='relu')(y)
y = tf.keras.layers.Dense(81, activation='relu')(y)
y = tf.keras.layers.Dense(42, activation='relu')(y)
preds_resnet_net = tf.keras.layers.Dense(1, activation='sigmoid')(y)
dense_model_3 = tf.keras.applications.MobileNet(include_top=False, weights='imagenet')
dense_model_3.trainable = True
for layer in dense_model_3.layers[:70]:
layer.trainable = False
z = dense_model_3(input_image)
z = tf.keras.layers.GlobalAveragePooling2D()(z)
z = tf.keras.layers.Dense(81, activation='relu')(z)
z = tf.keras.layers.Dense(81, activation='relu')(z)
z = tf.keras.layers.Dense(42, activation='relu')(z)
preds_mobi_net = tf.keras.layers.Dense(1, activation='sigmoid')(z)
mean_nn_only = tf.reduce_mean(tf.stack([preds_mobi_net, preds_resnet_net, preds_dense_net], axis=0), axis=0)
model = tf.keras.models.Model(input_image, mean_nn_only)
valid_img_csv = '../input/testdata/abdekho_valid.csv'
valid_images_paths = pd.read_csv(os.path.join(valid_img_csv), dtype=str, header=None)
valid_images_paths.columns = ['image_path']
valid_images_paths['label'] = valid_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
valid_images_paths['category'] = valid_images_paths['image_path'].apply(lambda x: x.split('/')[2])
print(len(valid_images_paths)) | code |
90124932/cell_5 | [
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | from keras_preprocessing.image import ImageDataGenerator
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_img_csv = '../input/mura-dataset/MURA-v1.1/train_image_paths.csv'
train_images_paths = pd.read_csv(os.path.join(train_img_csv), dtype=str, header=None)
train_images_paths.columns = ['image_path']
train_images_paths['label'] = train_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
train_images_paths['category'] = train_images_paths['image_path'].apply(lambda x: x.split('/')[2])
valid_img_csv = '../input/testdata/abdekho_valid.csv'
valid_images_paths = pd.read_csv(os.path.join(valid_img_csv), dtype=str, header=None)
valid_images_paths.columns = ['image_path']
valid_images_paths['label'] = valid_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
valid_images_paths['category'] = valid_images_paths['image_path'].apply(lambda x: x.split('/')[2])
train_images_paths_XR_ELBOW = train_images_paths[train_images_paths['category'] == 'XR_ELBOW']
valid_images_paths_XR_ELBOW = valid_images_paths[valid_images_paths['category'] == 'XR_ELBOW']
train_images_paths_XR_FINGER = train_images_paths[train_images_paths['category'] == 'XR_FINGER']
valid_images_paths_XR_FINGER = valid_images_paths[valid_images_paths['category'] == 'XR_FINGER']
train_images_paths_XR_FOREARM = train_images_paths[train_images_paths['category'] == 'XR_FOREARM']
valid_images_paths_XR_FOREARM = valid_images_paths[valid_images_paths['category'] == 'XR_FOREARM']
train_images_paths_XR_HAND = train_images_paths[train_images_paths['category'] == 'XR_HAND']
valid_images_paths_XR_HAND = valid_images_paths[valid_images_paths['category'] == 'XR_HAND']
train_images_paths_XR_HUMERUS = train_images_paths[train_images_paths['category'] == 'XR_HUMERUS']
valid_images_paths_XR_HUMERUS = valid_images_paths[valid_images_paths['category'] == 'XR_HUMERUS']
train_images_paths_XR_SHOULDER = train_images_paths[train_images_paths['category'] == 'XR_SHOULDER']
valid_images_paths_XR_SHOULDER = valid_images_paths[valid_images_paths['category'] == 'XR_SHOULDER']
train_images_paths_XR_WRIST = train_images_paths[train_images_paths['category'] == 'XR_WRIST']
valid_images_paths_XR_WRIST = valid_images_paths[valid_images_paths['category'] == 'XR_WRIST']
datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=10, width_shift_range=0.2, height_shift_range=0.2, horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=10)
images_path_dir = '../input/mura-dataset'
batchsize = 32
targetsize = (224, 224)
classmode = 'binary'
train_generator = datagen.flow_from_dataframe(dataframe=train_images_paths, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator = test_datagen.flow_from_dataframe(dataframe=valid_images_paths, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_ELBOW = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_ELBOW, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_ELBOW = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_ELBOW, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_FINGER = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_FINGER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_FINGER = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_FINGER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_FOREARM = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_FOREARM, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_FOREARM = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_FOREARM, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_HAND = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_HAND, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_HAND = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_HAND, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_HUMERUS = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_HUMERUS, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_HUMERUS = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_HUMERUS, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_SHOULDER = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_SHOULDER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_SHOULDER = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_SHOULDER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
train_generator_XR_WRIST = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_WRIST, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True)
valid_generator_XR_WRIST = test_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_WRIST, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, suffle=True) | code |
72068023/cell_42 | [
"text_plain_output_1.png"
] | from numba import njit, jit, cuda, guvectorize
from scipy.signal import butter, filtfilt, sosfiltfilt
from scipy.signal import cwt, ricker
from scipy.signal import spectrogram
import glob
import librosa
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pathlib
import torch
import torch
import glob
import pathlib
head = pathlib.Path('../input/g2net-gravitational-wave-detection')
train_files = sorted(glob.glob('../input/g2net-gravitational-wave-detection/train/*/*/*/*.npy'))
wave = np.load(train_files[0])
from numba import njit, jit, cuda, guvectorize
@njit(nogil=True)
def min_max_scaler(wave):
for i in range(len(wave)):
wave[i] = (wave[i] - min(wave[i])) / (max(wave[i]) - min(wave[i]))
wave[i] = 2 * wave[i] - 1
return wave
wave1 = min_max_scaler(wave)
from scipy.signal import butter, filtfilt, sosfiltfilt
T = 2
fs = 2048.0
cutoff = 2.5
nyq = 0.5 * fs
order = 3
n = int(T * fs)
normal_cutoff = cutoff / nyq
normal_cutoff = (21.83 / fs, 500 / fs)
def butter_bandpass_filter(data, normal_cutoff, fs, order=2):
b, a = butter(order, normal_cutoff, btype='bandpass', analog=False)
y = filtfilt(b, a, data)
return y
def butter_bandpass_filter(data, low, high, fs, order):
sos = butter(order, [low, high], btype='bandpass', output='sos', fs=fs)
normalization = np.sqrt((high - low) / (fs / 2))
return sosfiltfilt(sos, data) / normalization
def butter_lowpass_filter(data, normal_cutoff, fs, order):
b, a = butter(order, normal_cutoff, btype='lowpass', analog=False)
y = filtfilt(b, a, data)
return y
data = torch.from_numpy(wave)
y = butter_bandpass_filter(data, 21.83, 500, fs, 4)
from scipy.signal import spectrogram
for i in range(len(wave)):
f, t, Sxx = spectrogram(wave1[i], fs=10)
plt.pcolormesh(t, f, Sxx, shading='gouraud')
def wrapper_plot(m):
m()
stacked = []
for j in range(len(wave1)):
melspec = librosa.feature.melspectrogram(wave1[j], sr=4096, n_mels=128, fmin=21.83, fmax=2048)
melspec = librosa.power_to_db(melspec)
melspec = melspec.transpose((1, 0))
stacked.append(melspec)
image = np.vstack(stacked)
wave.shape
cwt(wave[0], signal.ricker, np.arange(1, 300))
class CWT(nn.Module):
def __init__(self, widths, wavelet='ricker', channels=1, filter_len=2000):
"""PyTorch implementation of a continuous wavelet transform.
Args:
widths (iterable): The wavelet scales to use, e.g. np.arange(1, 33)
wavelet (str, optional): Name of wavelet. Either "ricker" or "morlet".
Defaults to "ricker".
channels (int, optional): Number of audio channels in the input. Defaults to 3.
filter_len (int, optional): Size of the wavelet filter bank. Set to
the number of samples but can be smaller to save memory. Defaults to 2000.
"""
super().__init__()
self.widths = widths
self.wavelet = getattr(self, wavelet)
self.filter_len = filter_len
self.channels = channels
self.wavelet_bank = self._build_wavelet_bank()
def ricker(self, points, a):
A = 2 / (np.sqrt(3 * a) * np.pi ** 0.25)
wsq = a ** 2
vec = torch.arange(0, points) - (points - 1.0) / 2
xsq = vec ** 2
mod = 1 - xsq / wsq
gauss = torch.exp(-xsq / (2 * wsq))
total = A * mod * gauss
return total
def morlet(self, points, s):
x = torch.arange(0, points) - (points - 1.0) / 2
x = x / s
wavelet = torch.exp(-x ** 2.0 / 2.0) * torch.cos(5.0 * x)
output = np.sqrt(1 / s) * wavelet
return output
def cmorlet(self, points, s, wavelet_width=1, center_freq=1):
x = torch.arange(0, points) - (points - 1.0) / 2
x = x / s
norm_constant = np.sqrt(np.pi * wavelet_width)
exp_term = torch.exp(-x ** 2 / wavelet_width)
kernel_base = exp_term / norm_constant
kernel = kernel_base * torch.exp(1j * 2 * np.pi * center_freq * x)
return kernel
def _build_wavelet_bank(self):
"""This function builds a 2D wavelet filter using wavelets at different scales
Returns:
tensor: Tensor of shape (num_widths, 1, channels, filter_len)
"""
wavelet_bank = [torch.conj(torch.flip(self.wavelet(self.filter_len, w), [-1])) for w in self.widths]
wavelet_bank = torch.stack(wavelet_bank)
wavelet_bank = wavelet_bank.view(wavelet_bank.shape[0], 1, 1, wavelet_bank.shape[1])
wavelet_bank = torch.cat([wavelet_bank] * self.channels, 2)
return wavelet_bank
def forward(self, x):
"""Compute CWT arrays from a batch of multi-channel inputs
Args:
x (torch.tensor): Tensor of shape (batch_size, channels, time)
Returns:
torch.tensor: Tensor of shape (batch_size, channels, widths, time)
"""
x = x.unsqueeze(1)
if self.wavelet_bank.is_complex():
wavelet_real = self.wavelet_bank.real.to(device=x.device, dtype=x.dtype)
wavelet_imag = self.wavelet_bank.imag.to(device=x.device, dtype=x.dtype)
output_real = nn.functional.conv2d(x, wavelet_real, padding='same')
output_imag = nn.functional.conv2d(x, wavelet_imag, padding='same')
output_real = torch.transpose(output_real, 1, 2)
output_imag = torch.transpose(output_imag, 1, 2)
return torch.complex(output_real, output_imag)
else:
self.wavelet_bank = self.wavelet_bank.to(device=x.device, dtype=x.dtype)
output = nn.functional.conv2d(x, self.wavelet_bank, padding='same')
return torch.transpose(output, 1, 2)
widths = np.arange(20, 120)
pycwt = CWT(widths, 'cmorlet', 3, 4096)
def apply_qtransform(waves, transform=CQT1992v2(sr=2048, fmin=21.83, fmax=1024, hop_length=64), cuda=False):
waves = min_max_scaler(butter_bandpass_filter(waves, 21.83, 500, fs, 4))
waves = torch.from_numpy(waves).float().view(1, 3, 4096)
if cuda:
waves = waves.cuda()
image = pycwt(waves)
return image
imgs = []
for i in range(10):
img = apply_qtransform(wave, transform=CQT1992v2(sr=2048, fmin=21.83, n_bins=63, hop_length=64))
imgs.append(img)
img[0, 0].shape | code |
72068023/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from numba import njit, jit, cuda, guvectorize
from scipy.signal import butter, filtfilt, sosfiltfilt
from scipy.signal import spectrogram
import glob
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pathlib
import torch
import glob
import pathlib
head = pathlib.Path('../input/g2net-gravitational-wave-detection')
train_files = sorted(glob.glob('../input/g2net-gravitational-wave-detection/train/*/*/*/*.npy'))
wave = np.load(train_files[0])
from numba import njit, jit, cuda, guvectorize
@njit(nogil=True)
def min_max_scaler(wave):
for i in range(len(wave)):
wave[i] = (wave[i] - min(wave[i])) / (max(wave[i]) - min(wave[i]))
wave[i] = 2 * wave[i] - 1
return wave
wave1 = min_max_scaler(wave)
from scipy.signal import butter, filtfilt, sosfiltfilt
T = 2
fs = 2048.0
cutoff = 2.5
nyq = 0.5 * fs
order = 3
n = int(T * fs)
normal_cutoff = cutoff / nyq
normal_cutoff = (21.83 / fs, 500 / fs)
def butter_bandpass_filter(data, normal_cutoff, fs, order=2):
b, a = butter(order, normal_cutoff, btype='bandpass', analog=False)
y = filtfilt(b, a, data)
return y
def butter_bandpass_filter(data, low, high, fs, order):
sos = butter(order, [low, high], btype='bandpass', output='sos', fs=fs)
normalization = np.sqrt((high - low) / (fs / 2))
return sosfiltfilt(sos, data) / normalization
def butter_lowpass_filter(data, normal_cutoff, fs, order):
b, a = butter(order, normal_cutoff, btype='lowpass', analog=False)
y = filtfilt(b, a, data)
return y
data = torch.from_numpy(wave)
y = butter_bandpass_filter(data, 21.83, 500, fs, 4)
from scipy.signal import spectrogram
plt.figure(dpi=120)
for i in range(len(wave)):
f, t, Sxx = spectrogram(wave1[i], fs=10)
plt.pcolormesh(t, f, Sxx, shading='gouraud') | code |
72068023/cell_25 | [
"image_output_1.png"
] | t.min() | code |
72068023/cell_4 | [
"text_plain_output_1.png"
] | import fastai
import fastai
import torch
fastai.__version__ | code |
72068023/cell_33 | [
"text_plain_output_1.png"
] | import gc
import gc
gc.collect() | code |
72068023/cell_40 | [
"text_plain_output_1.png"
] | from numba import njit, jit, cuda, guvectorize
from scipy.signal import butter, filtfilt, sosfiltfilt
from scipy.signal import cwt, ricker
from scipy.signal import spectrogram
import glob
import librosa
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pathlib
import torch
import torch
import glob
import pathlib
head = pathlib.Path('../input/g2net-gravitational-wave-detection')
train_files = sorted(glob.glob('../input/g2net-gravitational-wave-detection/train/*/*/*/*.npy'))
wave = np.load(train_files[0])
from numba import njit, jit, cuda, guvectorize
@njit(nogil=True)
def min_max_scaler(wave):
for i in range(len(wave)):
wave[i] = (wave[i] - min(wave[i])) / (max(wave[i]) - min(wave[i]))
wave[i] = 2 * wave[i] - 1
return wave
wave1 = min_max_scaler(wave)
from scipy.signal import butter, filtfilt, sosfiltfilt
T = 2
fs = 2048.0
cutoff = 2.5
nyq = 0.5 * fs
order = 3
n = int(T * fs)
normal_cutoff = cutoff / nyq
normal_cutoff = (21.83 / fs, 500 / fs)
def butter_bandpass_filter(data, normal_cutoff, fs, order=2):
b, a = butter(order, normal_cutoff, btype='bandpass', analog=False)
y = filtfilt(b, a, data)
return y
def butter_bandpass_filter(data, low, high, fs, order):
sos = butter(order, [low, high], btype='bandpass', output='sos', fs=fs)
normalization = np.sqrt((high - low) / (fs / 2))
return sosfiltfilt(sos, data) / normalization
def butter_lowpass_filter(data, normal_cutoff, fs, order):
b, a = butter(order, normal_cutoff, btype='lowpass', analog=False)
y = filtfilt(b, a, data)
return y
data = torch.from_numpy(wave)
y = butter_bandpass_filter(data, 21.83, 500, fs, 4)
from scipy.signal import spectrogram
for i in range(len(wave)):
f, t, Sxx = spectrogram(wave1[i], fs=10)
plt.pcolormesh(t, f, Sxx, shading='gouraud')
def wrapper_plot(m):
m()
stacked = []
for j in range(len(wave1)):
melspec = librosa.feature.melspectrogram(wave1[j], sr=4096, n_mels=128, fmin=21.83, fmax=2048)
melspec = librosa.power_to_db(melspec)
melspec = melspec.transpose((1, 0))
stacked.append(melspec)
image = np.vstack(stacked)
wave.shape
cwt(wave[0], signal.ricker, np.arange(1, 300))
class CWT(nn.Module):
    # Continuous wavelet transform implemented as a torch Module: the
    # per-scale wavelet filters are precomputed once in __init__ and the
    # transform itself is a 2-D convolution in forward().
    def __init__(self, widths, wavelet='ricker', channels=1, filter_len=2000):
        """PyTorch implementation of a continuous wavelet transform.
        Args:
            widths (iterable): The wavelet scales to use, e.g. np.arange(1, 33)
            wavelet (str, optional): Name of wavelet. Either "ricker" or "morlet".
                Defaults to "ricker".
            channels (int, optional): Number of audio channels in the input. Defaults to 3.
            filter_len (int, optional): Size of the wavelet filter bank. Set to
                the number of samples but can be smaller to save memory. Defaults to 2000.
        """
        super().__init__()
        self.widths = widths
        # Resolve the wavelet name to the bound method of the same name
        # ('ricker', 'morlet', or 'cmorlet' below).
        self.wavelet = getattr(self, wavelet)
        self.filter_len = filter_len
        self.channels = channels
        # Precompute the filter bank once; forward() reuses it every call.
        self.wavelet_bank = self._build_wavelet_bank()
    def ricker(self, points, a):
        # Ricker ("Mexican hat") wavelet of width `a`, sampled at `points`
        # symmetric positions around zero.
        A = 2 / (np.sqrt(3 * a) * np.pi ** 0.25)  # standard Ricker normalization
        wsq = a ** 2
        vec = torch.arange(0, points) - (points - 1.0) / 2
        xsq = vec ** 2
        mod = 1 - xsq / wsq
        gauss = torch.exp(-xsq / (2 * wsq))
        total = A * mod * gauss
        return total
    def morlet(self, points, s):
        # Real-valued Morlet wavelet at scale `s`: a cosine carrier under a
        # Gaussian envelope, scaled by 1/sqrt(s) for energy normalization.
        x = torch.arange(0, points) - (points - 1.0) / 2
        x = x / s
        wavelet = torch.exp(-x ** 2.0 / 2.0) * torch.cos(5.0 * x)
        output = np.sqrt(1 / s) * wavelet
        return output
    def cmorlet(self, points, s, wavelet_width=1, center_freq=1):
        # Complex Morlet wavelet at scale `s`: Gaussian envelope times a
        # complex exponential carrier at `center_freq`.
        x = torch.arange(0, points) - (points - 1.0) / 2
        x = x / s
        norm_constant = np.sqrt(np.pi * wavelet_width)
        exp_term = torch.exp(-x ** 2 / wavelet_width)
        kernel_base = exp_term / norm_constant
        kernel = kernel_base * torch.exp(1j * 2 * np.pi * center_freq * x)
        return kernel
    def _build_wavelet_bank(self):
        """This function builds a 2D wavelet filter using wavelets at different scales
        Returns:
            tensor: Tensor of shape (num_widths, 1, channels, filter_len)
        """
        # Flip and conjugate each wavelet so that conv2d (which correlates
        # with a flipped kernel) effectively computes the CWT correlation.
        wavelet_bank = [torch.conj(torch.flip(self.wavelet(self.filter_len, w), [-1])) for w in self.widths]
        wavelet_bank = torch.stack(wavelet_bank)
        # Reshape to (num_widths, 1, 1, filter_len) then replicate along the
        # channel axis so one filter covers all input channels.
        wavelet_bank = wavelet_bank.view(wavelet_bank.shape[0], 1, 1, wavelet_bank.shape[1])
        wavelet_bank = torch.cat([wavelet_bank] * self.channels, 2)
        return wavelet_bank
    def forward(self, x):
        """Compute CWT arrays from a batch of multi-channel inputs
        Args:
            x (torch.tensor): Tensor of shape (batch_size, channels, time)
        Returns:
            torch.tensor: Tensor of shape (batch_size, channels, widths, time)
        """
        # Insert a singleton dim so the input matches conv2d's NCHW layout.
        x = x.unsqueeze(1)
        if self.wavelet_bank.is_complex():
            # conv2d is applied separately to the real and imaginary parts
            # of the complex filter bank, then recombined.
            wavelet_real = self.wavelet_bank.real.to(device=x.device, dtype=x.dtype)
            wavelet_imag = self.wavelet_bank.imag.to(device=x.device, dtype=x.dtype)
            output_real = nn.functional.conv2d(x, wavelet_real, padding='same')
            output_imag = nn.functional.conv2d(x, wavelet_imag, padding='same')
            output_real = torch.transpose(output_real, 1, 2)
            output_imag = torch.transpose(output_imag, 1, 2)
            return torch.complex(output_real, output_imag)
        else:
            # Real filter bank: move it to the input's device/dtype once,
            # convolve, and swap dims to (batch, channels, widths, time).
            self.wavelet_bank = self.wavelet_bank.to(device=x.device, dtype=x.dtype)
            output = nn.functional.conv2d(x, self.wavelet_bank, padding='same')
            return torch.transpose(output, 1, 2)
widths = np.arange(20, 120)
pycwt = CWT(widths, 'cmorlet', 3, 4096)
def apply_qtransform(waves, transform=CQT1992v2(sr=2048, fmin=21.83, fmax=1024, hop_length=64), cuda=False):
    # Preprocess a raw strain array and convert it into a time-frequency
    # image via the module-level CWT instance `pycwt`.
    # NOTE(review): the `transform` default (an nnAudio CQT, constructed
    # once at definition time) is never used in the body -- `pycwt` is
    # applied instead; confirm whether that is intentional.
    waves = min_max_scaler(butter_bandpass_filter(waves, 21.83, 500, fs, 4))  # band-pass 21.83-500 Hz, then scale each channel to [-1, 1]
    waves = torch.from_numpy(waves).float().view(1, 3, 4096)  # reshape to (batch=1, channels=3, time=4096)
    if cuda:
        waves = waves.cuda()
    image = pycwt(waves)
    return image
imgs = []
for i in range(10):
img = apply_qtransform(wave, transform=CQT1992v2(sr=2048, fmin=21.83, n_bins=63, hop_length=64))
imgs.append(img)
print(img.shape) | code |
72068023/cell_19 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from scipy.signal import butter, filtfilt, sosfiltfilt
import glob
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pathlib
import torch
import glob
import pathlib
head = pathlib.Path('../input/g2net-gravitational-wave-detection')
train_files = sorted(glob.glob('../input/g2net-gravitational-wave-detection/train/*/*/*/*.npy'))
wave = np.load(train_files[0])
from scipy.signal import butter, filtfilt, sosfiltfilt
T = 2
fs = 2048.0
cutoff = 2.5
nyq = 0.5 * fs
order = 3
n = int(T * fs)
normal_cutoff = cutoff / nyq
normal_cutoff = (21.83 / fs, 500 / fs)
def butter_bandpass_filter(data, normal_cutoff, fs, order=2):
    # Zero-phase Butterworth band-pass.  `normal_cutoff` is the pair of
    # normalized critical frequencies given to `butter`; `fs` is unused here.
    # NOTE(review): this definition is immediately shadowed by the
    # sos-based version defined right below it.
    b, a = butter(order, normal_cutoff, btype='bandpass', analog=False)
    y = filtfilt(b, a, data)  # forward-backward filtering -> no phase shift
    return y
def butter_bandpass_filter(data, low, high, fs, order):
    # Band-pass between `low` and `high` Hz using second-order sections,
    # applied forward and backward (zero phase).  The output is divided by
    # sqrt(bandwidth / nyquist) to keep amplitudes comparable across bands.
    sos = butter(order, [low, high], btype='bandpass', output='sos', fs=fs)
    normalization = np.sqrt((high - low) / (fs / 2))
    return sosfiltfilt(sos, data) / normalization
def butter_lowpass_filter(data, normal_cutoff, fs, order):
    # Zero-phase Butterworth low-pass; `normal_cutoff` is the normalized
    # cutoff handed to `butter`, and `fs` is accepted but unused.
    b, a = butter(order, normal_cutoff, btype='lowpass', analog=False)
    y = filtfilt(b, a, data)
    return y
data = torch.from_numpy(wave)
y = butter_bandpass_filter(data, 21.83, 500, fs, 4)
plt.figure(dpi=120)
plt.plot(range(len(wave[0])), y[0]) | code |
72068023/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from scipy.signal import butter, filtfilt, sosfiltfilt
import glob
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pathlib
import torch
import glob
import pathlib
head = pathlib.Path('../input/g2net-gravitational-wave-detection')
train_files = sorted(glob.glob('../input/g2net-gravitational-wave-detection/train/*/*/*/*.npy'))
wave = np.load(train_files[0])
from scipy.signal import butter, filtfilt, sosfiltfilt
T = 2
fs = 2048.0
cutoff = 2.5
nyq = 0.5 * fs
order = 3
n = int(T * fs)
normal_cutoff = cutoff / nyq
normal_cutoff = (21.83 / fs, 500 / fs)
def butter_bandpass_filter(data, normal_cutoff, fs, order=2):
b, a = butter(order, normal_cutoff, btype='bandpass', analog=False)
y = filtfilt(b, a, data)
return y
def butter_bandpass_filter(data, low, high, fs, order):
sos = butter(order, [low, high], btype='bandpass', output='sos', fs=fs)
normalization = np.sqrt((high - low) / (fs / 2))
return sosfiltfilt(sos, data) / normalization
def butter_lowpass_filter(data, normal_cutoff, fs, order):
b, a = butter(order, normal_cutoff, btype='lowpass', analog=False)
y = filtfilt(b, a, data)
return y
data = torch.from_numpy(wave)
y = butter_bandpass_filter(data, 21.83, 500, fs, 4)
plt.figure(dpi=120)
plt.plot(range(len(wave[0])), y[0]) | code |
72068023/cell_24 | [
"image_output_1.png"
] | from numba import njit, jit, cuda, guvectorize
from scipy.signal import butter, filtfilt, sosfiltfilt
from scipy.signal import spectrogram
import glob
import librosa
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pathlib
import torch
import glob
import pathlib
head = pathlib.Path('../input/g2net-gravitational-wave-detection')
train_files = sorted(glob.glob('../input/g2net-gravitational-wave-detection/train/*/*/*/*.npy'))
wave = np.load(train_files[0])
from numba import njit, jit, cuda, guvectorize
@njit(nogil=True)
def min_max_scaler(wave):
for i in range(len(wave)):
wave[i] = (wave[i] - min(wave[i])) / (max(wave[i]) - min(wave[i]))
wave[i] = 2 * wave[i] - 1
return wave
wave1 = min_max_scaler(wave)
from scipy.signal import butter, filtfilt, sosfiltfilt
T = 2
fs = 2048.0
cutoff = 2.5
nyq = 0.5 * fs
order = 3
n = int(T * fs)
normal_cutoff = cutoff / nyq
normal_cutoff = (21.83 / fs, 500 / fs)
def butter_bandpass_filter(data, normal_cutoff, fs, order=2):
b, a = butter(order, normal_cutoff, btype='bandpass', analog=False)
y = filtfilt(b, a, data)
return y
def butter_bandpass_filter(data, low, high, fs, order):
sos = butter(order, [low, high], btype='bandpass', output='sos', fs=fs)
normalization = np.sqrt((high - low) / (fs / 2))
return sosfiltfilt(sos, data) / normalization
def butter_lowpass_filter(data, normal_cutoff, fs, order):
b, a = butter(order, normal_cutoff, btype='lowpass', analog=False)
y = filtfilt(b, a, data)
return y
data = torch.from_numpy(wave)
y = butter_bandpass_filter(data, 21.83, 500, fs, 4)
from scipy.signal import spectrogram
for i in range(len(wave)):
f, t, Sxx = spectrogram(wave1[i], fs=10)
plt.pcolormesh(t, f, Sxx, shading='gouraud')
def wrapper_plot(m):
m()
stacked = []
for j in range(len(wave1)):
melspec = librosa.feature.melspectrogram(wave1[j], sr=4096, n_mels=128, fmin=21.83, fmax=2048)
melspec = librosa.power_to_db(melspec)
melspec = melspec.transpose((1, 0))
stacked.append(melspec)
image = np.vstack(stacked)
wrapper_plot(lambda: plt.imshow(image)) | code |
72068023/cell_10 | [
"text_plain_output_1.png"
] | import glob
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pathlib
import glob
import pathlib
head = pathlib.Path('../input/g2net-gravitational-wave-detection')
train_files = sorted(glob.glob('../input/g2net-gravitational-wave-detection/train/*/*/*/*.npy'))
wave = np.load(train_files[0])
plt.figure(dpi=120)
for i in range(len(wave)):
plt.plot(range(len(wave[i])), wave[i], label=f'label_{i}')
plt.legend() | code |
72068023/cell_37 | [
"text_plain_output_1.png"
] | from numba import njit, jit, cuda, guvectorize
from scipy.signal import butter, filtfilt, sosfiltfilt
from scipy.signal import cwt, ricker
from scipy.signal import spectrogram
import glob
import librosa
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pathlib
import torch
import torch
import glob
import pathlib
head = pathlib.Path('../input/g2net-gravitational-wave-detection')
train_files = sorted(glob.glob('../input/g2net-gravitational-wave-detection/train/*/*/*/*.npy'))
wave = np.load(train_files[0])
from numba import njit, jit, cuda, guvectorize
@njit(nogil=True)
def min_max_scaler(wave):
for i in range(len(wave)):
wave[i] = (wave[i] - min(wave[i])) / (max(wave[i]) - min(wave[i]))
wave[i] = 2 * wave[i] - 1
return wave
wave1 = min_max_scaler(wave)
from scipy.signal import butter, filtfilt, sosfiltfilt
T = 2
fs = 2048.0
cutoff = 2.5
nyq = 0.5 * fs
order = 3
n = int(T * fs)
normal_cutoff = cutoff / nyq
normal_cutoff = (21.83 / fs, 500 / fs)
def butter_bandpass_filter(data, normal_cutoff, fs, order=2):
b, a = butter(order, normal_cutoff, btype='bandpass', analog=False)
y = filtfilt(b, a, data)
return y
def butter_bandpass_filter(data, low, high, fs, order):
sos = butter(order, [low, high], btype='bandpass', output='sos', fs=fs)
normalization = np.sqrt((high - low) / (fs / 2))
return sosfiltfilt(sos, data) / normalization
def butter_lowpass_filter(data, normal_cutoff, fs, order):
b, a = butter(order, normal_cutoff, btype='lowpass', analog=False)
y = filtfilt(b, a, data)
return y
data = torch.from_numpy(wave)
y = butter_bandpass_filter(data, 21.83, 500, fs, 4)
from scipy.signal import spectrogram
for i in range(len(wave)):
f, t, Sxx = spectrogram(wave1[i], fs=10)
plt.pcolormesh(t, f, Sxx, shading='gouraud')
def wrapper_plot(m):
m()
stacked = []
for j in range(len(wave1)):
melspec = librosa.feature.melspectrogram(wave1[j], sr=4096, n_mels=128, fmin=21.83, fmax=2048)
melspec = librosa.power_to_db(melspec)
melspec = melspec.transpose((1, 0))
stacked.append(melspec)
image = np.vstack(stacked)
wave.shape
cwt(wave[0], signal.ricker, np.arange(1, 300)) | code |
72068023/cell_36 | [
"text_plain_output_1.png"
] | import glob
import numpy as np # linear algebra
import pathlib
import glob
import pathlib
head = pathlib.Path('../input/g2net-gravitational-wave-detection')
train_files = sorted(glob.glob('../input/g2net-gravitational-wave-detection/train/*/*/*/*.npy'))
wave = np.load(train_files[0])
wave.shape | code |
90123330/cell_24 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
Nomes = 'Masculino Feminino'.split()
Med = [1500, 1500]
import matplotlib.pyplot as plt
Nomes = 'Masculino Feminino'.split()
Med = [1500, 1500]
plt.pie(Med, labels=Nomes)
plt.show() | code |
90123330/cell_22 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
Nomes = 'Masculino Feminino'.split()
Med = [1500, 1500]
plt.pie(Med, labels=Nomes)
plt.show() | code |
74054195/cell_42 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
model_lr_smt = LogisticRegression(solver='liblinear')
model_lr_smt.fit(X_train, y_train) | code |
74054195/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
main_df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df = main_df.copy()
df.isnull().sum()
legit = df[df.Class == 0]
fraud = df[df.Class == 1]
fraud.Amount.describe() | code |
74054195/cell_23 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
main_df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df = main_df.copy()
df.isnull().sum()
legit = df[df.Class == 0]
fraud = df[df.Class == 1]
legit_sample = legit.sample(n=492)
new_df = pd.concat([legit_sample, fraud], axis=0)
new_df.shape
new_df.groupby('Class').mean()
X = new_df.drop(columns='Class', axis=1)
Y = new_df['Class']
Y.shape | code |
74054195/cell_30 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, plot_roc_curve, classification_report
import numpy as np
import numpy as np # linear algebra
import seaborn as sns
model_lr = LogisticRegression(max_iter=120, random_state=0, n_jobs=20, solver='liblinear')
model_lr.fit(X_train, y_train)
trn_lr_pred = model_lr.predict(X_train)
trn_lr_acc = accuracy_score(trn_lr_pred, y_train)
tst_lr_pred = model_lr.predict(X_test)
tst_lr_acc = accuracy_score(tst_lr_pred, y_test)
cm1 = confusion_matrix(y_test, tst_lr_pred)
sns.heatmap(cm1 / np.sum(cm1), annot=True, fmt='0.2%', cmap='Reds') | code |
74054195/cell_44 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, plot_roc_curve, classification_report
model_lr = LogisticRegression(max_iter=120, random_state=0, n_jobs=20, solver='liblinear')
model_lr.fit(X_train, y_train)
trn_lr_pred = model_lr.predict(X_train)
trn_lr_acc = accuracy_score(trn_lr_pred, y_train)
tst_lr_pred = model_lr.predict(X_test)
tst_lr_acc = accuracy_score(tst_lr_pred, y_test)
trn_lr_smt_pred = model_lr.predict(X_train)
trn_lr_smt_acc = accuracy_score(trn_lr_smt_pred, y_train)
tst_lr_smt_pred = model_lr.predict(X_test)
tst_lr_smt_acc = accuracy_score(tst_lr_smt_pred, y_test)
print(round(tst_lr_smt_acc * 100, 2)) | code |
74054195/cell_20 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
main_df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df = main_df.copy()
df.isnull().sum()
legit = df[df.Class == 0]
fraud = df[df.Class == 1]
legit_sample = legit.sample(n=492)
new_df = pd.concat([legit_sample, fraud], axis=0)
new_df.shape
new_df.groupby('Class').mean() | code |
74054195/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
main_df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df = main_df.copy()
df.info() | code |
74054195/cell_40 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, plot_roc_curve, classification_report
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
main_df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df = main_df.copy()
df.isnull().sum()
legit = df[df.Class == 0]
fraud = df[df.Class == 1]
df.groupby('Class').mean()
model_lr = LogisticRegression(max_iter=120, random_state=0, n_jobs=20, solver='liblinear')
model_lr.fit(X_train, y_train)
trn_lr_pred = model_lr.predict(X_train)
trn_lr_acc = accuracy_score(trn_lr_pred, y_train)
tst_lr_pred = model_lr.predict(X_test)
tst_lr_acc = accuracy_score(tst_lr_pred, y_test)
cm1 = confusion_matrix(y_test, tst_lr_pred)
df.shape
X1 = df.drop(columns='Class', axis=1)
y1 = df['Class']
(X1.shape, y1.shape)
unique_original, counts_original = np.unique(y1, return_counts=True)
unique_oversampled, counts_oversampled = np.unique(y_oversampled, return_counts=True)
print('Original fraud class distribution:', dict(zip(unique_original, counts_original)))
print('New transformed fraud class distribution:', dict(zip(unique_oversampled, counts_oversampled))) | code |
74054195/cell_39 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
pipeline = Pipeline([('model', LogisticRegression(solver='liblinear'))])
pipeline.fit(X_oversampled, y_oversampled) | code |
74054195/cell_26 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
model_lr = LogisticRegression(max_iter=120, random_state=0, n_jobs=20, solver='liblinear')
model_lr.fit(X_train, y_train) | code |
74054195/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
main_df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df = main_df.copy()
df.isnull().sum()
legit = df[df.Class == 0]
fraud = df[df.Class == 1]
print(legit.shape)
print(fraud.shape) | code |
74054195/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
main_df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df = main_df.copy()
df.isnull().sum()
legit = df[df.Class == 0]
fraud = df[df.Class == 1]
legit_sample = legit.sample(n=492)
new_df = pd.concat([legit_sample, fraud], axis=0)
new_df.shape
new_df['Class'].value_counts() | code |
74054195/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
74054195/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
main_df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df = main_df.copy()
df.isnull().sum() | code |
74054195/cell_45 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, plot_roc_curve, classification_report
import numpy as np
import numpy as np # linear algebra
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
main_df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df = main_df.copy()
df.isnull().sum()
legit = df[df.Class == 0]
fraud = df[df.Class == 1]
df.groupby('Class').mean()
model_lr = LogisticRegression(max_iter=120, random_state=0, n_jobs=20, solver='liblinear')
model_lr.fit(X_train, y_train)
trn_lr_pred = model_lr.predict(X_train)
trn_lr_acc = accuracy_score(trn_lr_pred, y_train)
tst_lr_pred = model_lr.predict(X_test)
tst_lr_acc = accuracy_score(tst_lr_pred, y_test)
cm1 = confusion_matrix(y_test, tst_lr_pred)
df.shape
X1 = df.drop(columns='Class', axis=1)
y1 = df['Class']
(X1.shape, y1.shape)
unique_original, counts_original = np.unique(y1, return_counts=True)
unique_oversampled, counts_oversampled = np.unique(y_oversampled, return_counts=True)
trn_lr_smt_pred = model_lr.predict(X_train)
trn_lr_smt_acc = accuracy_score(trn_lr_smt_pred, y_train)
tst_lr_smt_pred = model_lr.predict(X_test)
tst_lr_smt_acc = accuracy_score(tst_lr_smt_pred, y_test)
cm2 = confusion_matrix(y_test, tst_lr_smt_pred)
sns.heatmap(cm2 / np.sum(cm2), annot=True, fmt='0.2%', cmap='Reds') | code |
74054195/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
main_df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df = main_df.copy()
df.isnull().sum()
legit = df[df.Class == 0]
fraud = df[df.Class == 1]
legit_sample = legit.sample(n=492)
new_df = pd.concat([legit_sample, fraud], axis=0)
new_df.shape
new_df.head() | code |
74054195/cell_32 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, plot_roc_curve, classification_report
model_lr = LogisticRegression(max_iter=120, random_state=0, n_jobs=20, solver='liblinear')
model_lr.fit(X_train, y_train)
trn_lr_pred = model_lr.predict(X_train)
trn_lr_acc = accuracy_score(trn_lr_pred, y_train)
tst_lr_pred = model_lr.predict(X_test)
tst_lr_acc = accuracy_score(tst_lr_pred, y_test)
print(classification_report(y_test, tst_lr_pred)) | code |
74054195/cell_28 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, plot_roc_curve, classification_report
model_lr = LogisticRegression(max_iter=120, random_state=0, n_jobs=20, solver='liblinear')
model_lr.fit(X_train, y_train)
trn_lr_pred = model_lr.predict(X_train)
trn_lr_acc = accuracy_score(trn_lr_pred, y_train)
tst_lr_pred = model_lr.predict(X_test)
tst_lr_acc = accuracy_score(tst_lr_pred, y_test)
print(round(tst_lr_acc * 100, 2)) | code |
74054195/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
main_df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df = main_df.copy()
df.isnull().sum()
df['Class'].value_counts() | code |
74054195/cell_47 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, plot_roc_curve, classification_report
model_lr = LogisticRegression(max_iter=120, random_state=0, n_jobs=20, solver='liblinear')
model_lr.fit(X_train, y_train)
trn_lr_pred = model_lr.predict(X_train)
trn_lr_acc = accuracy_score(trn_lr_pred, y_train)
tst_lr_pred = model_lr.predict(X_test)
tst_lr_acc = accuracy_score(tst_lr_pred, y_test)
trn_lr_smt_pred = model_lr.predict(X_train)
trn_lr_smt_acc = accuracy_score(trn_lr_smt_pred, y_train)
tst_lr_smt_pred = model_lr.predict(X_test)
tst_lr_smt_acc = accuracy_score(tst_lr_smt_pred, y_test)
print(classification_report(y_test, tst_lr_smt_pred)) | code |
74054195/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
main_df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df = main_df.copy()
df.isnull().sum()
legit = df[df.Class == 0]
fraud = df[df.Class == 1]
legit_sample = legit.sample(n=492)
new_df = pd.concat([legit_sample, fraud], axis=0)
new_df.shape | code |
74054195/cell_35 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
main_df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df = main_df.copy()
df.isnull().sum()
legit = df[df.Class == 0]
fraud = df[df.Class == 1]
df.groupby('Class').mean()
df.shape | code |
74054195/cell_43 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, plot_roc_curve, classification_report
model_lr = LogisticRegression(max_iter=120, random_state=0, n_jobs=20, solver='liblinear')
model_lr.fit(X_train, y_train)
trn_lr_pred = model_lr.predict(X_train)
trn_lr_acc = accuracy_score(trn_lr_pred, y_train)
tst_lr_pred = model_lr.predict(X_test)
tst_lr_acc = accuracy_score(tst_lr_pred, y_test)
trn_lr_smt_pred = model_lr.predict(X_train)
trn_lr_smt_acc = accuracy_score(trn_lr_smt_pred, y_train)
print(round(trn_lr_smt_acc * 100, 2)) | code |
74054195/cell_31 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, plot_roc_curve, classification_report
import matplotlib.pyplot as plt
model_lr = LogisticRegression(max_iter=120, random_state=0, n_jobs=20, solver='liblinear')
model_lr.fit(X_train, y_train)
trn_lr_pred = model_lr.predict(X_train)
trn_lr_acc = accuracy_score(trn_lr_pred, y_train)
tst_lr_pred = model_lr.predict(X_test)
tst_lr_acc = accuracy_score(tst_lr_pred, y_test)
fig, ax = plt.subplots(figsize=(12, 8))
plot_roc_curve(model_lr, X_test, y_test, color='darkgreen', ax=ax) | code |
74054195/cell_46 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, plot_roc_curve, classification_report
import matplotlib.pyplot as plt
model_lr = LogisticRegression(max_iter=120, random_state=0, n_jobs=20, solver='liblinear')
model_lr.fit(X_train, y_train)
trn_lr_pred = model_lr.predict(X_train)
trn_lr_acc = accuracy_score(trn_lr_pred, y_train)
tst_lr_pred = model_lr.predict(X_test)
tst_lr_acc = accuracy_score(tst_lr_pred, y_test)
fig, ax = plt.subplots(figsize=(12, 8))
plot_roc_curve(model_lr, X_test, y_test, color='darkgreen', ax=ax)
model_lr_smt = LogisticRegression(solver='liblinear')
model_lr_smt.fit(X_train, y_train)
fig, ax = plt.subplots(figsize=(12, 8))
plot_roc_curve(model_lr_smt, X_test, y_test, color='darkgreen', ax=ax) | code |
74054195/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
main_df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df = main_df.copy()
df.isnull().sum()
legit = df[df.Class == 0]
fraud = df[df.Class == 1]
df.groupby('Class').mean() | code |
74054195/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
main_df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df = main_df.copy()
df.isnull().sum()
legit = df[df.Class == 0]
fraud = df[df.Class == 1]
legit_sample = legit.sample(n=492)
new_df = pd.concat([legit_sample, fraud], axis=0)
new_df.shape
new_df.groupby('Class').mean()
X = new_df.drop(columns='Class', axis=1)
Y = new_df['Class']
X.shape | code |
74054195/cell_27 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, plot_roc_curve, classification_report
model_lr = LogisticRegression(max_iter=120, random_state=0, n_jobs=20, solver='liblinear')
model_lr.fit(X_train, y_train)
trn_lr_pred = model_lr.predict(X_train)
trn_lr_acc = accuracy_score(trn_lr_pred, y_train)
print(round(trn_lr_acc * 100, 2)) | code |
74054195/cell_37 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
main_df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df = main_df.copy()
df.isnull().sum()
legit = df[df.Class == 0]
fraud = df[df.Class == 1]
df.groupby('Class').mean()
df.shape
X1 = df.drop(columns='Class', axis=1)
y1 = df['Class']
(X1.shape, y1.shape) | code |
74054195/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
main_df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df = main_df.copy()
df.isnull().sum()
legit = df[df.Class == 0]
fraud = df[df.Class == 1]
legit.Amount.describe() | code |
74054195/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
main_df = pd.read_csv('/kaggle/input/creditcardfraud/creditcard.csv')
df = main_df.copy()
df.head() | code |
106202730/cell_4 | [
"image_output_1.png"
] | import igraph as ig
import igraph as ig
import igraph as ig
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import igraph as ig
import matplotlib.pyplot as plt
g = ig.Graph.Famous('Zachary')
# Compute cliques
cliques = g.cliques(4, 4)
# Plot each clique highlighted in a separate axes
fig, axs = plt.subplots(3, 4, figsize = (20,10))
axs = axs.ravel()
for clique, ax in zip(cliques, axs):
ig.plot(
ig.VertexCover(g, [clique]),
mark_groups=True, palette=ig.RainbowPalette(),
edge_width=0.5,
target=ax,
)
plt.axis('off')
plt.show()
import igraph as ig
import matplotlib.pyplot as plt
g = ig.Graph.Famous('Zachary')
# Compute cliques
cliques = g.maximal_cliques(min = 4)# (4, 4)
# Plot each clique highlighted in a separate axes
fig, axs = plt.subplots(1, 4, figsize = (20,10))
axs = axs.ravel()
for clique, ax in zip(cliques, axs):
ig.plot(
ig.VertexCover(g, [clique]),
mark_groups=True, palette=ig.RainbowPalette(),
edge_width=0.5,
target=ax,
)
plt.axis('off')
plt.show()
()
import igraph as ig
import matplotlib.pyplot as plt
g = ig.Graph.Famous('Zachary')
cliques = g.largest_cliques()
fig, axs = plt.subplots(1, 2, figsize=(20, 10))
axs = axs.ravel()
for clique, ax in zip(cliques, axs):
ig.plot(ig.VertexCover(g, [clique]), mark_groups=True, palette=ig.RainbowPalette(), edge_width=0.5, target=ax)
plt.axis('off')
plt.show() | code |
106202730/cell_2 | [
"image_output_1.png"
] | import igraph as ig
import matplotlib.pyplot as plt
import igraph as ig
import matplotlib.pyplot as plt
g = ig.Graph.Famous('Zachary')
cliques = g.cliques(4, 4)
fig, axs = plt.subplots(3, 4, figsize=(20, 10))
axs = axs.ravel()
for clique, ax in zip(cliques, axs):
ig.plot(ig.VertexCover(g, [clique]), mark_groups=True, palette=ig.RainbowPalette(), edge_width=0.5, target=ax)
plt.axis('off')
plt.show() | code |
106202730/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
106202730/cell_8 | [
"image_output_1.png"
] | import igraph as ig
import igraph as ig
import igraph as ig
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import igraph as ig
import matplotlib.pyplot as plt
g = ig.Graph.Famous('Zachary')
# Compute cliques
cliques = g.cliques(4, 4)
# Plot each clique highlighted in a separate axes
fig, axs = plt.subplots(3, 4, figsize = (20,10))
axs = axs.ravel()
for clique, ax in zip(cliques, axs):
ig.plot(
ig.VertexCover(g, [clique]),
mark_groups=True, palette=ig.RainbowPalette(),
edge_width=0.5,
target=ax,
)
plt.axis('off')
plt.show()
import igraph as ig
import matplotlib.pyplot as plt
g = ig.Graph.Famous('Zachary')
# Compute cliques
cliques = g.maximal_cliques(min = 4)# (4, 4)
# Plot each clique highlighted in a separate axes
fig, axs = plt.subplots(1, 4, figsize = (20,10))
axs = axs.ravel()
for clique, ax in zip(cliques, axs):
ig.plot(
ig.VertexCover(g, [clique]),
mark_groups=True, palette=ig.RainbowPalette(),
edge_width=0.5,
target=ax,
)
plt.axis('off')
plt.show()
()
import igraph as ig
import matplotlib.pyplot as plt
g = ig.Graph.Famous('Zachary')
# Compute cliques
cliques = g.largest_cliques()# (min = 4)# (4, 4)
# Plot each clique highlighted in a separate axes
fig, axs = plt.subplots(1, 2, figsize = (20,10))
axs = axs.ravel()
for clique, ax in zip(cliques, axs):
ig.plot(
ig.VertexCover(g, [clique]),
mark_groups=True, palette=ig.RainbowPalette(),
edge_width=0.5,
target=ax,
)
plt.axis('off')
plt.show()
subgraph_vs = g.subgraph(cliques[0])
fig, axs = plt.subplots(1, 2, figsize=(20, 10))
ig.plot(subgraph_vs, mark_groups=True, palette=ig.RainbowPalette(), edge_width=0.5, target=axs[0])
plt.axis('off')
plt.show() | code |
106202730/cell_3 | [
"image_output_1.png"
] | import igraph as ig
import igraph as ig
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import igraph as ig
import matplotlib.pyplot as plt
g = ig.Graph.Famous('Zachary')
# Compute cliques
cliques = g.cliques(4, 4)
# Plot each clique highlighted in a separate axes
fig, axs = plt.subplots(3, 4, figsize = (20,10))
axs = axs.ravel()
for clique, ax in zip(cliques, axs):
ig.plot(
ig.VertexCover(g, [clique]),
mark_groups=True, palette=ig.RainbowPalette(),
edge_width=0.5,
target=ax,
)
plt.axis('off')
plt.show()
import igraph as ig
import matplotlib.pyplot as plt
g = ig.Graph.Famous('Zachary')
cliques = g.maximal_cliques(min=4)
fig, axs = plt.subplots(1, 4, figsize=(20, 10))
axs = axs.ravel()
for clique, ax in zip(cliques, axs):
ig.plot(ig.VertexCover(g, [clique]), mark_groups=True, palette=ig.RainbowPalette(), edge_width=0.5, target=ax)
plt.axis('off')
plt.show() | code |
106202730/cell_5 | [
"text_plain_output_1.png"
] | import igraph as ig
import igraph as ig
import igraph as ig
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import igraph as ig
import matplotlib.pyplot as plt
g = ig.Graph.Famous('Zachary')
# Compute cliques
cliques = g.cliques(4, 4)
# Plot each clique highlighted in a separate axes
fig, axs = plt.subplots(3, 4, figsize = (20,10))
axs = axs.ravel()
for clique, ax in zip(cliques, axs):
ig.plot(
ig.VertexCover(g, [clique]),
mark_groups=True, palette=ig.RainbowPalette(),
edge_width=0.5,
target=ax,
)
plt.axis('off')
plt.show()
import igraph as ig
import matplotlib.pyplot as plt
g = ig.Graph.Famous('Zachary')
# Compute cliques
cliques = g.maximal_cliques(min = 4)# (4, 4)
# Plot each clique highlighted in a separate axes
fig, axs = plt.subplots(1, 4, figsize = (20,10))
axs = axs.ravel()
for clique, ax in zip(cliques, axs):
ig.plot(
ig.VertexCover(g, [clique]),
mark_groups=True, palette=ig.RainbowPalette(),
edge_width=0.5,
target=ax,
)
plt.axis('off')
plt.show()
()
import igraph as ig
import matplotlib.pyplot as plt
g = ig.Graph.Famous('Zachary')
# Compute cliques
cliques = g.largest_cliques()# (min = 4)# (4, 4)
# Plot each clique highlighted in a separate axes
fig, axs = plt.subplots(1, 2, figsize = (20,10))
axs = axs.ravel()
for clique, ax in zip(cliques, axs):
ig.plot(
ig.VertexCover(g, [clique]),
mark_groups=True, palette=ig.RainbowPalette(),
edge_width=0.5,
target=ax,
)
plt.axis('off')
plt.show()
cliques[0] | code |
50211763/cell_9 | [
"text_plain_output_1.png"
] | sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo,Selamat Datang'
sapa = 'Hallo, Selamat Datang'
print(sapa[1:20]) | code |
50211763/cell_4 | [
"text_plain_output_1.png"
] | sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
print(sapa[0]) | code |
50211763/cell_20 | [
"text_plain_output_1.png"
] | string = 'Qbert'
add_string = '!!!'
a = string.replace('b', '*')
print(a + str(add_string)) | code |
50211763/cell_6 | [
"text_plain_output_1.png"
] | sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
print(sapa[-3:]) | code |
50211763/cell_2 | [
"text_plain_output_1.png"
] | sapa = 'Hallo, Selamat Datang'
print('Jumlah total karakter dalam string :', len(sapa)) | code |
50211763/cell_11 | [
"text_plain_output_1.png"
] | sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo,Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
print(sapa.replace('a', 'e')) | code |
50211763/cell_7 | [
"text_plain_output_1.png"
] | sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
print(sapa[::-1]) | code |
50211763/cell_18 | [
"text_plain_output_1.png"
] | def vowelcheck():
l = input('Enter a word : ')
if 'a' in l:
return 'Your word contains a vowel'
if 'e' in l:
return 'Your word contains a vowel'
if 'i' in l:
return 'Your word contains a vowel'
if 'o' in l:
return 'Your word contains a vowel'
if 'u' in l:
return 'Your word contains a vowel'
else:
return 'your word has only consonants '
print(vowelcheck()) | code |
50211763/cell_8 | [
"text_plain_output_1.png"
] | sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo,Selamat Datang'
print(sapa[7]) | code |
50211763/cell_16 | [
"text_plain_output_1.png"
] | str1 = input('Please Enter your Own String : ')
total = 1
for i in range(len(str1)):
if str1[i] == ' ' or str1 == '\n' or str1 == '\t':
total = total + 1
class py_solution:
def is_valid_parenthese(self, str1):
stack, pchar = ([], {'(': ')', '{': '}', '[': ']'})
for parenthese in str1:
if parenthese in pchar:
stack.append(parenthese)
elif len(stack) == 0 or pchar[stack.pop()] != parenthese:
return False
return len(stack) == 0
print(py_solution().is_valid_parenthese('(){}[]'))
print(py_solution().is_valid_parenthese('()[{)}'))
print(py_solution().is_valid_parenthese('()')) | code |
50211763/cell_3 | [
"text_plain_output_1.png"
] | sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
print(sapa * 10) | code |
50211763/cell_14 | [
"text_plain_output_1.png"
] | str1 = input('Please Enter your Own String : ')
total = 1
for i in range(len(str1)):
if str1[i] == ' ' or str1 == '\n' or str1 == '\t':
total = total + 1
print('Total Number of Words in this String = ', total) | code |
50211763/cell_10 | [
"text_plain_output_1.png"
] | sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo,Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
print(sapa.upper()) | code |
50211763/cell_12 | [
"text_plain_output_1.png"
] | sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo,Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
print(sapa.replace('Hallo, Selamat Datang', ' ')) | code |
50211763/cell_5 | [
"text_plain_output_1.png"
] | sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
sapa = 'Hallo, Selamat Datang'
print(sapa[:3]) | code |
105193863/cell_21 | [
"text_plain_output_1.png"
] | from tensorflow.keras import layers, models, Input
from tensorflow.keras import layers,models
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
from tensorflow.keras.models import Model
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, models
import numpy as np
import matplotlib.pyplot as plt
import os, PIL, pathlib
data_dir = '../input/kafeidou/49-data'
data_dir = pathlib.Path(data_dir)
image_count = len(list(data_dir.glob('*/*.png')))
batch_size = 32
img_height = 224
img_width = 224
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
val_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='validation', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
class_names = train_ds.class_names
plt.figure(figsize=(10, 4)) # 图形的宽为10高为5
for images, labels in train_ds.take(1):
for i in range(10):
ax = plt.subplot(2, 5, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(class_names[labels[i]])
plt.axis("off")
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
normalization_layer = layers.experimental.preprocessing.Rescaling(1.0 / 255)
train_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
val_ds = val_ds.map(lambda x, y: (normalization_layer(x), y))
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
epochs = 50
callbacks = [ModelCheckpoint('vgg16_best.h5', monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=True), EarlyStopping(monitor='val_accuracy', min_delta=0.001, patience=10, verbose=1)]
def plt_history(history):
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(len(loss))
def compile_model(model):
initial_learning_rate = 0.0001
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(initial_learning_rate, decay_steps=30, decay_rate=0.92, staircase=True)
opt = tf.keras.optimizers.Adam(learning_rate=initial_learning_rate)
model.compile(optimizer=opt, loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'])
model = tf.keras.applications.VGG16(weights='imagenet', input_shape=(224, 224, 3), include_top=False)
output = model.output
output = tf.keras.layers.Flatten()(output)
output = tf.keras.layers.Dense(1024, activation='relu', name='dese-1024')(output)
output = tf.keras.layers.Dense(512, activation='relu', name='dese-512')(output)
output = tf.keras.layers.Dense(len(class_names), activation='softmax', name='softmax-result')(output)
model = tf.keras.models.Model(model.input, output, name='VGG16')
model.summary()
compile_model(model)
history = model.fit(train_ds, validation_data=val_ds, epochs=epochs, callbacks=callbacks)
from tensorflow.keras import layers, models, Input
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
def VGG16(nb_classes, input_shape):
input_tensor = Input(shape=input_shape)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(input_tensor)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
x = Flatten()(x)
x = Dense(1024, activation='relu', name='fc1')(x)
x = Dense(512, activation='relu', name='fc2')(x)
output_tensor = Dense(nb_classes, activation='softmax', name='predictions')(x)
model = Model(input_tensor, output_tensor)
return model
model = VGG16(len(class_names), (img_width, img_height, 3))
model.summary() | code |
105193863/cell_9 | [
"image_output_1.png"
] | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, models
import numpy as np
import matplotlib.pyplot as plt
import os, PIL, pathlib
data_dir = '../input/kafeidou/49-data'
data_dir = pathlib.Path(data_dir)
image_count = len(list(data_dir.glob('*/*.png')))
batch_size = 32
img_height = 224
img_width = 224
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
class_names = train_ds.class_names
print(class_names) | code |
105193863/cell_23 | [
"text_plain_output_1.png"
] | from tensorflow.keras import layers, models, Input
from tensorflow.keras import layers,models
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
from tensorflow.keras.models import Model
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, models
import numpy as np
import matplotlib.pyplot as plt
import os, PIL, pathlib
data_dir = '../input/kafeidou/49-data'
data_dir = pathlib.Path(data_dir)
image_count = len(list(data_dir.glob('*/*.png')))
batch_size = 32
img_height = 224
img_width = 224
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
val_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='validation', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
class_names = train_ds.class_names
plt.figure(figsize=(10, 4)) # 图形的宽为10高为5
for images, labels in train_ds.take(1):
for i in range(10):
ax = plt.subplot(2, 5, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(class_names[labels[i]])
plt.axis("off")
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
normalization_layer = layers.experimental.preprocessing.Rescaling(1.0 / 255)
train_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
val_ds = val_ds.map(lambda x, y: (normalization_layer(x), y))
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
epochs = 50
callbacks = [ModelCheckpoint('vgg16_best.h5', monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=True), EarlyStopping(monitor='val_accuracy', min_delta=0.001, patience=10, verbose=1)]
def plt_history(history):
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(len(loss))
def compile_model(model):
initial_learning_rate = 0.0001
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(initial_learning_rate, decay_steps=30, decay_rate=0.92, staircase=True)
opt = tf.keras.optimizers.Adam(learning_rate=initial_learning_rate)
model.compile(optimizer=opt, loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'])
model = tf.keras.applications.VGG16(weights='imagenet', input_shape=(224, 224, 3), include_top=False)
output = model.output
output = tf.keras.layers.Flatten()(output)
output = tf.keras.layers.Dense(1024, activation='relu', name='dese-1024')(output)
output = tf.keras.layers.Dense(512, activation='relu', name='dese-512')(output)
output = tf.keras.layers.Dense(len(class_names), activation='softmax', name='softmax-result')(output)
model = tf.keras.models.Model(model.input, output, name='VGG16')
model.summary()
compile_model(model)
history = model.fit(train_ds, validation_data=val_ds, epochs=epochs, callbacks=callbacks)
from tensorflow.keras import layers, models, Input
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
def VGG16(nb_classes, input_shape):
input_tensor = Input(shape=input_shape)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(input_tensor)
x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
x = Flatten()(x)
x = Dense(1024, activation='relu', name='fc1')(x)
x = Dense(512, activation='relu', name='fc2')(x)
output_tensor = Dense(nb_classes, activation='softmax', name='predictions')(x)
model = Model(input_tensor, output_tensor)
return model
model = VGG16(len(class_names), (img_width, img_height, 3))
model.summary()
compile_model(model)
history = model.fit(train_ds, validation_data=val_ds, epochs=epochs, callbacks=callbacks)
plt_history(history) | code |
105193863/cell_19 | [
"text_plain_output_1.png"
] | from tensorflow.keras import layers,models
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, models
import numpy as np
import matplotlib.pyplot as plt
import os, PIL, pathlib
data_dir = '../input/kafeidou/49-data'
data_dir = pathlib.Path(data_dir)
image_count = len(list(data_dir.glob('*/*.png')))
batch_size = 32
img_height = 224
img_width = 224
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
val_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='validation', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
class_names = train_ds.class_names
plt.figure(figsize=(10, 4)) # 图形的宽为10高为5
for images, labels in train_ds.take(1):
for i in range(10):
ax = plt.subplot(2, 5, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(class_names[labels[i]])
plt.axis("off")
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
normalization_layer = layers.experimental.preprocessing.Rescaling(1.0 / 255)
train_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
val_ds = val_ds.map(lambda x, y: (normalization_layer(x), y))
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
epochs = 50
callbacks = [ModelCheckpoint('vgg16_best.h5', monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=True), EarlyStopping(monitor='val_accuracy', min_delta=0.001, patience=10, verbose=1)]
def plt_history(history):
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(len(loss))
def compile_model(model):
initial_learning_rate = 0.0001
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(initial_learning_rate, decay_steps=30, decay_rate=0.92, staircase=True)
opt = tf.keras.optimizers.Adam(learning_rate=initial_learning_rate)
model.compile(optimizer=opt, loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'])
model = tf.keras.applications.VGG16(weights='imagenet', input_shape=(224, 224, 3), include_top=False)
output = model.output
output = tf.keras.layers.Flatten()(output)
output = tf.keras.layers.Dense(1024, activation='relu', name='dese-1024')(output)
output = tf.keras.layers.Dense(512, activation='relu', name='dese-512')(output)
output = tf.keras.layers.Dense(len(class_names), activation='softmax', name='softmax-result')(output)
model = tf.keras.models.Model(model.input, output, name='VGG16')
model.summary()
compile_model(model)
history = model.fit(train_ds, validation_data=val_ds, epochs=epochs, callbacks=callbacks)
plt_history(history) | code |
105193863/cell_7 | [
"text_plain_output_1.png"
] | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, models
import numpy as np
import matplotlib.pyplot as plt
import os, PIL, pathlib
data_dir = '../input/kafeidou/49-data'
data_dir = pathlib.Path(data_dir)
image_count = len(list(data_dir.glob('*/*.png')))
batch_size = 32
img_height = 224
img_width = 224
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed=123, image_size=(img_height, img_width), batch_size=batch_size) | code |
105193863/cell_18 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_1.png"
] | from tensorflow.keras import layers,models
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, models
import numpy as np
import matplotlib.pyplot as plt
import os, PIL, pathlib
data_dir = '../input/kafeidou/49-data'
data_dir = pathlib.Path(data_dir)
image_count = len(list(data_dir.glob('*/*.png')))
batch_size = 32
img_height = 224
img_width = 224
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
val_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='validation', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
class_names = train_ds.class_names
plt.figure(figsize=(10, 4)) # 图形的宽为10高为5
for images, labels in train_ds.take(1):
for i in range(10):
ax = plt.subplot(2, 5, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(class_names[labels[i]])
plt.axis("off")
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
normalization_layer = layers.experimental.preprocessing.Rescaling(1.0 / 255)
train_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
val_ds = val_ds.map(lambda x, y: (normalization_layer(x), y))
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
epochs = 50
callbacks = [ModelCheckpoint('vgg16_best.h5', monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=True), EarlyStopping(monitor='val_accuracy', min_delta=0.001, patience=10, verbose=1)]
def plt_history(history):
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(len(loss))
def compile_model(model):
initial_learning_rate = 0.0001
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(initial_learning_rate, decay_steps=30, decay_rate=0.92, staircase=True)
opt = tf.keras.optimizers.Adam(learning_rate=initial_learning_rate)
model.compile(optimizer=opt, loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'])
model = tf.keras.applications.VGG16(weights='imagenet', input_shape=(224, 224, 3), include_top=False)
output = model.output
output = tf.keras.layers.Flatten()(output)
output = tf.keras.layers.Dense(1024, activation='relu', name='dese-1024')(output)
output = tf.keras.layers.Dense(512, activation='relu', name='dese-512')(output)
output = tf.keras.layers.Dense(len(class_names), activation='softmax', name='softmax-result')(output)
model = tf.keras.models.Model(model.input, output, name='VGG16')
model.summary()
compile_model(model)
history = model.fit(train_ds, validation_data=val_ds, epochs=epochs, callbacks=callbacks) | code |
105193863/cell_8 | [
"text_plain_output_1.png"
] | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, models
import numpy as np
import matplotlib.pyplot as plt
import os, PIL, pathlib
data_dir = '../input/kafeidou/49-data'
data_dir = pathlib.Path(data_dir)
image_count = len(list(data_dir.glob('*/*.png')))
batch_size = 32
img_height = 224
img_width = 224
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
val_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='validation', seed=123, image_size=(img_height, img_width), batch_size=batch_size) | code |
105193863/cell_16 | [
"text_plain_output_1.png"
] | from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, models
import numpy as np
import matplotlib.pyplot as plt
import os, PIL, pathlib
data_dir = '../input/kafeidou/49-data'
data_dir = pathlib.Path(data_dir)
image_count = len(list(data_dir.glob('*/*.png')))
batch_size = 32
img_height = 224
img_width = 224
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
val_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='validation', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
class_names = train_ds.class_names
plt.figure(figsize=(10, 4)) # 图形的宽为10高为5
for images, labels in train_ds.take(1):
for i in range(10):
ax = plt.subplot(2, 5, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(class_names[labels[i]])
plt.axis("off")
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
epochs = 50
callbacks = [ModelCheckpoint('vgg16_best.h5', monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=True), EarlyStopping(monitor='val_accuracy', min_delta=0.001, patience=10, verbose=1)]
def plt_history(history):
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(len(loss))
def compile_model(model):
initial_learning_rate = 0.0001
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(initial_learning_rate, decay_steps=30, decay_rate=0.92, staircase=True)
opt = tf.keras.optimizers.Adam(learning_rate=initial_learning_rate)
model.compile(optimizer=opt, loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'])
model = tf.keras.applications.VGG16(weights='imagenet', input_shape=(224, 224, 3), include_top=False)
output = model.output
output = tf.keras.layers.Flatten()(output)
output = tf.keras.layers.Dense(1024, activation='relu', name='dese-1024')(output)
output = tf.keras.layers.Dense(512, activation='relu', name='dese-512')(output)
output = tf.keras.layers.Dense(len(class_names), activation='softmax', name='softmax-result')(output)
model = tf.keras.models.Model(model.input, output, name='VGG16')
model.summary() | code |
105193863/cell_3 | [
"application_vnd.jupyter.stderr_output_2.png",
"text_plain_output_3.png",
"text_plain_output_1.png"
] | from IPython.display import clear_output
!pip install -q tensorflow==2.4.1
clear_output()
import tensorflow as tf
gpus = tf.config.list_physical_devices("GPU")
if gpus:
tf.config.experimental.set_memory_growth(gpus[0], True) #设置GPU显存用量按需使用
tf.config.set_visible_devices([gpus[0]],"GPU") | code |
105193863/cell_22 | [
"application_vnd.jupyter.stderr_output_1.png",
"image_output_1.png"
] | from tensorflow.keras import layers, models, Input
from tensorflow.keras import layers,models
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
from tensorflow.keras.models import Model
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, models
import numpy as np
import matplotlib.pyplot as plt
import os, PIL, pathlib
data_dir = '../input/kafeidou/49-data'
data_dir = pathlib.Path(data_dir)
image_count = len(list(data_dir.glob('*/*.png')))
batch_size = 32
img_height = 224
img_width = 224
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
val_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='validation', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
class_names = train_ds.class_names
plt.figure(figsize=(10, 4)) # 图形的宽为10高为5
for images, labels in train_ds.take(1):
for i in range(10):
ax = plt.subplot(2, 5, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(class_names[labels[i]])
plt.axis("off")
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
normalization_layer = layers.experimental.preprocessing.Rescaling(1.0 / 255)
train_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
val_ds = val_ds.map(lambda x, y: (normalization_layer(x), y))
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
epochs = 50
callbacks = [ModelCheckpoint('vgg16_best.h5', monitor='val_accuracy', verbose=1, save_best_only=True, save_weights_only=True), EarlyStopping(monitor='val_accuracy', min_delta=0.001, patience=10, verbose=1)]
def plt_history(history):
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(len(loss))
def compile_model(model):
    """Compile *model* with Adam and sparse categorical cross-entropy.

    The learning rate decays exponentially (x0.92 every 30 steps,
    staircase) from an initial 1e-4.
    """
    initial_learning_rate = 0.0001
    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(initial_learning_rate, decay_steps=30, decay_rate=0.92, staircase=True)
    # Bug fix: the schedule was constructed but never used — Adam was
    # previously given the constant initial rate instead.
    opt = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
    # Bug fix: both models in this file end in a softmax layer, so the loss
    # must treat its inputs as probabilities; from_logits=True would apply
    # log-softmax again to already-normalized outputs.
    model.compile(optimizer=opt, loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False), metrics=['accuracy'])
# Transfer learning: ImageNet-pretrained VGG16 convolutional base
# (include_top=False drops its original 1000-class classifier head).
model = tf.keras.applications.VGG16(weights='imagenet', input_shape=(224, 224, 3), include_top=False)
output = model.output
output = tf.keras.layers.Flatten()(output)
# New fully-connected classifier head for this dataset.
output = tf.keras.layers.Dense(1024, activation='relu', name='dese-1024')(output)
output = tf.keras.layers.Dense(512, activation='relu', name='dese-512')(output)
# NOTE(review): this head emits softmax probabilities — verify that the
# loss's from_logits flag in compile_model() matches that.
output = tf.keras.layers.Dense(len(class_names), activation='softmax', name='softmax-result')(output)
model = tf.keras.models.Model(model.input, output, name='VGG16')
model.summary()
compile_model(model)
# Train with the checkpoint/early-stopping callbacks defined above.
history = model.fit(train_ds, validation_data=val_ds, epochs=epochs, callbacks=callbacks)
from tensorflow.keras import layers, models, Input
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
def VGG16(nb_classes, input_shape):
    """Build a VGG16-style CNN from scratch (no pretrained weights).

    Five conv blocks (64, 128, 256, 512, 512 filters; the first two with
    two conv layers, the rest with three), each followed by 2x2 max
    pooling, then two dense layers and a softmax head of *nb_classes*.
    Layer names match the canonical VGG16 naming scheme.
    """
    inputs = Input(shape=input_shape)
    x = inputs
    # (filters, number of conv layers) for each of the five blocks.
    for block_no, (filters, n_convs) in enumerate([(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)], start=1):
        for conv_no in range(1, n_convs + 1):
            x = Conv2D(filters, (3, 3), activation='relu', padding='same', name=f'block{block_no}_conv{conv_no}')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2), name=f'block{block_no}_pool')(x)
    x = Flatten()(x)
    x = Dense(1024, activation='relu', name='fc1')(x)
    x = Dense(512, activation='relu', name='fc2')(x)
    predictions = Dense(nb_classes, activation='softmax', name='predictions')(x)
    return Model(inputs, predictions)
# Build and train the from-scratch VGG16 on the same data pipeline.
model = VGG16(len(class_names), (img_width, img_height, 3))
model.summary()
compile_model(model)
history = model.fit(train_ds, validation_data=val_ds, epochs=epochs, callbacks=callbacks)
105193863/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, models
import numpy as np
import matplotlib.pyplot as plt
import os, PIL, pathlib
# Load the coffee-bean image dataset (one sub-directory per class) and
# preview a batch: 224x224 images, batches of 32, 80/20 train split.
data_dir = '../input/kafeidou/49-data'
data_dir = pathlib.Path(data_dir)
image_count = len(list(data_dir.glob('*/*.png')))
batch_size = 32
img_height = 224
img_width = 224
"""
关于image_dataset_from_directory()的详细介绍可以参考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir, validation_split=0.2, subset='training', seed=123, image_size=(img_height, img_width), batch_size=batch_size)
# Class labels inferred from sub-directory names.
class_names = train_ds.class_names
# Show the first 10 training images with their labels on a 2x5 grid.
plt.figure(figsize=(10, 4))
for images, labels in train_ds.take(1):
    for i in range(10):
        ax = plt.subplot(2, 5, i + 1)
        plt.imshow(images[i].numpy().astype('uint8'))
        plt.title(class_names[labels[i]])
        plt.axis('off')
105193863/cell_5 | [
"image_output_1.png"
] | from tensorflow import keras
from tensorflow.keras import layers, models
import numpy as np
import matplotlib.pyplot as plt
import os, PIL, pathlib
# Count all PNG images in the dataset (one sub-directory per class).
data_dir = '../input/kafeidou/49-data'
data_dir = pathlib.Path(data_dir)
image_count = len(list(data_dir.glob('*/*.png')))
print('图片总数为:', image_count)  # prints "total number of images"
88075725/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
omicron_data = pd.read_csv('/kaggle/input/omicron-covid19-variant-daily-cases/covid-variants.csv')
omicron_data = pd.DataFrame(omicron_data)
omicron_data.loc[(omicron_data['location'] == 'Thailand') & (omicron_data['variant'] == 'Omicron')] | code |
88075725/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
omicron_data = pd.read_csv('/kaggle/input/omicron-covid19-variant-daily-cases/covid-variants.csv')
omicron_data = pd.DataFrame(omicron_data)
omicron_data.loc[(omicron_data['location'] == 'Thailand') & (omicron_data['variant'] == 'Omicron')]
# Sequence counts and dates restricted to Thailand.
# NOTE(review): these rows are NOT filtered by variant, so the series
# covers every variant recorded for Thailand, not only Omicron — confirm.
omicron_num_seq_Thailand = omicron_data[omicron_data['location'] == 'Thailand']['num_sequences_total']
omicron_date_Thailand = omicron_data[omicron_data['location'] == 'Thailand']['date']
# Wide canvas so the many daily date labels stay readable.
plt.figure(figsize=(50, 10))
plt.xlabel('date')
plt.ylabel('total cases')
plt.title('Omicron Cases in Thailand')
plt.plot(omicron_date_Thailand, omicron_num_seq_Thailand)
plt.show()
88075725/cell_3 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
# Kaggle boilerplate: list every file under the read-only input directory.
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
88075725/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
omicron_data = pd.read_csv('/kaggle/input/omicron-covid19-variant-daily-cases/covid-variants.csv')
omicron_data = pd.DataFrame(omicron_data)
omicron_data.head() | code |
33096616/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from PIL import Image
import matplotlib.pyplot as plt
import os
# Build the path to cover image 00020 in the ALASKA2 dataset and load it.
base = '/kaggle/input/alaska2-image-steganalysis/'
id = '{:05d}'.format(20)  # NOTE(review): shadows the builtin id()
cover_path = os.path.join(base, 'Cover', id + '.jpg')
img = plt.imread(cover_path)
from PIL import Image
def genData(data):
    """Return the 8-bit binary string for each character of *data*."""
    return [format(ord(ch), '08b') for ch in data]
def modPix(pix, data):
    """Yield pixels with *data* embedded in their least-significant bits.

    Each character consumes three RGB pixels (9 channel values): the LSBs
    of the first 8 values carry the character's bits, and the LSB of the
    9th value is a stop marker (odd = last character, even = more follow).

    Bug fixes vs. the original:
    * writing a '1' bit (or the stop marker) into a 0-valued channel now
      increments instead of producing an invalid -1;
    * the stop marker is set correctly — previously an already-odd marker
      channel on the last character was decremented to even (so decode()
      never terminated there), and non-final characters never had their
      marker forced to even.
    """
    # 8-bit binary string per character (inlined genData logic, so this
    # generator is self-contained).
    datalist = [format(ord(ch), '08b') for ch in data]
    lendata = len(datalist)
    imdata = iter(pix)

    def set_lsb(value, want_odd):
        # Force the least-significant bit of a channel value while
        # staying within the valid 0..255 range.
        if want_odd and value % 2 == 0:
            return value + 1 if value == 0 else value - 1
        if not want_odd and value % 2 != 0:
            return value - 1
        return value

    for i in range(lendata):
        # Flatten the RGB triples of the next three pixels (9 values).
        vals = [v for v in imdata.__next__()[:3] + imdata.__next__()[:3] + imdata.__next__()[:3]]
        for j in range(8):
            vals[j] = set_lsb(vals[j], datalist[i][j] == '1')
        # 9th value: stop marker — odd on the final character, even otherwise.
        vals[-1] = set_lsb(vals[-1], i == lendata - 1)
        vals = tuple(vals)
        yield vals[0:3]
        yield vals[3:6]
        yield vals[6:9]
def encode_enc(newimg, data):
    """Write *data* into *newimg* in place, scanning pixels row-major.

    Delegates the LSB embedding to modPix() and returns the same image
    object it was given.
    """
    width = newimg.size[0]
    col, row = 0, 0
    for pixel in modPix(newimg.getdata(), data):
        newimg.putpixel((col, row), pixel)
        col += 1
        if col == width:  # wrap to the start of the next row
            col, row = 0, row + 1
    return newimg
def encode():
    """Prompt for a cover image and a message, embed it, and display the result."""
    img = input('Enter image name(with extension): ')
    image = Image.open(img, 'r')
    data = input('Enter data to be encoded : ')
    if not data:
        raise ValueError('Data is empty')
    # Work on a copy so the original image object is left untouched.
    stego = encode_enc(image.copy(), data)
    plt.imshow(stego)
def decode():
    """Prompt for a stego image path and return the hidden message.

    Reads three pixels (9 channel values) per character: the first 8 LSBs
    are the character's bits, the 9th LSB is the stop marker (odd = done).
    """
    img = input('Enter image name(with extension) :')
    image = Image.open(img, 'r')
    data = ''
    imgdata = iter(image.getdata())
    while True:
        pixels = [value for value in imgdata.__next__()[:3] + imgdata.__next__()[:3] + imgdata.__next__()[:3]]
        bits = ''.join('1' if v % 2 else '0' for v in pixels[:8])
        data += chr(int(bits, 2))
        if pixels[-1] % 2 != 0:  # odd marker: message complete
            return data
def main():
    """Entry point: ask whether to encode or decode and dispatch."""
    choice = int(input(':: Welcome to Steganography ::\n1. Encode\n 2. Decode\n'))
    if choice == 1:
        encode()
    elif choice == 2:
        print('Decoded word- ' + decode())
    else:
        raise Exception('Enter correct input')
if __name__ == '__main__':
main() | code |
33096616/cell_4 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import os
base = '/kaggle/input/alaska2-image-steganalysis/'
id = '{:05d}'.format(20)
cover_path = os.path.join(base, 'Cover', id + '.jpg')
img = plt.imread(cover_path)
cover_path | code |
33096616/cell_2 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import os
base = '/kaggle/input/alaska2-image-steganalysis/'
id = '{:05d}'.format(20)
cover_path = os.path.join(base, 'Cover', id + '.jpg')
img = plt.imread(cover_path)
plt.imshow(img) | code |
90123428/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Load the Titanic competition data.
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
# Quick looks at dtypes and missing-value counts (bare expressions: only
# the last one renders in a notebook cell).
train_data.dtypes
train_data.isnull().sum()
test_data.isnull().sum()
# NOTE(review): fillna with a scalar fills NaNs in EVERY column with the
# Age mean, so the Cabin/Embarked 'Unknown' fills below are no-ops.
train_data.fillna(train_data.Age.mean(), inplace=True)
train_data['Cabin'].fillna('Unknown', inplace=True)
train_data['Embarked'].fillna('Unknown', inplace=True)
test_data.fillna(test_data['Age'].mean(), inplace=True)
# NOTE(review): no NaNs remain after the line above, so this Fare fill
# (and the Cabin fill below) do nothing.
test_data.fillna(test_data['Fare'].mean(), inplace=True)
test_data['Cabin'].fillna('Unknown', inplace=True)
# Survival rates broken down by sex and by passenger class.
train_data.groupby(['Sex'])['Survived'].value_counts(normalize=True)
train_data.groupby(['Pclass'])['Survived'].value_counts(normalize=True)
# Target vector and one-hot encoded feature matrix.
y = train_data['Survived'].values
features = train_data[['Pclass', 'Sex', 'SibSp', 'Parch']]
X = pd.get_dummies(features)
X.head()
90123428/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
train_data.dtypes
train_data.isnull().sum()
test_data.isnull().sum()
train_data.fillna(train_data.Age.mean(), inplace=True)
train_data['Cabin'].fillna('Unknown', inplace=True)
train_data['Embarked'].fillna('Unknown', inplace=True)
test_data.fillna(test_data['Age'].mean(), inplace=True)
test_data.fillna(test_data['Fare'].mean(), inplace=True)
test_data['Cabin'].fillna('Unknown', inplace=True)
test_data['Age'].describe() | code |
90123428/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
test_data.isnull().sum() | code |
90123428/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
test_data.head() | code |
90123428/cell_23 | [
"text_plain_output_1.png"
] | from sklearn import metrics
from sklearn.neighbors import KNeighborsClassifier
import numpy as np # linear algebra
# Sweep k = 1..99 for KNN, recording test accuracy (and its standard
# error) for each k, then report the best.
# NOTE(review): x_train/x_test/y_train/y_test come from an earlier cell
# not shown here.
Ks = 100
mean_acc = np.zeros(Ks - 1)
std_acc = np.zeros(Ks - 1)
for n in range(1, Ks):
    neigh = KNeighborsClassifier(n_neighbors=n).fit(x_train, y_train)
    yhat = neigh.predict(x_test)
    mean_acc[n - 1] = metrics.accuracy_score(y_test, yhat)
    # Standard error of the per-sample correctness indicator at this k.
    std_acc[n - 1] = np.std(yhat == y_test) / np.sqrt(yhat.shape[0])
print('The best accuracy was with', mean_acc.max(), 'with k=', mean_acc.argmax() + 1)
90123428/cell_33 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.tree import DecisionTreeClassifier
# 10-fold grid search over decision-tree hyperparameters.
parameters = {'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random'], 'max_depth': [2 * n for n in range(1, 10)], 'max_features': ['auto', 'sqrt'], 'min_samples_leaf': [1, 2, 4], 'min_samples_split': [2, 5, 10]}
tree = DecisionTreeClassifier()
tree_cv = GridSearchCV(tree, parameters, cv=10)
tree_cv.fit(x_train, y_train)
# NOTE(review): tree_cv's best params/score are fitted but never reported;
# only the logistic-regression search below is printed.
# 10-fold grid search over L2-regularized logistic regression.
parameters = {'C': [0.01, 0.1, 1], 'penalty': ['l2'], 'solver': ['lbfgs', 'newton-cg', 'liblinear', 'sag', 'saga']}
lr = LogisticRegression()
logreg_cv = GridSearchCV(lr, parameters, cv=10)
logreg_cv.fit(x_train, y_train)
print('tuned hpyerparameters :(best parameters) ', logreg_cv.best_params_)
print('accuracy :', logreg_cv.best_score_)
90123428/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('/kaggle/input/titanic/train.csv')
test_data = pd.read_csv('/kaggle/input/titanic/test.csv')
test_data['Age'].describe() | code |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.