Columns of this dump:
path: string (lengths 13–17)
screenshot_names: sequence (lengths 1–873)
code: string (lengths 0–40.4k)
cell_type: string (1 class: "code")
73079642/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
from keras.layers import Input, Conv2D, MaxPool2D, UpSampling2D, Dense, Dropout
from keras.models import Model
from tensorflow.keras import losses

# Encoder: two conv blocks, each halving the spatial resolution (28 -> 14 -> 7).
inputs = Input(shape=(28, 28, 1))
x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
x = MaxPool2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
encoded = MaxPool2D()(x)

# Decoder: mirrors the encoder and upsamples back to 28x28.
x = Conv2D(32, 3, activation='relu', padding='same')(encoded)
x = UpSampling2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
x = UpSampling2D()(x)
decoded = Conv2D(1, 3, activation='sigmoid', padding='same')(x)

autoencoder1 = Model(inputs, decoded)
autoencoder1.compile(optimizer='adam', loss=losses.binary_crossentropy)
autoencoder1.summary()
code
73079642/cell_33
[ "image_output_1.png" ]
from keras.datasets import mnist, cifar10
from keras.layers import Conv2DTranspose, BatchNormalization, add, LeakyReLU
from keras.layers import Input, Conv2D, MaxPool2D, UpSampling2D, Dense, Dropout
from keras.models import Model
from keras.optimizers import Adam
from tensorflow.keras import layers, losses
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

def preprocess(array1, array2, channel):
    """Scales the supplied arrays to [0, 1] by dividing by 255 and reshapes
    them into the appropriate (samples, height, width, channels) format."""
    if channel == 1:
        ar1 = array1.astype('float32').reshape([-1, 28, 28, 1]) / 255.0
        ar2 = array2.astype('float32').reshape([-1, 28, 28, 1]) / 255.0
    else:
        ar1 = array1.astype('float32').reshape([-1, 32, 32, 3]) / 255
        ar2 = array2.astype('float32').reshape([-1, 32, 32, 3]) / 255
    return (ar1, ar2)

def noise(a1, a2, channel):
    """Adds random Gaussian noise to each image in the supplied arrays."""
    if channel == 1:
        noise_factor = 0.2
        noisy_arr1 = a1 + noise_factor * np.random.normal(0.0, 1.0, size=a1.shape)
        noisy_arr2 = a2 + noise_factor * np.random.normal(0.0, 1.0, size=a2.shape)
    else:
        noi = 0.1
        noisy_arr1 = a1 + noi * np.random.normal(0.0, 1.0, size=a1.shape)
        noisy_arr2 = a2 + noi * np.random.normal(0.0, 1.0, size=a2.shape)
    ab1 = np.clip(noisy_arr1, 0, 1)
    ab2 = np.clip(noisy_arr2, 0, 1)
    return (ab1, ab2)

# Visualization for mnist, cifar10, noisy, denoised/prediction data
def display(rows, cols, a, b, check=False):
    """rows: number of rows in the figure
    cols: number of columns in the figure
    a: train images without noise, or noisy images during test prediction
    b: train images with noise, or denoised images during test prediction
    check: default False for 32x32 cifar10; True for the 28x28 mnist dataset
    and any predictions"""
    f = plt.figure(figsize=(2 * cols, 2 * rows * 2))
    for i in range(rows):
        for j in range(cols):
            # adding a subplot to the figure on each iteration
            f.add_subplot(rows * 2, cols, (2 * i * cols) + (j + 1))
            if check:
                plt.imshow(a[i * cols + j].reshape([28, 28]), cmap="Blues")
            else:
                plt.imshow(a[i * cols + j])
            plt.axis("off")
        for j in range(cols):
            f.add_subplot(rows * 2, cols, ((2 * i + 1) * cols) + (j + 1))
            if check:
                plt.imshow(b[i * cols + j].reshape([28, 28]), cmap="Blues")
            else:
                plt.imshow(b[i * cols + j])
            plt.axis("off")
    plt.axis("off")
    # f.suptitle("Sample Training Data", fontsize=18)
    plt.savefig("ss.png")
    plt.show()

(train_data, _), (test_data, _) = mnist.load_data()
channel = 1
train_data, test_data = preprocess(train_data, test_data, channel)
noisy_train_data, noisy_test_data = noise(train_data, test_data, channel)

(cifar_train, _), (cifar_test, _) = cifar10.load_data()
rows = 2
cols = 3
channel = 3
cifar_train, cifar_test = preprocess(cifar_train, cifar_test, channel)
cifar_train_noise, cifar_test_noise = noise(cifar_train, cifar_test, channel)

inputs = Input(shape=(28, 28, 1))
x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
x = MaxPool2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
encoded = MaxPool2D()(x)
x = Conv2D(32, 3, activation='relu', padding='same')(encoded)
x = UpSampling2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
x = UpSampling2D()(x)
decoded = Conv2D(1, 3, activation='sigmoid', padding='same')(x)

class Denoise(Model):
    """__init__ is the constructor in OOP: it is called when an object is
    created from the class and lets the class initialize its attributes.
    The super() function gives access to methods and properties of a parent
    or sibling class."""

    def __init__(self):
        super(Denoise, self).__init__()
        self.encoder = tf.keras.Sequential([
            layers.Input(shape=(28, 28, 1)),
            layers.Conv2D(16, (3, 3), activation='relu', padding='same', strides=2),
            layers.Conv2D(8, (3, 3), activation='relu', padding='same', strides=2)])
        self.decoder = tf.keras.Sequential([
            layers.Conv2DTranspose(8, kernel_size=3, strides=2, activation='relu', padding='same'),
            layers.Conv2DTranspose(16, kernel_size=3, strides=2, activation='relu', padding='same'),
            layers.Conv2D(1, kernel_size=(3, 3), activation='sigmoid', padding='same')])

    def call(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded

autoencoder = Denoise()

# CIFAR-10 denoiser: conv encoder/decoder with batch norm and a residual (skip) connection.
size = 32
channel = 3
inputs = Input(shape=(size, size, channel))
x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
x = BatchNormalization()(x)
x = MaxPool2D()(x)
x = Dropout(0.5)(x)
skip = Conv2D(32, 3, padding='same')(x)
x = LeakyReLU()(skip)
x = BatchNormalization()(x)
x = MaxPool2D()(x)
x = Dropout(0.5)(x)
x = Conv2D(64, 3, activation='relu', padding='same')(x)
x = BatchNormalization()(x)
encoded = MaxPool2D()(x)
x = Conv2DTranspose(64, 3, activation='relu', strides=(2, 2), padding='same')(encoded)
x = BatchNormalization()(x)
x = Dropout(0.5)(x)
x = Conv2DTranspose(32, 3, activation='relu', strides=(2, 2), padding='same')(x)
x = BatchNormalization()(x)
x = Dropout(0.5)(x)
x = Conv2DTranspose(32, 3, padding='same')(x)
x = add([x, skip])
x = LeakyReLU()(x)
x = BatchNormalization()(x)
decoded = Conv2DTranspose(3, 3, activation='sigmoid', strides=(2, 2), padding='same')(x)
autoencoder2 = Model(inputs, decoded)
autoencoder2.compile(optimizer=Adam(learning_rate=0.0001), loss='binary_crossentropy')
autoencoder2.summary()

epochs = 25
batch_size = 256
history2 = autoencoder2.fit(cifar_train_noise, cifar_train, epochs=epochs, batch_size=batch_size, shuffle=True, validation_data=(cifar_test_noise, cifar_test))
code
73079642/cell_6
[ "image_output_1.png" ]
from keras.datasets import mnist, cifar10
import matplotlib.pyplot as plt
import numpy as np

def preprocess(array1, array2, channel):
    """Scales the supplied arrays to [0, 1] by dividing by 255 and reshapes
    them into the appropriate (samples, height, width, channels) format."""
    if channel == 1:
        ar1 = array1.astype('float32').reshape([-1, 28, 28, 1]) / 255.0
        ar2 = array2.astype('float32').reshape([-1, 28, 28, 1]) / 255.0
    else:
        ar1 = array1.astype('float32').reshape([-1, 32, 32, 3]) / 255
        ar2 = array2.astype('float32').reshape([-1, 32, 32, 3]) / 255
    return (ar1, ar2)

def noise(a1, a2, channel):
    """Adds random Gaussian noise to each image in the supplied arrays."""
    if channel == 1:
        noise_factor = 0.2
        noisy_arr1 = a1 + noise_factor * np.random.normal(0.0, 1.0, size=a1.shape)
        noisy_arr2 = a2 + noise_factor * np.random.normal(0.0, 1.0, size=a2.shape)
    else:
        noi = 0.1
        noisy_arr1 = a1 + noi * np.random.normal(0.0, 1.0, size=a1.shape)
        noisy_arr2 = a2 + noi * np.random.normal(0.0, 1.0, size=a2.shape)
    ab1 = np.clip(noisy_arr1, 0, 1)
    ab2 = np.clip(noisy_arr2, 0, 1)
    return (ab1, ab2)

# Visualization for mnist, cifar10, noisy, denoised/prediction data
def display(rows, cols, a, b, check=False):
    """rows: number of rows in the figure
    cols: number of columns in the figure
    a: train images without noise, or noisy images during test prediction
    b: train images with noise, or denoised images during test prediction
    check: default False for 32x32 cifar10; True for the 28x28 mnist dataset
    and any predictions"""
    f = plt.figure(figsize=(2 * cols, 2 * rows * 2))
    for i in range(rows):
        for j in range(cols):
            # adding a subplot to the figure on each iteration
            f.add_subplot(rows * 2, cols, (2 * i * cols) + (j + 1))
            if check:
                plt.imshow(a[i * cols + j].reshape([28, 28]), cmap="Blues")
            else:
                plt.imshow(a[i * cols + j])
            plt.axis("off")
        for j in range(cols):
            f.add_subplot(rows * 2, cols, ((2 * i + 1) * cols) + (j + 1))
            if check:
                plt.imshow(b[i * cols + j].reshape([28, 28]), cmap="Blues")
            else:
                plt.imshow(b[i * cols + j])
            plt.axis("off")
    plt.axis("off")
    # f.suptitle("Sample Training Data", fontsize=18)
    plt.savefig("ss.png")
    plt.show()

(train_data, _), (test_data, _) = mnist.load_data()
channel = 1
train_data, test_data = preprocess(train_data, test_data, channel)
noisy_train_data, noisy_test_data = noise(train_data, test_data, channel)
display(2, 3, train_data, noisy_train_data, check=True)
code
73079642/cell_29
[ "text_plain_output_1.png" ]
from keras.layers import Input, Conv2D, MaxPool2D, UpSampling2D, Dense, Dropout
from keras.models import Model
from keras.models import load_model
from tensorflow.keras import layers, losses
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

def noise(a1, a2, channel):
    """Adds random Gaussian noise to each image in the supplied arrays."""
    if channel == 1:
        noise_factor = 0.2
        noisy_arr1 = a1 + noise_factor * np.random.normal(0.0, 1.0, size=a1.shape)
        noisy_arr2 = a2 + noise_factor * np.random.normal(0.0, 1.0, size=a2.shape)
    else:
        noi = 0.1
        noisy_arr1 = a1 + noi * np.random.normal(0.0, 1.0, size=a1.shape)
        noisy_arr2 = a2 + noi * np.random.normal(0.0, 1.0, size=a2.shape)
    ab1 = np.clip(noisy_arr1, 0, 1)
    ab2 = np.clip(noisy_arr2, 0, 1)
    return (ab1, ab2)

# Visualization for mnist, cifar10, noisy, denoised/prediction data
def display(rows, cols, a, b, check=False):
    """rows: number of rows in the figure
    cols: number of columns in the figure
    a: train images without noise, or noisy images during test prediction
    b: train images with noise, or denoised images during test prediction
    check: default False for 32x32 cifar10; True for the 28x28 mnist dataset
    and any predictions"""
    f = plt.figure(figsize=(2 * cols, 2 * rows * 2))
    for i in range(rows):
        for j in range(cols):
            # adding a subplot to the figure on each iteration
            f.add_subplot(rows * 2, cols, (2 * i * cols) + (j + 1))
            if check:
                plt.imshow(a[i * cols + j].reshape([28, 28]), cmap="Blues")
            else:
                plt.imshow(a[i * cols + j])
            plt.axis("off")
        for j in range(cols):
            f.add_subplot(rows * 2, cols, ((2 * i + 1) * cols) + (j + 1))
            if check:
                plt.imshow(b[i * cols + j].reshape([28, 28]), cmap="Blues")
            else:
                plt.imshow(b[i * cols + j])
            plt.axis("off")
    plt.axis("off")
    # f.suptitle("Sample Training Data", fontsize=18)
    plt.savefig("ss.png")
    plt.show()

inputs = Input(shape=(28, 28, 1))
x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
x = MaxPool2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
encoded = MaxPool2D()(x)
x = Conv2D(32, 3, activation='relu', padding='same')(encoded)
x = UpSampling2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
x = UpSampling2D()(x)
decoded = Conv2D(1, 3, activation='sigmoid', padding='same')(x)

autoencoder1 = Model(inputs, decoded)
autoencoder1.compile(optimizer='adam', loss=losses.binary_crossentropy)
autoencoder1.summary()
history1 = autoencoder1.fit(noisy_train_data, train_data, epochs=50, batch_size=256, shuffle=True, validation_data=(noisy_test_data, test_data))

autoencoder1.save('autoencoder_model1.h5')
model1 = load_model('autoencoder_model1.h5')
num_imgs = 45
rand = np.random.randint(1, 100)
test_images = noisy_test_data[rand:rand + num_imgs]
test_denoised = model1.predict(test_images)

class Denoise(Model):
    """__init__ is the constructor in OOP: it is called when an object is
    created from the class and lets the class initialize its attributes.
    The super() function gives access to methods and properties of a parent
    or sibling class."""

    def __init__(self):
        super(Denoise, self).__init__()
        self.encoder = tf.keras.Sequential([
            layers.Input(shape=(28, 28, 1)),
            layers.Conv2D(16, (3, 3), activation='relu', padding='same', strides=2),
            layers.Conv2D(8, (3, 3), activation='relu', padding='same', strides=2)])
        self.decoder = tf.keras.Sequential([
            layers.Conv2DTranspose(8, kernel_size=3, strides=2, activation='relu', padding='same'),
            layers.Conv2DTranspose(16, kernel_size=3, strides=2, activation='relu', padding='same'),
            layers.Conv2D(1, kernel_size=(3, 3), activation='sigmoid', padding='same')])

    def call(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded

autoencoder = Denoise()
autoencoder.compile(optimizer='rmsprop', loss=losses.MeanSquaredError())
history = autoencoder.fit(noisy_train_data, train_data, epochs=10, batch_size=256, shuffle=True, validation_data=(noisy_test_data, test_data))

num_imgs = 45
rand = np.random.randint(1, 100)
test_images = noisy_test_data[rand:rand + num_imgs]
test_denoised = autoencoder.predict(test_images)
display(2, 4, test_images, test_denoised, check=True)
code
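A note on the save/load step in the cell above: saving autoencoder1 to a single HDF5 file works because it is a Functional model, but the subclassed Denoise model trained in the same cell cannot be serialized that way (tf.keras whole-model HDF5 saving requires a Functional or Sequential model). A minimal sketch of the usual workaround, saving weights only (file name hypothetical, not from the source notebook):

# Sketch: persist the subclassed model via its weights only.
autoencoder.save_weights('denoise_weights.h5')

restored = Denoise()
restored(tf.zeros((1, 28, 28, 1)))  # call once so the layer variables are created
restored.load_weights('denoise_weights.h5')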
73079642/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Input, Conv2D, MaxPool2D, UpSampling2D, Dense, Dropout
import tensorflow as tf
from keras.models import Model
from keras.datasets import mnist, cifar10
code
73079642/cell_7
[ "image_output_1.png" ]
from keras.datasets import mnist, cifar10
import matplotlib.pyplot as plt
import numpy as np

def preprocess(array1, array2, channel):
    """Scales the supplied arrays to [0, 1] by dividing by 255 and reshapes
    them into the appropriate (samples, height, width, channels) format."""
    if channel == 1:
        ar1 = array1.astype('float32').reshape([-1, 28, 28, 1]) / 255.0
        ar2 = array2.astype('float32').reshape([-1, 28, 28, 1]) / 255.0
    else:
        ar1 = array1.astype('float32').reshape([-1, 32, 32, 3]) / 255
        ar2 = array2.astype('float32').reshape([-1, 32, 32, 3]) / 255
    return (ar1, ar2)

def noise(a1, a2, channel):
    """Adds random Gaussian noise to each image in the supplied arrays."""
    if channel == 1:
        noise_factor = 0.2
        noisy_arr1 = a1 + noise_factor * np.random.normal(0.0, 1.0, size=a1.shape)
        noisy_arr2 = a2 + noise_factor * np.random.normal(0.0, 1.0, size=a2.shape)
    else:
        noi = 0.1
        noisy_arr1 = a1 + noi * np.random.normal(0.0, 1.0, size=a1.shape)
        noisy_arr2 = a2 + noi * np.random.normal(0.0, 1.0, size=a2.shape)
    ab1 = np.clip(noisy_arr1, 0, 1)
    ab2 = np.clip(noisy_arr2, 0, 1)
    return (ab1, ab2)

# Visualization for mnist, cifar10, noisy, denoised/prediction data
def display(rows, cols, a, b, check=False):
    """rows: number of rows in the figure
    cols: number of columns in the figure
    a: train images without noise, or noisy images during test prediction
    b: train images with noise, or denoised images during test prediction
    check: default False for 32x32 cifar10; True for the 28x28 mnist dataset
    and any predictions"""
    f = plt.figure(figsize=(2 * cols, 2 * rows * 2))
    for i in range(rows):
        for j in range(cols):
            # adding a subplot to the figure on each iteration
            f.add_subplot(rows * 2, cols, (2 * i * cols) + (j + 1))
            if check:
                plt.imshow(a[i * cols + j].reshape([28, 28]), cmap="Blues")
            else:
                plt.imshow(a[i * cols + j])
            plt.axis("off")
        for j in range(cols):
            f.add_subplot(rows * 2, cols, ((2 * i + 1) * cols) + (j + 1))
            if check:
                plt.imshow(b[i * cols + j].reshape([28, 28]), cmap="Blues")
            else:
                plt.imshow(b[i * cols + j])
            plt.axis("off")
    plt.axis("off")
    # f.suptitle("Sample Training Data", fontsize=18)
    plt.savefig("ss.png")
    plt.show()

(train_data, _), (test_data, _) = mnist.load_data()
channel = 1
train_data, test_data = preprocess(train_data, test_data, channel)
noisy_train_data, noisy_test_data = noise(train_data, test_data, channel)

(cifar_train, _), (cifar_test, _) = cifar10.load_data()
rows = 2
cols = 3
channel = 3
cifar_train, cifar_test = preprocess(cifar_train, cifar_test, channel)
cifar_train_noise, cifar_test_noise = noise(cifar_train, cifar_test, channel)
display(rows, cols, cifar_train, cifar_train_noise)
code
73079642/cell_32
[ "image_output_1.png" ]
from keras.datasets import mnist, cifar10
from keras.layers import Conv2DTranspose, BatchNormalization, add, LeakyReLU
from keras.layers import Input, Conv2D, MaxPool2D, UpSampling2D, Dense, Dropout
from keras.models import Model
from keras.optimizers import Adam
from tensorflow.keras import layers, losses
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

def preprocess(array1, array2, channel):
    """Scales the supplied arrays to [0, 1] by dividing by 255 and reshapes
    them into the appropriate (samples, height, width, channels) format."""
    if channel == 1:
        ar1 = array1.astype('float32').reshape([-1, 28, 28, 1]) / 255.0
        ar2 = array2.astype('float32').reshape([-1, 28, 28, 1]) / 255.0
    else:
        ar1 = array1.astype('float32').reshape([-1, 32, 32, 3]) / 255
        ar2 = array2.astype('float32').reshape([-1, 32, 32, 3]) / 255
    return (ar1, ar2)

def noise(a1, a2, channel):
    """Adds random Gaussian noise to each image in the supplied arrays."""
    if channel == 1:
        noise_factor = 0.2
        noisy_arr1 = a1 + noise_factor * np.random.normal(0.0, 1.0, size=a1.shape)
        noisy_arr2 = a2 + noise_factor * np.random.normal(0.0, 1.0, size=a2.shape)
    else:
        noi = 0.1
        noisy_arr1 = a1 + noi * np.random.normal(0.0, 1.0, size=a1.shape)
        noisy_arr2 = a2 + noi * np.random.normal(0.0, 1.0, size=a2.shape)
    ab1 = np.clip(noisy_arr1, 0, 1)
    ab2 = np.clip(noisy_arr2, 0, 1)
    return (ab1, ab2)

# Visualization for mnist, cifar10, noisy, denoised/prediction data
def display(rows, cols, a, b, check=False):
    """rows: number of rows in the figure
    cols: number of columns in the figure
    a: train images without noise, or noisy images during test prediction
    b: train images with noise, or denoised images during test prediction
    check: default False for 32x32 cifar10; True for the 28x28 mnist dataset
    and any predictions"""
    f = plt.figure(figsize=(2 * cols, 2 * rows * 2))
    for i in range(rows):
        for j in range(cols):
            # adding a subplot to the figure on each iteration
            f.add_subplot(rows * 2, cols, (2 * i * cols) + (j + 1))
            if check:
                plt.imshow(a[i * cols + j].reshape([28, 28]), cmap="Blues")
            else:
                plt.imshow(a[i * cols + j])
            plt.axis("off")
        for j in range(cols):
            f.add_subplot(rows * 2, cols, ((2 * i + 1) * cols) + (j + 1))
            if check:
                plt.imshow(b[i * cols + j].reshape([28, 28]), cmap="Blues")
            else:
                plt.imshow(b[i * cols + j])
            plt.axis("off")
    plt.axis("off")
    # f.suptitle("Sample Training Data", fontsize=18)
    plt.savefig("ss.png")
    plt.show()

(train_data, _), (test_data, _) = mnist.load_data()
channel = 1
train_data, test_data = preprocess(train_data, test_data, channel)
noisy_train_data, noisy_test_data = noise(train_data, test_data, channel)

(cifar_train, _), (cifar_test, _) = cifar10.load_data()
rows = 2
cols = 3
channel = 3
cifar_train, cifar_test = preprocess(cifar_train, cifar_test, channel)
cifar_train_noise, cifar_test_noise = noise(cifar_train, cifar_test, channel)

inputs = Input(shape=(28, 28, 1))
x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
x = MaxPool2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
encoded = MaxPool2D()(x)
x = Conv2D(32, 3, activation='relu', padding='same')(encoded)
x = UpSampling2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
x = UpSampling2D()(x)
decoded = Conv2D(1, 3, activation='sigmoid', padding='same')(x)

class Denoise(Model):
    """__init__ is the constructor in OOP: it is called when an object is
    created from the class and lets the class initialize its attributes.
    The super() function gives access to methods and properties of a parent
    or sibling class."""

    def __init__(self):
        super(Denoise, self).__init__()
        self.encoder = tf.keras.Sequential([
            layers.Input(shape=(28, 28, 1)),
            layers.Conv2D(16, (3, 3), activation='relu', padding='same', strides=2),
            layers.Conv2D(8, (3, 3), activation='relu', padding='same', strides=2)])
        self.decoder = tf.keras.Sequential([
            layers.Conv2DTranspose(8, kernel_size=3, strides=2, activation='relu', padding='same'),
            layers.Conv2DTranspose(16, kernel_size=3, strides=2, activation='relu', padding='same'),
            layers.Conv2D(1, kernel_size=(3, 3), activation='sigmoid', padding='same')])

    def call(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded

autoencoder = Denoise()

# CIFAR-10 denoiser: conv encoder/decoder with batch norm and a residual (skip) connection.
size = 32
channel = 3
inputs = Input(shape=(size, size, channel))
x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
x = BatchNormalization()(x)
x = MaxPool2D()(x)
x = Dropout(0.5)(x)
skip = Conv2D(32, 3, padding='same')(x)
x = LeakyReLU()(skip)
x = BatchNormalization()(x)
x = MaxPool2D()(x)
x = Dropout(0.5)(x)
x = Conv2D(64, 3, activation='relu', padding='same')(x)
x = BatchNormalization()(x)
encoded = MaxPool2D()(x)
x = Conv2DTranspose(64, 3, activation='relu', strides=(2, 2), padding='same')(encoded)
x = BatchNormalization()(x)
x = Dropout(0.5)(x)
x = Conv2DTranspose(32, 3, activation='relu', strides=(2, 2), padding='same')(x)
x = BatchNormalization()(x)
x = Dropout(0.5)(x)
x = Conv2DTranspose(32, 3, padding='same')(x)
x = add([x, skip])
x = LeakyReLU()(x)
x = BatchNormalization()(x)
decoded = Conv2DTranspose(3, 3, activation='sigmoid', strides=(2, 2), padding='same')(x)
autoencoder2 = Model(inputs, decoded)
autoencoder2.compile(optimizer=Adam(learning_rate=0.0001), loss='binary_crossentropy')
autoencoder2.summary()
code
73079642/cell_28
[ "image_output_1.png" ]
from keras.layers import Input, Conv2D, MaxPool2D, UpSampling2D, Dense, Dropout
from keras.models import Model
from tensorflow.keras import layers, losses
import matplotlib.pyplot as plt
import tensorflow as tf

# Visualization for mnist, cifar10, noisy, denoised/prediction data
def display(rows, cols, a, b, check=False):
    """rows: number of rows in the figure
    cols: number of columns in the figure
    a: train images without noise, or noisy images during test prediction
    b: train images with noise, or denoised images during test prediction
    check: default False for 32x32 cifar10; True for the 28x28 mnist dataset
    and any predictions"""
    f = plt.figure(figsize=(2 * cols, 2 * rows * 2))
    for i in range(rows):
        for j in range(cols):
            # adding a subplot to the figure on each iteration
            f.add_subplot(rows * 2, cols, (2 * i * cols) + (j + 1))
            if check:
                plt.imshow(a[i * cols + j].reshape([28, 28]), cmap="Blues")
            else:
                plt.imshow(a[i * cols + j])
            plt.axis("off")
        for j in range(cols):
            f.add_subplot(rows * 2, cols, ((2 * i + 1) * cols) + (j + 1))
            if check:
                plt.imshow(b[i * cols + j].reshape([28, 28]), cmap="Blues")
            else:
                plt.imshow(b[i * cols + j])
            plt.axis("off")
    plt.axis("off")
    # f.suptitle("Sample Training Data", fontsize=18)
    plt.savefig("ss.png")
    plt.show()

inputs = Input(shape=(28, 28, 1))
x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
x = MaxPool2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
encoded = MaxPool2D()(x)
x = Conv2D(32, 3, activation='relu', padding='same')(encoded)
x = UpSampling2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
x = UpSampling2D()(x)
decoded = Conv2D(1, 3, activation='sigmoid', padding='same')(x)

autoencoder1 = Model(inputs, decoded)
autoencoder1.compile(optimizer='adam', loss=losses.binary_crossentropy)
autoencoder1.summary()
history1 = autoencoder1.fit(noisy_train_data, train_data, epochs=50, batch_size=256, shuffle=True, validation_data=(noisy_test_data, test_data))

def plot_diag(history):
    f = plt.figure(figsize=(10, 7))
    f.add_subplot()  # adding a subplot
    plt.plot(history.epoch, history.history['loss'], label="loss")  # loss curve for the training set
    plt.plot(history.epoch, history.history['val_loss'], label="val_loss")  # loss curve for the validation set
    plt.title("Loss Curve", fontsize=18)
    plt.xlabel("Epochs", fontsize=15)
    plt.ylabel("Loss", fontsize=15)
    plt.grid(alpha=0.3)
    plt.legend()
    plt.savefig("Loss_curve.png")
    plt.show()

plot_diag(history1)

class Denoise(Model):
    """__init__ is the constructor in OOP: it is called when an object is
    created from the class and lets the class initialize its attributes.
    The super() function gives access to methods and properties of a parent
    or sibling class."""

    def __init__(self):
        super(Denoise, self).__init__()
        self.encoder = tf.keras.Sequential([
            layers.Input(shape=(28, 28, 1)),
            layers.Conv2D(16, (3, 3), activation='relu', padding='same', strides=2),
            layers.Conv2D(8, (3, 3), activation='relu', padding='same', strides=2)])
        self.decoder = tf.keras.Sequential([
            layers.Conv2DTranspose(8, kernel_size=3, strides=2, activation='relu', padding='same'),
            layers.Conv2DTranspose(16, kernel_size=3, strides=2, activation='relu', padding='same'),
            layers.Conv2D(1, kernel_size=(3, 3), activation='sigmoid', padding='same')])

    def call(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded

autoencoder = Denoise()
autoencoder.compile(optimizer='rmsprop', loss=losses.MeanSquaredError())
history = autoencoder.fit(noisy_train_data, train_data, epochs=10, batch_size=256, shuffle=True, validation_data=(noisy_test_data, test_data))
plot_diag(history)
code
73079642/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
from keras.layers import Input, Conv2D, MaxPool2D, UpSampling2D, Dense, Dropout
from keras.models import Model
from tensorflow.keras import losses

inputs = Input(shape=(28, 28, 1))
x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
x = MaxPool2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
encoded = MaxPool2D()(x)
x = Conv2D(32, 3, activation='relu', padding='same')(encoded)
x = UpSampling2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
x = UpSampling2D()(x)
decoded = Conv2D(1, 3, activation='sigmoid', padding='same')(x)

autoencoder1 = Model(inputs, decoded)
autoencoder1.compile(optimizer='adam', loss=losses.binary_crossentropy)
autoencoder1.summary()
history1 = autoencoder1.fit(noisy_train_data, train_data, epochs=50, batch_size=256, shuffle=True, validation_data=(noisy_test_data, test_data))
code
73079642/cell_17
[ "text_plain_output_1.png" ]
from keras.layers import Input, Conv2D, MaxPool2D, UpSampling2D, Dense, Dropout
from keras.models import Model
from tensorflow.keras import losses
import matplotlib.pyplot as plt

# Visualization for mnist, cifar10, noisy, denoised/prediction data
def display(rows, cols, a, b, check=False):
    """rows: number of rows in the figure
    cols: number of columns in the figure
    a: train images without noise, or noisy images during test prediction
    b: train images with noise, or denoised images during test prediction
    check: default False for 32x32 cifar10; True for the 28x28 mnist dataset
    and any predictions"""
    f = plt.figure(figsize=(2 * cols, 2 * rows * 2))
    for i in range(rows):
        for j in range(cols):
            # adding a subplot to the figure on each iteration
            f.add_subplot(rows * 2, cols, (2 * i * cols) + (j + 1))
            if check:
                plt.imshow(a[i * cols + j].reshape([28, 28]), cmap="Blues")
            else:
                plt.imshow(a[i * cols + j])
            plt.axis("off")
        for j in range(cols):
            f.add_subplot(rows * 2, cols, ((2 * i + 1) * cols) + (j + 1))
            if check:
                plt.imshow(b[i * cols + j].reshape([28, 28]), cmap="Blues")
            else:
                plt.imshow(b[i * cols + j])
            plt.axis("off")
    plt.axis("off")
    # f.suptitle("Sample Training Data", fontsize=18)
    plt.savefig("ss.png")
    plt.show()

inputs = Input(shape=(28, 28, 1))
x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
x = MaxPool2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
encoded = MaxPool2D()(x)
x = Conv2D(32, 3, activation='relu', padding='same')(encoded)
x = UpSampling2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
x = UpSampling2D()(x)
decoded = Conv2D(1, 3, activation='sigmoid', padding='same')(x)

autoencoder1 = Model(inputs, decoded)
autoencoder1.compile(optimizer='adam', loss=losses.binary_crossentropy)
autoencoder1.summary()
history1 = autoencoder1.fit(noisy_train_data, train_data, epochs=50, batch_size=256, shuffle=True, validation_data=(noisy_test_data, test_data))

def plot_diag(history):
    f = plt.figure(figsize=(10, 7))
    f.add_subplot()
    plt.plot(history.epoch, history.history['loss'], label='loss')
    plt.plot(history.epoch, history.history['val_loss'], label='val_loss')
    plt.title('Loss Curve', fontsize=18)
    plt.xlabel('Epochs', fontsize=15)
    plt.ylabel('Loss', fontsize=15)
    plt.grid(alpha=0.3)
    plt.legend()
    plt.savefig('Loss_curve.png')
    plt.show()

plot_diag(history1)
code
73079642/cell_35
[ "text_plain_output_1.png" ]
from keras.datasets import mnist, cifar10
from keras.layers import Conv2DTranspose, BatchNormalization, add, LeakyReLU
from keras.layers import Input, Conv2D, MaxPool2D, UpSampling2D, Dense, Dropout
from keras.models import Model
from keras.optimizers import Adam
from tensorflow.keras import layers, losses
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

def preprocess(array1, array2, channel):
    """Scales the supplied arrays to [0, 1] by dividing by 255 and reshapes
    them into the appropriate (samples, height, width, channels) format."""
    if channel == 1:
        ar1 = array1.astype('float32').reshape([-1, 28, 28, 1]) / 255.0
        ar2 = array2.astype('float32').reshape([-1, 28, 28, 1]) / 255.0
    else:
        ar1 = array1.astype('float32').reshape([-1, 32, 32, 3]) / 255
        ar2 = array2.astype('float32').reshape([-1, 32, 32, 3]) / 255
    return (ar1, ar2)

def noise(a1, a2, channel):
    """Adds random Gaussian noise to each image in the supplied arrays."""
    if channel == 1:
        noise_factor = 0.2
        noisy_arr1 = a1 + noise_factor * np.random.normal(0.0, 1.0, size=a1.shape)
        noisy_arr2 = a2 + noise_factor * np.random.normal(0.0, 1.0, size=a2.shape)
    else:
        noi = 0.1
        noisy_arr1 = a1 + noi * np.random.normal(0.0, 1.0, size=a1.shape)
        noisy_arr2 = a2 + noi * np.random.normal(0.0, 1.0, size=a2.shape)
    ab1 = np.clip(noisy_arr1, 0, 1)
    ab2 = np.clip(noisy_arr2, 0, 1)
    return (ab1, ab2)

# Visualization for mnist, cifar10, noisy, denoised/prediction data
def display(rows, cols, a, b, check=False):
    """rows: number of rows in the figure
    cols: number of columns in the figure
    a: train images without noise, or noisy images during test prediction
    b: train images with noise, or denoised images during test prediction
    check: default False for 32x32 cifar10; True for the 28x28 mnist dataset
    and any predictions"""
    f = plt.figure(figsize=(2 * cols, 2 * rows * 2))
    for i in range(rows):
        for j in range(cols):
            # adding a subplot to the figure on each iteration
            f.add_subplot(rows * 2, cols, (2 * i * cols) + (j + 1))
            if check:
                plt.imshow(a[i * cols + j].reshape([28, 28]), cmap="Blues")
            else:
                plt.imshow(a[i * cols + j])
            plt.axis("off")
        for j in range(cols):
            f.add_subplot(rows * 2, cols, ((2 * i + 1) * cols) + (j + 1))
            if check:
                plt.imshow(b[i * cols + j].reshape([28, 28]), cmap="Blues")
            else:
                plt.imshow(b[i * cols + j])
            plt.axis("off")
    plt.axis("off")
    # f.suptitle("Sample Training Data", fontsize=18)
    plt.savefig("ss.png")
    plt.show()

(train_data, _), (test_data, _) = mnist.load_data()
channel = 1
train_data, test_data = preprocess(train_data, test_data, channel)
noisy_train_data, noisy_test_data = noise(train_data, test_data, channel)

(cifar_train, _), (cifar_test, _) = cifar10.load_data()
rows = 2
cols = 3
channel = 3
cifar_train, cifar_test = preprocess(cifar_train, cifar_test, channel)
cifar_train_noise, cifar_test_noise = noise(cifar_train, cifar_test, channel)

inputs = Input(shape=(28, 28, 1))
x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
x = MaxPool2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
encoded = MaxPool2D()(x)
x = Conv2D(32, 3, activation='relu', padding='same')(encoded)
x = UpSampling2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
x = UpSampling2D()(x)
decoded = Conv2D(1, 3, activation='sigmoid', padding='same')(x)

autoencoder1 = Model(inputs, decoded)
autoencoder1.compile(optimizer='adam', loss=losses.binary_crossentropy)
autoencoder1.summary()
history1 = autoencoder1.fit(noisy_train_data, train_data, epochs=50, batch_size=256, shuffle=True, validation_data=(noisy_test_data, test_data))

def plot_diag(history):
    f = plt.figure(figsize=(10, 7))
    f.add_subplot()  # adding a subplot
    plt.plot(history.epoch, history.history['loss'], label="loss")  # loss curve for the training set
    plt.plot(history.epoch, history.history['val_loss'], label="val_loss")  # loss curve for the validation set
    plt.title("Loss Curve", fontsize=18)
    plt.xlabel("Epochs", fontsize=15)
    plt.ylabel("Loss", fontsize=15)
    plt.grid(alpha=0.3)
    plt.legend()
    plt.savefig("Loss_curve.png")
    plt.show()

plot_diag(history1)

class Denoise(Model):
    """__init__ is the constructor in OOP: it is called when an object is
    created from the class and lets the class initialize its attributes.
    The super() function gives access to methods and properties of a parent
    or sibling class."""

    def __init__(self):
        super(Denoise, self).__init__()
        self.encoder = tf.keras.Sequential([
            layers.Input(shape=(28, 28, 1)),
            layers.Conv2D(16, (3, 3), activation='relu', padding='same', strides=2),
            layers.Conv2D(8, (3, 3), activation='relu', padding='same', strides=2)])
        self.decoder = tf.keras.Sequential([
            layers.Conv2DTranspose(8, kernel_size=3, strides=2, activation='relu', padding='same'),
            layers.Conv2DTranspose(16, kernel_size=3, strides=2, activation='relu', padding='same'),
            layers.Conv2D(1, kernel_size=(3, 3), activation='sigmoid', padding='same')])

    def call(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded

autoencoder = Denoise()

# CIFAR-10 denoiser: conv encoder/decoder with batch norm and a residual (skip) connection.
size = 32
channel = 3
inputs = Input(shape=(size, size, channel))
x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
x = BatchNormalization()(x)
x = MaxPool2D()(x)
x = Dropout(0.5)(x)
skip = Conv2D(32, 3, padding='same')(x)
x = LeakyReLU()(skip)
x = BatchNormalization()(x)
x = MaxPool2D()(x)
x = Dropout(0.5)(x)
x = Conv2D(64, 3, activation='relu', padding='same')(x)
x = BatchNormalization()(x)
encoded = MaxPool2D()(x)
x = Conv2DTranspose(64, 3, activation='relu', strides=(2, 2), padding='same')(encoded)
x = BatchNormalization()(x)
x = Dropout(0.5)(x)
x = Conv2DTranspose(32, 3, activation='relu', strides=(2, 2), padding='same')(x)
x = BatchNormalization()(x)
x = Dropout(0.5)(x)
x = Conv2DTranspose(32, 3, padding='same')(x)
x = add([x, skip])
x = LeakyReLU()(x)
x = BatchNormalization()(x)
decoded = Conv2DTranspose(3, 3, activation='sigmoid', strides=(2, 2), padding='same')(x)
autoencoder2 = Model(inputs, decoded)
autoencoder2.compile(optimizer=Adam(learning_rate=0.0001), loss='binary_crossentropy')
autoencoder2.summary()

epochs = 25
batch_size = 256
history2 = autoencoder2.fit(cifar_train_noise, cifar_train, epochs=epochs, batch_size=batch_size, shuffle=True, validation_data=(cifar_test_noise, cifar_test))
plot_diag(history2)
code
73079642/cell_27
[ "image_output_1.png" ]
from keras.layers import Input, Conv2D, MaxPool2D, UpSampling2D, Dense, Dropout
from keras.models import Model
from tensorflow.keras import layers, losses
import tensorflow as tf

inputs = Input(shape=(28, 28, 1))
x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
x = MaxPool2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
encoded = MaxPool2D()(x)
x = Conv2D(32, 3, activation='relu', padding='same')(encoded)
x = UpSampling2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
x = UpSampling2D()(x)
decoded = Conv2D(1, 3, activation='sigmoid', padding='same')(x)

autoencoder1 = Model(inputs, decoded)
autoencoder1.compile(optimizer='adam', loss=losses.binary_crossentropy)
autoencoder1.summary()

class Denoise(Model):
    """__init__ is the constructor in OOP: it is called when an object is
    created from the class and lets the class initialize its attributes.
    The super() function gives access to methods and properties of a parent
    or sibling class."""

    def __init__(self):
        super(Denoise, self).__init__()
        self.encoder = tf.keras.Sequential([
            layers.Input(shape=(28, 28, 1)),
            layers.Conv2D(16, (3, 3), activation='relu', padding='same', strides=2),
            layers.Conv2D(8, (3, 3), activation='relu', padding='same', strides=2)])
        self.decoder = tf.keras.Sequential([
            layers.Conv2DTranspose(8, kernel_size=3, strides=2, activation='relu', padding='same'),
            layers.Conv2DTranspose(16, kernel_size=3, strides=2, activation='relu', padding='same'),
            layers.Conv2D(1, kernel_size=(3, 3), activation='sigmoid', padding='same')])

    def call(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded

autoencoder = Denoise()
autoencoder.compile(optimizer='rmsprop', loss=losses.MeanSquaredError())
history = autoencoder.fit(noisy_train_data, train_data, epochs=10, batch_size=256, shuffle=True, validation_data=(noisy_test_data, test_data))
code
73079642/cell_37
[ "text_plain_output_1.png" ]
from keras.datasets import mnist, cifar10
from keras.layers import Conv2DTranspose, BatchNormalization, add, LeakyReLU
from keras.layers import Input, Conv2D, MaxPool2D, UpSampling2D, Dense, Dropout
from keras.models import Model
from keras.models import load_model
from keras.optimizers import Adam
from tensorflow.keras import layers, losses
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf

def preprocess(array1, array2, channel):
    """Scales the supplied arrays to [0, 1] by dividing by 255 and reshapes
    them into the appropriate (samples, height, width, channels) format."""
    if channel == 1:
        ar1 = array1.astype('float32').reshape([-1, 28, 28, 1]) / 255.0
        ar2 = array2.astype('float32').reshape([-1, 28, 28, 1]) / 255.0
    else:
        ar1 = array1.astype('float32').reshape([-1, 32, 32, 3]) / 255
        ar2 = array2.astype('float32').reshape([-1, 32, 32, 3]) / 255
    return (ar1, ar2)

def noise(a1, a2, channel):
    """Adds random Gaussian noise to each image in the supplied arrays."""
    if channel == 1:
        noise_factor = 0.2
        noisy_arr1 = a1 + noise_factor * np.random.normal(0.0, 1.0, size=a1.shape)
        noisy_arr2 = a2 + noise_factor * np.random.normal(0.0, 1.0, size=a2.shape)
    else:
        noi = 0.1
        noisy_arr1 = a1 + noi * np.random.normal(0.0, 1.0, size=a1.shape)
        noisy_arr2 = a2 + noi * np.random.normal(0.0, 1.0, size=a2.shape)
    ab1 = np.clip(noisy_arr1, 0, 1)
    ab2 = np.clip(noisy_arr2, 0, 1)
    return (ab1, ab2)

# Visualization for mnist, cifar10, noisy, denoised/prediction data
def display(rows, cols, a, b, check=False):
    """rows: number of rows in the figure
    cols: number of columns in the figure
    a: train images without noise, or noisy images during test prediction
    b: train images with noise, or denoised images during test prediction
    check: default False for 32x32 cifar10; True for the 28x28 mnist dataset
    and any predictions"""
    f = plt.figure(figsize=(2 * cols, 2 * rows * 2))
    for i in range(rows):
        for j in range(cols):
            # adding a subplot to the figure on each iteration
            f.add_subplot(rows * 2, cols, (2 * i * cols) + (j + 1))
            if check:
                plt.imshow(a[i * cols + j].reshape([28, 28]), cmap="Blues")
            else:
                plt.imshow(a[i * cols + j])
            plt.axis("off")
        for j in range(cols):
            f.add_subplot(rows * 2, cols, ((2 * i + 1) * cols) + (j + 1))
            if check:
                plt.imshow(b[i * cols + j].reshape([28, 28]), cmap="Blues")
            else:
                plt.imshow(b[i * cols + j])
            plt.axis("off")
    plt.axis("off")
    # f.suptitle("Sample Training Data", fontsize=18)
    plt.savefig("ss.png")
    plt.show()

(train_data, _), (test_data, _) = mnist.load_data()
channel = 1
train_data, test_data = preprocess(train_data, test_data, channel)
noisy_train_data, noisy_test_data = noise(train_data, test_data, channel)

(cifar_train, _), (cifar_test, _) = cifar10.load_data()
rows = 2
cols = 3
channel = 3
cifar_train, cifar_test = preprocess(cifar_train, cifar_test, channel)
cifar_train_noise, cifar_test_noise = noise(cifar_train, cifar_test, channel)

inputs = Input(shape=(28, 28, 1))
x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
x = MaxPool2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
encoded = MaxPool2D()(x)
x = Conv2D(32, 3, activation='relu', padding='same')(encoded)
x = UpSampling2D()(x)
x = Dropout(0.2)(x)
x = Conv2D(32, 3, activation='relu', padding='same')(x)
x = UpSampling2D()(x)
decoded = Conv2D(1, 3, activation='sigmoid', padding='same')(x)

autoencoder1 = Model(inputs, decoded)
autoencoder1.compile(optimizer='adam', loss=losses.binary_crossentropy)
autoencoder1.summary()
history1 = autoencoder1.fit(noisy_train_data, train_data, epochs=50, batch_size=256, shuffle=True, validation_data=(noisy_test_data, test_data))

autoencoder1.save('autoencoder_model1.h5')
model1 = load_model('autoencoder_model1.h5')
num_imgs = 45
rand = np.random.randint(1, 100)
test_images = noisy_test_data[rand:rand + num_imgs]
test_denoised = model1.predict(test_images)

class Denoise(Model):
    """__init__ is the constructor in OOP: it is called when an object is
    created from the class and lets the class initialize its attributes.
    The super() function gives access to methods and properties of a parent
    or sibling class."""

    def __init__(self):
        super(Denoise, self).__init__()
        self.encoder = tf.keras.Sequential([
            layers.Input(shape=(28, 28, 1)),
            layers.Conv2D(16, (3, 3), activation='relu', padding='same', strides=2),
            layers.Conv2D(8, (3, 3), activation='relu', padding='same', strides=2)])
        self.decoder = tf.keras.Sequential([
            layers.Conv2DTranspose(8, kernel_size=3, strides=2, activation='relu', padding='same'),
            layers.Conv2DTranspose(16, kernel_size=3, strides=2, activation='relu', padding='same'),
            layers.Conv2D(1, kernel_size=(3, 3), activation='sigmoid', padding='same')])

    def call(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded

autoencoder = Denoise()
autoencoder.compile(optimizer='rmsprop', loss=losses.MeanSquaredError())
history = autoencoder.fit(noisy_train_data, train_data, epochs=10, batch_size=256, shuffle=True, validation_data=(noisy_test_data, test_data))
num_imgs = 45
rand = np.random.randint(1, 100)
test_images = noisy_test_data[rand:rand + num_imgs]
test_denoised = autoencoder.predict(test_images)

# CIFAR-10 denoiser: conv encoder/decoder with batch norm and a residual (skip) connection.
size = 32
channel = 3
inputs = Input(shape=(size, size, channel))
x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
x = BatchNormalization()(x)
x = MaxPool2D()(x)
x = Dropout(0.5)(x)
skip = Conv2D(32, 3, padding='same')(x)
x = LeakyReLU()(skip)
x = BatchNormalization()(x)
x = MaxPool2D()(x)
x = Dropout(0.5)(x)
x = Conv2D(64, 3, activation='relu', padding='same')(x)
x = BatchNormalization()(x)
encoded = MaxPool2D()(x)
x = Conv2DTranspose(64, 3, activation='relu', strides=(2, 2), padding='same')(encoded)
x = BatchNormalization()(x)
x = Dropout(0.5)(x)
x = Conv2DTranspose(32, 3, activation='relu', strides=(2, 2), padding='same')(x)
x = BatchNormalization()(x)
x = Dropout(0.5)(x)
x = Conv2DTranspose(32, 3, padding='same')(x)
x = add([x, skip])
x = LeakyReLU()(x)
x = BatchNormalization()(x)
decoded = Conv2DTranspose(3, 3, activation='sigmoid', strides=(2, 2), padding='same')(x)
autoencoder2 = Model(inputs, decoded)
autoencoder2.compile(optimizer=Adam(learning_rate=0.0001), loss='binary_crossentropy')
autoencoder2.summary()

epochs = 25
batch_size = 256
history2 = autoencoder2.fit(cifar_train_noise, cifar_train, epochs=epochs, batch_size=batch_size, shuffle=True, validation_data=(cifar_test_noise, cifar_test))

autoencoder2.save('autoencoder_model.h5')
model2 = load_model('autoencoder_model.h5')
num_imgs = 48
rand = np.random.randint(1, cifar_test_noise.shape[0] - 48)
cifar_test_images = cifar_test_noise[rand:rand + num_imgs]
cifar_test_denoised = autoencoder2.predict(cifar_test_images)
display(3, 4, cifar_test_images, cifar_test_denoised)
code
129014335/cell_9
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import pandas as pd

iris = pd.read_csv('/kaggle/input/iris/Iris.csv')
df = iris.drop(['Id'], axis=1)
df['Species'].value_counts()
code
129014335/cell_23
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegression

model_LR = LogisticRegression()
model_LR.fit(X_train, y_train)
code
129014335/cell_30
[ "text_html_output_1.png" ]
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score

model_DTC = DecisionTreeClassifier()
model_DTC.fit(X_train, y_train)
predictionDTC = model_DTC.predict(X_test)
print(accuracy_score(y_test, predictionDTC) * 100)
code
129014335/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

iris = pd.read_csv('/kaggle/input/iris/Iris.csv')
df = iris.drop(['Id'], axis=1)
df.head()
code
129014335/cell_29
[ "text_plain_output_1.png" ]
from sklearn.tree import DecisionTreeClassifier

model_DTC = DecisionTreeClassifier()
model_DTC.fit(X_train, y_train)
code
129014335/cell_26
[ "text_plain_output_1.png" ]
from sklearn.svm import SVC

model_SVC = SVC()
model_SVC.fit(X_train, y_train)
code
129014335/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import seaborn as sns

iris = pd.read_csv('/kaggle/input/iris/Iris.csv')
df = iris.drop(['Id'], axis=1)
sns.pairplot(df, hue='Species')
code
129014335/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

iris = pd.read_csv('/kaggle/input/iris/Iris.csv')
df = iris.drop(['Id'], axis=1)
df.describe()
code
129014335/cell_32
[ "text_plain_output_1.png" ]
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
import numpy as np

model_SVC = SVC()
model_SVC.fit(X_train, y_train)
predictionSVC = model_SVC.predict(X_test)

X_new = np.array([[3.4, 2.2, 1.5, 0.5], [4.8, 2.3, 3.7, 1.3], [5.1, 2.6, 4.9, 2]])
prediction_new = model_SVC.predict(X_new)
print('Prediction of new species : {}'.format(prediction_new))
code
129014335/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

iris = pd.read_csv('/kaggle/input/iris/Iris.csv')
df = iris.drop(['Id'], axis=1)
df.info()
code
129014335/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
code
129014335/cell_17
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd

iris = pd.read_csv('/kaggle/input/iris/Iris.csv')
df = iris.drop(['Id'], axis=1)
df.isnull().sum()

le = LabelEncoder()
df['Species'] = le.fit_transform(df['Species'])
print(df.head())
print(df[50:55])
print(df.tail())
code
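The modelling cells in this notebook fit on X_train and y_train, but the cell that creates the train/test split is not part of this dump. A hypothetical reconstruction (the 80/20 ratio and random_state are assumptions, not taken from the source):

from sklearn.model_selection import train_test_split

X = df.drop('Species', axis=1)
y = df['Species']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)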
129014335/cell_24
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

model_LR = LogisticRegression()
model_LR.fit(X_train, y_train)
predictionLR = model_LR.predict(X_test)
print(accuracy_score(y_test, predictionLR) * 100)
code
129014335/cell_14
[ "text_html_output_1.png" ]
import pandas as pd

iris = pd.read_csv('/kaggle/input/iris/Iris.csv')
df = iris.drop(['Id'], axis=1)
df.isnull().sum()
code
129014335/cell_27
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score

model_SVC = SVC()
model_SVC.fit(X_train, y_train)
predictionSVC = model_SVC.predict(X_test)
print(accuracy_score(y_test, predictionSVC) * 100)
code
129014335/cell_5
[ "text_html_output_1.png" ]
import pandas as pd

iris = pd.read_csv('/kaggle/input/iris/Iris.csv')
iris.head()
code
128047546/cell_13
[ "text_plain_output_1.png" ]
from sklearn.model_selection import KFold
code
128047546/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

boston = pd.read_pickle('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-ML240EN-SkillsNetwork/labs/data/boston_housing_clean.pickle')
boston.keys()
boston['description']
code
128047546/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

boston = pd.read_pickle('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-ML240EN-SkillsNetwork/labs/data/boston_housing_clean.pickle')
boston.keys()
boston_df = boston['dataframe']
X = boston_df.drop('MEDV', axis=1)
y = boston_df['MEDV']
y
code
128047546/cell_19
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import KFold
import pandas as pd

boston = pd.read_pickle('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-ML240EN-SkillsNetwork/labs/data/boston_housing_clean.pickle')
boston.keys()
boston_df = boston['dataframe']
X = boston_df.drop('MEDV', axis=1)
y = boston_df['MEDV']
X.shape
y.shape
kf = KFold(n_splits=3, shuffle=True)
type(kf.split(X))

LR = LinearRegression()
scores = []
for train_index, test_index in kf.split(X):
    X_train, X_test, y_train, y_test = (X.iloc[train_index, :], X.iloc[test_index, :], y[train_index], y[test_index])
    print('X_train:', X.iloc[train_index, :], 'y_train:', y[train_index], 'X_test:', X.iloc[test_index, :], 'y_test:', y[test_index])
    LR.fit(X_train, y_train)
    y_pred = LR.predict(X_test)
    score = r2_score(y_test.values, y_pred)
    scores.append(score)
scores
code
128047546/cell_7
[ "text_plain_output_1.png" ]
column_desc = "Boston House Prices dataset\n===========================\n\nNotes\n------\nData Set Characteristics: \n\n :Number of Instances: 506 \n\n :Number of Attributes: 13 numeric/categorical predictive\n \n :Median Value (attribute 14) is usually the target\n\n :Attribute Information (in order):\n - CRIM per capita crime rate by town\n - ZN proportion of residential land zoned for lots over 25,000 sq.ft.\n - INDUS proportion of non-retail business acres per town\n - CHAS Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)\n - NOX nitric oxides concentration (parts per 10 million)\n - RM average number of rooms per dwelling\n - AGE proportion of owner-occupied units built prior to 1940\n - DIS weighted distances to five Boston employment centres\n - RAD index of accessibility to radial highways\n - TAX full-value property-tax rate per $10,000\n - PTRATIO pupil-teacher ratio by town\n - B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town\n - LSTAT % lower status of the population\n - MEDV Median value of owner-occupied homes in $1000's\n\n :Missing Attribute Values: None\n\n :Creator: Harrison, D. and Rubinfeld, D.L.\n\nThis is a copy of UCI ML housing dataset.\nhttp://archive.ics.uci.edu/ml/datasets/Housing\n\n\nThis dataset was taken from the StatLib library which is maintained at Carnegie Mellon University.\n\nThe Boston house-price data of Harrison, D. and Rubinfeld, D.L. 'Hedonic\nprices and the demand for clean air', J. Environ. Economics & Management,\nvol.5, 81-102, 1978. Used in Belsley, Kuh & Welsch, 'Regression diagnostics\n...', Wiley, 1980. N.B. Various transformations are used in the table on\npages 244-261 of the latter.\n\nThe Boston house-price data has been used in many machine learning papers that address regression\nproblems. \n \n**References**\n\n - Belsley, Kuh & Welsch, 'Regression diagnostics: Identifying Influential Data and Sources of Collinearity', Wiley, 1980. 244-261.\n - Quinlan,R. (1993). Combining Instance-Based and Model-Based Learning. In Proceedings on the Tenth International Conference of Machine Learning, 236-243, University of Massachusetts, Amherst. Morgan Kaufmann.\n - many more! (see http://archive.ics.uci.edu/ml/datasets/Housing)\n" lines = column_desc.split('\n') lines
code
128047546/cell_15
[ "text_plain_output_1.png" ]
from sklearn.model_selection import KFold

kf = KFold(n_splits=3, shuffle=True)
kf
code
128047546/cell_16
[ "text_plain_output_1.png" ]
from sklearn.model_selection import KFold
import pandas as pd

boston = pd.read_pickle('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-ML240EN-SkillsNetwork/labs/data/boston_housing_clean.pickle')
boston.keys()
boston_df = boston['dataframe']
X = boston_df.drop('MEDV', axis=1)
X.shape
kf = KFold(n_splits=3, shuffle=True)
type(kf.split(X))
code
128047546/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

boston = pd.read_pickle('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-ML240EN-SkillsNetwork/labs/data/boston_housing_clean.pickle')
boston.keys()
code
128047546/cell_17
[ "text_plain_output_1.png" ]
from sklearn.model_selection import KFold
import pandas as pd

boston = pd.read_pickle('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-ML240EN-SkillsNetwork/labs/data/boston_housing_clean.pickle')
boston.keys()
boston_df = boston['dataframe']
X = boston_df.drop('MEDV', axis=1)
X.shape
kf = KFold(n_splits=3, shuffle=True)
type(kf.split(X))

for training_set, test_set in kf.split(X):
    print('Training set', training_set, '\n', 'Total:', len(training_set))
    print('Test set', test_set, '\n', 'Total:', len(test_set))
    print(' ')
code
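One caveat on the fold printout above: with shuffle=True and no random_state, KFold draws different folds on every run, so the printed index sets are not reproducible. A one-line sketch of the fix (the seed value is arbitrary):

kf = KFold(n_splits=3, shuffle=True, random_state=42)  # reproducible folds across runs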
128047546/cell_24
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_predict
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import pandas as pd

boston = pd.read_pickle('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-ML240EN-SkillsNetwork/labs/data/boston_housing_clean.pickle')
boston.keys()
boston_df = boston['dataframe']
X = boston_df.drop('MEDV', axis=1)
y = boston_df['MEDV']
X.shape
y.shape
kf = KFold(n_splits=3, shuffle=True)
type(kf.split(X))

LR = LinearRegression()
scores = []
for train_index, test_index in kf.split(X):
    X_train, X_test, y_train, y_test = (X.iloc[train_index, :], X.iloc[test_index, :], y[train_index], y[test_index])
    LR.fit(X_train, y_train)
    y_pred = LR.predict(X_test)
    score = r2_score(y_test.values, y_pred)
    scores.append(score)
scores

# Scale inside each fold via a pipeline, then score the pooled out-of-fold predictions.
steps = Pipeline([('std_scaler', StandardScaler()), ('Lin_Reg', LinearRegression())])
predict = cross_val_predict(steps, X, y, cv=kf)
r2_score(y, predict)
code
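A side note on the last two lines of the cell above: r2_score(y, predict) scores the pooled out-of-fold predictions, which is generally not the same number as the mean of per-fold R-squared values. A small sketch of the per-fold variant for comparison, using scikit-learn's cross_val_score:

from sklearn.model_selection import cross_val_score

fold_scores = cross_val_score(steps, X, y, cv=kf, scoring='r2')
print(fold_scores, fold_scores.mean())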
128047546/cell_22
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import KFold
import pandas as pd

boston = pd.read_pickle('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-ML240EN-SkillsNetwork/labs/data/boston_housing_clean.pickle')
boston.keys()
boston_df = boston['dataframe']
X = boston_df.drop('MEDV', axis=1)
y = boston_df['MEDV']
X.shape
y.shape
kf = KFold(n_splits=3, shuffle=True)
type(kf.split(X))

LR = LinearRegression()
scores = []
for train_index, test_index in kf.split(X):
    X_train, X_test, y_train, y_test = (X.iloc[train_index, :], X.iloc[test_index, :], y[train_index], y[test_index])
    LR.fit(X_train, y_train)
    y_pred = LR.predict(X_test)
    score = r2_score(y_test.values, y_pred)
    scores.append(score)
scores
kf
code
128047546/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd

boston = pd.read_pickle('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-ML240EN-SkillsNetwork/labs/data/boston_housing_clean.pickle')
boston.keys()
boston_df = boston['dataframe']
X = boston_df.drop('MEDV', axis=1)
X.shape
code
128047546/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

boston = pd.read_pickle('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-ML240EN-SkillsNetwork/labs/data/boston_housing_clean.pickle')
boston.keys()
boston_df = boston['dataframe']
X = boston_df.drop('MEDV', axis=1)
y = boston_df['MEDV']
y.shape
code
128047546/cell_5
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
boston = pd.read_pickle('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-ML240EN-SkillsNetwork/labs/data/boston_housing_clean.pickle')
boston.keys()
boston_df = boston['dataframe']
boston_df
code
90107754/cell_13
[ "text_plain_output_1.png" ]
import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
test_df.describe()
code
90107754/cell_9
[ "text_plain_output_1.png" ]
import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.describe()
code
90107754/cell_34
[ "text_plain_output_1.png", "image_output_1.png" ]
from keras.layers import Bidirectional
from keras.layers import Dense
from keras.layers import Dense,Dropout,Embedding,LSTM
from keras.layers import Embedding
from keras.layers import LSTM
from keras.models import Sequential
from keras.models import Sequential
from keras.preprocessing.text import Tokenizer
from tensorflow.keras.layers import Dense, Embedding
from tensorflow.keras.layers import LSTM
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import cudf as pd
import keras
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
test_df.shape
train_df.isnull().sum()
test_df.isnull().sum()
train_df.isnull().any().any()
test_df.isnull().any().any()
train = train_df.to_pandas()
test = test_df.to_pandas()
X_train = train.Phrase
Y_train = train.Sentiment
tokenize = Tokenizer()
tokenize.fit_on_texts(X_train.values)
X_test = test.Phrase
X_train = tokenize.texts_to_sequences(X_train)
X_test = tokenize.texts_to_sequences(X_test)
max_lenght = max([len(s.split()) for s in train['Phrase']])
X_train = pad_sequences(X_train, max_lenght)
X_test = pad_sequences(X_test, max_lenght)
model = Sequential()
inputs = keras.Input(shape=(None,), dtype='int32')
model.add(inputs)
model.add(Embedding(50000, 128))
model.add(Bidirectional(LSTM(64, return_sequences=True)))
model.add(Bidirectional(LSTM(64)))
model.add(Dense(5, activation='softmax'))
model.summary()
model.compile('adam', 'sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=256, epochs=30)
y_pred = model.predict(X_test)
y_pred
code
90107754/cell_33
[ "text_plain_output_1.png" ]
from keras.layers import Bidirectional
from keras.layers import Dense
from keras.layers import Dense,Dropout,Embedding,LSTM
from keras.layers import Embedding
from keras.layers import LSTM
from keras.models import Sequential
from keras.models import Sequential
from keras.preprocessing.text import Tokenizer
from tensorflow.keras.layers import Dense, Embedding
from tensorflow.keras.layers import LSTM
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import cudf as pd
import keras
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
test_df.shape
train_df.isnull().sum()
test_df.isnull().sum()
train_df.isnull().any().any()
test_df.isnull().any().any()
train = train_df.to_pandas()
test = test_df.to_pandas()
X_train = train.Phrase
Y_train = train.Sentiment
tokenize = Tokenizer()
tokenize.fit_on_texts(X_train.values)
X_test = test.Phrase
X_train = tokenize.texts_to_sequences(X_train)
X_test = tokenize.texts_to_sequences(X_test)
max_lenght = max([len(s.split()) for s in train['Phrase']])
X_train = pad_sequences(X_train, max_lenght)
X_test = pad_sequences(X_test, max_lenght)
model = Sequential()
inputs = keras.Input(shape=(None,), dtype='int32')
model.add(inputs)
model.add(Embedding(50000, 128))
model.add(Bidirectional(LSTM(64, return_sequences=True)))
model.add(Bidirectional(LSTM(64)))
model.add(Dense(5, activation='softmax'))
model.summary()
model.compile('adam', 'sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=256, epochs=30)
code
90107754/cell_11
[ "text_plain_output_1.png" ]
import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
test_df.head()
code
90107754/cell_19
[ "text_plain_output_1.png" ]
import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
test_df.shape
test_df.isnull().sum()
test_df.isnull().any().any()
code
90107754/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90107754/cell_7
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.head()
code
90107754/cell_18
[ "text_plain_output_1.png" ]
import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
train_df.isnull().sum()
train_df.isnull().any().any()
code
90107754/cell_32
[ "text_plain_output_1.png" ]
from keras.layers import Bidirectional
from keras.layers import Dense
from keras.layers import Dense,Dropout,Embedding,LSTM
from keras.layers import Embedding
from keras.layers import LSTM
from keras.models import Sequential
from keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding
from tensorflow.keras.layers import LSTM
from tensorflow.keras.models import Sequential
import keras
model = Sequential()
inputs = keras.Input(shape=(None,), dtype='int32')
model.add(inputs)
model.add(Embedding(50000, 128))
model.add(Bidirectional(LSTM(64, return_sequences=True)))
model.add(Bidirectional(LSTM(64)))
model.add(Dense(5, activation='softmax'))
model.summary()
code
90107754/cell_8
[ "text_plain_output_5.png", "text_plain_output_4.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.info()
code
90107754/cell_16
[ "text_plain_output_1.png" ]
import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
train_df.isnull().sum()
code
90107754/cell_17
[ "text_html_output_1.png" ]
import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
test_df.shape
test_df.isnull().sum()
code
90107754/cell_24
[ "text_plain_output_1.png" ]
import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
train_df.isnull().sum()
train_df.isnull().any().any()
train = train_df.to_pandas()
sns.countplot(x='Sentiment', data=train)
code
90107754/cell_14
[ "text_html_output_1.png" ]
import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
test_df.shape
code
90107754/cell_22
[ "text_plain_output_1.png" ]
import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
train_df.isnull().sum()
train_df.isnull().any().any()
train_df['Sentiment'].value_counts()
code
90107754/cell_10
[ "text_html_output_1.png" ]
import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
train_df.shape
code
90107754/cell_12
[ "text_html_output_1.png" ]
import cudf as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/train.tsv.zip', sep='\t')
test_df = pd.read_csv('/kaggle/input/sentiment-analysis-on-movie-reviews/test.tsv.zip', sep='\t')
test_df.info()
code
90116977/cell_23
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
data = '../input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv'
data2 = '../input/covid-time-series-data-india-till-31oct21/case_time_series.csv'
state = '../input/latest-covid19-cases-maharashtra-india/Maharashtra Latest Covid Cases.csv'
dataset = pd.read_csv(data)
dataset1 = pd.read_csv(data2)
maha = pd.read_csv(state)
dataset.shape
dataset.isna().sum().sort_values(ascending=False)
state_population = dataset.sort_values(by=['Population'], ascending=False)
plt.figure(figsize=(12, 10))
population_cnt = sns.barplot(state_population['Population'], state_population['State/UTs'], palette='Blues_d')
plt.title('State/UTs vs. Population')
plt.show()
code
90116977/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '../input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv'
data2 = '../input/covid-time-series-data-india-till-31oct21/case_time_series.csv'
state = '../input/latest-covid19-cases-maharashtra-india/Maharashtra Latest Covid Cases.csv'
dataset = pd.read_csv(data)
dataset1 = pd.read_csv(data2)
maha = pd.read_csv(state)
dataset.shape
dataset.isna().sum().sort_values(ascending=False)
code
90116977/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '../input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv'
data2 = '../input/covid-time-series-data-india-till-31oct21/case_time_series.csv'
state = '../input/latest-covid19-cases-maharashtra-india/Maharashtra Latest Covid Cases.csv'
dataset = pd.read_csv(data)
dataset1 = pd.read_csv(data2)
maha = pd.read_csv(state)
dataset.tail(10)
code
90116977/cell_11
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '../input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv'
data2 = '../input/covid-time-series-data-india-till-31oct21/case_time_series.csv'
state = '../input/latest-covid19-cases-maharashtra-india/Maharashtra Latest Covid Cases.csv'
dataset = pd.read_csv(data)
dataset1 = pd.read_csv(data2)
maha = pd.read_csv(state)
maha.head()
code
90116977/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import datetime as dt
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90116977/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '../input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv'
data2 = '../input/covid-time-series-data-india-till-31oct21/case_time_series.csv'
state = '../input/latest-covid19-cases-maharashtra-india/Maharashtra Latest Covid Cases.csv'
dataset = pd.read_csv(data)
dataset1 = pd.read_csv(data2)
maha = pd.read_csv(state)
dataset1.head()
code
90116977/cell_8
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '../input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv'
data2 = '../input/covid-time-series-data-india-till-31oct21/case_time_series.csv'
state = '../input/latest-covid19-cases-maharashtra-india/Maharashtra Latest Covid Cases.csv'
dataset = pd.read_csv(data)
dataset1 = pd.read_csv(data2)
maha = pd.read_csv(state)
dataset1.tail()
code
90116977/cell_15
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '../input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv'
data2 = '../input/covid-time-series-data-india-till-31oct21/case_time_series.csv'
state = '../input/latest-covid19-cases-maharashtra-india/Maharashtra Latest Covid Cases.csv'
dataset = pd.read_csv(data)
dataset1 = pd.read_csv(data2)
maha = pd.read_csv(state)
dataset.shape
dataset.describe(include='all')
code
90116977/cell_17
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '../input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv'
data2 = '../input/covid-time-series-data-india-till-31oct21/case_time_series.csv'
state = '../input/latest-covid19-cases-maharashtra-india/Maharashtra Latest Covid Cases.csv'
dataset = pd.read_csv(data)
dataset1 = pd.read_csv(data2)
maha = pd.read_csv(state)
dataset.shape
dataset.info()
code
90116977/cell_14
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '../input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv'
data2 = '../input/covid-time-series-data-india-till-31oct21/case_time_series.csv'
state = '../input/latest-covid19-cases-maharashtra-india/Maharashtra Latest Covid Cases.csv'
dataset = pd.read_csv(data)
dataset1 = pd.read_csv(data2)
maha = pd.read_csv(state)
dataset.shape
dataset['State/UTs'].unique()
code
90116977/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '../input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv'
data2 = '../input/covid-time-series-data-india-till-31oct21/case_time_series.csv'
state = '../input/latest-covid19-cases-maharashtra-india/Maharashtra Latest Covid Cases.csv'
dataset = pd.read_csv(data)
dataset1 = pd.read_csv(data2)
maha = pd.read_csv(state)
dataset.shape
dataset.isna().sum().sort_values(ascending=False)
state_population = dataset.sort_values(by=['Population'], ascending=False)
print(state_population)
code
90116977/cell_10
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '../input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv'
data2 = '../input/covid-time-series-data-india-till-31oct21/case_time_series.csv'
state = '../input/latest-covid19-cases-maharashtra-india/Maharashtra Latest Covid Cases.csv'
dataset = pd.read_csv(data)
dataset1 = pd.read_csv(data2)
maha = pd.read_csv(state)
today = dataset1[dataset1.Date_YMD == '2021-04-11']
today
code
90116977/cell_12
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '../input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv'
data2 = '../input/covid-time-series-data-india-till-31oct21/case_time_series.csv'
state = '../input/latest-covid19-cases-maharashtra-india/Maharashtra Latest Covid Cases.csv'
dataset = pd.read_csv(data)
dataset1 = pd.read_csv(data2)
maha = pd.read_csv(state)
dataset.shape
code
90116977/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = '../input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv'
data2 = '../input/covid-time-series-data-india-till-31oct21/case_time_series.csv'
state = '../input/latest-covid19-cases-maharashtra-india/Maharashtra Latest Covid Cases.csv'
dataset = pd.read_csv(data)
dataset1 = pd.read_csv(data2)
maha = pd.read_csv(state)
dataset.head(10)
code
128046602/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.utils import shuffle
import pandas as pd
df = pd.read_csv('/kaggle/input/kullanlcaklar/Womens_Clothing_E-Commerce_Reviews_1.csv')
df = shuffle(df)
df = df.reset_index(drop=True)
df.head()
code
128046602/cell_20
[ "text_html_output_1.png" ]
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import pandas as pd
import random
import re
df = pd.read_csv('/kaggle/input/kullanlcaklar/Womens_Clothing_E-Commerce_Reviews_1.csv')
df = shuffle(df)
df = df.reset_index(drop=True)
N = 10
population_size = 100
mutation_rate = 0.3
generations = 200
text_column_name = 'Title'
rating_column_name = 'Rating'

def clean_text(text):
    text = text.lower()
    text = re.sub('[^\\w\\s]', '', text)
    return text

def tokenize(text):
    return text.split()

def prepare_population(df, text_column_name):
    # Pool all tokens from the reviews, shuffle them, and cut them into
    # fixed-size word lists: each N-word sublist is one individual.
    all_words = []
    for text in df[text_column_name]:
        clean_review = clean_text(text)
        tokens = tokenize(clean_review)
        all_words.extend(tokens)
    random.shuffle(all_words)
    sublists = [all_words[i:i + N] for i in range(0, len(all_words), N)]
    population = sublists
    return (population, all_words)

def count_words(text, word_list):
    count = 0
    for word in word_list:
        count += text.count(word)
    return count

def fitness(individual):
    # First half of the individual acts as positive words, second half as
    # negative words; classify each review as rating 5 or 1 accordingly.
    correct_classification = 0
    for i in range(len(df)):
        row = df.iloc[i]
        text, rating = (row[text_column_name], row[rating_column_name])
        pozitif = count_words(text, individual[:N // 2])
        negatif = count_words(text, individual[N // 2:])
        if pozitif == negatif:
            if random.random() < 0.5:
                classification = 1
            else:
                classification = 5
        elif pozitif > negatif:
            classification = 5
        else:
            classification = 1
        if classification == rating:
            correct_classification += 1
    return correct_classification / len(df)

def fitness2(individual):
    # Variant of fitness() for string labels instead of numeric ratings.
    correct_classification = 0
    for i in range(len(df)):
        row = df.iloc[i]
        text, rating = (row[text_column_name], row[rating_column_name])
        pozitif = count_words(text, individual[:N // 2])
        negatif = count_words(text, individual[N // 2:])
        if pozitif == negatif:
            if random.random() < 0.5:
                classification = 'negative'
            else:
                classification = 'positive'
        elif pozitif > negatif:
            classification = 'positive'
        else:
            classification = 'negative'
        if classification == rating:
            correct_classification += 1
    return correct_classification / len(df)

def crossover(parent1, parent2):
    n = N // 2
    child1 = parent1[:n] + parent2[n:]
    child2 = parent2[:n] + parent1[n:]
    return (child1, child2)

def mutate(individual, all_words):
    if random.random() < mutation_rate:
        i = random.randint(0, len(individual) - 1)
        new_word = random.choice(all_words)
        individual[i] = new_word
    return individual

def display_results(population, success_rates, average_success_rates):
    best_individual = max(population, key=fitness)
    print('Best individual:', best_individual)
    print('Success rate:', fitness(best_individual))
    fig, ax = plt.subplots()
    ax.plot(success_rates, label='Best individual')
    ax.plot(average_success_rates, label='Average success')
    ax.set_xlabel('Generation')
    ax.set_ylabel('Success Rate')
    ax.set_title('Success Rate per Generation')
    ax.legend()
    plt.show()

def display_results2(population, success_rates, average_success_rates):
    best_individual = max(population, key=fitness2)
    print('Best individual:', best_individual)
    print('Success rate:', fitness2(best_individual))
    fig, ax = plt.subplots()
    ax.plot(success_rates, label='Best individual')
    ax.plot(average_success_rates, label='Average success')
    ax.set_xlabel('Generation')
    ax.set_ylabel('Success Rate')
    ax.set_title('Success Rate per Generation')
    ax.legend()
    plt.show()

# Initialise the population (same call as in cell_17); without it,
# `population` and `all_words` below would be undefined.
population, all_words = prepare_population(df, text_column_name)
success_rates = []
average_success_rates = []
for generation in range(generations):
    fitness_values = []
    for individual in population:
        fitness_values.append(fitness(individual))
    average_success_rate = sum(fitness_values) / population_size
    average_success_rates.append(average_success_rate)
    children = []
    for i in range(population_size // 2):
        parents = random.choices(population, weights=fitness_values, k=2)
        child1, child2 = crossover(parents[0], parents[1])
        children.append(child1)
        children.append(child2)
    mutations = []
    for child in children:
        mutations.append(mutate(child, all_words))
    population = mutations
    best_individual = max(population, key=fitness)
    success_rate = fitness(best_individual)
    success_rates.append(success_rate)
display_results(population, success_rates, average_success_rates)
code
128046602/cell_2
[ "text_html_output_1.png" ]
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
nltk.download('stopwords')
nltk.download('punkt')
code
128046602/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('/kaggle/input/kullanlcaklar/Womens_Clothing_E-Commerce_Reviews_1.csv')
df.head()
code
128046602/cell_17
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.utils import shuffle
import pandas as pd
import random
import re
df = pd.read_csv('/kaggle/input/kullanlcaklar/Womens_Clothing_E-Commerce_Reviews_1.csv')
df = shuffle(df)
df = df.reset_index(drop=True)
N = 10
population_size = 100
mutation_rate = 0.3
generations = 200
text_column_name = 'Title'
rating_column_name = 'Rating'

def clean_text(text):
    text = text.lower()
    text = re.sub('[^\\w\\s]', '', text)
    return text

def tokenize(text):
    return text.split()

def prepare_population(df, text_column_name):
    all_words = []
    for text in df[text_column_name]:
        clean_review = clean_text(text)
        tokens = tokenize(clean_review)
        all_words.extend(tokens)
    random.shuffle(all_words)
    sublists = [all_words[i:i + N] for i in range(0, len(all_words), N)]
    population = sublists
    return (population, all_words)

def count_words(text, word_list):
    count = 0
    for word in word_list:
        count += text.count(word)
    return count

def fitness(individual):
    correct_classification = 0
    for i in range(len(df)):
        row = df.iloc[i]
        text, rating = (row[text_column_name], row[rating_column_name])
        pozitif = count_words(text, individual[:N // 2])
        negatif = count_words(text, individual[N // 2:])
        if pozitif == negatif:
            if random.random() < 0.5:
                classification = 1
            else:
                classification = 5
        elif pozitif > negatif:
            classification = 5
        else:
            classification = 1
        if classification == rating:
            correct_classification += 1
    return correct_classification / len(df)

def fitness2(individual):
    correct_classification = 0
    for i in range(len(df)):
        row = df.iloc[i]
        text, rating = (row[text_column_name], row[rating_column_name])
        pozitif = count_words(text, individual[:N // 2])
        negatif = count_words(text, individual[N // 2:])
        if pozitif == negatif:
            if random.random() < 0.5:
                classification = 'negative'
            else:
                classification = 'positive'
        elif pozitif > negatif:
            classification = 'positive'
        else:
            classification = 'negative'
        if classification == rating:
            correct_classification += 1
    return correct_classification / len(df)

population, all_words = prepare_population(df, text_column_name)
print(population)
code
88076328/cell_6
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
omicron_data = pd.read_csv('/kaggle/input/omicron-covid19-variant-daily-cases/covid-variants.csv')
omicron_data = pd.DataFrame(omicron_data)
omicron_data.loc[(omicron_data['location'] == 'Thailand') & (omicron_data['variant'] == 'Omicron')]
code
88076328/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt # missing from the extracted cell; needed for the plt calls below
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
omicron_data = pd.read_csv('/kaggle/input/omicron-covid19-variant-daily-cases/covid-variants.csv')
omicron_data = pd.DataFrame(omicron_data)
omicron_data.loc[(omicron_data['location'] == 'Thailand') & (omicron_data['variant'] == 'Omicron')]
omicron_num_seq_Thailand = omicron_data[omicron_data['location'] == 'Thailand']['num_sequences_total']
omicron_date_Thailand = omicron_data[omicron_data['location'] == 'Thailand']['date']
plt.figure(figsize=(50, 10))
plt.xlabel('date')
plt.ylabel('total cases')
plt.title('Omicron Cases in Thailand')
plt.plot(omicron_date_Thailand, omicron_num_seq_Thailand)
plt.show()
code
88076328/cell_5
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
omicron_data = pd.read_csv('/kaggle/input/omicron-covid19-variant-daily-cases/covid-variants.csv')
omicron_data = pd.DataFrame(omicron_data)
omicron_data.head()
code
74041571/cell_18
[ "text_html_output_1.png" ]
from IPython.core.display import display, Markdown
from bs4 import BeautifulSoup
from pathlib import Path
import pandas as pd
PUB = Path('../input/30dmlleaderboards/public_lb.html')
PRIV = Path('../input/30dmlleaderboards/private_lb.html')
CSV_PUB = Path('../input/30dmlleaderboards/30-days-of-ml-publicleaderboard/30-days-of-ml-publicleaderboard.csv')

def _strip_all_spaces(series):
    return series.replace('\\s+', ' ', regex=True).str.strip()

def _extract_kernel(element):
    try:
        anchor = element.find('a')
        title = anchor.get('title')
        href = anchor.get('href')
    except AttributeError:
        title, href = ('', '')
    return (title, href)

def _load_pub_dataframe(pathname=PUB):
    """
    Scrape the key data items from the HTML version of the public leaderboard.
    Return a pandas DataFrame
    """
    with pathname.open() as f:
        soup = BeautifulSoup(f, 'lxml-xml')
    recs = soup.find_all('tr', class_=['competition-leaderboard__row', 'competition-leaderboard__row competition-leaderboard__row--user-scored'])
    rows = []
    for rec in recs:
        record = dict()
        keys = ['Rank', 'Team Name', 'Kernel', 'Score', 'Number of Entries']
        for key in keys:
            if key != 'Kernel':
                record[key] = rec.find('td', {'data-th': key}).text
            else:
                element = rec.find('td', {'data-th': key})
                record['KernelTitle'], record['KernelHref'] = _extract_kernel(element)
        record['Last Entry'] = rec.find_all('span')[-1]['title']
        rows.append(record)
    df = pd.DataFrame(rows)
    df['Last Entry'] = pd.to_datetime(df['Last Entry'].str.split().str[0:6].str.join(' '))
    df['Team Name'] = _strip_all_spaces(df['Team Name'])
    df = df[df['Team Name'].notna()]
    df.columns = ['Rank', 'TeamName', 'KernelTitle', 'KernelHref', 'Score', 'Entries', 'Latest']
    df = df.drop(['Score', 'Latest'], axis='columns')
    return df

def _load_priv_dataframe(pathname=PRIV):
    """
    Scrape the key data items from the HTML version of the private leaderboard.
    Return a pandas DataFrame
    """
    with pathname.open() as f:
        soup = BeautifulSoup(f, 'lxml-xml')
    recs = soup.find_all('tr', class_=['competition-leaderboard__row', 'competition-leaderboard__row competition-leaderboard__row--user-scored'])
    rows = []
    for rec in recs:
        record = dict()
        keys = ['Rank', 'Team Name', 'Score']
        for key in keys:
            record[key] = rec.find('td', {'data-th': key}).text
        change_span = rec.find('td', {'data-th': 'Change'}).find('span')
        if change_span.find('span', class_='position-change__none'):
            change = (0, 0)
        elif change_span.find('span', class_='position-change__risen'):
            change = (1, int(change_span.find('span', class_='position-change__risen').text))
        else:
            change = (-1, int(change_span.find('span', class_='position-change__fallen').text))
        record['ChangeDirection'], record['ChangeNo'] = change
        rows.append(record)
    df = pd.DataFrame(rows)
    df['Team Name'] = _strip_all_spaces(df['Team Name'])
    df.columns = ['PrivRank', 'TeamName', 'PrivScore', 'ChangeDirection', 'ChangeNo']
    return df

def _load_csv_dataframe(pathname=CSV_PUB):
    """
    Read the CSV version of the public leaderboard as downloaded from Kaggle.
    (This will be merged with the scraped version).
    Return a pandas DataFrame
    """
    df = pd.read_csv(pathname)
    df.TeamName = df.TeamName.replace('\\s+', ' ', regex=True).str.strip()
    return df

def _load_dataframe(pathname):
    if pathname == PUB:
        df = _load_pub_dataframe()
    elif pathname == CSV_PUB:
        df = _load_csv_dataframe()
    return df

def _load_and_merge_public_lb():
    """
    Load and clean the two versions (HTML and CSV) of the public leaderboard.
    Then merge the two on TeamName.
    Return a merged pandas DataFrame.
    """
    df = _load_dataframe(PUB)
    df_pub = _load_dataframe(CSV_PUB)
    df = df[df['TeamName'].isin(df_pub['TeamName'])]
    df_pub = df_pub[df_pub['TeamName'].isin(df['TeamName'])]
    final = df.merge(df_pub, on='TeamName', how='left')
    final.columns = ['PubRank', 'TeamName', 'KernelTitle', 'KernelHref', 'Entries', 'TeamId', 'SubmissionDate', 'PubScore']
    return final

def load_data():
    """
    Load all data (two versions of public and scraped version of private leaderboard).
    Merge into one DataFrame and set dtypes correctly.
    Return a pandas DataFrame
    """
    pub_df = _load_and_merge_public_lb()
    priv_df = _load_priv_dataframe()
    df = pub_df.merge(priv_df, on='TeamName', how='left')
    type_dict = {'PubRank': 'int32', 'Entries': 'int32', 'SubmissionDate': 'datetime64', 'PubScore': 'float64', 'PrivRank': 'int32', 'PrivScore': 'float64', 'ChangeDirection': 'category', 'ChangeNo': 'int32'}
    for key, value in type_dict.items():
        df[key] = df[key].astype(value)
    new_order = ['TeamId', 'TeamName', 'PubRank', 'PubScore', 'PrivRank', 'PrivScore', 'ChangeDirection', 'ChangeNo', 'SubmissionDate', 'Entries', 'KernelTitle', 'KernelHref']
    df = df[new_order]
    return df

DF = load_data()
DF.sample(5)
kernels = DF[DF.KernelHref.str.len() > 0].sort_values(by='PrivRank')
kernels = kernels[['PrivRank', 'PrivScore', 'KernelTitle', 'KernelHref']]
markdown = '\n| Private Rank | Private Score | Notebook |\n|--------------|--------------:|---------:|\n'
for row in kernels.iterrows():
    rec = row[1]
    priv_rank = rec['PrivRank']
    priv_score = rec['PrivScore']
    title = ' '.join(rec['KernelTitle'].split())
    title = title.replace('|', '/')
    url = rec['KernelHref']
    anchor = f'[{title}](https://kaggle.com{url})'
    line = f'| {priv_rank} | {priv_score} | {anchor} |\n'
    markdown += line
display(Markdown(markdown))
code
74041571/cell_16
[ "text_html_output_1.png" ]
from bs4 import BeautifulSoup
from pathlib import Path
import pandas as pd
import plotly.express as px
PUB = Path('../input/30dmlleaderboards/public_lb.html')
PRIV = Path('../input/30dmlleaderboards/private_lb.html')
CSV_PUB = Path('../input/30dmlleaderboards/30-days-of-ml-publicleaderboard/30-days-of-ml-publicleaderboard.csv')

def _strip_all_spaces(series):
    return series.replace('\\s+', ' ', regex=True).str.strip()

def _extract_kernel(element):
    try:
        anchor = element.find('a')
        title = anchor.get('title')
        href = anchor.get('href')
    except AttributeError:
        title, href = ('', '')
    return (title, href)

def _load_pub_dataframe(pathname=PUB):
    """
    Scrape the key data items from the HTML version of the public leaderboard.
    Return a pandas DataFrame
    """
    with pathname.open() as f:
        soup = BeautifulSoup(f, 'lxml-xml')
    recs = soup.find_all('tr', class_=['competition-leaderboard__row', 'competition-leaderboard__row competition-leaderboard__row--user-scored'])
    rows = []
    for rec in recs:
        record = dict()
        keys = ['Rank', 'Team Name', 'Kernel', 'Score', 'Number of Entries']
        for key in keys:
            if key != 'Kernel':
                record[key] = rec.find('td', {'data-th': key}).text
            else:
                element = rec.find('td', {'data-th': key})
                record['KernelTitle'], record['KernelHref'] = _extract_kernel(element)
        record['Last Entry'] = rec.find_all('span')[-1]['title']
        rows.append(record)
    df = pd.DataFrame(rows)
    df['Last Entry'] = pd.to_datetime(df['Last Entry'].str.split().str[0:6].str.join(' '))
    df['Team Name'] = _strip_all_spaces(df['Team Name'])
    df = df[df['Team Name'].notna()]
    df.columns = ['Rank', 'TeamName', 'KernelTitle', 'KernelHref', 'Score', 'Entries', 'Latest']
    df = df.drop(['Score', 'Latest'], axis='columns')
    return df

def _load_priv_dataframe(pathname=PRIV):
    """
    Scrape the key data items from the HTML version of the private leaderboard.
    Return a pandas DataFrame
    """
    with pathname.open() as f:
        soup = BeautifulSoup(f, 'lxml-xml')
    recs = soup.find_all('tr', class_=['competition-leaderboard__row', 'competition-leaderboard__row competition-leaderboard__row--user-scored'])
    rows = []
    for rec in recs:
        record = dict()
        keys = ['Rank', 'Team Name', 'Score']
        for key in keys:
            record[key] = rec.find('td', {'data-th': key}).text
        change_span = rec.find('td', {'data-th': 'Change'}).find('span')
        if change_span.find('span', class_='position-change__none'):
            change = (0, 0)
        elif change_span.find('span', class_='position-change__risen'):
            change = (1, int(change_span.find('span', class_='position-change__risen').text))
        else:
            change = (-1, int(change_span.find('span', class_='position-change__fallen').text))
        record['ChangeDirection'], record['ChangeNo'] = change
        rows.append(record)
    df = pd.DataFrame(rows)
    df['Team Name'] = _strip_all_spaces(df['Team Name'])
    df.columns = ['PrivRank', 'TeamName', 'PrivScore', 'ChangeDirection', 'ChangeNo']
    return df

def _load_csv_dataframe(pathname=CSV_PUB):
    """
    Read the CSV version of the public leaderboard as downloaded from Kaggle.
    (This will be merged with the scraped version).
    Return a pandas DataFrame
    """
    df = pd.read_csv(pathname)
    df.TeamName = df.TeamName.replace('\\s+', ' ', regex=True).str.strip()
    return df

def _load_dataframe(pathname):
    if pathname == PUB:
        df = _load_pub_dataframe()
    elif pathname == CSV_PUB:
        df = _load_csv_dataframe()
    return df

def _load_and_merge_public_lb():
    """
    Load and clean the two versions (HTML and CSV) of the public leaderboard.
    Then merge the two on TeamName.
    Return a merged pandas DataFrame.
    """
    df = _load_dataframe(PUB)
    df_pub = _load_dataframe(CSV_PUB)
    df = df[df['TeamName'].isin(df_pub['TeamName'])]
    df_pub = df_pub[df_pub['TeamName'].isin(df['TeamName'])]
    final = df.merge(df_pub, on='TeamName', how='left')
    final.columns = ['PubRank', 'TeamName', 'KernelTitle', 'KernelHref', 'Entries', 'TeamId', 'SubmissionDate', 'PubScore']
    return final

def load_data():
    """
    Load all data (two versions of public and scraped version of private leaderboard).
    Merge into one DataFrame and set dtypes correctly.
    Return a pandas DataFrame
    """
    pub_df = _load_and_merge_public_lb()
    priv_df = _load_priv_dataframe()
    df = pub_df.merge(priv_df, on='TeamName', how='left')
    type_dict = {'PubRank': 'int32', 'Entries': 'int32', 'SubmissionDate': 'datetime64', 'PubScore': 'float64', 'PrivRank': 'int32', 'PrivScore': 'float64', 'ChangeDirection': 'category', 'ChangeNo': 'int32'}
    for key, value in type_dict.items():
        df[key] = df[key].astype(value)
    new_order = ['TeamId', 'TeamName', 'PubRank', 'PubScore', 'PrivRank', 'PrivScore', 'ChangeDirection', 'ChangeNo', 'SubmissionDate', 'Entries', 'KernelTitle', 'KernelHref']
    df = df[new_order]
    return df

DF = load_data()
DF.sample(5)
RANK = 500
SOURCE = DF[DF['PrivRank'] <= RANK].copy()
x_data = SOURCE["Entries"]
y_data = SOURCE["PrivRank"]
color_data = SOURCE["ChangeDirection"]
size_data = SOURCE["Entries"]
fig = px.scatter(SOURCE, x=x_data, y=y_data, color=color_data, size=size_data, opacity=0.75, hover_data=['TeamName'])
fig.show()
x_data = SOURCE["SubmissionDate"]
y_data = SOURCE["Entries"]
color_data = SOURCE["PrivRank"]
size_data = SOURCE["Entries"]
fig = px.scatter(SOURCE, x=x_data, y=y_data, color=color_data, size=size_data, opacity=0.75, hover_data=['TeamName'])
fig.show()
x_data = SOURCE["PrivRank"]
y_data = SOURCE["PrivScore"]
color_data = SOURCE["ChangeDirection"]
size_data = SOURCE["Entries"]
fig = px.scatter(SOURCE, x=x_data, y=y_data, color=color_data, size=size_data, opacity=0.75, hover_data=['TeamName'])
fig.show()
x_data = SOURCE['PubRank']
y_data = SOURCE['PubScore']
color_data = SOURCE['ChangeDirection']
size_data = SOURCE['Entries']
fig = px.scatter(SOURCE, x=x_data, y=y_data, color=color_data, size=size_data, opacity=0.75, hover_data=['TeamName'])
fig.show()
code
74041571/cell_14
[ "text_html_output_1.png" ]
from bs4 import BeautifulSoup
from pathlib import Path
import pandas as pd
import plotly.express as px
PUB = Path('../input/30dmlleaderboards/public_lb.html')
PRIV = Path('../input/30dmlleaderboards/private_lb.html')
CSV_PUB = Path('../input/30dmlleaderboards/30-days-of-ml-publicleaderboard/30-days-of-ml-publicleaderboard.csv')

def _strip_all_spaces(series):
    return series.replace('\\s+', ' ', regex=True).str.strip()

def _extract_kernel(element):
    try:
        anchor = element.find('a')
        title = anchor.get('title')
        href = anchor.get('href')
    except AttributeError:
        title, href = ('', '')
    return (title, href)

def _load_pub_dataframe(pathname=PUB):
    """
    Scrape the key data items from the HTML version of the public leaderboard.
    Return a pandas DataFrame
    """
    with pathname.open() as f:
        soup = BeautifulSoup(f, 'lxml-xml')
    recs = soup.find_all('tr', class_=['competition-leaderboard__row', 'competition-leaderboard__row competition-leaderboard__row--user-scored'])
    rows = []
    for rec in recs:
        record = dict()
        keys = ['Rank', 'Team Name', 'Kernel', 'Score', 'Number of Entries']
        for key in keys:
            if key != 'Kernel':
                record[key] = rec.find('td', {'data-th': key}).text
            else:
                element = rec.find('td', {'data-th': key})
                record['KernelTitle'], record['KernelHref'] = _extract_kernel(element)
        record['Last Entry'] = rec.find_all('span')[-1]['title']
        rows.append(record)
    df = pd.DataFrame(rows)
    df['Last Entry'] = pd.to_datetime(df['Last Entry'].str.split().str[0:6].str.join(' '))
    df['Team Name'] = _strip_all_spaces(df['Team Name'])
    df = df[df['Team Name'].notna()]
    df.columns = ['Rank', 'TeamName', 'KernelTitle', 'KernelHref', 'Score', 'Entries', 'Latest']
    df = df.drop(['Score', 'Latest'], axis='columns')
    return df

def _load_priv_dataframe(pathname=PRIV):
    """
    Scrape the key data items from the HTML version of the private leaderboard.
    Return a pandas DataFrame
    """
    with pathname.open() as f:
        soup = BeautifulSoup(f, 'lxml-xml')
    recs = soup.find_all('tr', class_=['competition-leaderboard__row', 'competition-leaderboard__row competition-leaderboard__row--user-scored'])
    rows = []
    for rec in recs:
        record = dict()
        keys = ['Rank', 'Team Name', 'Score']
        for key in keys:
            record[key] = rec.find('td', {'data-th': key}).text
        change_span = rec.find('td', {'data-th': 'Change'}).find('span')
        if change_span.find('span', class_='position-change__none'):
            change = (0, 0)
        elif change_span.find('span', class_='position-change__risen'):
            change = (1, int(change_span.find('span', class_='position-change__risen').text))
        else:
            change = (-1, int(change_span.find('span', class_='position-change__fallen').text))
        record['ChangeDirection'], record['ChangeNo'] = change
        rows.append(record)
    df = pd.DataFrame(rows)
    df['Team Name'] = _strip_all_spaces(df['Team Name'])
    df.columns = ['PrivRank', 'TeamName', 'PrivScore', 'ChangeDirection', 'ChangeNo']
    return df

def _load_csv_dataframe(pathname=CSV_PUB):
    """
    Read the CSV version of the public leaderboard as downloaded from Kaggle.
    (This will be merged with the scraped version).
    Return a pandas DataFrame
    """
    df = pd.read_csv(pathname)
    df.TeamName = df.TeamName.replace('\\s+', ' ', regex=True).str.strip()
    return df

def _load_dataframe(pathname):
    if pathname == PUB:
        df = _load_pub_dataframe()
    elif pathname == CSV_PUB:
        df = _load_csv_dataframe()
    return df

def _load_and_merge_public_lb():
    """
    Load and clean the two versions (HTML and CSV) of the public leaderboard.
    Then merge the two on TeamName.
    Return a merged pandas DataFrame.
    """
    df = _load_dataframe(PUB)
    df_pub = _load_dataframe(CSV_PUB)
    df = df[df['TeamName'].isin(df_pub['TeamName'])]
    df_pub = df_pub[df_pub['TeamName'].isin(df['TeamName'])]
    final = df.merge(df_pub, on='TeamName', how='left')
    final.columns = ['PubRank', 'TeamName', 'KernelTitle', 'KernelHref', 'Entries', 'TeamId', 'SubmissionDate', 'PubScore']
    return final

def load_data():
    """
    Load all data (two versions of public and scraped version of private leaderboard).
    Merge into one DataFrame and set dtypes correctly.
    Return a pandas DataFrame
    """
    pub_df = _load_and_merge_public_lb()
    priv_df = _load_priv_dataframe()
    df = pub_df.merge(priv_df, on='TeamName', how='left')
    type_dict = {'PubRank': 'int32', 'Entries': 'int32', 'SubmissionDate': 'datetime64', 'PubScore': 'float64', 'PrivRank': 'int32', 'PrivScore': 'float64', 'ChangeDirection': 'category', 'ChangeNo': 'int32'}
    for key, value in type_dict.items():
        df[key] = df[key].astype(value)
    new_order = ['TeamId', 'TeamName', 'PubRank', 'PubScore', 'PrivRank', 'PrivScore', 'ChangeDirection', 'ChangeNo', 'SubmissionDate', 'Entries', 'KernelTitle', 'KernelHref']
    df = df[new_order]
    return df

DF = load_data()
DF.sample(5)
RANK = 500
SOURCE = DF[DF['PrivRank'] <= RANK].copy()
x_data = SOURCE["Entries"]
y_data = SOURCE["PrivRank"]
color_data = SOURCE["ChangeDirection"]
size_data = SOURCE["Entries"]
fig = px.scatter(SOURCE, x=x_data, y=y_data, color=color_data, size=size_data, opacity=0.75, hover_data=['TeamName'])
fig.show()
x_data = SOURCE["SubmissionDate"]
y_data = SOURCE["Entries"]
color_data = SOURCE["PrivRank"]
size_data = SOURCE["Entries"]
fig = px.scatter(SOURCE, x=x_data, y=y_data, color=color_data, size=size_data, opacity=0.75, hover_data=['TeamName'])
fig.show()
x_data = SOURCE['PrivRank']
y_data = SOURCE['PrivScore']
color_data = SOURCE['ChangeDirection']
size_data = SOURCE['Entries']
fig = px.scatter(SOURCE, x=x_data, y=y_data, color=color_data, size=size_data, opacity=0.75, hover_data=['TeamName'])
fig.show()
code
74041571/cell_10
[ "text_html_output_1.png" ]
from bs4 import BeautifulSoup
from pathlib import Path
import pandas as pd
import plotly.express as px
PUB = Path('../input/30dmlleaderboards/public_lb.html')
PRIV = Path('../input/30dmlleaderboards/private_lb.html')
CSV_PUB = Path('../input/30dmlleaderboards/30-days-of-ml-publicleaderboard/30-days-of-ml-publicleaderboard.csv')

def _strip_all_spaces(series):
    return series.replace('\\s+', ' ', regex=True).str.strip()

def _extract_kernel(element):
    try:
        anchor = element.find('a')
        title = anchor.get('title')
        href = anchor.get('href')
    except AttributeError:
        title, href = ('', '')
    return (title, href)

def _load_pub_dataframe(pathname=PUB):
    """
    Scrape the key data items from the HTML version of the public leaderboard.
    Return a pandas DataFrame
    """
    with pathname.open() as f:
        soup = BeautifulSoup(f, 'lxml-xml')
    recs = soup.find_all('tr', class_=['competition-leaderboard__row', 'competition-leaderboard__row competition-leaderboard__row--user-scored'])
    rows = []
    for rec in recs:
        record = dict()
        keys = ['Rank', 'Team Name', 'Kernel', 'Score', 'Number of Entries']
        for key in keys:
            if key != 'Kernel':
                record[key] = rec.find('td', {'data-th': key}).text
            else:
                element = rec.find('td', {'data-th': key})
                record['KernelTitle'], record['KernelHref'] = _extract_kernel(element)
        record['Last Entry'] = rec.find_all('span')[-1]['title']
        rows.append(record)
    df = pd.DataFrame(rows)
    df['Last Entry'] = pd.to_datetime(df['Last Entry'].str.split().str[0:6].str.join(' '))
    df['Team Name'] = _strip_all_spaces(df['Team Name'])
    df = df[df['Team Name'].notna()]
    df.columns = ['Rank', 'TeamName', 'KernelTitle', 'KernelHref', 'Score', 'Entries', 'Latest']
    df = df.drop(['Score', 'Latest'], axis='columns')
    return df

def _load_priv_dataframe(pathname=PRIV):
    """
    Scrape the key data items from the HTML version of the private leaderboard.
    Return a pandas DataFrame
    """
    with pathname.open() as f:
        soup = BeautifulSoup(f, 'lxml-xml')
    recs = soup.find_all('tr', class_=['competition-leaderboard__row', 'competition-leaderboard__row competition-leaderboard__row--user-scored'])
    rows = []
    for rec in recs:
        record = dict()
        keys = ['Rank', 'Team Name', 'Score']
        for key in keys:
            record[key] = rec.find('td', {'data-th': key}).text
        change_span = rec.find('td', {'data-th': 'Change'}).find('span')
        if change_span.find('span', class_='position-change__none'):
            change = (0, 0)
        elif change_span.find('span', class_='position-change__risen'):
            change = (1, int(change_span.find('span', class_='position-change__risen').text))
        else:
            change = (-1, int(change_span.find('span', class_='position-change__fallen').text))
        record['ChangeDirection'], record['ChangeNo'] = change
        rows.append(record)
    df = pd.DataFrame(rows)
    df['Team Name'] = _strip_all_spaces(df['Team Name'])
    df.columns = ['PrivRank', 'TeamName', 'PrivScore', 'ChangeDirection', 'ChangeNo']
    return df

def _load_csv_dataframe(pathname=CSV_PUB):
    """
    Read the CSV version of the public leaderboard as downloaded from Kaggle.
    (This will be merged with the scraped version).
    Return a pandas DataFrame
    """
    df = pd.read_csv(pathname)
    df.TeamName = df.TeamName.replace('\\s+', ' ', regex=True).str.strip()
    return df

def _load_dataframe(pathname):
    if pathname == PUB:
        df = _load_pub_dataframe()
    elif pathname == CSV_PUB:
        df = _load_csv_dataframe()
    return df

def _load_and_merge_public_lb():
    """
    Load and clean the two versions (HTML and CSV) of the public leaderboard.
    Then merge the two on TeamName.
    Return a merged pandas DataFrame.
    """
    df = _load_dataframe(PUB)
    df_pub = _load_dataframe(CSV_PUB)
    df = df[df['TeamName'].isin(df_pub['TeamName'])]
    df_pub = df_pub[df_pub['TeamName'].isin(df['TeamName'])]
    final = df.merge(df_pub, on='TeamName', how='left')
    final.columns = ['PubRank', 'TeamName', 'KernelTitle', 'KernelHref', 'Entries', 'TeamId', 'SubmissionDate', 'PubScore']
    return final

def load_data():
    """
    Load all data (two versions of public and scraped version of private leaderboard).
    Merge into one DataFrame and set dtypes correctly.
    Return a pandas DataFrame
    """
    pub_df = _load_and_merge_public_lb()
    priv_df = _load_priv_dataframe()
    df = pub_df.merge(priv_df, on='TeamName', how='left')
    type_dict = {'PubRank': 'int32', 'Entries': 'int32', 'SubmissionDate': 'datetime64', 'PubScore': 'float64', 'PrivRank': 'int32', 'PrivScore': 'float64', 'ChangeDirection': 'category', 'ChangeNo': 'int32'}
    for key, value in type_dict.items():
        df[key] = df[key].astype(value)
    new_order = ['TeamId', 'TeamName', 'PubRank', 'PubScore', 'PrivRank', 'PrivScore', 'ChangeDirection', 'ChangeNo', 'SubmissionDate', 'Entries', 'KernelTitle', 'KernelHref']
    df = df[new_order]
    return df

DF = load_data()
DF.sample(5)
RANK = 500
SOURCE = DF[DF['PrivRank'] <= RANK].copy()
x_data = SOURCE['Entries']
y_data = SOURCE['PrivRank']
color_data = SOURCE['ChangeDirection']
size_data = SOURCE['Entries']
fig = px.scatter(SOURCE, x=x_data, y=y_data, color=color_data, size=size_data, opacity=0.75, hover_data=['TeamName'])
fig.show()
code
74041571/cell_12
[ "text_html_output_2.png" ]
from bs4 import BeautifulSoup
from pathlib import Path
import pandas as pd
import plotly.express as px
PUB = Path('../input/30dmlleaderboards/public_lb.html')
PRIV = Path('../input/30dmlleaderboards/private_lb.html')
CSV_PUB = Path('../input/30dmlleaderboards/30-days-of-ml-publicleaderboard/30-days-of-ml-publicleaderboard.csv')

def _strip_all_spaces(series):
    return series.replace('\\s+', ' ', regex=True).str.strip()

def _extract_kernel(element):
    try:
        anchor = element.find('a')
        title = anchor.get('title')
        href = anchor.get('href')
    except AttributeError:
        title, href = ('', '')
    return (title, href)

def _load_pub_dataframe(pathname=PUB):
    """
    Scrape the key data items from the HTML version of the public leaderboard.
    Return a pandas DataFrame
    """
    with pathname.open() as f:
        soup = BeautifulSoup(f, 'lxml-xml')
    recs = soup.find_all('tr', class_=['competition-leaderboard__row', 'competition-leaderboard__row competition-leaderboard__row--user-scored'])
    rows = []
    for rec in recs:
        record = dict()
        keys = ['Rank', 'Team Name', 'Kernel', 'Score', 'Number of Entries']
        for key in keys:
            if key != 'Kernel':
                record[key] = rec.find('td', {'data-th': key}).text
            else:
                element = rec.find('td', {'data-th': key})
                record['KernelTitle'], record['KernelHref'] = _extract_kernel(element)
        record['Last Entry'] = rec.find_all('span')[-1]['title']
        rows.append(record)
    df = pd.DataFrame(rows)
    df['Last Entry'] = pd.to_datetime(df['Last Entry'].str.split().str[0:6].str.join(' '))
    df['Team Name'] = _strip_all_spaces(df['Team Name'])
    df = df[df['Team Name'].notna()]
    df.columns = ['Rank', 'TeamName', 'KernelTitle', 'KernelHref', 'Score', 'Entries', 'Latest']
    df = df.drop(['Score', 'Latest'], axis='columns')
    return df

def _load_priv_dataframe(pathname=PRIV):
    """
    Scrape the key data items from the HTML version of the private leaderboard.
    Return a pandas DataFrame
    """
    with pathname.open() as f:
        soup = BeautifulSoup(f, 'lxml-xml')
    recs = soup.find_all('tr', class_=['competition-leaderboard__row', 'competition-leaderboard__row competition-leaderboard__row--user-scored'])
    rows = []
    for rec in recs:
        record = dict()
        keys = ['Rank', 'Team Name', 'Score']
        for key in keys:
            record[key] = rec.find('td', {'data-th': key}).text
        change_span = rec.find('td', {'data-th': 'Change'}).find('span')
        if change_span.find('span', class_='position-change__none'):
            change = (0, 0)
        elif change_span.find('span', class_='position-change__risen'):
            change = (1, int(change_span.find('span', class_='position-change__risen').text))
        else:
            change = (-1, int(change_span.find('span', class_='position-change__fallen').text))
        record['ChangeDirection'], record['ChangeNo'] = change
        rows.append(record)
    df = pd.DataFrame(rows)
    df['Team Name'] = _strip_all_spaces(df['Team Name'])
    df.columns = ['PrivRank', 'TeamName', 'PrivScore', 'ChangeDirection', 'ChangeNo']
    return df

def _load_csv_dataframe(pathname=CSV_PUB):
    """
    Read the CSV version of the public leaderboard as downloaded from Kaggle.
    (This will be merged with the scraped version).
    Return a pandas DataFrame
    """
    df = pd.read_csv(pathname)
    df.TeamName = df.TeamName.replace('\\s+', ' ', regex=True).str.strip()
    return df

def _load_dataframe(pathname):
    if pathname == PUB:
        df = _load_pub_dataframe()
    elif pathname == CSV_PUB:
        df = _load_csv_dataframe()
    return df

def _load_and_merge_public_lb():
    """
    Load and clean the two versions (HTML and CSV) of the public leaderboard.
    Then merge the two on TeamName.
    Return a merged pandas DataFrame.
    """
    df = _load_dataframe(PUB)
    df_pub = _load_dataframe(CSV_PUB)
    df = df[df['TeamName'].isin(df_pub['TeamName'])]
    df_pub = df_pub[df_pub['TeamName'].isin(df['TeamName'])]
    final = df.merge(df_pub, on='TeamName', how='left')
    final.columns = ['PubRank', 'TeamName', 'KernelTitle', 'KernelHref', 'Entries', 'TeamId', 'SubmissionDate', 'PubScore']
    return final

def load_data():
    """
    Load all data (two versions of public and scraped version of private leaderboard).
    Merge into one DataFrame and set dtypes correctly.
    Return a pandas DataFrame
    """
    pub_df = _load_and_merge_public_lb()
    priv_df = _load_priv_dataframe()
    df = pub_df.merge(priv_df, on='TeamName', how='left')
    type_dict = {'PubRank': 'int32', 'Entries': 'int32', 'SubmissionDate': 'datetime64', 'PubScore': 'float64', 'PrivRank': 'int32', 'PrivScore': 'float64', 'ChangeDirection': 'category', 'ChangeNo': 'int32'}
    for key, value in type_dict.items():
        df[key] = df[key].astype(value)
    new_order = ['TeamId', 'TeamName', 'PubRank', 'PubScore', 'PrivRank', 'PrivScore', 'ChangeDirection', 'ChangeNo', 'SubmissionDate', 'Entries', 'KernelTitle', 'KernelHref']
    df = df[new_order]
    return df

DF = load_data()
DF.sample(5)
RANK = 500
SOURCE = DF[DF['PrivRank'] <= RANK].copy()
x_data = SOURCE["Entries"]
y_data = SOURCE["PrivRank"]
color_data = SOURCE["ChangeDirection"]
size_data = SOURCE["Entries"]
fig = px.scatter(SOURCE, x=x_data, y=y_data, color=color_data, size=size_data, opacity=0.75, hover_data=['TeamName'])
fig.show()
x_data = SOURCE['SubmissionDate']
y_data = SOURCE['Entries']
color_data = SOURCE['PrivRank']
size_data = SOURCE['Entries']
fig = px.scatter(SOURCE, x=x_data, y=y_data, color=color_data, size=size_data, opacity=0.75, hover_data=['TeamName'])
fig.show()
code
1004763/cell_9
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
DATA_FILE = '../input/uber-raw-data-aug14.csv'
uber_data = pd.read_csv(DATA_FILE)
uber_weekdays = uber_data.pivot_table(index=['DayOfWeekNum', 'DayOfWeek'], values='Base', aggfunc='count')
uber_weekdays.plot(kind='bar', figsize=(8, 6), color='red')
plt.ylabel('Number of Journeys')
plt.title('Journeys by Day')
code
1004763/cell_4
[ "text_html_output_1.png" ]
import pandas as pd
DATA_FILE = '../input/uber-raw-data-aug14.csv'
uber_data = pd.read_csv(DATA_FILE)
uber_data.head()
code
1004763/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
DATA_FILE = '../input/uber-raw-data-aug14.csv'
uber_data = pd.read_csv(DATA_FILE)
uber_data['Base'].unique()
code
1004763/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from matplotlib import cm
print('Done')
code
1004763/cell_8
[ "text_html_output_1.png" ]
import pandas as pd
DATA_FILE = '../input/uber-raw-data-aug14.csv'
uber_data = pd.read_csv(DATA_FILE)
uber_data.head()
code
1004763/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
DATA_FILE = '../input/uber-raw-data-aug14.csv'
uber_data = pd.read_csv(DATA_FILE)
uber_data.info()
code
105191248/cell_21
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.product(youtube.shape)
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
youtube.loc[pd.isna(youtube['description']), :].index
youtube = youtube.fillna('no description available for this video')
youtube.loc[91]
column_names = ['title', 'trending_date']
duplicates2 = youtube.duplicated(subset=column_names, keep=False)
youtube[duplicates2].sort_values(by='title')
youtube = youtube.drop_duplicates(['title', 'trending_date'], keep='first')
youtube[youtube['title'] == '13 Reasons Why: Season 2 | Official Trailer [HD] | Netflix']
youtube['trending_date'] = pd.to_datetime(youtube['trending_date'], format='%y.%d.%m')
youtube['publish_time'] = pd.to_datetime(youtube['publish_time'])
youtube.to_csv('youtube_edited.csv', index=False)
youtube.groupby('category_id').agg({'views': 'max', 'likes': 'max', 'dislikes': 'max'})
grouped = youtube.groupby('title')
grouped.filter(lambda x: len(x) > 30)
code
105191248/cell_13
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.product(youtube.shape)
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
youtube.loc[pd.isna(youtube['description']), :].index
youtube = youtube.fillna('no description available for this video')
youtube.loc[91]
youtube.info()
code
105191248/cell_9
[ "text_html_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.product(youtube.shape)
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
indices = list(np.where(youtube['description'].isnull())[0])
indices
code
105191248/cell_4
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
youtube.info()
code
105191248/cell_20
[ "text_html_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.product(youtube.shape)
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
youtube.loc[pd.isna(youtube['description']), :].index
youtube = youtube.fillna('no description available for this video')
youtube.loc[91]
column_names = ['title', 'trending_date']
duplicates2 = youtube.duplicated(subset=column_names, keep=False)
youtube[duplicates2].sort_values(by='title')
youtube = youtube.drop_duplicates(['title', 'trending_date'], keep='first')
youtube[youtube['title'] == '13 Reasons Why: Season 2 | Official Trailer [HD] | Netflix']
youtube['trending_date'] = pd.to_datetime(youtube['trending_date'], format='%y.%d.%m')
youtube['publish_time'] = pd.to_datetime(youtube['publish_time'])
youtube.to_csv('youtube_edited.csv', index=False)
youtube.groupby('category_id').agg({'views': 'max', 'likes': 'max', 'dislikes': 'max'})
code
105191248/cell_6
[ "text_html_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
youtube = pd.read_csv('/kaggle/input/youtube-new/GBvideos.csv')
missing_values_count = youtube.isnull().sum()
missing_values_count
total_cells = np.product(youtube.shape)
total_missing = missing_values_count.sum()
total_missing / total_cells * 100
code