Dataset columns:
path: string (length 13 to 17)
screenshot_names: sequence (length 1 to 873)
code: string (length 0 to 40.4k)
cell_type: string (1 class: "code")
88079861/cell_26
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate, Conv2DTranspose, BatchNormalization, Dropout, Lambda from keras.models import Sequential, Model from keras.regularizers import l2 from tensorflow.keras import Sequential from tensorflow.keras import datasets, layers, models from tensorflow.keras.layers import Flatten, Dense,BatchNormalization,Dropout,Input cnn_model = Sequential() cnn_model = models.Sequential() cnn_model.add(layers.Conv2D(300, (5, 5), kernel_regularizer=l2(5e-05), padding='Same', activation='relu', input_shape=(150, 150, 3))) cnn_model.add(BatchNormalization()) cnn_model.add(layers.MaxPooling2D(2, 2)) cnn_model.add(layers.Conv2D(200, (3, 3), kernel_regularizer=l2(5e-05), padding='same', activation='relu')) cnn_model.add(BatchNormalization()) cnn_model.add(Dropout(0.2)) cnn_model.add(layers.MaxPooling2D(2, 2)) cnn_model.add(layers.Conv2D(100, (3, 3), kernel_regularizer=l2(5e-05), padding='same', activation='relu')) cnn_model.add(BatchNormalization()) cnn_model.add(layers.MaxPooling2D(2, 2)) cnn_model.add(layers.Conv2D(64, (3, 3), kernel_regularizer=l2(5e-05), padding='same', activation='relu')) cnn_model.add(BatchNormalization()) cnn_model.add(Dropout(0.2)) cnn_model.add(layers.MaxPooling2D(2, 2))
code
88079861/cell_11
[ "text_plain_output_1.png" ]
from sklearn.utils import shuffle import cv2 import matplotlib.pyplot as plt import numpy as np import os seed = 42 np.random.seed = seed image_path = '../input/chest-xray-pneumonia/chest_xray/' labels = ['NORMAL', 'PNEUMONIA'] folders = ['train', 'test', 'val'] def load_images_from_directory(main_dirictory, foldername): total_labels = [] images = [] total_normal = 0 total_pneumonia = 0 path = os.path.join(main_dirictory, foldername) for lab in labels: full_path = os.path.join(path, lab) for image in os.listdir(full_path): img = cv2.imread(full_path + '/' + image) img = cv2.resize(img, (150, 150)) images.append(img) if lab == 'NORMAL': label = 0 total_normal += 1 elif lab == 'PNEUMONIA': label = 1 total_pneumonia += 1 total_labels.append(label) return shuffle(images, total_labels, random_state=756349782) def get_Label(number): labels = {0: 'NORMAL', 1: 'PNEUMONIA'} return labels[number] def plot_predection(model_name): images = [] count = 0 for i, files in enumerate(val_images): img = cv2.resize(files, (150, 150)) img = np.expand_dims(files, axis=0) feature = model_name.predict(img) predection = np.argmax(feature, axis=1) plt.xticks([]) plt.yticks([]) count += 1 if count == 30: break def freezing_layers(model_name): for layer in model_name.layers: layer.trainable = False test_images, test_labels = load_images_from_directory(image_path, 'test')
code
88079861/cell_19
[ "image_output_1.png" ]
from sklearn.utils import shuffle import cv2 import matplotlib.pyplot as plt import numpy as np import os import seaborn as sns seed = 42 np.random.seed = seed labels = ['NORMAL', 'PNEUMONIA'] folders = ['train', 'test', 'val'] def load_images_from_directory(main_dirictory, foldername): total_labels = [] images = [] total_normal = 0 total_pneumonia = 0 path = os.path.join(main_dirictory, foldername) for lab in labels: full_path = os.path.join(path, lab) for image in os.listdir(full_path): img = cv2.imread(full_path + '/' + image) img = cv2.resize(img, (150, 150)) images.append(img) if lab == 'NORMAL': label = 0 total_normal += 1 elif lab == 'PNEUMONIA': label = 1 total_pneumonia += 1 total_labels.append(label) return shuffle(images, total_labels, random_state=756349782) def get_Label(number): labels = {0: 'NORMAL', 1: 'PNEUMONIA'} return labels[number] def plot_predection(model_name): images = [] count = 0 for i, files in enumerate(val_images): img = cv2.resize(files, (150, 150)) img = np.expand_dims(files, axis=0) feature = model_name.predict(img) predection = np.argmax(feature, axis=1) plt.xticks([]) plt.yticks([]) count += 1 if count == 30: break def freezing_layers(model_name): for layer in model_name.layers: layer.trainable = False train_images = np.asarray(train_images, np.float32) / 255 train_labels = np.asarray(train_labels) for i in range(30): plt.xticks([]) plt.yticks([]) plt.title('Train Labels Visualization') sns.countplot(x=train_labels, palette='flare') plt.show()
code
88079861/cell_18
[ "image_output_1.png" ]
from sklearn.utils import shuffle import cv2 import matplotlib.pyplot as plt import numpy as np import os seed = 42 np.random.seed = seed labels = ['NORMAL', 'PNEUMONIA'] folders = ['train', 'test', 'val'] def load_images_from_directory(main_dirictory, foldername): total_labels = [] images = [] total_normal = 0 total_pneumonia = 0 path = os.path.join(main_dirictory, foldername) for lab in labels: full_path = os.path.join(path, lab) for image in os.listdir(full_path): img = cv2.imread(full_path + '/' + image) img = cv2.resize(img, (150, 150)) images.append(img) if lab == 'NORMAL': label = 0 total_normal += 1 elif lab == 'PNEUMONIA': label = 1 total_pneumonia += 1 total_labels.append(label) return shuffle(images, total_labels, random_state=756349782) def get_Label(number): labels = {0: 'NORMAL', 1: 'PNEUMONIA'} return labels[number] def plot_predection(model_name): images = [] count = 0 for i, files in enumerate(val_images): img = cv2.resize(files, (150, 150)) img = np.expand_dims(files, axis=0) feature = model_name.predict(img) predection = np.argmax(feature, axis=1) plt.xticks([]) plt.yticks([]) count += 1 if count == 30: break def freezing_layers(model_name): for layer in model_name.layers: layer.trainable = False train_images = np.asarray(train_images, np.float32) / 255 train_labels = np.asarray(train_labels) plt.figure(figsize=(15, 10)) plt.suptitle('Train Images', fontsize=20) for i in range(30): plt.subplot(5, 6, i + 1) plt.xticks([]) plt.yticks([]) plt.grid(False) plt.xlabel(get_Label(train_labels[i])) plt.imshow(train_images[i], cmap=plt.cm.binary)
code
88079861/cell_32
[ "text_plain_output_1.png" ]
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate, Conv2DTranspose, BatchNormalization, Dropout, Lambda from keras.models import Sequential, Model from keras.regularizers import l2 from sklearn.utils import shuffle from tensorflow.keras import Sequential from tensorflow.keras import datasets, layers, models from tensorflow.keras.callbacks import EarlyStopping,ReduceLROnPlateau from tensorflow.keras.layers import Flatten, Dense,BatchNormalization,Dropout,Input from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.utils import to_categorical import cv2 import matplotlib.pyplot as plt import numpy as np import os seed = 42 np.random.seed = seed labels = ['NORMAL', 'PNEUMONIA'] folders = ['train', 'test', 'val'] def load_images_from_directory(main_dirictory, foldername): total_labels = [] images = [] total_normal = 0 total_pneumonia = 0 path = os.path.join(main_dirictory, foldername) for lab in labels: full_path = os.path.join(path, lab) for image in os.listdir(full_path): img = cv2.imread(full_path + '/' + image) img = cv2.resize(img, (150, 150)) images.append(img) if lab == 'NORMAL': label = 0 total_normal += 1 elif lab == 'PNEUMONIA': label = 1 total_pneumonia += 1 total_labels.append(label) return shuffle(images, total_labels, random_state=756349782) def get_Label(number): labels = {0: 'NORMAL', 1: 'PNEUMONIA'} return labels[number] def plot_predection(model_name): images = [] count = 0 for i, files in enumerate(val_images): img = cv2.resize(files, (150, 150)) img = np.expand_dims(files, axis=0) feature = model_name.predict(img) predection = np.argmax(feature, axis=1) plt.xticks([]) plt.yticks([]) count += 1 if count == 30: break def freezing_layers(model_name): for layer in model_name.layers: layer.trainable = False train_images = np.asarray(train_images, np.float32) / 255 train_labels = np.asarray(train_labels) test_images = np.asarray(test_images, np.float32) / 255 test_labels = np.asarray(test_labels) train_labels = to_categorical(train_labels, 2) test_labels = to_categorical(test_labels, 2) batch_size = 16 image_gen = ImageDataGenerator(shear_range=0.2, zoom_range=0.2, height_shift_range=0.2, width_shift_range=0.2, horizontal_flip=True, rotation_range=20) test_data_gen = ImageDataGenerator() val_data_gen = ImageDataGenerator() train = image_gen.flow(train_images, train_labels, shuffle=True, batch_size=batch_size) test = test_data_gen.flow(test_images, test_labels, shuffle=True, batch_size=batch_size) cnn_model = Sequential() cnn_model = models.Sequential() cnn_model.add(layers.Conv2D(300, (5, 5), kernel_regularizer=l2(5e-05), padding='Same', activation='relu', input_shape=(150, 150, 3))) cnn_model.add(BatchNormalization()) cnn_model.add(layers.MaxPooling2D(2, 2)) cnn_model.add(layers.Conv2D(200, (3, 3), kernel_regularizer=l2(5e-05), padding='same', activation='relu')) cnn_model.add(BatchNormalization()) cnn_model.add(Dropout(0.2)) cnn_model.add(layers.MaxPooling2D(2, 2)) cnn_model.add(layers.Conv2D(100, (3, 3), kernel_regularizer=l2(5e-05), padding='same', activation='relu')) cnn_model.add(BatchNormalization()) cnn_model.add(layers.MaxPooling2D(2, 2)) cnn_model.add(layers.Conv2D(64, (3, 3), kernel_regularizer=l2(5e-05), padding='same', activation='relu')) cnn_model.add(BatchNormalization()) cnn_model.add(Dropout(0.2)) cnn_model.add(layers.MaxPooling2D(2, 2)) cnn_model.add(layers.Flatten()) cnn_model.add(layers.Dense(100, activation='relu')) cnn_model.add(BatchNormalization()) cnn_model.add(layers.Dense(64, 
activation='relu')) cnn_model.add(BatchNormalization()) cnn_model.add(Dropout(0.3)) cnn_model.add(layers.Dense(2, activation='sigmoid')) cnn_model.summary() cnn_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau early = EarlyStopping(monitor='loss', mode='min', min_delta=0, patience=10, verbose=1, restore_best_weights=True) learning_rate_reduction = ReduceLROnPlateau(monitor='loss', patience=2, verbose=1, factor=0.3, min_lr=1e-06) callbacks_list = [early, learning_rate_reduction] n_training_samples = len(train) n_validation_samples = len(test) history = cnn_model.fit(train, epochs=50, validation_data=test, validation_steps=n_validation_samples // batch_size, shuffle=True, callbacks=callbacks_list) score, acc = cnn_model.evaluate(test, batch_size=batch_size) print('Test score:', score) print('Test accuracy:', acc)
code
88079861/cell_16
[ "text_plain_output_1.png" ]
from sklearn.utils import shuffle import cv2 import matplotlib.pyplot as plt import numpy as np import os seed = 42 np.random.seed = seed labels = ['NORMAL', 'PNEUMONIA'] folders = ['train', 'test', 'val'] def load_images_from_directory(main_dirictory, foldername): total_labels = [] images = [] total_normal = 0 total_pneumonia = 0 path = os.path.join(main_dirictory, foldername) for lab in labels: full_path = os.path.join(path, lab) for image in os.listdir(full_path): img = cv2.imread(full_path + '/' + image) img = cv2.resize(img, (150, 150)) images.append(img) if lab == 'NORMAL': label = 0 total_normal += 1 elif lab == 'PNEUMONIA': label = 1 total_pneumonia += 1 total_labels.append(label) return shuffle(images, total_labels, random_state=756349782) def get_Label(number): labels = {0: 'NORMAL', 1: 'PNEUMONIA'} return labels[number] def plot_predection(model_name): images = [] count = 0 for i, files in enumerate(val_images): img = cv2.resize(files, (150, 150)) img = np.expand_dims(files, axis=0) feature = model_name.predict(img) predection = np.argmax(feature, axis=1) plt.xticks([]) plt.yticks([]) count += 1 if count == 30: break def freezing_layers(model_name): for layer in model_name.layers: layer.trainable = False train_images = np.asarray(train_images, np.float32) / 255 train_labels = np.asarray(train_labels) test_images = np.asarray(test_images, np.float32) / 255 test_labels = np.asarray(test_labels) print('test Images shape is : ', test_images.shape) print('test Labels shape is : ', test_labels.shape)
code
88079861/cell_31
[ "text_plain_output_4.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate, Conv2DTranspose, BatchNormalization, Dropout, Lambda from keras.models import Sequential, Model from keras.regularizers import l2 from sklearn.utils import shuffle from tensorflow.keras import Sequential from tensorflow.keras import datasets, layers, models from tensorflow.keras.callbacks import EarlyStopping,ReduceLROnPlateau from tensorflow.keras.layers import Flatten, Dense,BatchNormalization,Dropout,Input from tensorflow.keras.preprocessing.image import ImageDataGenerator from tensorflow.keras.utils import to_categorical import cv2 import matplotlib.pyplot as plt import numpy as np import os seed = 42 np.random.seed = seed labels = ['NORMAL', 'PNEUMONIA'] folders = ['train', 'test', 'val'] def load_images_from_directory(main_dirictory, foldername): total_labels = [] images = [] total_normal = 0 total_pneumonia = 0 path = os.path.join(main_dirictory, foldername) for lab in labels: full_path = os.path.join(path, lab) for image in os.listdir(full_path): img = cv2.imread(full_path + '/' + image) img = cv2.resize(img, (150, 150)) images.append(img) if lab == 'NORMAL': label = 0 total_normal += 1 elif lab == 'PNEUMONIA': label = 1 total_pneumonia += 1 total_labels.append(label) return shuffle(images, total_labels, random_state=756349782) def get_Label(number): labels = {0: 'NORMAL', 1: 'PNEUMONIA'} return labels[number] def plot_predection(model_name): images = [] count = 0 for i, files in enumerate(val_images): img = cv2.resize(files, (150, 150)) img = np.expand_dims(files, axis=0) feature = model_name.predict(img) predection = np.argmax(feature, axis=1) plt.xticks([]) plt.yticks([]) count += 1 if count == 30: break def freezing_layers(model_name): for layer in model_name.layers: layer.trainable = False train_images = np.asarray(train_images, np.float32) / 255 train_labels = np.asarray(train_labels) test_images = np.asarray(test_images, np.float32) / 255 test_labels = np.asarray(test_labels) train_labels = to_categorical(train_labels, 2) test_labels = to_categorical(test_labels, 2) batch_size = 16 image_gen = ImageDataGenerator(shear_range=0.2, zoom_range=0.2, height_shift_range=0.2, width_shift_range=0.2, horizontal_flip=True, rotation_range=20) test_data_gen = ImageDataGenerator() val_data_gen = ImageDataGenerator() train = image_gen.flow(train_images, train_labels, shuffle=True, batch_size=batch_size) test = test_data_gen.flow(test_images, test_labels, shuffle=True, batch_size=batch_size) cnn_model = Sequential() cnn_model = models.Sequential() cnn_model.add(layers.Conv2D(300, (5, 5), kernel_regularizer=l2(5e-05), padding='Same', activation='relu', input_shape=(150, 150, 3))) cnn_model.add(BatchNormalization()) cnn_model.add(layers.MaxPooling2D(2, 2)) cnn_model.add(layers.Conv2D(200, (3, 3), kernel_regularizer=l2(5e-05), padding='same', activation='relu')) cnn_model.add(BatchNormalization()) cnn_model.add(Dropout(0.2)) cnn_model.add(layers.MaxPooling2D(2, 2)) cnn_model.add(layers.Conv2D(100, (3, 3), kernel_regularizer=l2(5e-05), padding='same', activation='relu')) cnn_model.add(BatchNormalization()) cnn_model.add(layers.MaxPooling2D(2, 2)) cnn_model.add(layers.Conv2D(64, (3, 3), kernel_regularizer=l2(5e-05), padding='same', activation='relu')) cnn_model.add(BatchNormalization()) cnn_model.add(Dropout(0.2)) cnn_model.add(layers.MaxPooling2D(2, 2)) cnn_model.add(layers.Flatten()) cnn_model.add(layers.Dense(100, activation='relu')) cnn_model.add(BatchNormalization()) cnn_model.add(layers.Dense(64, 
activation='relu')) cnn_model.add(BatchNormalization()) cnn_model.add(Dropout(0.3)) cnn_model.add(layers.Dense(2, activation='sigmoid')) cnn_model.summary() cnn_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau early = EarlyStopping(monitor='loss', mode='min', min_delta=0, patience=10, verbose=1, restore_best_weights=True) learning_rate_reduction = ReduceLROnPlateau(monitor='loss', patience=2, verbose=1, factor=0.3, min_lr=1e-06) callbacks_list = [early, learning_rate_reduction] n_training_samples = len(train) n_validation_samples = len(test) history = cnn_model.fit(train, epochs=50, validation_data=test, validation_steps=n_validation_samples // batch_size, shuffle=True, callbacks=callbacks_list)
code
88079861/cell_14
[ "text_plain_output_1.png" ]
from sklearn.utils import shuffle import cv2 import matplotlib.pyplot as plt import numpy as np import os seed = 42 np.random.seed = seed labels = ['NORMAL', 'PNEUMONIA'] folders = ['train', 'test', 'val'] def load_images_from_directory(main_dirictory, foldername): total_labels = [] images = [] total_normal = 0 total_pneumonia = 0 path = os.path.join(main_dirictory, foldername) for lab in labels: full_path = os.path.join(path, lab) for image in os.listdir(full_path): img = cv2.imread(full_path + '/' + image) img = cv2.resize(img, (150, 150)) images.append(img) if lab == 'NORMAL': label = 0 total_normal += 1 elif lab == 'PNEUMONIA': label = 1 total_pneumonia += 1 total_labels.append(label) return shuffle(images, total_labels, random_state=756349782) def get_Label(number): labels = {0: 'NORMAL', 1: 'PNEUMONIA'} return labels[number] def plot_predection(model_name): images = [] count = 0 for i, files in enumerate(val_images): img = cv2.resize(files, (150, 150)) img = np.expand_dims(files, axis=0) feature = model_name.predict(img) predection = np.argmax(feature, axis=1) plt.xticks([]) plt.yticks([]) count += 1 if count == 30: break def freezing_layers(model_name): for layer in model_name.layers: layer.trainable = False train_images = np.asarray(train_images, np.float32) / 255 train_labels = np.asarray(train_labels) print('train Images shape is : ', train_images.shape) print('train Labels shape is : ', train_labels.shape)
code
88079861/cell_22
[ "image_output_1.png" ]
from sklearn.utils import shuffle import cv2 import matplotlib.pyplot as plt import numpy as np import os import seaborn as sns seed = 42 np.random.seed = seed labels = ['NORMAL', 'PNEUMONIA'] folders = ['train', 'test', 'val'] def load_images_from_directory(main_dirictory, foldername): total_labels = [] images = [] total_normal = 0 total_pneumonia = 0 path = os.path.join(main_dirictory, foldername) for lab in labels: full_path = os.path.join(path, lab) for image in os.listdir(full_path): img = cv2.imread(full_path + '/' + image) img = cv2.resize(img, (150, 150)) images.append(img) if lab == 'NORMAL': label = 0 total_normal += 1 elif lab == 'PNEUMONIA': label = 1 total_pneumonia += 1 total_labels.append(label) return shuffle(images, total_labels, random_state=756349782) def get_Label(number): labels = {0: 'NORMAL', 1: 'PNEUMONIA'} return labels[number] def plot_predection(model_name): images = [] count = 0 for i, files in enumerate(val_images): img = cv2.resize(files, (150, 150)) img = np.expand_dims(files, axis=0) feature = model_name.predict(img) predection = np.argmax(feature, axis=1) plt.xticks([]) plt.yticks([]) count += 1 if count == 30: break def freezing_layers(model_name): for layer in model_name.layers: layer.trainable = False train_images = np.asarray(train_images, np.float32) / 255 train_labels = np.asarray(train_labels) test_images = np.asarray(test_images, np.float32) / 255 test_labels = np.asarray(test_labels) for i in range(30): plt.xticks([]) plt.yticks([]) for i in range(30): plt.xticks([]) plt.yticks([]) plt.title('Test Labels Visualization') sns.countplot(x=test_labels, palette='flare') plt.show()
code
88079861/cell_10
[ "text_plain_output_1.png" ]
from sklearn.utils import shuffle import cv2 import matplotlib.pyplot as plt import numpy as np import os seed = 42 np.random.seed = seed image_path = '../input/chest-xray-pneumonia/chest_xray/' labels = ['NORMAL', 'PNEUMONIA'] folders = ['train', 'test', 'val'] def load_images_from_directory(main_dirictory, foldername): total_labels = [] images = [] total_normal = 0 total_pneumonia = 0 path = os.path.join(main_dirictory, foldername) for lab in labels: full_path = os.path.join(path, lab) for image in os.listdir(full_path): img = cv2.imread(full_path + '/' + image) img = cv2.resize(img, (150, 150)) images.append(img) if lab == 'NORMAL': label = 0 total_normal += 1 elif lab == 'PNEUMONIA': label = 1 total_pneumonia += 1 total_labels.append(label) return shuffle(images, total_labels, random_state=756349782) def get_Label(number): labels = {0: 'NORMAL', 1: 'PNEUMONIA'} return labels[number] def plot_predection(model_name): images = [] count = 0 for i, files in enumerate(val_images): img = cv2.resize(files, (150, 150)) img = np.expand_dims(files, axis=0) feature = model_name.predict(img) predection = np.argmax(feature, axis=1) plt.xticks([]) plt.yticks([]) count += 1 if count == 30: break def freezing_layers(model_name): for layer in model_name.layers: layer.trainable = False train_images, train_labels = load_images_from_directory(image_path, 'train')
code
88079861/cell_27
[ "text_plain_output_1.png" ]
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate, Conv2DTranspose, BatchNormalization, Dropout, Lambda from keras.models import Sequential, Model from keras.regularizers import l2 from tensorflow.keras import Sequential from tensorflow.keras import datasets, layers, models from tensorflow.keras.layers import Flatten, Dense,BatchNormalization,Dropout,Input cnn_model = Sequential() cnn_model = models.Sequential() cnn_model.add(layers.Conv2D(300, (5, 5), kernel_regularizer=l2(5e-05), padding='Same', activation='relu', input_shape=(150, 150, 3))) cnn_model.add(BatchNormalization()) cnn_model.add(layers.MaxPooling2D(2, 2)) cnn_model.add(layers.Conv2D(200, (3, 3), kernel_regularizer=l2(5e-05), padding='same', activation='relu')) cnn_model.add(BatchNormalization()) cnn_model.add(Dropout(0.2)) cnn_model.add(layers.MaxPooling2D(2, 2)) cnn_model.add(layers.Conv2D(100, (3, 3), kernel_regularizer=l2(5e-05), padding='same', activation='relu')) cnn_model.add(BatchNormalization()) cnn_model.add(layers.MaxPooling2D(2, 2)) cnn_model.add(layers.Conv2D(64, (3, 3), kernel_regularizer=l2(5e-05), padding='same', activation='relu')) cnn_model.add(BatchNormalization()) cnn_model.add(Dropout(0.2)) cnn_model.add(layers.MaxPooling2D(2, 2)) cnn_model.add(layers.Flatten()) cnn_model.add(layers.Dense(100, activation='relu')) cnn_model.add(BatchNormalization()) cnn_model.add(layers.Dense(64, activation='relu')) cnn_model.add(BatchNormalization()) cnn_model.add(Dropout(0.3)) cnn_model.add(layers.Dense(2, activation='sigmoid')) cnn_model.summary()
code
88079861/cell_12
[ "text_plain_output_1.png" ]
from sklearn.utils import shuffle import cv2 import matplotlib.pyplot as plt import numpy as np import os seed = 42 np.random.seed = seed image_path = '../input/chest-xray-pneumonia/chest_xray/' labels = ['NORMAL', 'PNEUMONIA'] folders = ['train', 'test', 'val'] def load_images_from_directory(main_dirictory, foldername): total_labels = [] images = [] total_normal = 0 total_pneumonia = 0 path = os.path.join(main_dirictory, foldername) for lab in labels: full_path = os.path.join(path, lab) for image in os.listdir(full_path): img = cv2.imread(full_path + '/' + image) img = cv2.resize(img, (150, 150)) images.append(img) if lab == 'NORMAL': label = 0 total_normal += 1 elif lab == 'PNEUMONIA': label = 1 total_pneumonia += 1 total_labels.append(label) return shuffle(images, total_labels, random_state=756349782) def get_Label(number): labels = {0: 'NORMAL', 1: 'PNEUMONIA'} return labels[number] def plot_predection(model_name): images = [] count = 0 for i, files in enumerate(val_images): img = cv2.resize(files, (150, 150)) img = np.expand_dims(files, axis=0) feature = model_name.predict(img) predection = np.argmax(feature, axis=1) plt.xticks([]) plt.yticks([]) count += 1 if count == 30: break def freezing_layers(model_name): for layer in model_name.layers: layer.trainable = False val_images, val_labels = load_images_from_directory(image_path, 'val')
code
17118345/cell_21
[ "application_vnd.jupyter.stderr_output_1.png" ]
print('Saving model to file \n')
mp = '.\\boston_model.h5'
model.save(mp)
code
17118345/cell_23
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import numpy as np
import tensorflow as tf

np.random.seed(4)
tf.set_random_seed(13)
mp = '.\\boston_model.h5'
model.save(mp)
np.set_printoptions(precision=4)
unknown = np.full(shape=(1, 13), fill_value=0.6, dtype=np.float32)
unknown[0][3] = -1.0
predicted = model.predict(unknown)
print('Using the model to predict the median house price for the features: ')
print(unknown)
print('\nThe median price will be [dollars]: ')
print(predicted * 10000)
code
17118345/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import keras as K
import tensorflow as tf
import pandas as pd
import seaborn as sns
import os
from matplotlib import pyplot as plt

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
code
105212265/cell_4
[ "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os

files = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        fp = os.path.join(dirname, filename)
        files.append(fp)

dfs = [pd.read_csv(f, sep=',', encoding='utf-8') for f in files]
[df.shape for df in dfs]
code
105212265/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os

files = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        fp = os.path.join(dirname, filename)
        print(fp)
        files.append(fp)
code
105212265/cell_18
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os

files = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        fp = os.path.join(dirname, filename)
        files.append(fp)

dfs = [pd.read_csv(f, sep=',', encoding='utf-8') for f in files]
[df.shape for df in dfs]

for i, df in enumerate(dfs):
    dfs[i] = df.drop(columns=['SourceIP', 'DestinationIP', 'SourcePort', 'DestinationPort', 'TimeStamp'])

for df in dfs:
    df.replace([np.inf, -np.inf], np.nan, inplace=True)
    df.dropna(inplace=True)
[df.shape for df in dfs]

for df in dfs:
    df.drop_duplicates(inplace=True)
    df.reset_index(inplace=True, drop=True)
[df.shape for df in dfs]

pd.concat(objs=[dfs[0], dfs[1]], ignore_index=True, copy=False).to_parquet('L1-DoH-NonDoH.parquet')
pd.concat(objs=[dfs[2], dfs[3]], ignore_index=True, copy=False).to_parquet('L2-BenignDoH-MaliciousDoH.parquet')
df = pd.read_parquet('/kaggle/working/L1-DoH-NonDoH.parquet')
df.Label.value_counts()
df = pd.read_parquet('/kaggle/working/L2-BenignDoH-MaliciousDoH.parquet')
df.Label.value_counts()
code
105212265/cell_17
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os

files = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        fp = os.path.join(dirname, filename)
        files.append(fp)

dfs = [pd.read_csv(f, sep=',', encoding='utf-8') for f in files]
[df.shape for df in dfs]

for i, df in enumerate(dfs):
    dfs[i] = df.drop(columns=['SourceIP', 'DestinationIP', 'SourcePort', 'DestinationPort', 'TimeStamp'])

for df in dfs:
    df.replace([np.inf, -np.inf], np.nan, inplace=True)
    df.dropna(inplace=True)
[df.shape for df in dfs]

for df in dfs:
    df.drop_duplicates(inplace=True)
    df.reset_index(inplace=True, drop=True)
[df.shape for df in dfs]

pd.concat(objs=[dfs[0], dfs[1]], ignore_index=True, copy=False).to_parquet('L1-DoH-NonDoH.parquet')
pd.concat(objs=[dfs[2], dfs[3]], ignore_index=True, copy=False).to_parquet('L2-BenignDoH-MaliciousDoH.parquet')
df = pd.read_parquet('/kaggle/working/L1-DoH-NonDoH.parquet')
df.Label.value_counts()
code
105212265/cell_14
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os

files = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        fp = os.path.join(dirname, filename)
        files.append(fp)

dfs = [pd.read_csv(f, sep=',', encoding='utf-8') for f in files]
[df.shape for df in dfs]

for i, df in enumerate(dfs):
    dfs[i] = df.drop(columns=['SourceIP', 'DestinationIP', 'SourcePort', 'DestinationPort', 'TimeStamp'])

for df in dfs:
    df.replace([np.inf, -np.inf], np.nan, inplace=True)
    df.dropna(inplace=True)
[df.shape for df in dfs]

for df in dfs:
    print(df.duplicated().sum(), 'fully duplicate rows to remove')
    df.drop_duplicates(inplace=True)
    df.reset_index(inplace=True, drop=True)
[df.shape for df in dfs]
code
105212265/cell_10
[ "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os

files = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        fp = os.path.join(dirname, filename)
        files.append(fp)

dfs = [pd.read_csv(f, sep=',', encoding='utf-8') for f in files]
[df.shape for df in dfs]
dfs[0].dtypes
code
105212265/cell_12
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os

files = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        fp = os.path.join(dirname, filename)
        files.append(fp)

dfs = [pd.read_csv(f, sep=',', encoding='utf-8') for f in files]
[df.shape for df in dfs]

for i, df in enumerate(dfs):
    dfs[i] = df.drop(columns=['SourceIP', 'DestinationIP', 'SourcePort', 'DestinationPort', 'TimeStamp'])

for df in dfs:
    df.replace([np.inf, -np.inf], np.nan, inplace=True)
    print(df.isna().any(axis=1).sum(), 'rows with at least one NaN to remove')
    df.dropna(inplace=True)
[df.shape for df in dfs]
code
105212265/cell_5
[ "text_plain_output_1.png" ]
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os

files = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        fp = os.path.join(dirname, filename)
        files.append(fp)

dfs = [pd.read_csv(f, sep=',', encoding='utf-8') for f in files]
[df.shape for df in dfs]
dfs[0].columns
code
2001614/cell_6
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import cvxpy as cvx import numpy as np import pandas as pd import numpy as np import pandas as pd import cvxpy as cvx import pylab as plt import networkx as nx def prepare_data(data, suffix): data = data.drop(data.columns[range(3, 8)], axis=1) data = data.drop(data.columns[range(0, 2)], axis=1) data = data.groupby('settlement_name_english').sum() data = data.add_suffix(suffix) return data def join_data(x_data, y_data, x_title, y_title): M1 = x_data.shape[1] M2 = y_data.shape[1] n_votes_x = np.sum(x_data.values) n_votes_y = np.sum(y_data.values) data_joint = pd.merge(x_data, y_data, how='inner', left_index=True, right_index=True) x_data = data_joint[data_joint.columns[range(1, M1)]] y_data = data_joint[data_joint.columns[M1 + 1:]] retained_votes_x = np.int(100 * np.round(np.sum(x_data.values) / n_votes_x, 2)) retained_votes_y = np.int(100 * np.round(np.sum(y_data.values) / n_votes_y, 2)) x_data = x_data.div(x_data.sum(axis=1), axis=0) x_data = x_data.mul(y_data.sum(axis=1), axis=0) avg_growth_factor = np.int(100 * np.round(np.sum(x_data.values) / n_votes_x - 1, 2)) return (x_data, y_data) x_data = pd.read_csv('../input/results_by_booth_2013 - english - v2.csv', encoding='iso-8859-1') y_data = pd.read_csv('../input/results_by_booth_2015 - english - v3.csv', encoding='iso-8859-1') x_data = prepare_data(x_data, '_2013') y_data = prepare_data(y_data, '_2015') [x_data, y_data] = join_data(x_data, y_data, '2013', '2015') M1 = x_data.shape[1] M2 = y_data.shape[1] def solve_transfer_coefficients(x_data, y_data): C = cvx.Variable(x_data.shape[1], y_data.shape[1]) constraints = [0 <= C, C <= 1, cvx.sum_entries(C, axis=1) == 1] objective = cvx.Minimize(cvx.sum_entries(cvx.square(x_data.values * C - y_data.values))) prob = cvx.Problem(objective, constraints) prob.solve() C_mat = C.value misplaced_votes = np.sum(np.abs(x_data.values * C_mat - y_data.values)) properly_placed_votes = np.int(100 * np.round(1 - misplaced_votes / np.sum(y_data.values), 2)) return C_mat def major_parties(data, threshold, title): party_is_major = data.sum(axis=0) / sum(data.sum(axis=0)) > threshold major_party_votes = np.sum(data.values[:, party_is_major], axis=0) votes_in_major_parties = np.int(100 * np.round(np.sum(major_party_votes) / np.sum(data.values), 2)) major_party_votes = major_party_votes / sum(major_party_votes) M = sum(party_is_major) major_party_titles = [party_is_major.index.values[party_is_major == True][n][:-5] for n in range(0, M)] return (party_is_major, major_party_votes, major_party_titles) C_mat = solve_transfer_coefficients(x_data, y_data) party_threshold = 0.02 transfer_threshold = 0.01 [major_x, major_party_votes_x, major_party_titles_x] = major_parties(x_data, party_threshold, '2013') M1 = major_party_votes_x.shape[0] [major_y, major_party_votes_y, major_party_titles_y] = major_parties(y_data, party_threshold, '2015') M2 = major_party_votes_y.shape[0] C_mat = C_mat[:, major_y.values] C_mat = C_mat[major_x.values, :] vote_transfers = np.diag(major_party_votes_x) * C_mat predicted_y = major_party_votes_x * C_mat major_parties_error = np.sum(np.abs(major_party_votes_y - predicted_y)) major_parties_proper_votes = np.int(100 * np.round(1 - major_parties_error, 2)) print('Transfer model properly accounts for ' + str(major_parties_proper_votes) + '% of the votes for major parties')
code
2001614/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
import cvxpy as cvx
import pylab as plt
import networkx as nx

def prepare_data(data, suffix):
    data = data.drop(data.columns[range(3, 8)], axis=1)
    data = data.drop(data.columns[range(0, 2)], axis=1)
    data = data.groupby('settlement_name_english').sum()
    data = data.add_suffix(suffix)
    return data

def join_data(x_data, y_data, x_title, y_title):
    M1 = x_data.shape[1]
    M2 = y_data.shape[1]
    n_votes_x = np.sum(x_data.values)
    n_votes_y = np.sum(y_data.values)
    data_joint = pd.merge(x_data, y_data, how='inner', left_index=True, right_index=True)
    x_data = data_joint[data_joint.columns[range(1, M1)]]
    y_data = data_joint[data_joint.columns[M1 + 1:]]
    retained_votes_x = np.int(100 * np.round(np.sum(x_data.values) / n_votes_x, 2))
    retained_votes_y = np.int(100 * np.round(np.sum(y_data.values) / n_votes_y, 2))
    x_data = x_data.div(x_data.sum(axis=1), axis=0)
    x_data = x_data.mul(y_data.sum(axis=1), axis=0)
    avg_growth_factor = np.int(100 * np.round(np.sum(x_data.values) / n_votes_x - 1, 2))
    return (x_data, y_data)

x_data = pd.read_csv('../input/results_by_booth_2013 - english - v2.csv', encoding='iso-8859-1')
y_data = pd.read_csv('../input/results_by_booth_2015 - english - v3.csv', encoding='iso-8859-1')
x_data = prepare_data(x_data, '_2013')
y_data = prepare_data(y_data, '_2015')
[x_data, y_data] = join_data(x_data, y_data, '2013', '2015')
M1 = x_data.shape[1]
M2 = y_data.shape[1]
code
105206225/cell_9
[ "text_plain_output_5.png", "text_plain_output_4.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Dense, LSTM, Dropout, Conv1D, Conv2D, MaxPooling2D, Flatten from keras.models import Sequential from os import listdir from os.path import isfile, join import numpy import numpy as np import pandas as pd import scipy.io as sio number_of_classes = 4 def change(x): answer = np.zeros(np.shape(x)[0]) for i in range(np.shape(x)[0]): max_value = max(x[i, :]) max_index = list(x[i, :]).index(max_value) answer[i] = max_index return answer.astype(int) mypath = '../input/mitbihecgtraining2017/training2017/' onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f)) and f[0] == 'A'] bats = [f for f in onlyfiles if f[7] == 'm'] mats = [f for f in bats if np.shape(sio.loadmat(mypath + f)['val'])[1] >= 9000] check = np.shape(sio.loadmat(mypath + mats[0])['val'])[1] check = 9000 X = np.zeros((len(mats), check)) for i in range(len(mats)): X[i, :] = sio.loadmat(mypath + mats[i])['val'][0, :9000] target_train = np.zeros((len(mats), 1)) Train_data = pd.read_csv(mypath + 'REFERENCE.csv', sep=',', header=None, names=None) for i in range(len(mats)): if Train_data.loc[Train_data[0] == mats[i][:6], 1].values == 'N': target_train[i] = 0 elif Train_data.loc[Train_data[0] == mats[i][:6], 1].values == 'A': target_train[i] = 1 elif Train_data.loc[Train_data[0] == mats[i][:6], 1].values == 'O': target_train[i] = 2 else: target_train[i] = 3 Label_set = np.zeros((len(mats), number_of_classes)) for i in range(np.shape(target_train)[0]): dummy = np.zeros(number_of_classes) dummy[int(target_train[i])] = 1 Label_set[i, :] = dummy train_len = 0.8 X_train = X[:int(train_len * len(mats)), :] Y_train = Label_set[:int(train_len * len(mats)), :] X_val = X[int(train_len * len(mats)):, :] Y_val = Label_set[int(train_len * len(mats)):, :] n = 20 m = 450 c = 1 X_train = numpy.reshape(X_train, (X_train.shape[0], n, m, c)) X_val = numpy.reshape(X_val, (X_val.shape[0], n, m, c)) image_size = (n, m, c) batch_size = 32 model = Sequential() model.add(Conv2D(64, (3, 3), activation='relu', input_shape=image_size, padding='same')) model.add(Conv2D(64, (3, 3), activation='relu', padding='same')) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2))) model.add(Conv2D(128, (3, 3), activation='relu', padding='same')) model.add(Conv2D(128, (3, 3), activation='relu', padding='same')) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2))) model.add(Conv2D(256, (3, 3), activation='relu', padding='same')) model.add(Conv2D(256, (3, 3), activation='relu', padding='same')) model.add(Conv2D(256, (3, 3), activation='relu', padding='same')) model.add(Conv2D(256, (3, 3), activation='relu', padding='same')) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2))) model.add(Flatten()) model.add(Dense(4096, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(4096, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(1000, activation='relu')) model.add(Dense(number_of_classes, activation='softmax')) model.summary() model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) model.fit(X_train, Y_train, epochs=25, batch_size=batch_size, validation_split=0.3, verbose=1, shuffle=False) model.evaluate(X_val, Y_val)
code
105206225/cell_4
[ "text_plain_output_1.png" ]
from os import listdir
from os.path import isfile, join
import numpy as np
import scipy.io as sio

number_of_classes = 4

def change(x):
    answer = np.zeros(np.shape(x)[0])
    for i in range(np.shape(x)[0]):
        max_value = max(x[i, :])
        max_index = list(x[i, :]).index(max_value)
        answer[i] = max_index
    return answer.astype(int)

mypath = '../input/mitbihecgtraining2017/training2017/'
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f)) and f[0] == 'A']
bats = [f for f in onlyfiles if f[7] == 'm']
mats = [f for f in bats if np.shape(sio.loadmat(mypath + f)['val'])[1] >= 9000]
check = np.shape(sio.loadmat(mypath + mats[0])['val'])[1]
check = 9000
X = np.zeros((len(mats), check))
print(len(mats), check, X.shape)
for i in range(len(mats)):
    X[i, :] = sio.loadmat(mypath + mats[i])['val'][0, :9000]
code
105206225/cell_6
[ "text_plain_output_1.png" ]
from os import listdir from os.path import isfile, join import numpy import numpy as np import pandas as pd import scipy.io as sio number_of_classes = 4 def change(x): answer = np.zeros(np.shape(x)[0]) for i in range(np.shape(x)[0]): max_value = max(x[i, :]) max_index = list(x[i, :]).index(max_value) answer[i] = max_index return answer.astype(int) mypath = '../input/mitbihecgtraining2017/training2017/' onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f)) and f[0] == 'A'] bats = [f for f in onlyfiles if f[7] == 'm'] mats = [f for f in bats if np.shape(sio.loadmat(mypath + f)['val'])[1] >= 9000] check = np.shape(sio.loadmat(mypath + mats[0])['val'])[1] check = 9000 X = np.zeros((len(mats), check)) for i in range(len(mats)): X[i, :] = sio.loadmat(mypath + mats[i])['val'][0, :9000] target_train = np.zeros((len(mats), 1)) Train_data = pd.read_csv(mypath + 'REFERENCE.csv', sep=',', header=None, names=None) for i in range(len(mats)): if Train_data.loc[Train_data[0] == mats[i][:6], 1].values == 'N': target_train[i] = 0 elif Train_data.loc[Train_data[0] == mats[i][:6], 1].values == 'A': target_train[i] = 1 elif Train_data.loc[Train_data[0] == mats[i][:6], 1].values == 'O': target_train[i] = 2 else: target_train[i] = 3 Label_set = np.zeros((len(mats), number_of_classes)) for i in range(np.shape(target_train)[0]): dummy = np.zeros(number_of_classes) dummy[int(target_train[i])] = 1 Label_set[i, :] = dummy train_len = 0.8 X_train = X[:int(train_len * len(mats)), :] Y_train = Label_set[:int(train_len * len(mats)), :] X_val = X[int(train_len * len(mats)):, :] Y_val = Label_set[int(train_len * len(mats)):, :] n = 20 m = 450 c = 1 X_train = numpy.reshape(X_train, (X_train.shape[0], n, m, c)) X_val = numpy.reshape(X_val, (X_val.shape[0], n, m, c)) image_size = (n, m, c) print(Y_train.shape)
code
105206225/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Dense, LSTM, Dropout, Conv1D, Conv2D, MaxPooling2D, Flatten from keras.models import Sequential from os import listdir from os.path import isfile, join import numpy import numpy as np import pandas as pd import scipy.io as sio number_of_classes = 4 def change(x): answer = np.zeros(np.shape(x)[0]) for i in range(np.shape(x)[0]): max_value = max(x[i, :]) max_index = list(x[i, :]).index(max_value) answer[i] = max_index return answer.astype(int) mypath = '../input/mitbihecgtraining2017/training2017/' onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f)) and f[0] == 'A'] bats = [f for f in onlyfiles if f[7] == 'm'] mats = [f for f in bats if np.shape(sio.loadmat(mypath + f)['val'])[1] >= 9000] check = np.shape(sio.loadmat(mypath + mats[0])['val'])[1] check = 9000 X = np.zeros((len(mats), check)) for i in range(len(mats)): X[i, :] = sio.loadmat(mypath + mats[i])['val'][0, :9000] target_train = np.zeros((len(mats), 1)) Train_data = pd.read_csv(mypath + 'REFERENCE.csv', sep=',', header=None, names=None) for i in range(len(mats)): if Train_data.loc[Train_data[0] == mats[i][:6], 1].values == 'N': target_train[i] = 0 elif Train_data.loc[Train_data[0] == mats[i][:6], 1].values == 'A': target_train[i] = 1 elif Train_data.loc[Train_data[0] == mats[i][:6], 1].values == 'O': target_train[i] = 2 else: target_train[i] = 3 Label_set = np.zeros((len(mats), number_of_classes)) for i in range(np.shape(target_train)[0]): dummy = np.zeros(number_of_classes) dummy[int(target_train[i])] = 1 Label_set[i, :] = dummy train_len = 0.8 X_train = X[:int(train_len * len(mats)), :] Y_train = Label_set[:int(train_len * len(mats)), :] X_val = X[int(train_len * len(mats)):, :] Y_val = Label_set[int(train_len * len(mats)):, :] n = 20 m = 450 c = 1 X_train = numpy.reshape(X_train, (X_train.shape[0], n, m, c)) X_val = numpy.reshape(X_val, (X_val.shape[0], n, m, c)) image_size = (n, m, c) batch_size = 32 model = Sequential() model.add(Conv2D(64, (3, 3), activation='relu', input_shape=image_size, padding='same')) model.add(Conv2D(64, (3, 3), activation='relu', padding='same')) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2))) model.add(Conv2D(128, (3, 3), activation='relu', padding='same')) model.add(Conv2D(128, (3, 3), activation='relu', padding='same')) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2))) model.add(Conv2D(256, (3, 3), activation='relu', padding='same')) model.add(Conv2D(256, (3, 3), activation='relu', padding='same')) model.add(Conv2D(256, (3, 3), activation='relu', padding='same')) model.add(Conv2D(256, (3, 3), activation='relu', padding='same')) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
code
105206225/cell_8
[ "text_plain_output_1.png" ]
from keras.layers import Dense, LSTM, Dropout, Conv1D, Conv2D, MaxPooling2D, Flatten from keras.models import Sequential from os import listdir from os.path import isfile, join import numpy import numpy as np import pandas as pd import scipy.io as sio number_of_classes = 4 def change(x): answer = np.zeros(np.shape(x)[0]) for i in range(np.shape(x)[0]): max_value = max(x[i, :]) max_index = list(x[i, :]).index(max_value) answer[i] = max_index return answer.astype(int) mypath = '../input/mitbihecgtraining2017/training2017/' onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f)) and f[0] == 'A'] bats = [f for f in onlyfiles if f[7] == 'm'] mats = [f for f in bats if np.shape(sio.loadmat(mypath + f)['val'])[1] >= 9000] check = np.shape(sio.loadmat(mypath + mats[0])['val'])[1] check = 9000 X = np.zeros((len(mats), check)) for i in range(len(mats)): X[i, :] = sio.loadmat(mypath + mats[i])['val'][0, :9000] target_train = np.zeros((len(mats), 1)) Train_data = pd.read_csv(mypath + 'REFERENCE.csv', sep=',', header=None, names=None) for i in range(len(mats)): if Train_data.loc[Train_data[0] == mats[i][:6], 1].values == 'N': target_train[i] = 0 elif Train_data.loc[Train_data[0] == mats[i][:6], 1].values == 'A': target_train[i] = 1 elif Train_data.loc[Train_data[0] == mats[i][:6], 1].values == 'O': target_train[i] = 2 else: target_train[i] = 3 Label_set = np.zeros((len(mats), number_of_classes)) for i in range(np.shape(target_train)[0]): dummy = np.zeros(number_of_classes) dummy[int(target_train[i])] = 1 Label_set[i, :] = dummy train_len = 0.8 X_train = X[:int(train_len * len(mats)), :] Y_train = Label_set[:int(train_len * len(mats)), :] X_val = X[int(train_len * len(mats)):, :] Y_val = Label_set[int(train_len * len(mats)):, :] n = 20 m = 450 c = 1 X_train = numpy.reshape(X_train, (X_train.shape[0], n, m, c)) X_val = numpy.reshape(X_val, (X_val.shape[0], n, m, c)) image_size = (n, m, c) batch_size = 32 model = Sequential() model.add(Conv2D(64, (3, 3), activation='relu', input_shape=image_size, padding='same')) model.add(Conv2D(64, (3, 3), activation='relu', padding='same')) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2))) model.add(Conv2D(128, (3, 3), activation='relu', padding='same')) model.add(Conv2D(128, (3, 3), activation='relu', padding='same')) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2))) model.add(Conv2D(256, (3, 3), activation='relu', padding='same')) model.add(Conv2D(256, (3, 3), activation='relu', padding='same')) model.add(Conv2D(256, (3, 3), activation='relu', padding='same')) model.add(Conv2D(256, (3, 3), activation='relu', padding='same')) model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2))) model.add(Flatten()) model.add(Dense(4096, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(4096, activation='relu')) model.add(Dropout(0.5)) model.add(Dense(1000, activation='relu')) model.add(Dense(number_of_classes, activation='softmax')) print(Y_train[5:10]) model.summary()
code
105206225/cell_3
[ "text_plain_output_1.png" ]
import keras

keras.backend.image_data_format()
code
72071717/cell_2
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/google-play-store-dataset/googleplaystore1.csv')
df.head()
code
72071717/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/google-play-store-dataset/googleplaystore1.csv')
df = df[df.Installs != 'Free']
df.Installs = df.Installs.astype(int)
code
74049467/cell_9
[ "text_plain_output_1.png" ]
from optuna.integration import LightGBMPruningCallback from sklearn.impute import SimpleImputer from sklearn.metrics import log_loss, mean_squared_error from sklearn.model_selection import KFold ,StratifiedKFold,cross_validate,train_test_split import lightgbm as lgbm import numpy as np import optuna import pandas as pd train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv') test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv') sub = pd.read_csv('../input/tabular-playground-series-sep-2021/sample_solution.csv') X = train.drop(['id', 'claim'], axis=1) X_test = test.drop('id', axis=1) y = train['claim'] imputer = SimpleImputer(missing_values=np.nan, strategy='median') X = imputer.fit_transform(X) X_test = imputer.transform(X_test) from optuna.integration import LightGBMPruningCallback def objective(trial, X, y): param_grid = {'device_type': trial.suggest_categorical('device_type', ['gpu']), 'n_estimators': trial.suggest_categorical('n_estimators', [10000]), 'learning_rate': trial.suggest_float('learning_rate', 0.01, 0.3), 'num_leaves': trial.suggest_int('num_leaves', 20, 3000, step=20), 'max_depth': trial.suggest_int('max_depth', 3, 12), 'min_data_in_leaf': trial.suggest_int('min_data_in_leaf', 200, 10000, step=100), 'lambda_l1': trial.suggest_int('lambda_l1', 0, 100, step=5), 'lambda_l2': trial.suggest_int('lambda_l2', 0, 100, step=5), 'min_gain_to_split': trial.suggest_float('min_gain_to_split', 0, 15), 'bagging_fraction': trial.suggest_float('bagging_fraction', 0.2, 0.95, step=0.1), 'bagging_freq': trial.suggest_categorical('bagging_freq', [1]), 'feature_fraction': trial.suggest_float('feature_fraction', 0.2, 0.95, step=0.1)} cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=1121218) cv_scores = np.empty(5) for idx, (train_idx, test_idx) in enumerate(cv.split(X, y)): X_train, X_test = (X[train_idx], X[test_idx]) y_train, y_test = (y[train_idx], y[test_idx]) model = lgbm.LGBMClassifier(objective='binary', **param_grid) model.fit(X_train, y_train, eval_set=[(X_test, y_test)], eval_metric='binary_logloss', early_stopping_rounds=100, callbacks=[LightGBMPruningCallback(trial, 'binary_logloss')], verbose=2) preds = model.predict_proba(X_test) cv_scores[idx] = log_loss(y_test, preds) return np.mean(cv_scores) study = optuna.create_study(direction='minimize', study_name='LGBM Classifier') func = lambda trial: objective(trial, X, y) study.optimize(func, n_trials=20) print(f'\tBest value (rmse): {study.best_value:.5f}') print(f'\tBest params:') for key, value in study.best_params.items(): print(f'\t\t{key}: {value}')
code
74049467/cell_2
[ "text_plain_output_1.png" ]
print('Hello')
code
74049467/cell_8
[ "application_vnd.jupyter.stderr_output_27.png", "application_vnd.jupyter.stderr_output_35.png", "application_vnd.jupyter.stderr_output_9.png", "text_plain_output_30.png", "application_vnd.jupyter.stderr_output_7.png", "application_vnd.jupyter.stderr_output_11.png", "text_plain_output_40.png", "text_plain_output_20.png", "application_vnd.jupyter.stderr_output_31.png", "text_plain_output_4.png", "application_vnd.jupyter.stderr_output_33.png", "application_vnd.jupyter.stderr_output_25.png", "text_plain_output_14.png", "text_plain_output_32.png", "text_plain_output_10.png", "text_plain_output_6.png", "text_plain_output_24.png", "application_vnd.jupyter.stderr_output_23.png", "text_plain_output_18.png", "text_plain_output_36.png", "application_vnd.jupyter.stderr_output_19.png", "application_vnd.jupyter.stderr_output_13.png", "application_vnd.jupyter.stderr_output_3.png", "text_plain_output_22.png", "application_vnd.jupyter.stderr_output_5.png", "text_plain_output_38.png", "text_plain_output_16.png", "application_vnd.jupyter.stderr_output_15.png", "text_plain_output_8.png", "application_vnd.jupyter.stderr_output_17.png", "text_plain_output_26.png", "text_plain_output_34.png", "application_vnd.jupyter.stderr_output_41.png", "text_plain_output_28.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_29.png", "application_vnd.jupyter.stderr_output_1.png", "text_plain_output_12.png", "application_vnd.jupyter.stderr_output_39.png", "application_vnd.jupyter.stderr_output_21.png", "application_vnd.jupyter.stderr_output_37.png" ]
from optuna.integration import LightGBMPruningCallback from sklearn.impute import SimpleImputer from sklearn.metrics import log_loss, mean_squared_error from sklearn.model_selection import KFold ,StratifiedKFold,cross_validate,train_test_split import lightgbm as lgbm import numpy as np import optuna import pandas as pd train = pd.read_csv('../input/tabular-playground-series-sep-2021/train.csv') test = pd.read_csv('../input/tabular-playground-series-sep-2021/test.csv') sub = pd.read_csv('../input/tabular-playground-series-sep-2021/sample_solution.csv') X = train.drop(['id', 'claim'], axis=1) X_test = test.drop('id', axis=1) y = train['claim'] imputer = SimpleImputer(missing_values=np.nan, strategy='median') X = imputer.fit_transform(X) X_test = imputer.transform(X_test) from optuna.integration import LightGBMPruningCallback def objective(trial, X, y): param_grid = {'device_type': trial.suggest_categorical('device_type', ['gpu']), 'n_estimators': trial.suggest_categorical('n_estimators', [10000]), 'learning_rate': trial.suggest_float('learning_rate', 0.01, 0.3), 'num_leaves': trial.suggest_int('num_leaves', 20, 3000, step=20), 'max_depth': trial.suggest_int('max_depth', 3, 12), 'min_data_in_leaf': trial.suggest_int('min_data_in_leaf', 200, 10000, step=100), 'lambda_l1': trial.suggest_int('lambda_l1', 0, 100, step=5), 'lambda_l2': trial.suggest_int('lambda_l2', 0, 100, step=5), 'min_gain_to_split': trial.suggest_float('min_gain_to_split', 0, 15), 'bagging_fraction': trial.suggest_float('bagging_fraction', 0.2, 0.95, step=0.1), 'bagging_freq': trial.suggest_categorical('bagging_freq', [1]), 'feature_fraction': trial.suggest_float('feature_fraction', 0.2, 0.95, step=0.1)} cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=1121218) cv_scores = np.empty(5) for idx, (train_idx, test_idx) in enumerate(cv.split(X, y)): X_train, X_test = (X[train_idx], X[test_idx]) y_train, y_test = (y[train_idx], y[test_idx]) model = lgbm.LGBMClassifier(objective='binary', **param_grid) model.fit(X_train, y_train, eval_set=[(X_test, y_test)], eval_metric='binary_logloss', early_stopping_rounds=100, callbacks=[LightGBMPruningCallback(trial, 'binary_logloss')], verbose=2) preds = model.predict_proba(X_test) cv_scores[idx] = log_loss(y_test, preds) return np.mean(cv_scores) study = optuna.create_study(direction='minimize', study_name='LGBM Classifier') func = lambda trial: objective(trial, X, y) study.optimize(func, n_trials=20)
code
18139674/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_identity_data = pd.read_csv('../input/train_identity.csv')
train_transaction_data = pd.read_csv('../input/train_transaction.csv')
test_identity_data = pd.read_csv('../input/test_identity.csv')
test_transaction_data = pd.read_csv('../input/test_transaction.csv')

import seaborn as sns
import matplotlib.pyplot as plt

cardTypes = ['discover', 'mastercard', 'visa', 'american express']
for i, i_card in enumerate(cardTypes):
    cardData = eval('train_transaction_data.loc[train_transaction_data["card4"]=="' + i_card + '"]')

normalDataTransaction = train_transaction_data.loc[train_transaction_data['isFraud'] == 0]
fraudDataTransaction = train_transaction_data.loc[train_transaction_data['isFraud'] == 1]

prodTypes = train_transaction_data['ProductCD'].unique()
for i, i_prod in enumerate(prodTypes):
    productData = eval('train_transaction_data.loc[train_transaction_data["ProductCD"]=="' + i_prod + '"]')
    plt.figure(i)
    ax = sns.barplot(x="isFraud", y="isFraud", data=productData, estimator=lambda x: len(x) / len(productData) * 100)
    ax.set(ylabel="Percent")
    ax.set_title(i_prod)

sns.countplot(train_identity_data['DeviceType'])
code
18139674/cell_9
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_identity_data = pd.read_csv('../input/train_identity.csv')
train_transaction_data = pd.read_csv('../input/train_transaction.csv')
test_identity_data = pd.read_csv('../input/test_identity.csv')
test_transaction_data = pd.read_csv('../input/test_transaction.csv')

normalDataTransaction = train_transaction_data.loc[train_transaction_data['isFraud'] == 0]
normalDataTransaction.TransactionAmt.describe()
code
18139674/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_identity_data = pd.read_csv('../input/train_identity.csv')
train_transaction_data = pd.read_csv('../input/train_transaction.csv')
test_identity_data = pd.read_csv('../input/test_identity.csv')
test_transaction_data = pd.read_csv('../input/test_transaction.csv')
print(train_transaction_data.head(10))
code
18139674/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_identity_data = pd.read_csv('../input/train_identity.csv')
train_transaction_data = pd.read_csv('../input/train_transaction.csv')
test_identity_data = pd.read_csv('../input/test_identity.csv')
test_transaction_data = pd.read_csv('../input/test_transaction.csv')
import seaborn as sns
import matplotlib.pyplot as plt
sns.countplot(train_transaction_data['card4'])
code
18139674/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_identity_data = pd.read_csv('../input/train_identity.csv')
train_transaction_data = pd.read_csv('../input/train_transaction.csv')
test_identity_data = pd.read_csv('../input/test_identity.csv')
test_transaction_data = pd.read_csv('../input/test_transaction.csv')
import seaborn as sns
import matplotlib.pyplot as plt
cardTypes = ['discover', 'mastercard', 'visa', 'american express']
for i, i_card in enumerate(cardTypes):
    cardData = eval('train_transaction_data.loc[train_transaction_data["card4"]=="' + i_card + '"]')
normalDataTransaction = train_transaction_data.loc[train_transaction_data['isFraud'] == 0]
fraudDataTransaction = train_transaction_data.loc[train_transaction_data['isFraud'] == 1]
prodTypes = train_transaction_data['ProductCD'].unique()
for i, i_prod in enumerate(prodTypes):
    productData = eval('train_transaction_data.loc[train_transaction_data["ProductCD"]=="' + i_prod + '"]')
    plt.figure(i)
    ax = sns.barplot(x='isFraud', y='isFraud', data=productData, estimator=lambda x: len(x) / len(productData) * 100)
    ax.set(ylabel='Percent')
    ax.set_title(i_prod)
code
18139674/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
code
18139674/cell_7
[ "image_output_5.png", "image_output_4.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_identity_data = pd.read_csv('../input/train_identity.csv')
train_transaction_data = pd.read_csv('../input/train_transaction.csv')
test_identity_data = pd.read_csv('../input/test_identity.csv')
test_transaction_data = pd.read_csv('../input/test_transaction.csv')
import seaborn as sns
import matplotlib.pyplot as plt
cardTypes = ['discover', 'mastercard', 'visa', 'american express']
for i, i_card in enumerate(cardTypes):
    cardData = eval('train_transaction_data.loc[train_transaction_data["card4"]=="' + i_card + '"]')
    plt.figure(i)
    sns.countplot(cardData['isFraud']).set_title(i_card)
code
18139674/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_identity_data = pd.read_csv('../input/train_identity.csv')
train_transaction_data = pd.read_csv('../input/train_transaction.csv')
test_identity_data = pd.read_csv('../input/test_identity.csv')
test_transaction_data = pd.read_csv('../input/test_transaction.csv')
print('The average transaction amount for non fraudulent transactions is: ', np.mean(train_transaction_data.loc[train_transaction_data['isFraud'] == 0]['TransactionAmt']))
print('The average transaction amount for fraudulent transactions is: ', np.mean(train_transaction_data.loc[train_transaction_data['isFraud'] == 1]['TransactionAmt']))
code
18139674/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_identity_data = pd.read_csv('../input/train_identity.csv')
train_transaction_data = pd.read_csv('../input/train_transaction.csv')
test_identity_data = pd.read_csv('../input/test_identity.csv')
test_transaction_data = pd.read_csv('../input/test_transaction.csv')
normalDataTransaction = train_transaction_data.loc[train_transaction_data['isFraud'] == 0]
fraudDataTransaction = train_transaction_data.loc[train_transaction_data['isFraud'] == 1]
raw_train_data = pd.merge(train_transaction_data, train_identity_data, on='TransactionID', how='left')
print(raw_train_data.head(5))
code
18139674/cell_3
[ "image_output_4.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_identity_data = pd.read_csv('../input/train_identity.csv')
train_transaction_data = pd.read_csv('../input/train_transaction.csv')
test_identity_data = pd.read_csv('../input/test_identity.csv')
test_transaction_data = pd.read_csv('../input/test_transaction.csv')
print(train_identity_data.head(10))
code
18139674/cell_10
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_identity_data = pd.read_csv('../input/train_identity.csv')
train_transaction_data = pd.read_csv('../input/train_transaction.csv')
test_identity_data = pd.read_csv('../input/test_identity.csv')
test_transaction_data = pd.read_csv('../input/test_transaction.csv')
normalDataTransaction = train_transaction_data.loc[train_transaction_data['isFraud'] == 0]
fraudDataTransaction = train_transaction_data.loc[train_transaction_data['isFraud'] == 1]
fraudDataTransaction.TransactionAmt.describe()
code
18139674/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_identity_data = pd.read_csv('../input/train_identity.csv')
train_transaction_data = pd.read_csv('../input/train_transaction.csv')
test_identity_data = pd.read_csv('../input/test_identity.csv')
test_transaction_data = pd.read_csv('../input/test_transaction.csv')
import seaborn as sns
import matplotlib.pyplot as plt
sns.countplot(train_transaction_data['isFraud'])
code
17139836/cell_21
[ "image_output_11.png", "image_output_14.png", "image_output_13.png", "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_16.png", "image_output_6.png", "image_output_12.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_15.png", "image_output_9.png" ]
from sklearn.linear_model import LinearRegression
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import make_scorer
from sklearn.metrics import r2_score
from sklearn.metrics import median_absolute_error
X_train, X_test, y_train, y_test = train_test_split(train['acoustic_data'].values[::25].reshape(-1, 1), train['time_to_failure'].values[::25], test_size=0.2)
quake_linear_model = LinearRegression()
quake_linear_model.fit(X_train, y_train)
y_train_pred = quake_linear_model.predict(X_train)
y_test_pred = quake_linear_model.predict(X_test)
r2_train_score = r2_score(y_train, y_train_pred)
mae_train_score = median_absolute_error(y_train, y_train_pred)
r2_test_score = r2_score(y_test, y_test_pred)
mae_test_score = median_absolute_error(y_test, y_test_pred)
print('R2 score for training data: {} and for the test data: {}'.format(r2_train_score, r2_test_score))
print('Mean Absolute Error score for training data: {} and for the test data: {}'.format(mae_train_score, mae_test_score))
code
17139836/cell_9
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
fig, ax = plt.subplots(2,1, figsize=(20,10))
ax[0].plot(train['acoustic_data'].values[::100], color='g')
ax[0].set_title("Acoustic data for 1% sample data")
ax[0].set_xlabel("Index")
ax[0].set_ylabel("Acoustic Data Signal");
ax[1].plot(train['time_to_failure'].values[::100], color='b')
ax[1].set_title("Time to Failure for 1% sample data")
ax[1].set_xlabel("Index")
ax[1].set_ylabel("Time to Failure in ms");
def plotAroundPoints(start, end, ith):
    fig, ax1 = plt.subplots(figsize=(8, 4))
    plt.title('Trends of acoustic_data and time_to_failure around the {} earthquake'.format(ith))
    plt.plot(train['acoustic_data'].values[start:end:50], color='b')
    ax1.set_ylabel('acoustic_data', color='b')
    plt.legend(['acoustic_data'])
    ax2 = ax1.twinx()
    plt.plot(train['time_to_failure'].values[start:end:50], color='g')
    ax2.set_ylabel('time_to_failure', color='g')
    plt.legend(['time_to_failure'], loc=(0.75, 0.1))
plotAroundPoints(0, 30000000, 'first')
plotAroundPoints(30000000, 60000000, 'second')
plotAroundPoints(90000000, 120000000, 'third')
plotAroundPoints(125000000, 155000000, 'fourth')
plotAroundPoints(170000000, 200000000, 'fifth')
plotAroundPoints(200000000, 230000000, 'sixth')
plotAroundPoints(225000000, 255000000, 'seventh')
plotAroundPoints(285000000, 315000000, 'eigth')
plotAroundPoints(325000000, 355000000, 'ninth')
plotAroundPoints(360000000, 390000000, 'tenth')
plotAroundPoints(405000000, 455000000, 'eleventh')
plotAroundPoints(440000000, 470000000, 'twelvth')
plotAroundPoints(480000000, 510000000, 'thirteenth')
plotAroundPoints(510000000, 540000000, 'fourteenth')
plotAroundPoints(560000000, 590000000, 'fifteenth')
plotAroundPoints(605000000, 635000000, 'sixteenth')
code
17139836/cell_4
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
train = pd.read_csv('../input/train.csv', dtype={'acoustic_data': np.int16, 'time_to_failure': np.float32})
code
17139836/cell_34
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from keras.callbacks import ModelCheckpoint
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense, Dropout, CuDNNGRU, CuDNNLSTM, Flatten
from keras.models import Sequential
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
test_dir = '../input/test'
test_files = os.listdir(test_dir)
test_file_0 = pd.read_csv('../input/test/' + test_files[0])
submission = pd.read_csv('../input/sample_submission.csv', index_col='seg_id', dtype={'time_to_failure': np.float32})
chunk_size = 150000
chunks = int(np.floor(train.shape[0] / chunk_size))
X_data = pd.DataFrame(index=range(chunks), dtype=np.float32, columns=['min', 'max', 'std', 'avg', 'sum', 'median', 'mean_diff', 'q05', 'q25', 'q75', 'q95'])
y_data = pd.DataFrame(index=range(chunks), dtype=np.float32, columns=['ttf'])
def create_features(data_chunk, X_df, chunk_no, col_name='acoustic_data'):
    x = data_chunk[col_name]
    X_df.loc[chunk_no, 'min'] = x.min()
    X_df.loc[chunk_no, 'max'] = x.max()
    X_df.loc[chunk_no, 'std'] = x.std()
    X_df.loc[chunk_no, 'avg'] = x.mean()
    X_df.loc[chunk_no, 'sum'] = x.sum()
    X_df.loc[chunk_no, 'median'] = x.median()
    X_df.loc[chunk_no, 'mean_diff'] = np.mean(np.diff(x))
    X_df.loc[chunk_no, 'q05'] = np.quantile(x, 0.05)
    X_df.loc[chunk_no, 'q25'] = np.quantile(x, 0.25)
    X_df.loc[chunk_no, 'q75'] = np.quantile(x, 0.75)
    X_df.loc[chunk_no, 'q95'] = np.quantile(x, 0.95)
    return X_df
for chunk_no in tqdm(range(chunks)):
    data_chunk = train.iloc[chunk_no * chunk_size:chunk_no * chunk_size + chunk_size]
    X_data = create_features(data_chunk, X_data, chunk_no)
    y = data_chunk['time_to_failure'].values[-1]
    y_data.loc[chunk_no, 'ttf'] = y
X_train, X_test, y_train, y_test = train_test_split(X_data.values, y_data.values, test_size=0.2)
X_train.shape
from keras.models import Sequential
from keras.layers import Dense, Dropout, CuDNNGRU, CuDNNLSTM, Flatten
from keras.optimizers import adam
from keras.callbacks import ModelCheckpoint
model = Sequential()
model.add(CuDNNGRU(64, kernel_initializer='RandomUniform', input_shape=(X_train.shape[1], 1)))
model.add(Dropout(0.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1))
model.summary()
from keras.callbacks import ModelCheckpoint
X_train_array = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
y_train_array = np.reshape(y_train, (y_train.shape[0], y_train.shape[1], 1))
model.compile(loss='mean_absolute_error', optimizer='rmsprop', metrics=['mae'])
checkpointer = ModelCheckpoint('model.weights.hdf5', save_best_only=True, verbose=1)
build = model.fit(X_train_array, y_train, epochs=200, batch_size=30, validation_split=0.2, callbacks=[checkpointer], verbose=1)
print(build.history.keys())
code
17139836/cell_30
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
test_dir = '../input/test'
test_files = os.listdir(test_dir)
test_file_0 = pd.read_csv('../input/test/' + test_files[0])
submission = pd.read_csv('../input/sample_submission.csv', index_col='seg_id', dtype={'time_to_failure': np.float32})
chunk_size = 150000
chunks = int(np.floor(train.shape[0] / chunk_size))
X_data = pd.DataFrame(index=range(chunks), dtype=np.float32, columns=['min', 'max', 'std', 'avg', 'sum', 'median', 'mean_diff', 'q05', 'q25', 'q75', 'q95'])
y_data = pd.DataFrame(index=range(chunks), dtype=np.float32, columns=['ttf'])
def create_features(data_chunk, X_df, chunk_no, col_name='acoustic_data'):
    x = data_chunk[col_name]
    X_df.loc[chunk_no, 'min'] = x.min()
    X_df.loc[chunk_no, 'max'] = x.max()
    X_df.loc[chunk_no, 'std'] = x.std()
    X_df.loc[chunk_no, 'avg'] = x.mean()
    X_df.loc[chunk_no, 'sum'] = x.sum()
    X_df.loc[chunk_no, 'median'] = x.median()
    X_df.loc[chunk_no, 'mean_diff'] = np.mean(np.diff(x))
    X_df.loc[chunk_no, 'q05'] = np.quantile(x, 0.05)
    X_df.loc[chunk_no, 'q25'] = np.quantile(x, 0.25)
    X_df.loc[chunk_no, 'q75'] = np.quantile(x, 0.75)
    X_df.loc[chunk_no, 'q95'] = np.quantile(x, 0.95)
    return X_df
for chunk_no in tqdm(range(chunks)):
    data_chunk = train.iloc[chunk_no * chunk_size:chunk_no * chunk_size + chunk_size]
    X_data = create_features(data_chunk, X_data, chunk_no)
    y = data_chunk['time_to_failure'].values[-1]
    y_data.loc[chunk_no, 'ttf'] = y
X_train, X_test, y_train, y_test = train_test_split(X_data.values, y_data.values, test_size=0.2)
X_train.shape
code
17139836/cell_33
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.callbacks import ModelCheckpoint
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense, Dropout, CuDNNGRU, CuDNNLSTM, Flatten
from keras.models import Sequential
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
test_dir = '../input/test'
test_files = os.listdir(test_dir)
test_file_0 = pd.read_csv('../input/test/' + test_files[0])
submission = pd.read_csv('../input/sample_submission.csv', index_col='seg_id', dtype={'time_to_failure': np.float32})
chunk_size = 150000
chunks = int(np.floor(train.shape[0] / chunk_size))
X_data = pd.DataFrame(index=range(chunks), dtype=np.float32, columns=['min', 'max', 'std', 'avg', 'sum', 'median', 'mean_diff', 'q05', 'q25', 'q75', 'q95'])
y_data = pd.DataFrame(index=range(chunks), dtype=np.float32, columns=['ttf'])
def create_features(data_chunk, X_df, chunk_no, col_name='acoustic_data'):
    x = data_chunk[col_name]
    X_df.loc[chunk_no, 'min'] = x.min()
    X_df.loc[chunk_no, 'max'] = x.max()
    X_df.loc[chunk_no, 'std'] = x.std()
    X_df.loc[chunk_no, 'avg'] = x.mean()
    X_df.loc[chunk_no, 'sum'] = x.sum()
    X_df.loc[chunk_no, 'median'] = x.median()
    X_df.loc[chunk_no, 'mean_diff'] = np.mean(np.diff(x))
    X_df.loc[chunk_no, 'q05'] = np.quantile(x, 0.05)
    X_df.loc[chunk_no, 'q25'] = np.quantile(x, 0.25)
    X_df.loc[chunk_no, 'q75'] = np.quantile(x, 0.75)
    X_df.loc[chunk_no, 'q95'] = np.quantile(x, 0.95)
    return X_df
for chunk_no in tqdm(range(chunks)):
    data_chunk = train.iloc[chunk_no * chunk_size:chunk_no * chunk_size + chunk_size]
    X_data = create_features(data_chunk, X_data, chunk_no)
    y = data_chunk['time_to_failure'].values[-1]
    y_data.loc[chunk_no, 'ttf'] = y
X_train, X_test, y_train, y_test = train_test_split(X_data.values, y_data.values, test_size=0.2)
X_train.shape
from keras.models import Sequential
from keras.layers import Dense, Dropout, CuDNNGRU, CuDNNLSTM, Flatten
from keras.optimizers import adam
from keras.callbacks import ModelCheckpoint
model = Sequential()
model.add(CuDNNGRU(64, kernel_initializer='RandomUniform', input_shape=(X_train.shape[1], 1)))
model.add(Dropout(0.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1))
model.summary()
from keras.callbacks import ModelCheckpoint
X_train_array = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
y_train_array = np.reshape(y_train, (y_train.shape[0], y_train.shape[1], 1))
model.compile(loss='mean_absolute_error', optimizer='rmsprop', metrics=['mae'])
checkpointer = ModelCheckpoint('model.weights.hdf5', save_best_only=True, verbose=1)
build = model.fit(X_train_array, y_train, epochs=200, batch_size=30, validation_split=0.2, callbacks=[checkpointer], verbose=1)
code
17139836/cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
fig, ax = plt.subplots(2, 1, figsize=(20, 10))
ax[0].plot(train['acoustic_data'].values[::100], color='g')
ax[0].set_title('Acoustic data for 1% sample data')
ax[0].set_xlabel('Index')
ax[0].set_ylabel('Acoustic Data Signal')
ax[1].plot(train['time_to_failure'].values[::100], color='b')
ax[1].set_title('Time to Failure for 1% sample data')
ax[1].set_xlabel('Index')
ax[1].set_ylabel('Time to Failure in ms')
code
17139836/cell_26
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from tqdm import tqdm
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
test_dir = '../input/test'
test_files = os.listdir(test_dir)
test_file_0 = pd.read_csv('../input/test/' + test_files[0])
submission = pd.read_csv('../input/sample_submission.csv', index_col='seg_id', dtype={'time_to_failure': np.float32})
chunk_size = 150000
chunks = int(np.floor(train.shape[0] / chunk_size))
X_data = pd.DataFrame(index=range(chunks), dtype=np.float32, columns=['min', 'max', 'std', 'avg', 'sum', 'median', 'mean_diff', 'q05', 'q25', 'q75', 'q95'])
y_data = pd.DataFrame(index=range(chunks), dtype=np.float32, columns=['ttf'])
def create_features(data_chunk, X_df, chunk_no, col_name='acoustic_data'):
    x = data_chunk[col_name]
    X_df.loc[chunk_no, 'min'] = x.min()
    X_df.loc[chunk_no, 'max'] = x.max()
    X_df.loc[chunk_no, 'std'] = x.std()
    X_df.loc[chunk_no, 'avg'] = x.mean()
    X_df.loc[chunk_no, 'sum'] = x.sum()
    X_df.loc[chunk_no, 'median'] = x.median()
    X_df.loc[chunk_no, 'mean_diff'] = np.mean(np.diff(x))
    X_df.loc[chunk_no, 'q05'] = np.quantile(x, 0.05)
    X_df.loc[chunk_no, 'q25'] = np.quantile(x, 0.25)
    X_df.loc[chunk_no, 'q75'] = np.quantile(x, 0.75)
    X_df.loc[chunk_no, 'q95'] = np.quantile(x, 0.95)
    return X_df
for chunk_no in tqdm(range(chunks)):
    data_chunk = train.iloc[chunk_no * chunk_size:chunk_no * chunk_size + chunk_size]
    X_data = create_features(data_chunk, X_data, chunk_no)
    y = data_chunk['time_to_failure'].values[-1]
    y_data.loc[chunk_no, 'ttf'] = y
code
17139836/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import time
from tqdm import tqdm
import matplotlib.pyplot as plt
import os
print(os.listdir('../input'))
code
17139836/cell_32
[ "text_plain_output_1.png" ]
from keras.layers import Dense, Dropout, CuDNNGRU, CuDNNLSTM, Flatten
from keras.models import Sequential
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
test_dir = '../input/test'
test_files = os.listdir(test_dir)
test_file_0 = pd.read_csv('../input/test/' + test_files[0])
submission = pd.read_csv('../input/sample_submission.csv', index_col='seg_id', dtype={'time_to_failure': np.float32})
chunk_size = 150000
chunks = int(np.floor(train.shape[0] / chunk_size))
X_data = pd.DataFrame(index=range(chunks), dtype=np.float32, columns=['min', 'max', 'std', 'avg', 'sum', 'median', 'mean_diff', 'q05', 'q25', 'q75', 'q95'])
y_data = pd.DataFrame(index=range(chunks), dtype=np.float32, columns=['ttf'])
def create_features(data_chunk, X_df, chunk_no, col_name='acoustic_data'):
    x = data_chunk[col_name]
    X_df.loc[chunk_no, 'min'] = x.min()
    X_df.loc[chunk_no, 'max'] = x.max()
    X_df.loc[chunk_no, 'std'] = x.std()
    X_df.loc[chunk_no, 'avg'] = x.mean()
    X_df.loc[chunk_no, 'sum'] = x.sum()
    X_df.loc[chunk_no, 'median'] = x.median()
    X_df.loc[chunk_no, 'mean_diff'] = np.mean(np.diff(x))
    X_df.loc[chunk_no, 'q05'] = np.quantile(x, 0.05)
    X_df.loc[chunk_no, 'q25'] = np.quantile(x, 0.25)
    X_df.loc[chunk_no, 'q75'] = np.quantile(x, 0.75)
    X_df.loc[chunk_no, 'q95'] = np.quantile(x, 0.95)
    return X_df
for chunk_no in tqdm(range(chunks)):
    data_chunk = train.iloc[chunk_no * chunk_size:chunk_no * chunk_size + chunk_size]
    X_data = create_features(data_chunk, X_data, chunk_no)
    y = data_chunk['time_to_failure'].values[-1]
    y_data.loc[chunk_no, 'ttf'] = y
X_train, X_test, y_train, y_test = train_test_split(X_data.values, y_data.values, test_size=0.2)
X_train.shape
from keras.models import Sequential
from keras.layers import Dense, Dropout, CuDNNGRU, CuDNNLSTM, Flatten
from keras.optimizers import adam
from keras.callbacks import ModelCheckpoint
model = Sequential()
model.add(CuDNNGRU(64, kernel_initializer='RandomUniform', input_shape=(X_train.shape[1], 1)))
model.add(Dropout(0.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1))
model.summary()
code
17139836/cell_15
[ "image_output_1.png" ]
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
test_dir = '../input/test'
test_files = os.listdir(test_dir)
test_file_0 = pd.read_csv('../input/test/' + test_files[0])
submission = pd.read_csv('../input/sample_submission.csv', index_col='seg_id', dtype={'time_to_failure': np.float32})
len(submission)
code
17139836/cell_14
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
test_dir = '../input/test'
test_files = os.listdir(test_dir)
test_file_0 = pd.read_csv('../input/test/' + test_files[0])
submission = pd.read_csv('../input/sample_submission.csv', index_col='seg_id', dtype={'time_to_failure': np.float32})
submission.head()
code
17139836/cell_27
[ "text_html_output_1.png" ]
from tqdm import tqdm
import numpy as np # linear algebra
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
test_dir = '../input/test'
test_files = os.listdir(test_dir)
test_file_0 = pd.read_csv('../input/test/' + test_files[0])
submission = pd.read_csv('../input/sample_submission.csv', index_col='seg_id', dtype={'time_to_failure': np.float32})
chunk_size = 150000
chunks = int(np.floor(train.shape[0] / chunk_size))
X_data = pd.DataFrame(index=range(chunks), dtype=np.float32, columns=['min', 'max', 'std', 'avg', 'sum', 'median', 'mean_diff', 'q05', 'q25', 'q75', 'q95'])
y_data = pd.DataFrame(index=range(chunks), dtype=np.float32, columns=['ttf'])
def create_features(data_chunk, X_df, chunk_no, col_name='acoustic_data'):
    x = data_chunk[col_name]
    X_df.loc[chunk_no, 'min'] = x.min()
    X_df.loc[chunk_no, 'max'] = x.max()
    X_df.loc[chunk_no, 'std'] = x.std()
    X_df.loc[chunk_no, 'avg'] = x.mean()
    X_df.loc[chunk_no, 'sum'] = x.sum()
    X_df.loc[chunk_no, 'median'] = x.median()
    X_df.loc[chunk_no, 'mean_diff'] = np.mean(np.diff(x))
    X_df.loc[chunk_no, 'q05'] = np.quantile(x, 0.05)
    X_df.loc[chunk_no, 'q25'] = np.quantile(x, 0.25)
    X_df.loc[chunk_no, 'q75'] = np.quantile(x, 0.75)
    X_df.loc[chunk_no, 'q95'] = np.quantile(x, 0.95)
    return X_df
for chunk_no in tqdm(range(chunks)):
    data_chunk = train.iloc[chunk_no * chunk_size:chunk_no * chunk_size + chunk_size]
    X_data = create_features(data_chunk, X_data, chunk_no)
    y = data_chunk['time_to_failure'].values[-1]
    y_data.loc[chunk_no, 'ttf'] = y
print(X_data.shape)
print(y_data.shape)
print(X_data.shape[1])
X_data.head()
code
17139836/cell_12
[ "text_plain_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
test_dir = '../input/test'
test_files = os.listdir(test_dir)
print(test_files[0:5])
print('Number of test files: {}'.format(len(test_files)))
test_file_0 = pd.read_csv('../input/test/' + test_files[0])
print('Dimensions of the first test file: {}'.format(test_file_0.shape))
test_file_0.head()
code
17139836/cell_5
[ "text_plain_output_1.png" ]
print(train.shape)
print(train.head())
code
128019465/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_excel('/kaggle/input/students-employability-dataset/Student-Employability-Datasets.xlsx')
df.shape
new_df = df.drop(columns=['CLASS', 'Name of Student', 'Student Performance Rating'])
total = pd.DataFrame({'Skills': new_df.columns, 'Total Value': new_df.sum()})
fig = plt.figure(figsize=(15,7.5))
fig.patch.set_facecolor('ghostwhite')
sns.set_theme(style="darkgrid", palette="deep")
ax = sns.barplot( y = 'Skills', x='Total Value', data = total.sort_values( 'Total Value', ascending = False ) )
plt.title('Summation of Values per Skill')
plt.show()
df_employed = df.loc[df['CLASS'] == 'Employable']
df_employed = df_employed.drop(columns=['Name of Student', 'CLASS'])
ave_skills = pd.DataFrame({'Average Score': df_employed.mean()})
ave_skills
pie_data = ave_skills.drop(index=['Student Performance Rating'], axis=0)
pie_data
palette_color = sns.color_palette(palette = 'deep')
fig = plt.figure(figsize=(20,10))
fig.patch.set_facecolor('ghostwhite')
plt.pie( pie_data['Average Score'], labels = pie_data.index, colors = palette_color, autopct = '%.0f%%' )
centre_circle = plt.Circle((0, 0), 0.72, fc='white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
plt.title('Percentage Distribution of Skills for Employable on Average')
plt.show()
fig = plt.figure(figsize=(15, 7.5))
fig.patch.set_facecolor('ghostwhite')
sns.set_theme(style='darkgrid', palette='deep')
sns.barplot(y=ave_skills.index, x='Average Score', data=ave_skills.sort_values('Average Score', ascending=False))
plt.xticks(np.arange(0, 5.25, 0.25))
plt.title('Average Score of the Employable Students per Category')
plt.show()
code
128019465/cell_9
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_excel('/kaggle/input/students-employability-dataset/Student-Employability-Datasets.xlsx')
df.shape
new_df = df.drop(columns=['CLASS', 'Name of Student', 'Student Performance Rating'])
total = pd.DataFrame({'Skills': new_df.columns, 'Total Value': new_df.sum()})
df_employed = df.loc[df['CLASS'] == 'Employable']
df_employed = df_employed.drop(columns=['Name of Student', 'CLASS'])
ave_skills = pd.DataFrame({'Average Score': df_employed.mean()})
ave_skills
df_employed.head()
code
128019465/cell_4
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_excel('/kaggle/input/students-employability-dataset/Student-Employability-Datasets.xlsx')
df.shape
df.head()
code
128019465/cell_6
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_excel('/kaggle/input/students-employability-dataset/Student-Employability-Datasets.xlsx')
df.shape
new_df = df.drop(columns=['CLASS', 'Name of Student', 'Student Performance Rating'])
total = pd.DataFrame({'Skills': new_df.columns, 'Total Value': new_df.sum()})
fig = plt.figure(figsize=(15, 7.5))
fig.patch.set_facecolor('ghostwhite')
sns.set_theme(style='darkgrid', palette='deep')
ax = sns.barplot(y='Skills', x='Total Value', data=total.sort_values('Total Value', ascending=False))
plt.title('Summation of Values per Skill')
plt.show()
code
128019465/cell_2
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_excel('/kaggle/input/students-employability-dataset/Student-Employability-Datasets.xlsx')
df.shape
code
128019465/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_excel('/kaggle/input/students-employability-dataset/Student-Employability-Datasets.xlsx')
df.shape
new_df = df.drop(columns=['CLASS', 'Name of Student', 'Student Performance Rating'])
total = pd.DataFrame({'Skills': new_df.columns, 'Total Value': new_df.sum()})
fig = plt.figure(figsize=(15,7.5))
fig.patch.set_facecolor('ghostwhite')
sns.set_theme(style="darkgrid", palette="deep")
ax = sns.barplot( y = 'Skills', x='Total Value', data = total.sort_values( 'Total Value', ascending = False ) )
plt.title('Summation of Values per Skill')
plt.show()
df_employed = df.loc[df['CLASS'] == 'Employable']
df_employed = df_employed.drop(columns=['Name of Student', 'CLASS'])
ave_skills = pd.DataFrame({'Average Score': df_employed.mean()})
ave_skills
pie_data = ave_skills.drop(index=['Student Performance Rating'], axis=0)
pie_data
palette_color = sns.color_palette(palette='deep')
fig = plt.figure(figsize=(20, 10))
fig.patch.set_facecolor('ghostwhite')
plt.pie(pie_data['Average Score'], labels=pie_data.index, colors=palette_color, autopct='%.0f%%')
centre_circle = plt.Circle((0, 0), 0.72, fc='white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
plt.title('Percentage Distribution of Skills for Employable on Average')
plt.show()
code
128019465/cell_19
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_excel('/kaggle/input/students-employability-dataset/Student-Employability-Datasets.xlsx')
df.shape
new_df = df.drop(columns=['CLASS', 'Name of Student', 'Student Performance Rating'])
total = pd.DataFrame({'Skills': new_df.columns, 'Total Value': new_df.sum()})
fig = plt.figure(figsize=(15,7.5))
fig.patch.set_facecolor('ghostwhite')
sns.set_theme(style="darkgrid", palette="deep")
ax = sns.barplot( y = 'Skills', x='Total Value', data = total.sort_values( 'Total Value', ascending = False ) )
plt.title('Summation of Values per Skill')
plt.show()
df_employed = df.loc[df['CLASS'] == 'Employable']
df_employed = df_employed.drop(columns=['Name of Student', 'CLASS'])
ave_skills = pd.DataFrame({'Average Score': df_employed.mean()})
ave_skills
pie_data = ave_skills.drop(index=['Student Performance Rating'], axis=0)
pie_data
palette_color = sns.color_palette(palette = 'deep')
fig = plt.figure(figsize=(20,10))
fig.patch.set_facecolor('ghostwhite')
plt.pie( pie_data['Average Score'], labels = pie_data.index, colors = palette_color, autopct = '%.0f%%' )
centre_circle = plt.Circle((0, 0), 0.72, fc='white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
plt.title('Percentage Distribution of Skills for Employable on Average')
plt.show()
fig = plt.figure(figsize=(15,7.5))
fig.patch.set_facecolor('ghostwhite')
sns.set_theme(style="darkgrid", palette="deep")
sns.barplot( y = ave_skills.index, x= 'Average Score', data = ave_skills.sort_values( 'Average Score', ascending = False ) )
plt.xticks(np.arange(0,5.25,0.25))
plt.title('Average Score of the Employable Students per Category')
plt.show()
df_ave = pd.DataFrame({'Student': df['Name of Student'].loc[df['CLASS'] == 'Employable'], 'Average Score': df.loc[df['CLASS'] == 'Employable']._get_numeric_data().mean(axis=1)})
fig = plt.figure(figsize=(15,7.5))
fig.patch.set_facecolor('ghostwhite')
sns.set_theme(style='darkgrid', palette = 'dark')
ax = sns.histplot( x = 'Average Score', data = df_ave )
df_ave.plot( kind='kde', ax=ax, secondary_y=True, color = 'orange' )
plt.xticks(np.arange(2,6.25,0.25))
plt.title('Mean Scores per Student Histogram for Employable')
plt.show()
df_less = df.loc[df['CLASS'] == 'LessEmployable'].drop(columns=['CLASS', 'Name of Student'])
fig = plt.figure(figsize=(15, 7.5))
fig.patch.set_facecolor('ghostwhite')
less_ave_skills = pd.DataFrame({'Average': df_less.mean()})
sns.set_theme(style='darkgrid', palette='deep')
sns.barplot(y=less_ave_skills.index, x='Average', data=less_ave_skills.sort_values('Average', ascending=False))
plt.xticks(np.arange(0, 5.25, 0.25))
plt.title('Average Score of the LESS Employable Students per Category')
plt.show()
code
128019465/cell_8
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_excel('/kaggle/input/students-employability-dataset/Student-Employability-Datasets.xlsx')
df.shape
new_df = df.drop(columns=['CLASS', 'Name of Student', 'Student Performance Rating'])
total = pd.DataFrame({'Skills': new_df.columns, 'Total Value': new_df.sum()})
df_employed = df.loc[df['CLASS'] == 'Employable']
df_employed = df_employed.drop(columns=['Name of Student', 'CLASS'])
ave_skills = pd.DataFrame({'Average Score': df_employed.mean()})
ave_skills
code
128019465/cell_16
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_excel('/kaggle/input/students-employability-dataset/Student-Employability-Datasets.xlsx')
df.shape
new_df = df.drop(columns=['CLASS', 'Name of Student', 'Student Performance Rating'])
total = pd.DataFrame({'Skills': new_df.columns, 'Total Value': new_df.sum()})
fig = plt.figure(figsize=(15,7.5))
fig.patch.set_facecolor('ghostwhite')
sns.set_theme(style="darkgrid", palette="deep")
ax = sns.barplot( y = 'Skills', x='Total Value', data = total.sort_values( 'Total Value', ascending = False ) )
plt.title('Summation of Values per Skill')
plt.show()
df_employed = df.loc[df['CLASS'] == 'Employable']
df_employed = df_employed.drop(columns=['Name of Student', 'CLASS'])
ave_skills = pd.DataFrame({'Average Score': df_employed.mean()})
ave_skills
pie_data = ave_skills.drop(index=['Student Performance Rating'], axis=0)
pie_data
palette_color = sns.color_palette(palette = 'deep')
fig = plt.figure(figsize=(20,10))
fig.patch.set_facecolor('ghostwhite')
plt.pie( pie_data['Average Score'], labels = pie_data.index, colors = palette_color, autopct = '%.0f%%' )
centre_circle = plt.Circle((0, 0), 0.72, fc='white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
plt.title('Percentage Distribution of Skills for Employable on Average')
plt.show()
fig = plt.figure(figsize=(15,7.5))
fig.patch.set_facecolor('ghostwhite')
sns.set_theme(style="darkgrid", palette="deep")
sns.barplot( y = ave_skills.index, x= 'Average Score', data = ave_skills.sort_values( 'Average Score', ascending = False ) )
plt.xticks(np.arange(0,5.25,0.25))
plt.title('Average Score of the Employable Students per Category')
plt.show()
df_ave = pd.DataFrame({'Student': df['Name of Student'].loc[df['CLASS'] == 'Employable'], 'Average Score': df.loc[df['CLASS'] == 'Employable']._get_numeric_data().mean(axis=1)})
fig = plt.figure(figsize=(15, 7.5))
fig.patch.set_facecolor('ghostwhite')
sns.set_theme(style='darkgrid', palette='dark')
ax = sns.histplot(x='Average Score', data=df_ave)
df_ave.plot(kind='kde', ax=ax, secondary_y=True, color='orange')
plt.xticks(np.arange(2, 6.25, 0.25))
plt.title('Mean Scores per Student Histogram for Employable')
plt.show()
code
128019465/cell_3
[ "image_output_1.png" ]
import pandas as pd
df = pd.read_excel('/kaggle/input/students-employability-dataset/Student-Employability-Datasets.xlsx')
df.shape
df.info()
code
128019465/cell_22
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_excel('/kaggle/input/students-employability-dataset/Student-Employability-Datasets.xlsx')
df.shape
new_df = df.drop(columns=['CLASS', 'Name of Student', 'Student Performance Rating'])
total = pd.DataFrame({'Skills': new_df.columns, 'Total Value': new_df.sum()})
fig = plt.figure(figsize=(15,7.5))
fig.patch.set_facecolor('ghostwhite')
sns.set_theme(style="darkgrid", palette="deep")
ax = sns.barplot( y = 'Skills', x='Total Value', data = total.sort_values( 'Total Value', ascending = False ) )
plt.title('Summation of Values per Skill')
plt.show()
df_employed = df.loc[df['CLASS'] == 'Employable']
df_employed = df_employed.drop(columns=['Name of Student', 'CLASS'])
ave_skills = pd.DataFrame({'Average Score': df_employed.mean()})
ave_skills
pie_data = ave_skills.drop(index=['Student Performance Rating'], axis=0)
pie_data
palette_color = sns.color_palette(palette = 'deep')
fig = plt.figure(figsize=(20,10))
fig.patch.set_facecolor('ghostwhite')
plt.pie( pie_data['Average Score'], labels = pie_data.index, colors = palette_color, autopct = '%.0f%%' )
centre_circle = plt.Circle((0, 0), 0.72, fc='white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
plt.title('Percentage Distribution of Skills for Employable on Average')
plt.show()
fig = plt.figure(figsize=(15,7.5))
fig.patch.set_facecolor('ghostwhite')
sns.set_theme(style="darkgrid", palette="deep")
sns.barplot( y = ave_skills.index, x= 'Average Score', data = ave_skills.sort_values( 'Average Score', ascending = False ) )
plt.xticks(np.arange(0,5.25,0.25))
plt.title('Average Score of the Employable Students per Category')
plt.show()
df_ave = pd.DataFrame({'Student': df['Name of Student'].loc[df['CLASS'] == 'Employable'], 'Average Score': df.loc[df['CLASS'] == 'Employable']._get_numeric_data().mean(axis=1)})
fig = plt.figure(figsize=(15,7.5))
fig.patch.set_facecolor('ghostwhite')
sns.set_theme(style='darkgrid', palette = 'dark')
ax = sns.histplot( x = 'Average Score', data = df_ave )
df_ave.plot( kind='kde', ax=ax, secondary_y=True, color = 'orange' )
plt.xticks(np.arange(2,6.25,0.25))
plt.title('Mean Scores per Student Histogram for Employable')
plt.show()
df_less = df.loc[df['CLASS'] == 'LessEmployable'].drop(columns=['CLASS', 'Name of Student'])
fig = plt.figure(figsize=(15,7.5))
fig.patch.set_facecolor('ghostwhite')
less_ave_skills = pd.DataFrame({ 'Average' : df_less.mean() })
sns.set_theme(style="darkgrid", palette="deep")
sns.barplot( y = less_ave_skills.index, x= 'Average', data = less_ave_skills.sort_values( 'Average', ascending = False ) )
plt.xticks(np.arange(0,5.25,0.25))
plt.title('Average Score of the LESS Employable Students per Category')
plt.show()
df_ave_less = pd.DataFrame({'Student': df['Name of Student'].loc[df['CLASS'] == 'LessEmployable'], 'Average Score': df.loc[df['CLASS'] == 'LessEmployable']._get_numeric_data().mean(axis=1)})
df_ave_less.head()
code
128019465/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_excel('/kaggle/input/students-employability-dataset/Student-Employability-Datasets.xlsx')
df.shape
new_df = df.drop(columns=['CLASS', 'Name of Student', 'Student Performance Rating'])
total = pd.DataFrame({'Skills': new_df.columns, 'Total Value': new_df.sum()})
df_employed = df.loc[df['CLASS'] == 'Employable']
df_employed = df_employed.drop(columns=['Name of Student', 'CLASS'])
ave_skills = pd.DataFrame({'Average Score': df_employed.mean()})
ave_skills
pie_data = ave_skills.drop(index=['Student Performance Rating'], axis=0)
pie_data
code
328101/cell_9
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'outcome': np.int8}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='left')
hstry['positive_counts'] = hstry['positive_counts'].fillna('0').astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna('0').astype(np.int64)
hstry['diff'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
code
328101/cell_4
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'outcome': np.int8}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
plt.legend()
plt.show()
code
328101/cell_6
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'outcome': np.int8}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
goods = act_train[act_train['outcome'] == 1]
bads = act_train[act_train['outcome'] == 0]
goods['date'].groupby(goods.date.dt.date).count().plot(figsize=(10, 5), label='Good')
bads['date'].groupby(bads.date.dt.date).count().plot(figsize=(10, 5), c='r', label='Bad')
plt.legend()
plt.show()
code
328101/cell_10
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
people = pd.read_csv('../input/people.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'char_38': np.int32}, parse_dates=['date'])
act_train = pd.read_csv('../input/act_train.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'outcome': np.int8}, parse_dates=['date'])
act_test = pd.read_csv('../input/act_test.csv', dtype={'people_id': np.str, 'activity_id': np.str, 'outcome': np.int8}, parse_dates=['date'])
act_train['date'].groupby(act_train.date.dt.date).count().plot(figsize=(10, 5), label='Train')
act_test['date'].groupby(act_test.date.dt.date).count().plot(figsize=(10, 5), label='Test')
positive_counts = pd.DataFrame({'positive_counts': act_train[act_train['outcome'] == 1].groupby('people_id', as_index=True).size()}).reset_index()
negative_counts = pd.DataFrame({'negative_counts': act_train[act_train['outcome'] == 0].groupby('people_id', as_index=True).size()}).reset_index()
hstry = positive_counts.merge(negative_counts, on='people_id', how='left')
hstry['positive_counts'] = hstry['positive_counts'].fillna('0').astype(np.int64)
hstry['negative_counts'] = hstry['negative_counts'].fillna('0').astype(np.int64)
hstry['diff'] = hstry['positive_counts'] - hstry['negative_counts']
hstry.sort_values(by='positive_counts', ascending=False).head(10)
hstry.sort_values(by='negative_counts', ascending=False).head(10)
code
17119106/cell_21
[ "text_plain_output_1.png" ]
max_seq_len
code
17119106/cell_9
[ "text_plain_output_1.png" ]
import string
def clean_text(txt):
    txt = ''.join((w for w in txt if w not in string.punctuation)).lower()
    txt = txt.encode('utf8').decode('ascii', 'ignore')
    return txt
print(clean_text('Questions for: ‘Colleges Discover the Rural St..'))
code
17119106/cell_34
[ "text_plain_output_1.png" ]
from keras.layers import Embedding, Dense, Dropout, LSTM
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
import keras.utils as ku
import numpy as np # linear algebra
import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
import numpy as np
import pandas as pd
import os
currr_dir = '../input/'
all_headlines = []
x = 0
for filename in os.listdir(currr_dir):
    if 'Articles' in filename:
        article_df = pd.read_csv(currr_dir + filename)
        all_headlines.extend(list(article_df.headline.values))
        x = 1
        break
all_headlines = [h for h in all_headlines if h != 'Unknown']
def clean_text(txt):
    txt = ''.join((w for w in txt if w not in string.punctuation)).lower()
    txt = txt.encode('utf8').decode('ascii', 'ignore')
    return txt
corpus = [clean_text(x) for x in all_headlines]
tokenizer = Tokenizer()
def get_sequence_of_tokens(corpus):
    q = 0
    tokenizer.fit_on_texts(corpus)
    total_words = len(tokenizer.word_index) + 1
    input_sequences = []
    for line in corpus:
        token_list = tokenizer.texts_to_sequences([line])[0]
        for i in range(1, len(token_list)):
            n_gram_sequence = token_list[:i + 1]
            input_sequences.append(n_gram_sequence)
        q = 1
    return (input_sequences, total_words)
def generate_padded_sequences(input_sequence):
    max_sequence_len = max([len(x) for x in input_sequence])
    input_sequences = np.array(pad_sequences(input_sequence, maxlen=max_sequence_len, padding='pre'))
    predictors, labels = (input_sequences[:, :-1], input_sequences[:, -1])
    labels = ku.to_categorical(labels, num_classes=total_words)
    return (predictors, labels, max_sequence_len)
def create_model(max_seq_len, total_words):
    input_len = max_seq_len - 1
    model = Sequential()
    model.add(Embedding(total_words, 10, input_length=input_len))
    model.add(LSTM(100))
    model.add(Dropout(0.1))
    model.add(Dense(total_words, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
model = create_model(max_seq_len, total_words)
model.summary()
model.fit(predictors, labels, epochs=100, verbose=5)
def generate_text(seed_text, next_words, model, max_seq_len):
    w = 0
    for _ in range(next_words):
        token_list = tokenizer.texts_to_sequences([seed_text])[0]
        token_list = pad_sequences([token_list], maxlen=max_seq_len - 1, padding='pre')
        predicted = model.predict_classes(token_list, verbose=0)
        output_word = ''
        for word, index in tokenizer.word_index.items():
            if index == predicted:
                output_word = word
                break
        seed_text = seed_text + ' ' + output_word
        w = 1
    return seed_text
print(generate_text('united states', 5, model, max_seq_len))
print(generate_text('preident trump', 4, model, max_seq_len))
print(generate_text('donald trump', 4, model, max_seq_len))
print(generate_text('india and china', 4, model, max_seq_len))
print(generate_text('new york', 4, model, max_seq_len))
print(generate_text('science and technology', 5, model, max_seq_len))
code
17119106/cell_30
[ "text_plain_output_1.png" ]
from keras.layers import Embedding, Dense, Dropout, LSTM
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
import keras.utils as ku
import numpy as np # linear algebra
import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
import numpy as np
import pandas as pd
import os
currr_dir = '../input/'
all_headlines = []
x = 0
for filename in os.listdir(currr_dir):
    if 'Articles' in filename:
        article_df = pd.read_csv(currr_dir + filename)
        all_headlines.extend(list(article_df.headline.values))
        x = 1
        break
all_headlines = [h for h in all_headlines if h != 'Unknown']
def clean_text(txt):
    txt = ''.join((w for w in txt if w not in string.punctuation)).lower()
    txt = txt.encode('utf8').decode('ascii', 'ignore')
    return txt
corpus = [clean_text(x) for x in all_headlines]
tokenizer = Tokenizer()
def get_sequence_of_tokens(corpus):
    q = 0
    tokenizer.fit_on_texts(corpus)
    total_words = len(tokenizer.word_index) + 1
    input_sequences = []
    for line in corpus:
        token_list = tokenizer.texts_to_sequences([line])[0]
        for i in range(1, len(token_list)):
            n_gram_sequence = token_list[:i + 1]
            input_sequences.append(n_gram_sequence)
        q = 1
    return (input_sequences, total_words)
def generate_padded_sequences(input_sequence):
    max_sequence_len = max([len(x) for x in input_sequence])
    input_sequences = np.array(pad_sequences(input_sequence, maxlen=max_sequence_len, padding='pre'))
    predictors, labels = (input_sequences[:, :-1], input_sequences[:, -1])
    labels = ku.to_categorical(labels, num_classes=total_words)
    return (predictors, labels, max_sequence_len)
def create_model(max_seq_len, total_words):
    input_len = max_seq_len - 1
    model = Sequential()
    model.add(Embedding(total_words, 10, input_length=input_len))
    model.add(LSTM(100))
    model.add(Dropout(0.1))
    model.add(Dense(total_words, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
model = create_model(max_seq_len, total_words)
model.summary()
model.fit(predictors, labels, epochs=100, verbose=5)
code
17119106/cell_33
[ "text_plain_output_1.png" ]
from keras.layers import Embedding, Dense, Dropout, LSTM
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
import keras.utils as ku
import numpy as np # linear algebra
import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
import numpy as np
import pandas as pd
import os
currr_dir = '../input/'
all_headlines = []
x = 0
for filename in os.listdir(currr_dir):
    if 'Articles' in filename:
        article_df = pd.read_csv(currr_dir + filename)
        all_headlines.extend(list(article_df.headline.values))
        x = 1
        break
all_headlines = [h for h in all_headlines if h != 'Unknown']
def clean_text(txt):
    txt = ''.join((w for w in txt if w not in string.punctuation)).lower()
    txt = txt.encode('utf8').decode('ascii', 'ignore')
    return txt
corpus = [clean_text(x) for x in all_headlines]
tokenizer = Tokenizer()
def get_sequence_of_tokens(corpus):
    q = 0
    tokenizer.fit_on_texts(corpus)
    total_words = len(tokenizer.word_index) + 1
    input_sequences = []
    for line in corpus:
        token_list = tokenizer.texts_to_sequences([line])[0]
        for i in range(1, len(token_list)):
            n_gram_sequence = token_list[:i + 1]
            input_sequences.append(n_gram_sequence)
        q = 1
    return (input_sequences, total_words)
def generate_padded_sequences(input_sequence):
    max_sequence_len = max([len(x) for x in input_sequence])
    input_sequences = np.array(pad_sequences(input_sequence, maxlen=max_sequence_len, padding='pre'))
    predictors, labels = (input_sequences[:, :-1], input_sequences[:, -1])
    labels = ku.to_categorical(labels, num_classes=total_words)
    return (predictors, labels, max_sequence_len)
def create_model(max_seq_len, total_words):
    input_len = max_seq_len - 1
    model = Sequential()
    model.add(Embedding(total_words, 10, input_length=input_len))
    model.add(LSTM(100))
    model.add(Dropout(0.1))
    model.add(Dense(total_words, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
model = create_model(max_seq_len, total_words)
model.summary()
model.fit(predictors, labels, epochs=100, verbose=5)
def generate_text(seed_text, next_words, model, max_seq_len):
    w = 0
    for _ in range(next_words):
        token_list = tokenizer.texts_to_sequences([seed_text])[0]
        token_list = pad_sequences([token_list], maxlen=max_seq_len - 1, padding='pre')
        predicted = model.predict_classes(token_list, verbose=0)
        output_word = ''
        for word, index in tokenizer.word_index.items():
            if index == predicted:
                output_word = word
                break
        seed_text = seed_text + ' ' + output_word
        w = 1
    return seed_text
print(generate_text('united states', 5, model, max_seq_len))
code
17119106/cell_20
[ "text_plain_output_1.png" ]
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
import keras.utils as ku
import numpy as np # linear algebra
import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
import numpy as np
import pandas as pd
import os
currr_dir = '../input/'
all_headlines = []
x = 0
for filename in os.listdir(currr_dir):
    if 'Articles' in filename:
        article_df = pd.read_csv(currr_dir + filename)
        all_headlines.extend(list(article_df.headline.values))
        x = 1
        break
all_headlines = [h for h in all_headlines if h != 'Unknown']
def clean_text(txt):
    txt = ''.join((w for w in txt if w not in string.punctuation)).lower()
    txt = txt.encode('utf8').decode('ascii', 'ignore')
    return txt
corpus = [clean_text(x) for x in all_headlines]
tokenizer = Tokenizer()
def get_sequence_of_tokens(corpus):
    q = 0
    tokenizer.fit_on_texts(corpus)
    total_words = len(tokenizer.word_index) + 1
    input_sequences = []
    for line in corpus:
        token_list = tokenizer.texts_to_sequences([line])[0]
        for i in range(1, len(token_list)):
            n_gram_sequence = token_list[:i + 1]
            input_sequences.append(n_gram_sequence)
        q = 1
    return (input_sequences, total_words)
def generate_padded_sequences(input_sequence):
    max_sequence_len = max([len(x) for x in input_sequence])
    input_sequences = np.array(pad_sequences(input_sequence, maxlen=max_sequence_len, padding='pre'))
    predictors, labels = (input_sequences[:, :-1], input_sequences[:, -1])
    labels = ku.to_categorical(labels, num_classes=total_words)
    return (predictors, labels, max_sequence_len)
len(labels)
code
17119106/cell_6
[ "text_plain_output_1.png" ]
import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
currr_dir = '../input/'
all_headlines = []
x = 0
for filename in os.listdir(currr_dir):
    if 'Articles' in filename:
        if x == 0:
            print(filename)
        article_df = pd.read_csv(currr_dir + filename)
        if x == 0:
            print(article_df.shape)
            print(article_df.columns)
            print(article_df.head(5))
            print(article_df.tail(5))
        all_headlines.extend(list(article_df.headline.values))
        if x == 0:
            print(article_df.headline)
            print(article_df.headline.values)
        x = 1
        break
all_headlines = [h for h in all_headlines if h != 'Unknown']
print(len(all_headlines))
print(all_headlines[:5])
code
17119106/cell_29
[ "text_plain_output_1.png" ]
from keras.layers import Embedding, Dense, Dropout, LSTM
from keras.models import Sequential
from keras.preprocessing.text import Tokenizer
import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
import numpy as np
import pandas as pd
import os
currr_dir = '../input/'
all_headlines = []
x = 0
for filename in os.listdir(currr_dir):
    if 'Articles' in filename:
        article_df = pd.read_csv(currr_dir + filename)
        all_headlines.extend(list(article_df.headline.values))
        x = 1
        break
all_headlines = [h for h in all_headlines if h != 'Unknown']
def clean_text(txt):
    txt = ''.join((w for w in txt if w not in string.punctuation)).lower()
    txt = txt.encode('utf8').decode('ascii', 'ignore')
    return txt
corpus = [clean_text(x) for x in all_headlines]
tokenizer = Tokenizer()
def get_sequence_of_tokens(corpus):
    q = 0
    tokenizer.fit_on_texts(corpus)
    total_words = len(tokenizer.word_index) + 1
    input_sequences = []
    for line in corpus:
        token_list = tokenizer.texts_to_sequences([line])[0]
        for i in range(1, len(token_list)):
            n_gram_sequence = token_list[:i + 1]
            input_sequences.append(n_gram_sequence)
        q = 1
    return (input_sequences, total_words)
def create_model(max_seq_len, total_words):
    input_len = max_seq_len - 1
    model = Sequential()
    model.add(Embedding(total_words, 10, input_length=input_len))
    model.add(LSTM(100))
    model.add(Dropout(0.1))
    model.add(Dense(total_words, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    return model
model = create_model(max_seq_len, total_words)
model.summary()
code
17119106/cell_2
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
print(os.listdir('../input'))
code
17119106/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
import numpy as np
import pandas as pd
import os
currr_dir = '../input/'
all_headlines = []
x = 0
for filename in os.listdir(currr_dir):
    if 'Articles' in filename:
        article_df = pd.read_csv(currr_dir + filename)
        all_headlines.extend(list(article_df.headline.values))
        x = 1
        break
all_headlines = [h for h in all_headlines if h != 'Unknown']
def clean_text(txt):
    txt = ''.join((w for w in txt if w not in string.punctuation)).lower()
    txt = txt.encode('utf8').decode('ascii', 'ignore')
    return txt
corpus = [clean_text(x) for x in all_headlines]
print(corpus[:10])
code
17119106/cell_19
[ "text_plain_output_1.png" ]
len(predictors)
code
17119106/cell_15
[ "text_plain_output_1.png" ]
input_sequence[:10]
code
17119106/cell_14
[ "text_plain_output_1.png" ]
from keras.preprocessing.text import Tokenizer
import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
import numpy as np
import pandas as pd
import os
currr_dir = '../input/'
all_headlines = []
x = 0
for filename in os.listdir(currr_dir):
    if 'Articles' in filename:
        article_df = pd.read_csv(currr_dir + filename)
        all_headlines.extend(list(article_df.headline.values))
        x = 1
        break
all_headlines = [h for h in all_headlines if h != 'Unknown']
def clean_text(txt):
    txt = ''.join((w for w in txt if w not in string.punctuation)).lower()
    txt = txt.encode('utf8').decode('ascii', 'ignore')
    return txt
corpus = [clean_text(x) for x in all_headlines]
tokenizer = Tokenizer()
def get_sequence_of_tokens(corpus):
    q = 0
    tokenizer.fit_on_texts(corpus)
    total_words = len(tokenizer.word_index) + 1
    input_sequences = []
    for line in corpus:
        token_list = tokenizer.texts_to_sequences([line])[0]
        for i in range(1, len(token_list)):
            n_gram_sequence = token_list[:i + 1]
            input_sequences.append(n_gram_sequence)
            q = 1
    return (input_sequences, total_words)
input_sequence, total_words = get_sequence_of_tokens(corpus)
print(total_words)
code
17119106/cell_22
[ "text_plain_output_1.png" ]
from keras.preprocessing.text import Tokenizer
import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
import numpy as np
import pandas as pd
import os
currr_dir = '../input/'
all_headlines = []
x = 0
for filename in os.listdir(currr_dir):
    if 'Articles' in filename:
        article_df = pd.read_csv(currr_dir + filename)
        all_headlines.extend(list(article_df.headline.values))
        x = 1
        break
all_headlines = [h for h in all_headlines if h != 'Unknown']
def clean_text(txt):
    txt = ''.join((w for w in txt if w not in string.punctuation)).lower()
    txt = txt.encode('utf8').decode('ascii', 'ignore')
    return txt
corpus = [clean_text(x) for x in all_headlines]
tokenizer = Tokenizer()
def get_sequence_of_tokens(corpus):
    q = 0
    tokenizer.fit_on_texts(corpus)
    total_words = len(tokenizer.word_index) + 1
    input_sequences = []
    for line in corpus:
        token_list = tokenizer.texts_to_sequences([line])[0]
        for i in range(1, len(token_list)):
            n_gram_sequence = token_list[:i + 1]
            input_sequences.append(n_gram_sequence)
            q = 1
    return (input_sequences, total_words)
print(total_words)
code
17119106/cell_10
[ "text_plain_output_1.png" ]
import string
def clean_text(txt):
    txt = ''.join((w for w in txt if w not in string.punctuation)).lower()
    txt = txt.encode('utf8').decode('ascii', 'ignore')
    return txt
print(string.punctuation)
code
17119106/cell_5
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from numpy.random import seed
from tensorflow import set_random_seed
import warnings
from keras.models import Sequential
from keras.layers import Embedding, Dense, Dropout, LSTM
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import keras.utils as ku
from keras.callbacks import EarlyStopping
from tensorflow import set_random_seed
from numpy.random import seed
set_random_seed(2)
seed(1)
import string
import os
import warnings
warnings.filterwarnings('ignore')
warnings.simplefilter(action='ignore', category=FutureWarning)
code
1009465/cell_7
[ "text_plain_output_1.png" ]
train <- read.table('../input/train.csv', sep=',')
test <- read.table('../input/test.csv', sep=',')
train <- read.table('../input/train.csv', sep=',', header=TRUE)
test <- read.table('../input/test.csv', sep=',', header=TRUE)
str(train)
code
1009465/cell_5
[ "text_plain_output_1.png" ]
str(train)
code
18157731/cell_21
[ "text_plain_output_1.png" ]
from fastai.vision import *  # fastai v1 names used below: Path, ImageDataBunch, get_transforms, cnn_learner, ClassificationInterpretation, doc
from fastai.metrics import error_rate
import numpy as np # linear algebra
path = Path('../input/dataset')
train = path / 'training_set'
test = path / 'test_set'
np.random.seed(42)
data = ImageDataBunch.from_folder(train, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=True), size=224, num_workers=4).normalize(imagenet_stats)
data.classes
(data.classes, data.c, len(data.train_ds), len(data.valid_ds))
learn = cnn_learner(data, models.resnet50, metrics=error_rate)
learn.fit_one_cycle(50)
learn.model_dir = '/kaggle/working'
learn.save('resnet50_224', return_path=True)
learn.unfreeze()
learn.lr_find(start_lr=1e-06, end_lr=0.0001)
lr = 1e-05
learn.fit_one_cycle(5, max_lr=lr)
interp = ClassificationInterpretation.from_learner(learn)
losses, idxs = interp.top_losses()
len(data.valid_ds) == len(losses) == len(idxs)
doc(interp.plot_top_losses)
interp.most_confused(min_val=2)
code
18157731/cell_13
[ "text_plain_output_1.png" ]
from fastai.vision import *  # fastai v1 names used below: Path, ImageDataBunch, get_transforms, cnn_learner
from fastai.metrics import error_rate
import numpy as np # linear algebra
path = Path('../input/dataset')
train = path / 'training_set'
test = path / 'test_set'
np.random.seed(42)
data = ImageDataBunch.from_folder(train, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=True), size=224, num_workers=4).normalize(imagenet_stats)
data.classes
(data.classes, data.c, len(data.train_ds), len(data.valid_ds))
learn = cnn_learner(data, models.resnet50, metrics=error_rate)
learn.fit_one_cycle(50)
learn.model_dir = '/kaggle/working'
learn.save('resnet50_224', return_path=True)
code
18157731/cell_9
[ "text_plain_output_1.png" ]
from fastai.vision import *  # fastai v1 names used below: Path, ImageDataBunch, get_transforms, imagenet_stats
import numpy as np # linear algebra
path = Path('../input/dataset')
train = path / 'training_set'
test = path / 'test_set'
np.random.seed(42)
data = ImageDataBunch.from_folder(train, train='.', valid_pct=0.2, ds_tfms=get_transforms(do_flip=True), size=224, num_workers=4).normalize(imagenet_stats)
data.classes
data.show_batch(rows=3, figsize=(7, 8))
code