Dataset columns: path (string, length 13 to 17), screenshot_names (sequence, length 1 to 873), code (string, length 0 to 40.4k), cell_type (string, 1 class)
128030195/cell_14
[ "text_plain_output_1.png" ]
import tensorflow as tf
from keras.models import Model, load_model
from keras.utils.vis_utils import plot_model
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.layers import Dense, Input, Conv2D, MaxPool2D, Flatten, Activation, Dropout, BatchNormalization, GlobalAveragePooling2D
from tensorflow.keras.models import Model, Sequential

data_dir = '/kaggle/input/rgb-arabic-alphabets-sign-language-dataset-jpg/images_after'
images = tf.keras.utils.image_dataset_from_directory(data_dir, labels='inferred', label_mode='int', class_names=None, color_mode='rgb', batch_size=32, image_size=(256, 256), shuffle=True, seed=123, validation_split=0.2, subset='training', interpolation='bilinear')
images_validation = tf.keras.utils.image_dataset_from_directory(data_dir, labels='inferred', label_mode='int', class_names=None, color_mode='rgb', batch_size=32, image_size=(256, 256), shuffle=True, seed=123, validation_split=0.2, subset='validation', interpolation='bilinear')
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True)

resnet = ResNet50(include_top=False, weights='imagenet', input_shape=(256, 256, 3), pooling='max')
output = resnet.layers[-1].output
output = tf.keras.layers.Flatten()(output)
resnet = Model(resnet.input, output)
res_name = []
for layer in resnet.layers:
    res_name.append(layer.name)

set_trainable = False
for layer in resnet.layers:
    if layer.name in res_name[-22:]:
        set_trainable = True
    if set_trainable:
        layer.trainable = True
    else:
        layer.trainable = False

model6 = Sequential()
model6.add(resnet)
model6.add(BatchNormalization())
model6.add(Dense(2048, activation='relu'))
model6.add(Dropout(0.2))
model6.add(Dense(1024, activation='relu'))
model6.add(Dropout(0.2))
model6.add(Dense(512, activation='relu'))
model6.add(Dropout(0.2))
model6.add(Dense(256, activation='relu'))
model6.add(Dropout(0.2))
model6.add(Dense(128, activation='relu'))
model6.add(Dropout(0.2))
model6.add(Dense(64, activation='relu'))
model6.add(Dropout(0.2))
model6.add(Dense(31, activation='softmax'))
model6.summary()
plot_model(model6, show_shapes=True, show_layer_names=True, to_file='model.png')
code
128030195/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import tensorflow as tf
import tensorflow.keras as keras
from keras.models import Model, load_model
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.models import Model

data_dir = '/kaggle/input/rgb-arabic-alphabets-sign-language-dataset-jpg/images_after'
images = tf.keras.utils.image_dataset_from_directory(data_dir, labels='inferred', label_mode='int', class_names=None, color_mode='rgb', batch_size=32, image_size=(256, 256), shuffle=True, seed=123, validation_split=0.2, subset='training', interpolation='bilinear')
images_validation = tf.keras.utils.image_dataset_from_directory(data_dir, labels='inferred', label_mode='int', class_names=None, color_mode='rgb', batch_size=32, image_size=(256, 256), shuffle=True, seed=123, validation_split=0.2, subset='validation', interpolation='bilinear')
early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True)
resnet = ResNet50(include_top=False, weights='imagenet', input_shape=(256, 256, 3), pooling='max')
output = resnet.layers[-1].output
output = tf.keras.layers.Flatten()(output)
resnet = Model(resnet.input, output)
res_name = []
for layer in resnet.layers:
    res_name.append(layer.name)
code
128030195/cell_5
[ "text_plain_output_35.png", "text_plain_output_37.png", "text_plain_output_5.png", "text_plain_output_30.png", "text_plain_output_15.png", "text_plain_output_9.png", "text_plain_output_40.png", "text_plain_output_31.png", "text_plain_output_20.png", "text_plain_output_4.png", "text_plain_output_13.png", "text_plain_output_14.png", "text_plain_output_32.png", "text_plain_output_29.png", "text_plain_output_27.png", "text_plain_output_10.png", "text_plain_output_6.png", "text_plain_output_24.png", "text_plain_output_21.png", "text_plain_output_25.png", "text_plain_output_18.png", "text_plain_output_36.png", "text_plain_output_3.png", "text_plain_output_22.png", "text_plain_output_38.png", "text_plain_output_7.png", "text_plain_output_16.png", "text_plain_output_8.png", "text_plain_output_26.png", "text_plain_output_34.png", "text_plain_output_23.png", "text_plain_output_28.png", "text_plain_output_2.png", "text_plain_output_1.png", "text_plain_output_33.png", "text_plain_output_39.png", "text_plain_output_19.png", "text_plain_output_17.png", "text_plain_output_11.png", "text_plain_output_12.png" ]
import tensorflow as tf

data_dir = '/kaggle/input/rgb-arabic-alphabets-sign-language-dataset-jpg/images_after'
images = tf.keras.utils.image_dataset_from_directory(data_dir, labels='inferred', label_mode='int', class_names=None, color_mode='rgb', batch_size=32, image_size=(256, 256), shuffle=True, seed=123, validation_split=0.2, subset='training', interpolation='bilinear')
class_names = images.class_names
class_names
code
90116663/cell_13
[ "text_plain_output_1.png" ]
import numpy as np

a = np.random.randint(10, size=7)
b = np.random.randint(10, size=(3, 5))
c = np.random.randint(7, size=9, dtype=int)
d = np.array([1, 2, 3])
e = np.array([4, 5, 6])
f = np.concatenate([d, e])
g = np.array([7, 8, 9])
h = np.concatenate([f, g])
i = np.concatenate([d, e, g])
j = np.array([[1, 2, 3], [4, 5, 6]])
k = np.concatenate([j, j])
L = np.concatenate([j, j], axis=0)
m = np.concatenate([j, j], axis=1)
print(L)
print('---------')
print(m)
code
90116663/cell_9
[ "text_plain_output_1.png" ]
import numpy as np

a = np.random.randint(10, size=7)
b = np.random.randint(10, size=(3, 5))
c = np.random.randint(7, size=9, dtype=int)
d = np.array([1, 2, 3])
e = np.array([4, 5, 6])
f = np.concatenate([d, e])
print('f : ', f)
code
90116663/cell_4
[ "text_plain_output_1.png" ]
import numpy as np

a = np.random.randint(10, size=7)
b = np.random.randint(10, size=(3, 5))
print('a : ', a.ndim)
print('b : ', b.ndim)
code
90116663/cell_6
[ "text_plain_output_1.png" ]
import numpy as np

a = np.random.randint(10, size=7)
b = np.random.randint(10, size=(3, 5))
print('a : ', a.size)
print('b : ', b.size)
code
90116663/cell_2
[ "text_plain_output_1.png" ]
import numpy as np

print(np.random.randint(10, size=7))
code
90116663/cell_11
[ "text_plain_output_1.png" ]
import numpy as np

a = np.random.randint(10, size=7)
b = np.random.randint(10, size=(3, 5))
c = np.random.randint(7, size=9, dtype=int)
d = np.array([1, 2, 3])
e = np.array([4, 5, 6])
f = np.concatenate([d, e])
g = np.array([7, 8, 9])
h = np.concatenate([f, g])
i = np.concatenate([d, e, g])
j = np.array([[1, 2, 3], [4, 5, 6]])
k = np.concatenate([j, j])
print(k)
code
90116663/cell_7
[ "text_plain_output_1.png" ]
import numpy as np

a = np.random.randint(10, size=7)
b = np.random.randint(10, size=(3, 5))
print(a.dtype)
print(b.dtype)
code
90116663/cell_18
[ "text_plain_output_1.png" ]
import numpy as np

a = np.random.randint(10, size=7)
b = np.random.randint(10, size=(3, 5))
c = np.random.randint(7, size=9, dtype=int)
d = np.array([1, 2, 3])
e = np.array([4, 5, 6])
f = np.concatenate([d, e])
g = np.array([7, 8, 9])
h = np.concatenate([f, g])
i = np.concatenate([d, e, g])
j = np.array([[1, 2, 3], [4, 5, 6]])
k = np.concatenate([j, j])
L = np.concatenate([j, j], axis=0)
m = np.concatenate([j, j], axis=1)
n = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
o, p, r = np.split(n, [3, 5])
s = np.arange(16).reshape(4, 4)
print(np.hsplit(s, [2]))
code
90116663/cell_8
[ "text_plain_output_1.png" ]
import numpy as np

a = np.random.randint(10, size=7)
b = np.random.randint(10, size=(3, 5))
c = np.random.randint(7, size=9, dtype=int)
print('c : ', c)
print()
print('as a 3x3 matrix : \n\n', c.reshape(3, 3))
code
90116663/cell_15
[ "text_plain_output_1.png" ]
import numpy as np

a = np.random.randint(10, size=7)
b = np.random.randint(10, size=(3, 5))
c = np.random.randint(7, size=9, dtype=int)
d = np.array([1, 2, 3])
e = np.array([4, 5, 6])
f = np.concatenate([d, e])
g = np.array([7, 8, 9])
h = np.concatenate([f, g])
i = np.concatenate([d, e, g])
j = np.array([[1, 2, 3], [4, 5, 6]])
k = np.concatenate([j, j])
L = np.concatenate([j, j], axis=0)
m = np.concatenate([j, j], axis=1)
n = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
o, p, r = np.split(n, [3, 5])
print(o)
print(p)
print(r)
code
90116663/cell_16
[ "text_plain_output_1.png" ]
import numpy as np

a = np.random.randint(10, size=7)
b = np.random.randint(10, size=(3, 5))
c = np.random.randint(7, size=9, dtype=int)
d = np.array([1, 2, 3])
e = np.array([4, 5, 6])
f = np.concatenate([d, e])
g = np.array([7, 8, 9])
h = np.concatenate([f, g])
i = np.concatenate([d, e, g])
j = np.array([[1, 2, 3], [4, 5, 6]])
k = np.concatenate([j, j])
L = np.concatenate([j, j], axis=0)
m = np.concatenate([j, j], axis=1)
n = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
o, p, r = np.split(n, [3, 5])
s = np.arange(16).reshape(4, 4)
print(s)
code
90116663/cell_17
[ "text_plain_output_1.png" ]
import numpy as np

a = np.random.randint(10, size=7)
b = np.random.randint(10, size=(3, 5))
c = np.random.randint(7, size=9, dtype=int)
d = np.array([1, 2, 3])
e = np.array([4, 5, 6])
f = np.concatenate([d, e])
g = np.array([7, 8, 9])
h = np.concatenate([f, g])
i = np.concatenate([d, e, g])
j = np.array([[1, 2, 3], [4, 5, 6]])
k = np.concatenate([j, j])
L = np.concatenate([j, j], axis=0)
m = np.concatenate([j, j], axis=1)
n = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
o, p, r = np.split(n, [3, 5])
s = np.arange(16).reshape(4, 4)
print(np.vsplit(s, [2]))
code
90116663/cell_14
[ "text_plain_output_1.png" ]
import numpy as np

a = np.random.randint(10, size=7)
b = np.random.randint(10, size=(3, 5))
c = np.random.randint(7, size=9, dtype=int)
d = np.array([1, 2, 3])
e = np.array([4, 5, 6])
f = np.concatenate([d, e])
g = np.array([7, 8, 9])
h = np.concatenate([f, g])
i = np.concatenate([d, e, g])
j = np.array([[1, 2, 3], [4, 5, 6]])
k = np.concatenate([j, j])
L = np.concatenate([j, j], axis=0)
m = np.concatenate([j, j], axis=1)
n = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
print(np.split(n, [3, 5]))
code
90116663/cell_10
[ "text_plain_output_1.png" ]
import numpy as np

a = np.random.randint(10, size=7)
b = np.random.randint(10, size=(3, 5))
c = np.random.randint(7, size=9, dtype=int)
d = np.array([1, 2, 3])
e = np.array([4, 5, 6])
f = np.concatenate([d, e])
g = np.array([7, 8, 9])
h = np.concatenate([f, g])
print(h)
print('-------------------')
i = np.concatenate([d, e, g])
print(i)
code
90116663/cell_5
[ "text_plain_output_1.png" ]
import numpy as np

a = np.random.randint(10, size=7)
b = np.random.randint(10, size=(3, 5))
print('a : ', a.shape)
print('b : ', b.shape)
code
74042282/cell_6
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import os
import glob
import json
import numpy as np
from google.colab import drive
from tqdm import tqdm

def is_in_ipython():
    """Is the code running in the ipython environment (jupyter including)"""
    program_name = os.path.basename(os.getenv('_', ''))
    if 'jupyter-notebook' in program_name or 'ipython' in program_name or 'jupyter' in program_name or ('JPY_PARENT_PID' in os.environ):
        return True
    else:
        return False

def is_in_colab():
    if not is_in_ipython():
        return False
    try:
        from google import colab
        return True
    except:
        return False

def is_in_kaggle_kernal():
    if 'kaggle' in os.environ['PYTHONPATH']:
        return True
    else:
        return False

if is_in_colab():
    from google.colab import drive
    drive.mount('/content/gdrive')

os.environ['TRIDENT_BACKEND'] = 'pytorch'
kaggle_kernal = None
if is_in_kaggle_kernal():
    os.environ['TRIDENT_HOME'] = './trident'
elif is_in_colab():
    os.environ['TRIDENT_HOME'] = '/content/gdrive/My Drive/trident'

# Class names (Chinese): no mask / mask worn correctly / mask worn incorrectly / non-protective mask worn
class_names = ['未配戴口罩', '正確配戴口罩', '未正確配戴口罩', '配戴無防護能力口罩']
palette = [(0, 255, 255), (255, 255, 128), (255, 0, 255), (0, 255, 0)]

imgs = glob.glob('../input/maskedfacesdetection/imgs/*.*g')
path_dict = {}
for im_path in imgs:
    folder, filename = os.path.split(os.path.normpath(im_path))
    folder, file, ext = split_path(filename)
    path_dict[file] = im_path

annos = OrderedDict()
jsons = glob.glob('../input/maskedfacesdetection/tags/*.json')
images = []
boxes = []
for j in tqdm(range(len(jsons))):
    j_item = jsons[j]
    try:
        folder, file, ext = split_path(j_item)
        box_collection = []
        label_collection = []
        if file in path_dict and os.path.exists(path_dict[file]):
            img_path = path_dict[file]
            f = open(j_item, 'r', encoding='utf-8-sig')
            im = image2array(img_path)
            tagging = json.load(f)
            w = im.shape[1]
            h = im.shape[0]
            if len(tagging['outputs']) > 0 and len(tagging['outputs']['object']) > 0:
                folder, file, ext = split_path(img_path)
                annos[file + ext] = []
                for i in range(len(tagging['outputs']['object'])):
                    tag = tagging['outputs']['object'][i]
                    label = int(tag['name']) + 1
                    x1 = max(tag['bndbox']['xmin'], 0)
                    y1 = max(tag['bndbox']['ymin'], 0)
                    x2 = min(tag['bndbox']['xmax'], w)
                    y2 = min(tag['bndbox']['ymax'], h)
                    bbox = [x1, y1, x2, y2, label]
                    annos[file + ext].append(np.array([x1, y1, x2, y2, label]))
                    box_collection.append(bbox)
            box_collection = np.array(box_collection, dtype=np.float32)
            boxes.append(box_collection)
            images.append(img_path)
    except:
        pickle_it('annos1.pkl', annos)

print(boxes[:20])
print('')
print(images[:20])
code
74042282/cell_8
[ "text_plain_output_1.png" ]
import os
import glob
import json
import random
import numpy as np
from PIL import Image, ImageDraw
from google.colab import drive
from tqdm import tqdm

def is_in_ipython():
    """Is the code running in the ipython environment (jupyter including)"""
    program_name = os.path.basename(os.getenv('_', ''))
    if 'jupyter-notebook' in program_name or 'ipython' in program_name or 'jupyter' in program_name or ('JPY_PARENT_PID' in os.environ):
        return True
    else:
        return False

def is_in_colab():
    if not is_in_ipython():
        return False
    try:
        from google import colab
        return True
    except:
        return False

def is_in_kaggle_kernal():
    if 'kaggle' in os.environ['PYTHONPATH']:
        return True
    else:
        return False

if is_in_colab():
    from google.colab import drive
    drive.mount('/content/gdrive')

os.environ['TRIDENT_BACKEND'] = 'pytorch'
kaggle_kernal = None
if is_in_kaggle_kernal():
    os.environ['TRIDENT_HOME'] = './trident'
elif is_in_colab():
    os.environ['TRIDENT_HOME'] = '/content/gdrive/My Drive/trident'

# Class names (Chinese): no mask / mask worn correctly / mask worn incorrectly / non-protective mask worn
class_names = ['未配戴口罩', '正確配戴口罩', '未正確配戴口罩', '配戴無防護能力口罩']
palette = [(0, 255, 255), (255, 255, 128), (255, 0, 255), (0, 255, 0)]

imgs = glob.glob('../input/maskedfacesdetection/imgs/*.*g')
path_dict = {}
for im_path in imgs:
    folder, filename = os.path.split(os.path.normpath(im_path))
    folder, file, ext = split_path(filename)
    path_dict[file] = im_path

annos = OrderedDict()
jsons = glob.glob('../input/maskedfacesdetection/tags/*.json')
images = []
boxes = []
for j in tqdm(range(len(jsons))):
    j_item = jsons[j]
    try:
        folder, file, ext = split_path(j_item)
        box_collection = []
        label_collection = []
        if file in path_dict and os.path.exists(path_dict[file]):
            img_path = path_dict[file]
            f = open(j_item, 'r', encoding='utf-8-sig')
            im = image2array(img_path)
            tagging = json.load(f)
            w = im.shape[1]
            h = im.shape[0]
            if len(tagging['outputs']) > 0 and len(tagging['outputs']['object']) > 0:
                folder, file, ext = split_path(img_path)
                annos[file + ext] = []
                for i in range(len(tagging['outputs']['object'])):
                    tag = tagging['outputs']['object'][i]
                    label = int(tag['name']) + 1
                    x1 = max(tag['bndbox']['xmin'], 0)
                    y1 = max(tag['bndbox']['ymin'], 0)
                    x2 = min(tag['bndbox']['xmax'], w)
                    y2 = min(tag['bndbox']['ymax'], h)
                    bbox = [x1, y1, x2, y2, label]
                    annos[file + ext].append(np.array([x1, y1, x2, y2, label]))
                    box_collection.append(bbox)
            box_collection = np.array(box_collection, dtype=np.float32)
            boxes.append(box_collection)
            images.append(img_path)
    except:
        pickle_it('annos1.pkl', annos)

# Demonstrates how to read the person boxes and the corresponding keypoints
from PIL import Image, ImageDraw
import builtins

def draw_sample(idx=0):
    keys = annos.key_list
    key_idx = keys[idx]
    boxes = annos[keys[idx]]
    pillow_img = array2image(image2array(os.path.join('../input/maskedfacesdetection/imgs/{0}'.format(key_idx))))
    for box in boxes:
        this_box = box[:4]
        this_label = box[4]
        thiscolor = palette[this_label - 1]
        pillow_img = plot_bbox(this_box, pillow_img, thiscolor, this_label, line_thickness=2)
    draw = ImageDraw.Draw(pillow_img)
    return pillow_img.resize((pillow_img.width * 3, pillow_img.height * 3))

draw_sample(random.choice(list(range(len(annos)))))
code
74042282/cell_3
[ "text_plain_output_1.png" ]
# Make sure the latest version is installed
!pip uninstall tridentx -y
!pip install ../input/trident/tridentx-0.7.3.20-py3-none-any.whl --upgrade
import re
import pandas
import json
import copy
import numpy as np
# Bring in the trident API
import random
from tqdm import tqdm
import scipy
import time
import glob
import trident as T
from trident import *
from trident.models import rfbnet
code
74042282/cell_10
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_4.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from trident.models import rfbnet

new_rfbmodel = rfbnet.RfbNet(pretrianed=False, num_classes=5, num_regressors=4)
new_rfbmodel.model.trainable = True
new_rfbmodel.summary()
code
74042282/cell_5
[ "image_output_1.png" ]
import os
import glob
import json
import numpy as np
from google.colab import drive
from tqdm import tqdm

def is_in_ipython():
    """Is the code running in the ipython environment (jupyter including)"""
    program_name = os.path.basename(os.getenv('_', ''))
    if 'jupyter-notebook' in program_name or 'ipython' in program_name or 'jupyter' in program_name or ('JPY_PARENT_PID' in os.environ):
        return True
    else:
        return False

def is_in_colab():
    if not is_in_ipython():
        return False
    try:
        from google import colab
        return True
    except:
        return False

def is_in_kaggle_kernal():
    if 'kaggle' in os.environ['PYTHONPATH']:
        return True
    else:
        return False

if is_in_colab():
    from google.colab import drive
    drive.mount('/content/gdrive')

os.environ['TRIDENT_BACKEND'] = 'pytorch'
kaggle_kernal = None
if is_in_kaggle_kernal():
    os.environ['TRIDENT_HOME'] = './trident'
elif is_in_colab():
    os.environ['TRIDENT_HOME'] = '/content/gdrive/My Drive/trident'

# Class names (Chinese): no mask / mask worn correctly / mask worn incorrectly / non-protective mask worn
class_names = ['未配戴口罩', '正確配戴口罩', '未正確配戴口罩', '配戴無防護能力口罩']
palette = [(0, 255, 255), (255, 255, 128), (255, 0, 255), (0, 255, 0)]

imgs = glob.glob('../input/maskedfacesdetection/imgs/*.*g')
path_dict = {}
for im_path in imgs:
    folder, filename = os.path.split(os.path.normpath(im_path))
    folder, file, ext = split_path(filename)
    path_dict[file] = im_path

annos = OrderedDict()
jsons = glob.glob('../input/maskedfacesdetection/tags/*.json')
images = []
boxes = []
for j in tqdm(range(len(jsons))):
    j_item = jsons[j]
    try:
        folder, file, ext = split_path(j_item)
        box_collection = []
        label_collection = []
        if file in path_dict and os.path.exists(path_dict[file]):
            img_path = path_dict[file]
            f = open(j_item, 'r', encoding='utf-8-sig')
            im = image2array(img_path)
            tagging = json.load(f)
            w = im.shape[1]
            h = im.shape[0]
            if len(tagging['outputs']) > 0 and len(tagging['outputs']['object']) > 0:
                folder, file, ext = split_path(img_path)
                annos[file + ext] = []
                for i in range(len(tagging['outputs']['object'])):
                    tag = tagging['outputs']['object'][i]
                    label = int(tag['name']) + 1
                    x1 = max(tag['bndbox']['xmin'], 0)
                    y1 = max(tag['bndbox']['ymin'], 0)
                    x2 = min(tag['bndbox']['xmax'], w)
                    y2 = min(tag['bndbox']['ymax'], h)
                    bbox = [x1, y1, x2, y2, label]
                    annos[file + ext].append(np.array([x1, y1, x2, y2, label]))
                    box_collection.append(bbox)
            box_collection = np.array(box_collection, dtype=np.float32)
            boxes.append(box_collection)
            images.append(img_path)
    except:
        print(j_item)

print('boxes', len(boxes), 'images', len(images))
pickle_it('annos1.pkl', annos)
code
17113265/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

plt.style.use('ggplot')
zoo = pd.read_csv('../input/zoo.csv')
data = zoo.copy()
data.drop('animal_name', axis=1, inplace=True)
x = data.drop('class_type', axis=1)
y = data.class_type.values
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
svm = SVC(random_state=42, kernel='linear')
svm.fit(x_train, y_train)
y_pred_svm = svm.predict(x_test)
cm_svm = confusion_matrix(y_test, y_pred_svm)
cr_svm = classification_report(y_test, y_pred_svm)
plt.figure(figsize=(10, 8))
sns.heatmap(cm_svm, annot=True, cmap='Blues', xticklabels=np.arange(1, 8), yticklabels=np.arange(1, 8))
plt.show()
code
17113265/cell_13
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

plt.style.use('ggplot')
zoo = pd.read_csv('../input/zoo.csv')
data = zoo.copy()
data.drop('animal_name', axis=1, inplace=True)
x = data.drop('class_type', axis=1)
y = data.class_type.values
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
print('x_train shape : ', x_train.shape)
print('x_test shape : ', x_test.shape)
print('y_train shape : ', y_train.shape)
print('y_test shape : ', y_test.shape)
code
17113265/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

plt.style.use('ggplot')
zoo = pd.read_csv('../input/zoo.csv')
print(zoo.class_type.value_counts())
plt.figure(figsize=(10, 8))
sns.countplot(zoo.class_type)
plt.show()
code
17113265/cell_25
[ "text_plain_output_1.png" ]
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

plt.style.use('ggplot')
zoo = pd.read_csv('../input/zoo.csv')
data = zoo.copy()
data.drop('animal_name', axis=1, inplace=True)
x = data.drop('class_type', axis=1)
y = data.class_type.values
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
svm = SVC(random_state=42, kernel='linear')
svm.fit(x_train, y_train)
y_pred_svm = svm.predict(x_test)
cm_svm = confusion_matrix(y_test, y_pred_svm)
cr_svm = classification_report(y_test, y_pred_svm)
scr_max = 0
knn_test_score_list = []
knn_train_score_list = []
for i in range(1, x_train.shape[0] + 1):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(x_train, y_train)
    knn_test_scr = knn.score(x_test, y_test)
    knn_test_score_list.append(knn_test_scr)
    knn_train_scr = knn.score(x_train, y_train)
    knn_train_score_list.append(knn_train_scr)
    if knn_test_scr >= scr_max:
        scr_max = knn_test_scr
        index = i
print('Best K value = ', index)
print('Best score = ', scr_max)
plt.figure(figsize=(15, 10))
plt.plot(range(1, x_train.shape[0] + 1), knn_test_score_list, label='test')
plt.plot(range(1, x_train.shape[0] + 1), knn_train_score_list, label='train')
plt.legend()
plt.xlabel('K Values')
plt.ylabel('Scores')
plt.show()
code
17113265/cell_4
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
import os
print(os.listdir('../input'))
code
17113265/cell_30
[ "image_output_1.png" ]
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

plt.style.use('ggplot')
zoo = pd.read_csv('../input/zoo.csv')
data = zoo.copy()
data.drop('animal_name', axis=1, inplace=True)
x = data.drop('class_type', axis=1)
y = data.class_type.values
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
svm = SVC(random_state=42, kernel='linear')
svm.fit(x_train, y_train)
y_pred_svm = svm.predict(x_test)
cm_svm = confusion_matrix(y_test, y_pred_svm)
cr_svm = classification_report(y_test, y_pred_svm)
scr_max = 0
knn_test_score_list = []
knn_train_score_list = []
for i in range(1, x_train.shape[0] + 1):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(x_train, y_train)
    knn_test_scr = knn.score(x_test, y_test)
    knn_test_score_list.append(knn_test_scr)
    knn_train_scr = knn.score(x_train, y_train)
    knn_train_score_list.append(knn_train_scr)
    if knn_test_scr >= scr_max:
        scr_max = knn_test_scr
        index = i
dec_tree = DecisionTreeClassifier(random_state=42)
dec_tree.fit(x_train, y_train)
y_pred_tree = dec_tree.predict(x_test)
print('Test Accuracy : ', dec_tree.score(x_test, y_test))
print('Train Accuracy : ', dec_tree.score(x_train, y_train))
code
17113265/cell_20
[ "text_plain_output_1.png" ]
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

plt.style.use('ggplot')
zoo = pd.read_csv('../input/zoo.csv')
data = zoo.copy()
data.drop('animal_name', axis=1, inplace=True)
x = data.drop('class_type', axis=1)
y = data.class_type.values
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
svm = SVC(random_state=42, kernel='linear')
svm.fit(x_train, y_train)
y_pred_svm = svm.predict(x_test)
cm_svm = confusion_matrix(y_test, y_pred_svm)
cr_svm = classification_report(y_test, y_pred_svm)
print('confusion matrix : \n', cm_svm)
print('classification report : \n', cr_svm)
code
17113265/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

zoo = pd.read_csv('../input/zoo.csv')
zoo.head()
code
17113265/cell_26
[ "text_plain_output_1.png" ]
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

plt.style.use('ggplot')
zoo = pd.read_csv('../input/zoo.csv')
data = zoo.copy()
data.drop('animal_name', axis=1, inplace=True)
x = data.drop('class_type', axis=1)
y = data.class_type.values
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
svm = SVC(random_state=42, kernel='linear')
svm.fit(x_train, y_train)
y_pred_svm = svm.predict(x_test)
cm_svm = confusion_matrix(y_test, y_pred_svm)
cr_svm = classification_report(y_test, y_pred_svm)
scr_max = 0
knn_test_score_list = []
knn_train_score_list = []
for i in range(1, x_train.shape[0] + 1):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(x_train, y_train)
    knn_test_scr = knn.score(x_test, y_test)
    knn_test_score_list.append(knn_test_scr)
    knn_train_scr = knn.score(x_train, y_train)
    knn_train_score_list.append(knn_train_scr)
    if knn_test_scr >= scr_max:
        scr_max = knn_test_scr
        index = i
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(x_train, y_train)
y_pred_knn = knn.predict(x_test)
cr_knn = classification_report(y_test, y_pred_knn)
cm_knn = confusion_matrix(y_test, y_pred_knn)
print('confusion matrix : \n', cm_knn)
print('classification report : \n', cr_knn)
code
17113265/cell_19
[ "text_html_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

plt.style.use('ggplot')
zoo = pd.read_csv('../input/zoo.csv')
data = zoo.copy()
data.drop('animal_name', axis=1, inplace=True)
x = data.drop('class_type', axis=1)
y = data.class_type.values
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
svm = SVC(random_state=42, kernel='linear')
svm.fit(x_train, y_train)
y_pred_svm = svm.predict(x_test)
print('Train Accuracy : ', svm.score(x_train, y_train))
print('Test Accuracy : ', svm.score(x_test, y_test))
code
17113265/cell_7
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

zoo = pd.read_csv('../input/zoo.csv')
zoo.info()
code
17113265/cell_32
[ "text_plain_output_1.png" ]
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

plt.style.use('ggplot')
zoo = pd.read_csv('../input/zoo.csv')
data = zoo.copy()
data.drop('animal_name', axis=1, inplace=True)
x = data.drop('class_type', axis=1)
y = data.class_type.values
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
svm = SVC(random_state=42, kernel='linear')
svm.fit(x_train, y_train)
y_pred_svm = svm.predict(x_test)
cm_svm = confusion_matrix(y_test, y_pred_svm)
cr_svm = classification_report(y_test, y_pred_svm)
scr_max = 0
knn_test_score_list = []
knn_train_score_list = []
for i in range(1, x_train.shape[0] + 1):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(x_train, y_train)
    knn_test_scr = knn.score(x_test, y_test)
    knn_test_score_list.append(knn_test_scr)
    knn_train_scr = knn.score(x_train, y_train)
    knn_train_score_list.append(knn_train_scr)
    if knn_test_scr >= scr_max:
        scr_max = knn_test_scr
        index = i
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(x_train, y_train)
y_pred_knn = knn.predict(x_test)
cr_knn = classification_report(y_test, y_pred_knn)
cm_knn = confusion_matrix(y_test, y_pred_knn)
dec_tree = DecisionTreeClassifier(random_state=42)
dec_tree.fit(x_train, y_train)
y_pred_tree = dec_tree.predict(x_test)
cm_tree = confusion_matrix(y_test, y_pred_tree)
cr_tree = classification_report(y_test, y_pred_tree)
plt.figure(figsize=(10, 8))
sns.heatmap(cm_tree, annot=True, xticklabels=np.arange(1, 8), yticklabels=np.arange(1, 8), cmap='Greens')
plt.show()
code
17113265/cell_31
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

plt.style.use('ggplot')
zoo = pd.read_csv('../input/zoo.csv')
data = zoo.copy()
data.drop('animal_name', axis=1, inplace=True)
x = data.drop('class_type', axis=1)
y = data.class_type.values
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
svm = SVC(random_state=42, kernel='linear')
svm.fit(x_train, y_train)
y_pred_svm = svm.predict(x_test)
cm_svm = confusion_matrix(y_test, y_pred_svm)
cr_svm = classification_report(y_test, y_pred_svm)
scr_max = 0
knn_test_score_list = []
knn_train_score_list = []
for i in range(1, x_train.shape[0] + 1):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(x_train, y_train)
    knn_test_scr = knn.score(x_test, y_test)
    knn_test_score_list.append(knn_test_scr)
    knn_train_scr = knn.score(x_train, y_train)
    knn_train_score_list.append(knn_train_scr)
    if knn_test_scr >= scr_max:
        scr_max = knn_test_scr
        index = i
dec_tree = DecisionTreeClassifier(random_state=42)
dec_tree.fit(x_train, y_train)
y_pred_tree = dec_tree.predict(x_test)
cm_tree = confusion_matrix(y_test, y_pred_tree)
cr_tree = classification_report(y_test, y_pred_tree)
print('confusion matrix : \n', cm_tree)
print('classification report : \n', cr_tree)
code
17113265/cell_27
[ "text_plain_output_1.png" ]
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

plt.style.use('ggplot')
zoo = pd.read_csv('../input/zoo.csv')
data = zoo.copy()
data.drop('animal_name', axis=1, inplace=True)
x = data.drop('class_type', axis=1)
y = data.class_type.values
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=42)
svm = SVC(random_state=42, kernel='linear')
svm.fit(x_train, y_train)
y_pred_svm = svm.predict(x_test)
cm_svm = confusion_matrix(y_test, y_pred_svm)
cr_svm = classification_report(y_test, y_pred_svm)
scr_max = 0
knn_test_score_list = []
knn_train_score_list = []
for i in range(1, x_train.shape[0] + 1):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(x_train, y_train)
    knn_test_scr = knn.score(x_test, y_test)
    knn_test_score_list.append(knn_test_scr)
    knn_train_scr = knn.score(x_train, y_train)
    knn_train_score_list.append(knn_train_scr)
    if knn_test_scr >= scr_max:
        scr_max = knn_test_scr
        index = i
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(x_train, y_train)
y_pred_knn = knn.predict(x_test)
cr_knn = classification_report(y_test, y_pred_knn)
cm_knn = confusion_matrix(y_test, y_pred_knn)
plt.figure(figsize=(10, 8))
sns.heatmap(cm_knn, annot=True, xticklabels=np.arange(1, 8), yticklabels=np.arange(1, 8))
plt.show()
code
105212440/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
import cv2
import os
code
105212440/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np  # linear algebra

train_cats = '../input/cat-and-dogs/dataset/training_set/cats'
train_dogs = '../input/cat-and-dogs/dataset/training_set/dogs'
# Note: in the original cell these "test" paths also point at the training_set directories
test_cats = '../input/cat-and-dogs/dataset/training_set/cats'
test_dogs = '../input/cat-and-dogs/dataset/training_set/dogs'
simgle_image = '../input/cat-and-dogs/dataset/single_prediction'
image_size = 128
for image1 in os.listdir(train_cats):
    path = os.path.join(train_cats, image1)
    img1 = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    img1 = cv2.resize(img1, (image_size, image_size)).flatten()
    np_img1 = np.asarray(img1)
for image2 in os.listdir(train_dogs):
    path = os.path.join(train_dogs, image2)
    img2 = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    img2 = cv2.resize(img2, (image_size, image_size)).flatten()
    np_img2 = np.asarray(img2)
plt.figure(figsize=(10, 10))
plt.subplot(1, 2, 1)
plt.imshow(np_img1.reshape(image_size, image_size))
plt.axis('off')
plt.subplot(1, 2, 2)
plt.imshow(np_img2.reshape(image_size, image_size))
plt.axis('off')
code
104126200/cell_9
[ "text_plain_output_1.png" ]
import random
import numpy as np
import torch

def set_seed(seed=0):
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)

set_seed()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device
code
104126200/cell_29
[ "text_plain_output_1.png" ]
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms

def set_seed(seed=0):
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)

set_seed()
BATCH_SIZE = 64
LEARNING_RATE = 0.01
N_EPOCHS = 20
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device

transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_dataset = torchvision.datasets.CIFAR10(root='./data', train=True, transform=transform, download=True)
test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False, transform=transform, download=False)
train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, shuffle=False)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']

class ConvNet(nn.Module):

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, stride=1, padding=0)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.conv2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=0)
        self.flat = nn.Flatten()
        self.fc1 = nn.Linear(in_features=32 * 5 * 5, out_features=128)
        self.fc2 = nn.Linear(in_features=128, out_features=64)
        self.fc3 = nn.Linear(in_features=64, out_features=10)
        self.relu = nn.ReLU()

    def forward(self, x):
        out = self.conv1(x)
        out = self.relu(out)
        out = self.pool(out)
        out = self.conv2(out)
        out = self.relu(out)
        out = self.pool(out)
        out = self.flat(out)
        out = self.fc1(out)
        out = self.relu(out)
        out = self.fc2(out)
        out = self.relu(out)
        out = self.fc3(out)
        return out

model = ConvNet().to(device)
loss = nn.CrossEntropyLoss()
optimiser = optim.SGD(params=model.parameters(), lr=LEARNING_RATE)
scheduler = lr_scheduler.CosineAnnealingLR(optimiser, T_max=N_EPOCHS)
for epoch in range(N_EPOCHS):
    loss_acc = 0
    train_count = 0
    for i, (imgs, labels) in enumerate(train_loader):
        imgs = imgs.to(device)
        labels = labels.to(device)
        preds = model(imgs)
        L = loss(preds, labels)
        L.backward()
        optimiser.step()
        optimiser.zero_grad()
        loss_acc += L.detach().item()
        train_count += 1
    scheduler.step()

PATH = 'ConvNet.pt'
torch.save({'epoch': N_EPOCHS, 'model_state_dict': model.state_dict(), 'optimiser_state_dict': optimiser.state_dict(), 'scheduler_state_dict': scheduler.state_dict(), 'loss': loss_acc / train_count}, PATH)
model = ConvNet().to(device)
optimiser = optim.SGD(params=model.parameters(), lr=LEARNING_RATE)
scheduler = lr_scheduler.CosineAnnealingLR(optimiser, T_max=N_EPOCHS)
checkpoint = torch.load(PATH)
model.load_state_dict(checkpoint['model_state_dict'])
optimiser.load_state_dict(checkpoint['optimiser_state_dict'])
scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
epoch = checkpoint['epoch']
loss = checkpoint['loss']
model.eval()
with torch.no_grad():
    n_correct = 0
    n_samples = 0
    n_class_correct = [0 for i in range(10)]
    n_class_sample = [0 for i in range(10)]
    for imgs, labels in test_loader:
        imgs = imgs.to(device)
        labels = labels.to(device)
        output = model(imgs)
        _, preds = torch.max(output, 1)
        n_samples += labels.shape[0]
        n_correct += (preds == labels).sum().item()
        for i in range(BATCH_SIZE):
            try:
                label = labels[i].item()
                pred = preds[i].item()
            except:
                break  # the last batch may be smaller than BATCH_SIZE
            if label == pred:
                n_class_correct[label] += 1
            n_class_sample[label] += 1
    acc = 100 * n_correct / n_samples
    print(f'Overall accuracy on test set: {acc:.1f} %')
    for i in range(10):
        print(f'Accuracy of {classes[i]}: {100 * n_class_correct[i] / n_class_sample[i]:.1f} %')
code
104126200/cell_26
[ "text_plain_output_1.png" ]
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms

def set_seed(seed=0):
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)

set_seed()
BATCH_SIZE = 64
LEARNING_RATE = 0.01
N_EPOCHS = 20
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device

transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_dataset = torchvision.datasets.CIFAR10(root='./data', train=True, transform=transform, download=True)
test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False, transform=transform, download=False)
train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, shuffle=False)

class ConvNet(nn.Module):

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, stride=1, padding=0)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.conv2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=0)
        self.flat = nn.Flatten()
        self.fc1 = nn.Linear(in_features=32 * 5 * 5, out_features=128)
        self.fc2 = nn.Linear(in_features=128, out_features=64)
        self.fc3 = nn.Linear(in_features=64, out_features=10)
        self.relu = nn.ReLU()

    def forward(self, x):
        out = self.conv1(x)
        out = self.relu(out)
        out = self.pool(out)
        out = self.conv2(out)
        out = self.relu(out)
        out = self.pool(out)
        out = self.flat(out)
        out = self.fc1(out)
        out = self.relu(out)
        out = self.fc2(out)
        out = self.relu(out)
        out = self.fc3(out)
        return out

model = ConvNet().to(device)
loss = nn.CrossEntropyLoss()
optimiser = optim.SGD(params=model.parameters(), lr=LEARNING_RATE)
scheduler = lr_scheduler.CosineAnnealingLR(optimiser, T_max=N_EPOCHS)
for epoch in range(N_EPOCHS):
    loss_acc = 0
    train_count = 0
    for i, (imgs, labels) in enumerate(train_loader):
        imgs = imgs.to(device)
        labels = labels.to(device)
        preds = model(imgs)
        L = loss(preds, labels)
        L.backward()
        optimiser.step()
        optimiser.zero_grad()
        loss_acc += L.detach().item()
        train_count += 1
    scheduler.step()

PATH = 'ConvNet.pt'
torch.save({'epoch': N_EPOCHS, 'model_state_dict': model.state_dict(), 'optimiser_state_dict': optimiser.state_dict(), 'scheduler_state_dict': scheduler.state_dict(), 'loss': loss_acc / train_count}, PATH)
model = ConvNet().to(device)
optimiser = optim.SGD(params=model.parameters(), lr=LEARNING_RATE)
scheduler = lr_scheduler.CosineAnnealingLR(optimiser, T_max=N_EPOCHS)
checkpoint = torch.load(PATH)
model.load_state_dict(checkpoint['model_state_dict'])
optimiser.load_state_dict(checkpoint['optimiser_state_dict'])
scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
epoch = checkpoint['epoch']
loss = checkpoint['loss']
model.eval()
code
104126200/cell_14
[ "text_plain_output_1.png" ]
import torchvision
from torchvision import transforms

transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_dataset = torchvision.datasets.CIFAR10(root='./data', train=True, transform=transform, download=True)
test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False, transform=transform, download=False)
code
104126200/cell_22
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms

def set_seed(seed=0):
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)

set_seed()
BATCH_SIZE = 64
LEARNING_RATE = 0.01
N_EPOCHS = 20
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device

transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_dataset = torchvision.datasets.CIFAR10(root='./data', train=True, transform=transform, download=True)
test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False, transform=transform, download=False)
train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, shuffle=False)

class ConvNet(nn.Module):

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5, stride=1, padding=0)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        self.conv2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=0)
        self.flat = nn.Flatten()
        self.fc1 = nn.Linear(in_features=32 * 5 * 5, out_features=128)
        self.fc2 = nn.Linear(in_features=128, out_features=64)
        self.fc3 = nn.Linear(in_features=64, out_features=10)
        self.relu = nn.ReLU()

    def forward(self, x):
        out = self.conv1(x)
        out = self.relu(out)
        out = self.pool(out)
        out = self.conv2(out)
        out = self.relu(out)
        out = self.pool(out)
        out = self.flat(out)
        out = self.fc1(out)
        out = self.relu(out)
        out = self.fc2(out)
        out = self.relu(out)
        out = self.fc3(out)
        return out

model = ConvNet().to(device)
loss = nn.CrossEntropyLoss()
optimiser = optim.SGD(params=model.parameters(), lr=LEARNING_RATE)
scheduler = lr_scheduler.CosineAnnealingLR(optimiser, T_max=N_EPOCHS)
for epoch in range(N_EPOCHS):
    loss_acc = 0
    train_count = 0
    for i, (imgs, labels) in enumerate(train_loader):
        imgs = imgs.to(device)
        labels = labels.to(device)
        preds = model(imgs)
        L = loss(preds, labels)
        L.backward()
        optimiser.step()
        optimiser.zero_grad()
        loss_acc += L.detach().item()
        train_count += 1
    scheduler.step()
    if (epoch + 1) % 1 == 0:
        print(f'Epoch {epoch + 1}/{N_EPOCHS}, loss {loss_acc / train_count:.5f}')
print('')
print('Training complete')
code
73073677/cell_21
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

calendar = pd.read_csv('../input/prague-airbnb/Prague_calendar.csv')
listings = pd.read_csv('../input/prague-airbnb/Prague_listings.csv')
reviews = pd.read_csv('../input/prague-airbnb/Prague_reviews.csv')
(calendar.shape, listings.shape, reviews.shape)
calendar = calendar[['listing_id', 'date', 'year', 'month', 'day', 'available', 'price', 'adjusted_price', 'minimum_nights', 'maximum_nights']]
calendar.dtypes
f'Number of listed properties: {len(calendar.listing_id.unique())}'

fig, ax = plt.subplots(1, 2, sharey=True)
ax[0].boxplot(calendar['price'])
ax[0].set_xlabel('Price')
ax[1].boxplot(calendar.adjusted_price)
ax[1].set_xlabel('Adjusted Price')
plt.show()

fig, ax = plt.subplots(1, 2, sharey=True)
ax[0].hist(calendar['price'])
ax[0].set_xlabel('Price')
ax[1].hist(calendar.adjusted_price)
ax[1].set_xlabel('Adjusted Price')
plt.show()

mean_price_month = calendar.groupby('month').mean()
mean_price_month

fig, ax = plt.subplots()
ax.set_title('Average price over the year.')
ax.bar(x=mean_price_month.index, height=mean_price_month.price)
ax.set_xlabel('Month')
ax.set_xticks(ticks=mean_price_month.index)
ax.set_ylabel('Average price')
plt.show()
code
73073677/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

calendar = pd.read_csv('../input/prague-airbnb/Prague_calendar.csv')
listings = pd.read_csv('../input/prague-airbnb/Prague_listings.csv')
reviews = pd.read_csv('../input/prague-airbnb/Prague_reviews.csv')
(calendar.shape, listings.shape, reviews.shape)
calendar = calendar[['listing_id', 'date', 'year', 'month', 'day', 'available', 'price', 'adjusted_price', 'minimum_nights', 'maximum_nights']]
calendar.dtypes
calendar.head(10)
code
73073677/cell_25
[ "image_output_1.png" ]
import pandas as pd

calendar = pd.read_csv('../input/prague-airbnb/Prague_calendar.csv')
listings = pd.read_csv('../input/prague-airbnb/Prague_listings.csv')
reviews = pd.read_csv('../input/prague-airbnb/Prague_reviews.csv')
(calendar.shape, listings.shape, reviews.shape)
reviews.shape
reviews.head()
code
73073677/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

calendar = pd.read_csv('../input/prague-airbnb/Prague_calendar.csv')
listings = pd.read_csv('../input/prague-airbnb/Prague_listings.csv')
reviews = pd.read_csv('../input/prague-airbnb/Prague_reviews.csv')
(calendar.shape, listings.shape, reviews.shape)
calendar.info()
code
73073677/cell_23
[ "image_output_1.png" ]
import pandas as pd

calendar = pd.read_csv('../input/prague-airbnb/Prague_calendar.csv')
listings = pd.read_csv('../input/prague-airbnb/Prague_listings.csv')
reviews = pd.read_csv('../input/prague-airbnb/Prague_reviews.csv')
(calendar.shape, listings.shape, reviews.shape)
reviews.shape
code
73073677/cell_30
[ "text_html_output_1.png" ]
import pandas as pd

calendar = pd.read_csv('../input/prague-airbnb/Prague_calendar.csv')
listings = pd.read_csv('../input/prague-airbnb/Prague_listings.csv')
reviews = pd.read_csv('../input/prague-airbnb/Prague_reviews.csv')
(calendar.shape, listings.shape, reviews.shape)
reviews.shape
reviews = reviews[['listing_id', 'id', 'date', 'year', 'month', 'day', 'reviewer_id', 'reviewer_name', 'comments']]
print(reviews.date.min())
print(reviews.date.max())
code
73073677/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

calendar = pd.read_csv('../input/prague-airbnb/Prague_calendar.csv')
listings = pd.read_csv('../input/prague-airbnb/Prague_listings.csv')
reviews = pd.read_csv('../input/prague-airbnb/Prague_reviews.csv')
(calendar.shape, listings.shape, reviews.shape)
calendar = calendar[['listing_id', 'date', 'year', 'month', 'day', 'available', 'price', 'adjusted_price', 'minimum_nights', 'maximum_nights']]
calendar.dtypes
f'Number of listed properties: {len(calendar.listing_id.unique())}'

fig, ax = plt.subplots(1, 2, sharey=True)
ax[0].boxplot(calendar['price'])
ax[0].set_xlabel('Price')
ax[1].boxplot(calendar.adjusted_price)
ax[1].set_xlabel('Adjusted Price')
plt.show()

fig, ax = plt.subplots(1, 2, sharey=True)
ax[0].hist(calendar['price'])
ax[0].set_xlabel('Price')
ax[1].hist(calendar.adjusted_price)
ax[1].set_xlabel('Adjusted Price')
plt.show()
code
73073677/cell_29
[ "text_plain_output_1.png" ]
import pandas as pd

calendar = pd.read_csv('../input/prague-airbnb/Prague_calendar.csv')
listings = pd.read_csv('../input/prague-airbnb/Prague_listings.csv')
reviews = pd.read_csv('../input/prague-airbnb/Prague_reviews.csv')
(calendar.shape, listings.shape, reviews.shape)
reviews.shape
reviews = reviews[['listing_id', 'id', 'date', 'year', 'month', 'day', 'reviewer_id', 'reviewer_name', 'comments']]
print(f'Number of listed properties with reviews: {len(reviews.listing_id.unique())}')
code
73073677/cell_18
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

calendar = pd.read_csv('../input/prague-airbnb/Prague_calendar.csv')
listings = pd.read_csv('../input/prague-airbnb/Prague_listings.csv')
reviews = pd.read_csv('../input/prague-airbnb/Prague_reviews.csv')
(calendar.shape, listings.shape, reviews.shape)
calendar = calendar[['listing_id', 'date', 'year', 'month', 'day', 'available', 'price', 'adjusted_price', 'minimum_nights', 'maximum_nights']]
calendar.dtypes
f'Number of listed properties: {len(calendar.listing_id.unique())}'

fig, ax = plt.subplots(1, 2, sharey=True)
ax[0].boxplot(calendar['price'])
ax[0].set_xlabel('Price')
ax[1].boxplot(calendar.adjusted_price)
ax[1].set_xlabel('Adjusted Price')
plt.show()
code
73073677/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

calendar = pd.read_csv('../input/prague-airbnb/Prague_calendar.csv')
listings = pd.read_csv('../input/prague-airbnb/Prague_listings.csv')
reviews = pd.read_csv('../input/prague-airbnb/Prague_reviews.csv')
(calendar.shape, listings.shape, reviews.shape)
calendar = calendar[['listing_id', 'date', 'year', 'month', 'day', 'available', 'price', 'adjusted_price', 'minimum_nights', 'maximum_nights']]
calendar.dtypes
f'Number of listed properties: {len(calendar.listing_id.unique())}'
print(f'First day: {calendar.date.min()}')
print(f'Last day: {calendar.date.max()}')
code
73073677/cell_16
[ "text_html_output_1.png" ]
import pandas as pd

calendar = pd.read_csv('../input/prague-airbnb/Prague_calendar.csv')
listings = pd.read_csv('../input/prague-airbnb/Prague_listings.csv')
reviews = pd.read_csv('../input/prague-airbnb/Prague_reviews.csv')
(calendar.shape, listings.shape, reviews.shape)
calendar = calendar[['listing_id', 'date', 'year', 'month', 'day', 'available', 'price', 'adjusted_price', 'minimum_nights', 'maximum_nights']]
calendar.dtypes
f'Number of listed properties: {len(calendar.listing_id.unique())}'
print(f'Lowest price: {calendar.price.min()}')
print(f'Highest price: {calendar.price.max()}')
code
73073677/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

calendar = pd.read_csv('../input/prague-airbnb/Prague_calendar.csv')
listings = pd.read_csv('../input/prague-airbnb/Prague_listings.csv')
reviews = pd.read_csv('../input/prague-airbnb/Prague_reviews.csv')
(calendar.shape, listings.shape, reviews.shape)
code
73073677/cell_24
[ "image_output_1.png" ]
import pandas as pd

calendar = pd.read_csv('../input/prague-airbnb/Prague_calendar.csv')
listings = pd.read_csv('../input/prague-airbnb/Prague_listings.csv')
reviews = pd.read_csv('../input/prague-airbnb/Prague_reviews.csv')
(calendar.shape, listings.shape, reviews.shape)
reviews.shape
reviews.info()
code
73073677/cell_14
[ "text_html_output_1.png" ]
import pandas as pd

calendar = pd.read_csv('../input/prague-airbnb/Prague_calendar.csv')
listings = pd.read_csv('../input/prague-airbnb/Prague_listings.csv')
reviews = pd.read_csv('../input/prague-airbnb/Prague_reviews.csv')
(calendar.shape, listings.shape, reviews.shape)
calendar = calendar[['listing_id', 'date', 'year', 'month', 'day', 'available', 'price', 'adjusted_price', 'minimum_nights', 'maximum_nights']]
calendar.dtypes
f'Number of listed properties: {len(calendar.listing_id.unique())}'
code
73073677/cell_37
[ "text_plain_output_1.png" ]
import pandas as pd

calendar = pd.read_csv('../input/prague-airbnb/Prague_calendar.csv')
listings = pd.read_csv('../input/prague-airbnb/Prague_listings.csv')
reviews = pd.read_csv('../input/prague-airbnb/Prague_reviews.csv')
(calendar.shape, listings.shape, reviews.shape)
listings.isnull().sum()
code
73073677/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

calendar = pd.read_csv('../input/prague-airbnb/Prague_calendar.csv')
listings = pd.read_csv('../input/prague-airbnb/Prague_listings.csv')
reviews = pd.read_csv('../input/prague-airbnb/Prague_reviews.csv')
(calendar.shape, listings.shape, reviews.shape)
calendar = calendar[['listing_id', 'date', 'year', 'month', 'day', 'available', 'price', 'adjusted_price', 'minimum_nights', 'maximum_nights']]
calendar.dtypes
code
73073677/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
calendar = pd.read_csv('../input/prague-airbnb/Prague_calendar.csv')
listings = pd.read_csv('../input/prague-airbnb/Prague_listings.csv')
reviews = pd.read_csv('../input/prague-airbnb/Prague_reviews.csv')
(calendar.shape, listings.shape, reviews.shape)
calendar.head()
code
73073677/cell_36
[ "text_plain_output_1.png" ]
import pandas as pd
calendar = pd.read_csv('../input/prague-airbnb/Prague_calendar.csv')
listings = pd.read_csv('../input/prague-airbnb/Prague_listings.csv')
reviews = pd.read_csv('../input/prague-airbnb/Prague_reviews.csv')
(calendar.shape, listings.shape, reviews.shape)
listings.info()
code
129031792/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import json
import numpy as np
import tensorflow as tf
import urllib
import zipfile

# Linear-regression warm-up: a single Dense unit learning y = x + 6
def solution_model():
    xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)
    ys = np.array([5.0, 6.0, 7.0, 8.0, 9.0, 10.0], dtype=float)
    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=[1])])
    model.compile(loss='mean_squared_error', optimizer='sgd')
    model.fit(xs, ys, epochs=1000)
    return model

if __name__ == '__main__':
    model = solution_model()
    model.save('mymodel.h5')

# Fashion-MNIST CNN classifier
def solution_model():
    fashion_mnist = tf.keras.datasets.fashion_mnist
    (training_images, training_labels), (val_images, val_label) = fashion_mnist.load_data()
    training_images = training_images / 255.0
    val_images = val_images / 255.0
    training_images = np.expand_dims(training_images, axis=3)
    val_images = np.expand_dims(val_images, axis=3)
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(14, (3, 3), activation='relu', input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(10, activation='softmax')])
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc'])
    model.fit(training_images, training_labels, validation_data=(val_images, val_label), epochs=5)
    return model

if __name__ == '__main__':
    model = solution_model()
    model.save('mymodel.h5')

# Horse-or-human binary CNN trained from directory generators
def solution_model():
    _TRAIN_URL = 'https://storage.googleapis.com/download.tensorflow.org/data/horse-or-human.zip'
    _TEST_URL = 'https://storage.googleapis.com/download.tensorflow.org/data/validation-horse-or-human.zip'
    urllib.request.urlretrieve(_TRAIN_URL, 'horse-or-human.zip')
    local_zip = 'horse-or-human.zip'
    zip_ref = zipfile.ZipFile(local_zip, 'r')
    zip_ref.extractall('tmp/horse-or-human/')
    zip_ref.close()
    urllib.request.urlretrieve(_TEST_URL, 'testdata.zip')
    local_zip = 'testdata.zip'
    zip_ref = zipfile.ZipFile(local_zip, 'r')
    zip_ref.extractall('tmp/testdata/')
    zip_ref.close()
    training_data = 'tmp/horse-or-human/'
    val_data = 'tmp/testdata/'
    train_datagen = ImageDataGenerator(rescale=1.0 / 255)
    validation_datagen = ImageDataGenerator(rescale=1.0 / 255.0)
    train_generator = train_datagen.flow_from_directory(training_data, target_size=(300, 300), batch_size=128, class_mode='binary')
    validation_generator = validation_datagen.flow_from_directory(val_data, target_size=(300, 300), batch_size=64, class_mode='binary')
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(300, 300, 3)),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid')])
    model.compile(loss='binary_crossentropy', optimizer=RMSprop(learning_rate=0.001), metrics=['accuracy'])  # learning_rate replaces the deprecated lr argument
    model.fit(train_generator, epochs=10, verbose=1, validation_data=validation_generator)
    return model

if __name__ == '__main__':
    model = solution_model()
    model.save('mymodel2.h5')

# Sarcasm-headline text classifier: Embedding + BiLSTM
def solution_model():
    url = 'https://storage.googleapis.com/download.tensorflow.org/data/sarcasm.json'
    urllib.request.urlretrieve(url, 'sarcasm.json')
    vocab_size = 1000
    embedding_dim = 16
    max_length = 120
    trunc_type = 'post'
    padding_type = 'post'
    oov_tok = '<OOV>'
    training_size = 20000
    sentences = []
    labels = []
    with open('sarcasm.json', 'r') as f:
        data = json.load(f)
    for text in data:
        sentences.append(text['headline'])
        labels.append(text['is_sarcastic'])
    train_sentences = sentences[:training_size]
    test_sentences = sentences[training_size:]
    train_labels = labels[:training_size]
    test_labels = labels[training_size:]
    tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
    tokenizer.fit_on_texts(train_sentences)
    train_sequences = tokenizer.texts_to_sequences(train_sentences)
    train_padded = pad_sequences(train_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
    test_sequences = tokenizer.texts_to_sequences(test_sentences)
    test_padded = pad_sequences(test_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
    train_labels = np.array(train_labels)
    test_labels = np.array(test_labels)
    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
        tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32)),
        tf.keras.layers.Dense(24, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid')])
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc'])
    model.fit(train_padded, train_labels, epochs=10, validation_data=(test_padded, test_labels))
    return model

if __name__ == '__main__':
    model = solution_model()
    model.save('mymodel6.h5')
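# Hedged illustration (not part of the original cell): how pad_sequences
# shapes the ragged token lists fed to the Embedding layer above.
demo = pad_sequences([[4, 7], [1, 2, 3]], maxlen=4, padding='post', truncating='post')
print(demo)  # [[4 7 0 0] [1 2 3 0]] -- right-padded to a fixed length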
code
129031792/cell_7
[ "text_plain_output_1.png" ]
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
import tensorflow as tf
import urllib
import zipfile

# Linear-regression warm-up: a single Dense unit learning y = x + 6
def solution_model():
    xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)
    ys = np.array([5.0, 6.0, 7.0, 8.0, 9.0, 10.0], dtype=float)
    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=[1])])
    model.compile(loss='mean_squared_error', optimizer='sgd')
    model.fit(xs, ys, epochs=1000)
    return model

if __name__ == '__main__':
    model = solution_model()
    model.save('mymodel.h5')

# Fashion-MNIST CNN classifier
def solution_model():
    fashion_mnist = tf.keras.datasets.fashion_mnist
    (training_images, training_labels), (val_images, val_label) = fashion_mnist.load_data()
    training_images = training_images / 255.0
    val_images = val_images / 255.0
    training_images = np.expand_dims(training_images, axis=3)
    val_images = np.expand_dims(val_images, axis=3)
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(14, (3, 3), activation='relu', input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(10, activation='softmax')])
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc'])
    model.fit(training_images, training_labels, validation_data=(val_images, val_label), epochs=5)
    return model

if __name__ == '__main__':
    model = solution_model()
    model.save('mymodel.h5')

# Horse-or-human binary CNN trained from directory generators
def solution_model():
    _TRAIN_URL = 'https://storage.googleapis.com/download.tensorflow.org/data/horse-or-human.zip'
    _TEST_URL = 'https://storage.googleapis.com/download.tensorflow.org/data/validation-horse-or-human.zip'
    urllib.request.urlretrieve(_TRAIN_URL, 'horse-or-human.zip')
    local_zip = 'horse-or-human.zip'
    zip_ref = zipfile.ZipFile(local_zip, 'r')
    zip_ref.extractall('tmp/horse-or-human/')
    zip_ref.close()
    urllib.request.urlretrieve(_TEST_URL, 'testdata.zip')
    local_zip = 'testdata.zip'
    zip_ref = zipfile.ZipFile(local_zip, 'r')
    zip_ref.extractall('tmp/testdata/')
    zip_ref.close()
    training_data = 'tmp/horse-or-human/'
    val_data = 'tmp/testdata/'
    train_datagen = ImageDataGenerator(rescale=1.0 / 255)
    validation_datagen = ImageDataGenerator(rescale=1.0 / 255.0)
    train_generator = train_datagen.flow_from_directory(training_data, target_size=(300, 300), batch_size=128, class_mode='binary')
    validation_generator = validation_datagen.flow_from_directory(val_data, target_size=(300, 300), batch_size=64, class_mode='binary')
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(300, 300, 3)),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid')])
    model.compile(loss='binary_crossentropy', optimizer=RMSprop(learning_rate=0.001), metrics=['accuracy'])  # learning_rate replaces the deprecated lr argument
    model.fit(train_generator, epochs=10, verbose=1, validation_data=validation_generator)
    return model

if __name__ == '__main__':
    model = solution_model()
    model.save('mymodel2.h5')
code
129031792/cell_3
[ "text_plain_output_1.png" ]
import numpy as np
import tensorflow as tf

def solution_model():
    xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)
    ys = np.array([5.0, 6.0, 7.0, 8.0, 9.0, 10.0], dtype=float)
    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=[1])])
    model.compile(loss='mean_squared_error', optimizer='sgd')
    model.fit(xs, ys, epochs=1000)
    return model

if __name__ == '__main__':
    model = solution_model()
    model.save('mymodel.h5')
code
129031792/cell_5
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
import numpy as np
import tensorflow as tf

def solution_model():
    xs = np.array([-1.0, 0.0, 1.0, 2.0, 3.0, 4.0], dtype=float)
    ys = np.array([5.0, 6.0, 7.0, 8.0, 9.0, 10.0], dtype=float)
    model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=[1])])
    model.compile(loss='mean_squared_error', optimizer='sgd')
    model.fit(xs, ys, epochs=1000)
    return model

if __name__ == '__main__':
    model = solution_model()
    model.save('mymodel.h5')

def solution_model():
    fashion_mnist = tf.keras.datasets.fashion_mnist
    (training_images, training_labels), (val_images, val_label) = fashion_mnist.load_data()
    training_images = training_images / 255.0
    val_images = val_images / 255.0
    training_images = np.expand_dims(training_images, axis=3)
    val_images = np.expand_dims(val_images, axis=3)
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(14, (3, 3), activation='relu', input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPooling2D(2, 2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(10, activation='softmax')])
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc'])
    model.fit(training_images, training_labels, validation_data=(val_images, val_label), epochs=5)
    return model

if __name__ == '__main__':
    model = solution_model()
    model.save('mymodel.h5')
code
50219444/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
sarcasm_df = pd.read_csv('../input/sarcasm/train-balanced-sarcasm.csv')
sarcasm_df.dropna(subset=['comment'], inplace=True)
sarcasm_df['comment'] = sarcasm_df['comment'].str.lower()
sarcasm_df['comment'] = sarcasm_df['comment'].str.replace('[^\\w\\s]', '')
sarcasm_df.created_utc = pd.to_datetime(sarcasm_df.created_utc)
plt.figure(figsize=(5, 5))
ax = sns.countplot(x='label', data=sarcasm_df)
ax.set(title='Distribution of Classes', xlabel='Sarcasm Status', ylabel='Total Count')
total = float(len(sarcasm_df))
for p in ax.patches:
    height = p.get_height()
    ax.text(p.get_x() + p.get_width() / 2.0, height + 3, '{:1.1f}%'.format(height / total * 100), ha='center')
plt.show()
sns.boxplot(x=sarcasm_df.loc[sarcasm_df['label'] == 1, 'comment'].str.len()).set(title='Length of Sarcastic Comments', xlabel='Length')
sns.despine(offset=10, trim=True)
plt.show()
code
50219444/cell_9
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
sarcasm_df = pd.read_csv('../input/sarcasm/train-balanced-sarcasm.csv')
sarcasm_df.dropna(subset=['comment'], inplace=True)
sarcasm_df['comment'] = sarcasm_df['comment'].str.lower()
sarcasm_df['comment'] = sarcasm_df['comment'].str.replace('[^\\w\\s]', '')
sarcasm_df.created_utc = pd.to_datetime(sarcasm_df.created_utc)
plt.figure(figsize=(5, 5))
ax = sns.countplot(x='label', data=sarcasm_df)
ax.set(title='Distribution of Classes', xlabel='Sarcasm Status', ylabel='Total Count')
total = float(len(sarcasm_df))
for p in ax.patches:
    height = p.get_height()
    ax.text(p.get_x() + p.get_width() / 2.0, height + 3, '{:1.1f}%'.format(height / total * 100), ha='center')
plt.show()
code
50219444/cell_23
[ "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
sarcasm_df = pd.read_csv('../input/sarcasm/train-balanced-sarcasm.csv')
sarcasm_df.dropna(subset=['comment'], inplace=True)
sarcasm_df['comment'] = sarcasm_df['comment'].str.lower()
sarcasm_df['comment'] = sarcasm_df['comment'].str.replace('[^\\w\\s]', '')
sarcasm_df.created_utc = pd.to_datetime(sarcasm_df.created_utc)
plt.figure(figsize=(5, 5))
ax = sns.countplot(x='label', data=sarcasm_df)
ax.set(title='Distribution of Classes', xlabel='Sarcasm Status', ylabel='Total Count')
total = float(len(sarcasm_df))
for p in ax.patches:
    height = p.get_height()
    ax.text(p.get_x() + p.get_width() / 2.0, height + 3, '{:1.1f}%'.format(height / total * 100), ha='center')
plt.show()
sns.despine(offset=10, trim=True)
sns.despine(offset=10, trim=True)
sarcasm_df['log_comment'] = sarcasm_df['comment'].apply(lambda text: np.log1p(len(text)))
wordcloud = WordCloud(background_color='black', stopwords=STOPWORDS, max_words=200, max_font_size=100, random_state=17, width=800, height=400)
wordcloud.generate(str(sarcasm_df.loc[sarcasm_df['label'] == 1, 'comment']))
labels = ['Sarcastic Score', 'Neutral Score']
sizes = [3235069, 3725113]
colors = ['#F21F3B', '#1FF257']
plt.rcParams.update({'font.size': 14})
fig1, ax1 = plt.subplots()
ax1.pie(sizes, colors=colors, labels=labels, autopct='%1.1f%%', startangle=30)
ax1.set_title('Scores of Subreddits')
centre_circle = plt.Circle((0, 0), 0.7, fc='white')
fig = plt.gcf()
fig.gca().add_artist(centre_circle)
ax1.axis('equal')
plt.tight_layout()
plt.show()
code
50219444/cell_6
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
sarcasm_df = pd.read_csv('../input/sarcasm/train-balanced-sarcasm.csv')
sarcasm_df.dropna(subset=['comment'], inplace=True)
sarcasm_df['comment'] = sarcasm_df['comment'].str.lower()
sarcasm_df['comment'] = sarcasm_df['comment'].str.replace('[^\\w\\s]', '')
sarcasm_df.created_utc = pd.to_datetime(sarcasm_df.created_utc)
sarcasm_df.info()
code
50219444/cell_19
[ "image_output_1.png" ]
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
sarcasm_df = pd.read_csv('../input/sarcasm/train-balanced-sarcasm.csv')
sarcasm_df.dropna(subset=['comment'], inplace=True)
sarcasm_df['comment'] = sarcasm_df['comment'].str.lower()
sarcasm_df['comment'] = sarcasm_df['comment'].str.replace('[^\\w\\s]', '')
sarcasm_df.created_utc = pd.to_datetime(sarcasm_df.created_utc)
plt.figure(figsize=(5, 5))
ax = sns.countplot(x='label', data=sarcasm_df)
ax.set(title='Distribution of Classes', xlabel='Sarcasm Status', ylabel='Total Count')
total = float(len(sarcasm_df))
for p in ax.patches:
    height = p.get_height()
    ax.text(p.get_x() + p.get_width() / 2.0, height + 3, '{:1.1f}%'.format(height / total * 100), ha='center')
plt.show()
sns.despine(offset=10, trim=True)
sns.despine(offset=10, trim=True)
sarcasm_df['log_comment'] = sarcasm_df['comment'].apply(lambda text: np.log1p(len(text)))
wordcloud = WordCloud(background_color='black', stopwords=STOPWORDS, max_words=200, max_font_size=100, random_state=17, width=800, height=400)
plt.figure(figsize=(12, 12))
wordcloud.generate(str(sarcasm_df.loc[sarcasm_df['label'] == 1, 'comment']))
plt.grid(b=False)
plt.imshow(wordcloud)
code
50219444/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
50219444/cell_16
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
sarcasm_df = pd.read_csv('../input/sarcasm/train-balanced-sarcasm.csv')
sarcasm_df.dropna(subset=['comment'], inplace=True)
sarcasm_df['comment'] = sarcasm_df['comment'].str.lower()
sarcasm_df['comment'] = sarcasm_df['comment'].str.replace('[^\\w\\s]', '')
sarcasm_df.created_utc = pd.to_datetime(sarcasm_df.created_utc)
plt.figure(figsize=(5, 5))
ax = sns.countplot(x='label', data=sarcasm_df)
ax.set(title='Distribution of Classes', xlabel='Sarcasm Status', ylabel='Total Count')
total = float(len(sarcasm_df))
for p in ax.patches:
    height = p.get_height()
    ax.text(p.get_x() + p.get_width() / 2.0, height + 3, '{:1.1f}%'.format(height / total * 100), ha='center')
plt.show()
sns.despine(offset=10, trim=True)
sns.despine(offset=10, trim=True)
sarcasm_df['log_comment'] = sarcasm_df['comment'].apply(lambda text: np.log1p(len(text)))
sarcasm_df[sarcasm_df['label'] == 1]['log_comment'].hist(alpha=0.6, label='Sarcastic', color='blue')
sarcasm_df[sarcasm_df['label'] == 0]['log_comment'].hist(alpha=0.6, label='Non-Sarcastic', color='red')
plt.legend()
plt.title('Natural Log Length of Comments')
plt.show()
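# Hedged aside (not part of the original cell): np.log1p(x) is log(1 + x),
# which keeps zero-length comments finite; np.expm1 inverts it.
print(np.expm1(np.log1p(120)))  # ~120.0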
code
50219444/cell_14
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
sarcasm_df = pd.read_csv('../input/sarcasm/train-balanced-sarcasm.csv')
sarcasm_df.dropna(subset=['comment'], inplace=True)
sarcasm_df['comment'] = sarcasm_df['comment'].str.lower()
sarcasm_df['comment'] = sarcasm_df['comment'].str.replace('[^\\w\\s]', '')
sarcasm_df.created_utc = pd.to_datetime(sarcasm_df.created_utc)
plt.figure(figsize=(5, 5))
ax = sns.countplot(x='label', data=sarcasm_df)
ax.set(title='Distribution of Classes', xlabel='Sarcasm Status', ylabel='Total Count')
total = float(len(sarcasm_df))
for p in ax.patches:
    height = p.get_height()
    ax.text(p.get_x() + p.get_width() / 2.0, height + 3, '{:1.1f}%'.format(height / total * 100), ha='center')
plt.show()
sns.despine(offset=10, trim=True)
sns.boxplot(x=sarcasm_df.loc[sarcasm_df['label'] == 0, 'comment'].str.len()).set(title='Length of Neutral Comments', xlabel='Length')
sns.despine(offset=10, trim=True)
plt.show()
code
89135965/cell_9
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/titanic/train.csv')
data_test = pd.read_csv('../input/titanic/test.csv')
X = data[['Pclass', 'Age', 'Sex', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Cabin']]
y = data.Survived
X.isnull().sum()

def impute_age(X):
    # Fill missing ages with group means: child/adult split by sex
    mc_age = X[(X.Age <= 18) & (X.Sex == 'male')].Age.mean()
    fc_age = X[(X.Age <= 18) & (X.Sex == 'female')].Age.mean()
    ma_age = X[(X.Age > 18) & (X.Sex == 'male')].Age.mean()
    fa_age = X[(X.Age > 18) & (X.Sex == 'female')].Age.mean()
    for idx, row in X.iterrows():
        if not pd.isna(row.Age):
            continue
        if row.SibSp >= 3:
            if row.Sex == 'male':
                X.loc[idx, 'Age'] = mc_age
            if row.Sex == 'female':
                X.loc[idx, 'Age'] = fc_age
        else:
            if row.Sex == 'male':
                X.loc[idx, 'Age'] = ma_age
            if row.Sex == 'female':
                X.loc[idx, 'Age'] = fa_age

def impute_embarked(X):
    most_common = X.Embarked.mode()[0]  # fixed: mode() returns a Series, take its first value
    X.Embarked = [most_common if pd.isna(x) else x for x in X.Embarked]

def impute_cabins(X):
    # One column per deck letter, holding the cabin number on that deck
    for a in 'ABCDEFG':
        X[a] = [0 for _ in range(X.shape[0])]
    for i, cabins in enumerate(X.Cabin):
        if pd.isna(cabins):
            continue
        cabins = cabins.split()
        for cabin in cabins:
            if cabin == 'T':
                continue
            X.loc[i, cabin[:1]] = int(cabin[1:] or 0)  # fixed: was cabins[0][:1], which ignored every cabin after the first
    X.drop('Cabin', axis=1, inplace=True)

X.isnull().sum()
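# Hedged sanity check (not part of the original cell): how impute_cabins above
# parses a multi-cabin string such as 'C23 C25 C27' -- deck letter, then number.
for cabin in 'C23 C25 C27'.split():
    print(cabin[:1], int(cabin[1:] or 0))  # -> C 23 / C 25 / C 27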
code
89135965/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/titanic/train.csv')
data_test = pd.read_csv('../input/titanic/test.csv')
X = data[['Pclass', 'Age', 'Sex', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Cabin']]
y = data.Survived
X.isnull().sum()
code
89135965/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/titanic/train.csv')
data_test = pd.read_csv('../input/titanic/test.csv')
data.head()
code
89135965/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/titanic/train.csv')
data_test = pd.read_csv('../input/titanic/test.csv')
X = data[['Pclass', 'Age', 'Sex', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Cabin']]
y = data.Survived
X.isnull().sum()

def impute_age(X):
    # Fill missing ages with group means: child/adult split by sex
    mc_age = X[(X.Age <= 18) & (X.Sex == 'male')].Age.mean()
    fc_age = X[(X.Age <= 18) & (X.Sex == 'female')].Age.mean()
    ma_age = X[(X.Age > 18) & (X.Sex == 'male')].Age.mean()
    fa_age = X[(X.Age > 18) & (X.Sex == 'female')].Age.mean()
    for idx, row in X.iterrows():
        if not pd.isna(row.Age):
            continue
        if row.SibSp >= 3:
            if row.Sex == 'male':
                X.loc[idx, 'Age'] = mc_age
            if row.Sex == 'female':
                X.loc[idx, 'Age'] = fc_age
        else:
            if row.Sex == 'male':
                X.loc[idx, 'Age'] = ma_age
            if row.Sex == 'female':
                X.loc[idx, 'Age'] = fa_age

def impute_embarked(X):
    most_common = X.Embarked.mode()[0]  # fixed: mode() returns a Series, take its first value
    X.Embarked = [most_common if pd.isna(x) else x for x in X.Embarked]

def impute_cabins(X):
    # One column per deck letter, holding the cabin number on that deck
    for a in 'ABCDEFG':
        X[a] = [0 for _ in range(X.shape[0])]
    for i, cabins in enumerate(X.Cabin):
        if pd.isna(cabins):
            continue
        cabins = cabins.split()
        for cabin in cabins:
            if cabin == 'T':
                continue
            X.loc[i, cabin[:1]] = int(cabin[1:] or 0)  # fixed: was cabins[0][:1], which ignored every cabin after the first
    X.drop('Cabin', axis=1, inplace=True)

impute_age(X)
impute_embarked(X)
impute_cabins(X)
code
89135965/cell_10
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import mean_absolute_error  # added: used below (was misspelled and never imported)
from sklearn.model_selection import cross_val_score, train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/titanic/train.csv')
data_test = pd.read_csv('../input/titanic/test.csv')
X = data[['Pclass', 'Age', 'Sex', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Cabin']]
y = data.Survived
X.isnull().sum()

def impute_age(X):
    mc_age = X[(X.Age <= 18) & (X.Sex == 'male')].Age.mean()
    fc_age = X[(X.Age <= 18) & (X.Sex == 'female')].Age.mean()
    ma_age = X[(X.Age > 18) & (X.Sex == 'male')].Age.mean()
    fa_age = X[(X.Age > 18) & (X.Sex == 'female')].Age.mean()
    for idx, row in X.iterrows():
        if not pd.isna(row.Age):
            continue
        if row.SibSp >= 3:
            if row.Sex == 'male':
                X.loc[idx, 'Age'] = mc_age
            if row.Sex == 'female':
                X.loc[idx, 'Age'] = fc_age
        else:
            if row.Sex == 'male':
                X.loc[idx, 'Age'] = ma_age
            if row.Sex == 'female':
                X.loc[idx, 'Age'] = fa_age

def impute_embarked(X):
    most_common = X.Embarked.mode()[0]  # fixed: mode() returns a Series, take its first value
    X.Embarked = [most_common if pd.isna(x) else x for x in X.Embarked]

def impute_cabins(X):
    for a in 'ABCDEFG':
        X[a] = [0 for _ in range(X.shape[0])]
    for i, cabins in enumerate(X.Cabin):
        if pd.isna(cabins):
            continue
        cabins = cabins.split()
        for cabin in cabins:
            if cabin == 'T':
                continue
            X.loc[i, cabin[:1]] = int(cabin[1:] or 0)  # fixed: was cabins[0][:1], which ignored every cabin after the first
    X.drop('Cabin', axis=1, inplace=True)

X.isnull().sum()

def optimize_model(ne, md, trials):
    # Grid-search n_estimators and max_depth, averaging MAE over random splits.
    # Note: this cell never calls the impute helpers above, so Sex/Embarked/Cabin
    # are still raw here and would need encoding before fit.
    for i in range(round(ne['low'] / ne['step']), round(ne['high'] / ne['step'])):
        for j in range(round(md['low'] / md['step']), round(md['high'] / md['step'])):
            print(i)
            maes = []
            for k in range(trials):
                X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7, shuffle=True)  # fixed: split returns X_train, X_test, y_train, y_test
                model = RandomForestClassifier(n_estimators=i * ne['step'], max_depth=j * md['step'])
                model.fit(X_train, y_train)
                prediction = model.predict(X_test)
                maes.append(mean_absolute_error(y_test, prediction))  # fixed: list.append, not push
            print(maes)
            print(f"average: {sum(maes) / trials}\nne: {i * ne['step']}\tmd: {j * md['step']}")

optimize_model({'low': 50, 'high': 1000, 'step': 50}, {'low': 50, 'high': 300, 'step': 50}, trials=3)
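# Hedged alternative (not part of the original cell): cross_val_score, already
# imported above, scores one setting without the manual loop; string columns
# are dropped or one-hot encoded and remaining gaps mean-filled first (these
# steps are assumptions, since this cell never runs the impute helpers).
X_cv = pd.get_dummies(X.drop('Cabin', axis=1), columns=['Sex', 'Embarked'])
X_cv = X_cv.fillna(X_cv.mean())
scores = cross_val_score(RandomForestClassifier(n_estimators=100, max_depth=50), X_cv, y, cv=3)
print(f'mean CV accuracy: {scores.mean():.3f}')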
code
73079027/cell_6
[ "text_html_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv', index_col='Id')
test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train.shape
train.head()
code
73079027/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv', index_col='Id')
test = pd.read_csv('../input/house-prices-advanced-regression-techniques/test.csv')
train.shape
code
105201347/cell_13
[ "text_plain_output_1.png" ]
import lazypredict
from lazypredict.Supervised import LazyClassifier
code
105201347/cell_4
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/indian-liver-patient-records/indian_liver_patient.csv')
data
data.describe()
code
105201347/cell_2
[ "text_html_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/indian-liver-patient-records/indian_liver_patient.csv')
data
code
105201347/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/indian-liver-patient-records/indian_liver_patient.csv')
data
le = LabelEncoder()
data = data.apply(le.fit_transform)
data
code
105201347/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/indian-liver-patient-records/indian_liver_patient.csv')
data
le = LabelEncoder()
data = data.apply(le.fit_transform)
data
scaling = MinMaxScaler()
data = scaling.fit_transform(data)
data = pd.DataFrame(data)
data
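# Hedged aside (not part of the original cell): MinMaxScaler maps each column
# to (x - min) / (max - min), so every feature should land in [0, 1].
print(data.min().min(), data.max().max())  # expected: 0.0 1.0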
code
105201347/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
data = pd.read_csv('../input/indian-liver-patient-records/indian_liver_patient.csv')
data
data.info()
code
105201347/cell_10
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/indian-liver-patient-records/indian_liver_patient.csv')
data
le = LabelEncoder()
data = data.apply(le.fit_transform)
data
scaling = MinMaxScaler()
data = scaling.fit_transform(data)
data = pd.DataFrame(data)
data
x = data.drop(10, axis=1)
y = data[10]
plt.figure(figsize=(10, 6))
sns.heatmap(x.corr(), annot=True)
code
105201347/cell_5
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
data = pd.read_csv('../input/indian-liver-patient-records/indian_liver_patient.csv')
data
plt.figure(figsize=(10, 6))
sns.heatmap(data.corr(), annot=True)
code
33104556/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train = train.drop_duplicates().reset_index(drop=True)
train.target.value_counts()
train.isnull().sum()
test.isnull().sum()
dist = train[train.target == 1].keyword.value_counts().head()
#dist
plt.figure(figsize=(9, 6))
sns.barplot(dist, dist.index)
plt.show()
nondist = train[train.target == 0].keyword.value_counts().head()
plt.figure(figsize=(9, 6))
sns.barplot(nondist, nondist.index)
plt.show()
code
33104556/cell_9
[ "image_output_1.png" ]
import pandas as pd
train = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
test.isnull().sum()
code
33104556/cell_4
[ "image_output_1.png" ]
import pandas as pd
train = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train.head(10)
code
33104556/cell_6
[ "image_output_1.png" ]
import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train = train.drop_duplicates().reset_index(drop=True)
sns.countplot(x='target', data=train)
code
33104556/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train = train.drop_duplicates().reset_index(drop=True)
train.target.value_counts()
train.isnull().sum()
test.isnull().sum()
plt.figure(figsize=(12, 8))
sns.countplot(x='keyword', data=train, order=train.keyword.value_counts().iloc[:15].index)
plt.title('Top Keywords')
plt.show()
code
33104556/cell_7
[ "image_output_1.png" ]
import pandas as pd
train = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train = train.drop_duplicates().reset_index(drop=True)
train.target.value_counts()
code
33104556/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd
train = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train = train.drop_duplicates().reset_index(drop=True)
train.target.value_counts()
train.isnull().sum()
code
33104556/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train = train.drop_duplicates().reset_index(drop=True)
train.target.value_counts()
train.isnull().sum()
test.isnull().sum()
dist = train[train.target == 1].keyword.value_counts().head()
#dist
plt.figure(figsize=(9, 6))
sns.barplot(dist, dist.index)
plt.show()
nondist = train[train.target == 0].keyword.value_counts().head()
#nondist
plt.figure(figsize=(9, 6))
sns.barplot(nondist, nondist.index)
plt.show()
distribution_dist = train.groupby('keyword').mean()['target'].sort_values(ascending=False).head(10)
distribution_nondist = train.groupby('keyword').mean()['target'].sort_values().head(10)
plt.figure(figsize=(9, 6))
sns.barplot(distribution_nondist, distribution_nondist.index)
plt.title('Distribution of Non Disasters keywords for lower risk')
plt.show()
code
33104556/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train = train.drop_duplicates().reset_index(drop=True)
train.target.value_counts()
train.isnull().sum()
test.isnull().sum()
dist = train[train.target == 1].keyword.value_counts().head()
#dist
plt.figure(figsize=(9, 6))
sns.barplot(dist, dist.index)
plt.show()
nondist = train[train.target == 0].keyword.value_counts().head()
#nondist
plt.figure(figsize=(9, 6))
sns.barplot(nondist, nondist.index)
plt.show()
distribution_dist = train.groupby('keyword').mean()['target'].sort_values(ascending=False).head(10)
distribution_nondist = train.groupby('keyword').mean()['target'].sort_values().head(10)
print(train.location.nunique(), test.location.nunique())
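# Hedged aside (not part of the original cell): with a 0/1 target, the grouped
# mean is just the disaster share per keyword, which is what the sorts expose.
print(train.groupby('keyword')['target'].mean().sort_values(ascending=False).head(3))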
code
33104556/cell_3
[ "image_output_1.png" ]
import pandas as pd
train = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
print('Train : ', train.shape)
print('*' * 10)
print('Test : ', test.shape)
code
33104556/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
test = pd.read_csv('/kaggle/input/nlp-getting-started/test.csv')
train = train.drop_duplicates().reset_index(drop=True)
train.target.value_counts()
train.isnull().sum()
test.isnull().sum()
dist = train[train.target == 1].keyword.value_counts().head()
#dist
plt.figure(figsize=(9, 6))
sns.barplot(dist, dist.index)
plt.show()
nondist = train[train.target == 0].keyword.value_counts().head()
#nondist
plt.figure(figsize=(9, 6))
sns.barplot(nondist, nondist.index)
plt.show()
distribution_dist = train.groupby('keyword').mean()['target'].sort_values(ascending=False).head(10)
plt.figure(figsize=(9, 6))
sns.barplot(distribution_dist, distribution_dist.index)
plt.title('Distribution of keywords for higher risk')
plt.show()
code