Dataset columns:
  path              string, 13–17 characters
  screenshot_names  sequence, 1–873 items
  code              string, 0–40.4k characters
  cell_type         string, 1 class ("code")
88091003/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.model_selection import StratifiedKFold, GroupKFold
import librosa
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import soundfile as sf

SEED = 42
DATA_PATH = '../input/birdclef-2022/'
AUDIO_PATH = '../input/birdclef-2022/train_audio'
MEAN = np.array([0.485, 0.456, 0.406])
STD = np.array([0.229, 0.224, 0.225])
NUM_WORKERS = 4
CLASSES = sorted(os.listdir(AUDIO_PATH))
NUM_CLASSES = len(CLASSES)

class AudioParams:
    """Parameters used for the audio data"""
    sr = 32000
    duration = 5
    n_mels = 224
    fmin = 20
    fmax = 16000

train = pd.read_csv('../input/birdclef-2022/train_metadata.csv')
train['file_path'] = AUDIO_PATH + '/' + train['filename']
paths = train['file_path'].values

Fold = StratifiedKFold(n_splits=5, shuffle=True, random_state=SEED)
for n, (trn_index, val_index) in enumerate(Fold.split(train, train['primary_label'])):
    train.loc[val_index, 'kfold'] = int(n)
train['kfold'] = train['kfold'].astype(int)
train.to_csv('train_folds.csv', index=False)

def compute_melspec(y, params):
    """Computes a mel-spectrogram and converts it to the decibel scale.

    Arguments:
        y {np array} -- signal
        params {AudioParams} -- parameters to use for the spectrogram;
            expected to have the attributes sr, n_mels, fmin, fmax

    Returns:
        np array -- mel-spectrogram
    """
    melspec = librosa.feature.melspectrogram(y=y, sr=params.sr, n_mels=params.n_mels,
                                             fmin=params.fmin, fmax=params.fmax)
    melspec = librosa.power_to_db(melspec).astype(np.float32)
    return melspec

def crop_or_pad(y, length, sr, train=True, probs=None):
    """Crops or zero-pads an array to a chosen length.

    Arguments:
        y {1D np array} -- array to crop
        length {int} -- length of the crop
        sr {int} -- sampling rate

    Keyword Arguments:
        train {bool} -- whether we are at train time; if so, crop randomly,
            else return the beginning of y (default: {True})
        probs {None or numpy array} -- probabilities to use to choose where
            to crop (default: {None})

    Returns:
        1D np array -- cropped array
    """
    if len(y) <= length:
        y = np.concatenate([y, np.zeros(length - len(y))])
    else:
        if not train:
            start = 0
        elif probs is None:
            start = np.random.randint(len(y) - length)
        else:
            # probs is a per-second distribution, so convert the draw to samples
            start = np.random.choice(np.arange(len(probs)), p=probs) + np.random.random()
            start = int(sr * start)
        y = y[start:start + length]
    return y.astype(np.float32)

def mono_to_color(X, eps=1e-06, mean=None, std=None):
    """Converts a one-channel array to a 3-channel one in [0, 255].

    Arguments:
        X {numpy array [H x W]} -- 2D array to convert

    Keyword Arguments:
        eps {float} -- to avoid dividing by 0 (default: {1e-6})
        mean {None or np array} -- mean for normalization (default: {None})
        std {None or np array} -- std for normalization (default: {None})

    Returns:
        numpy array [H x W x 3] -- RGB numpy array
    """
    X = np.stack([X, X, X], axis=-1)
    # fall back to per-image statistics when mean/std are not given
    mean = mean or X.mean()
    std = std or X.std()
    X = (X - mean) / (std + eps)
    _min, _max = X.min(), X.max()
    if _max - _min > eps:
        V = np.clip(X, _min, _max)
        V = 255 * (V - _min) / (_max - _min)
        V = V.astype(np.uint8)
    else:
        V = np.zeros_like(X, dtype=np.uint8)
    return V

path = train['file_path'][0]
y, sr = sf.read(path, always_2d=True)
y = np.mean(y, 1)
X = compute_melspec(y, AudioParams)
X = mono_to_color(X)
X = X.astype(np.uint8)
plt.imshow(X)
code
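Aside: a minimal numpy-only sketch (synthetic data, not from the notebook) of the contract `crop_or_pad` above is meant to satisfy — short clips are zero-padded up to `length`, long clips are cropped down to it:

import numpy as np

def crop_or_pad_demo(y, length):
    # pad with trailing zeros when the clip is too short...
    if len(y) <= length:
        return np.concatenate([y, np.zeros(length - len(y))])
    # ...otherwise take a random window of exactly `length` samples
    start = np.random.randint(len(y) - length)
    return y[start:start + length]

assert len(crop_or_pad_demo(np.ones(10), 16)) == 16   # padded
assert len(crop_or_pad_demo(np.ones(100), 16)) == 16  # cropped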
88091003/cell_2
[ "application_vnd.jupyter.stderr_output_2.png", "text_html_output_1.png", "text_plain_output_1.png" ]
!pip install ../input/torchlibrosa/torchlibrosa-0.0.5-py3-none-any.whl > /dev/null
code
88091003/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.model_selection import StratifiedKFold, GroupKFold
import librosa
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import soundfile as sf

SEED = 42
DATA_PATH = '../input/birdclef-2022/'
AUDIO_PATH = '../input/birdclef-2022/train_audio'
MEAN = np.array([0.485, 0.456, 0.406])
STD = np.array([0.229, 0.224, 0.225])
NUM_WORKERS = 4
CLASSES = sorted(os.listdir(AUDIO_PATH))
NUM_CLASSES = len(CLASSES)

class AudioParams:
    """Parameters used for the audio data"""
    sr = 32000
    duration = 5
    n_mels = 224
    fmin = 20
    fmax = 16000

train = pd.read_csv('../input/birdclef-2022/train_metadata.csv')
train['file_path'] = AUDIO_PATH + '/' + train['filename']
paths = train['file_path'].values

Fold = StratifiedKFold(n_splits=5, shuffle=True, random_state=SEED)
for n, (trn_index, val_index) in enumerate(Fold.split(train, train['primary_label'])):
    train.loc[val_index, 'kfold'] = int(n)
train['kfold'] = train['kfold'].astype(int)
train.to_csv('train_folds.csv', index=False)

def compute_melspec(y, params):
    """Computes a mel-spectrogram and converts it to the decibel scale.

    Arguments:
        y {np array} -- signal
        params {AudioParams} -- parameters to use for the spectrogram;
            expected to have the attributes sr, n_mels, fmin, fmax

    Returns:
        np array -- mel-spectrogram
    """
    melspec = librosa.feature.melspectrogram(y=y, sr=params.sr, n_mels=params.n_mels,
                                             fmin=params.fmin, fmax=params.fmax)
    melspec = librosa.power_to_db(melspec).astype(np.float32)
    return melspec

def crop_or_pad(y, length, sr, train=True, probs=None):
    """Crops or zero-pads an array to a chosen length.

    Arguments:
        y {1D np array} -- array to crop
        length {int} -- length of the crop
        sr {int} -- sampling rate

    Keyword Arguments:
        train {bool} -- whether we are at train time; if so, crop randomly,
            else return the beginning of y (default: {True})
        probs {None or numpy array} -- probabilities to use to choose where
            to crop (default: {None})

    Returns:
        1D np array -- cropped array
    """
    if len(y) <= length:
        y = np.concatenate([y, np.zeros(length - len(y))])
    else:
        if not train:
            start = 0
        elif probs is None:
            start = np.random.randint(len(y) - length)
        else:
            # probs is a per-second distribution, so convert the draw to samples
            start = np.random.choice(np.arange(len(probs)), p=probs) + np.random.random()
            start = int(sr * start)
        y = y[start:start + length]
    return y.astype(np.float32)

def mono_to_color(X, eps=1e-06, mean=None, std=None):
    """Converts a one-channel array to a 3-channel one in [0, 255].

    Arguments:
        X {numpy array [H x W]} -- 2D array to convert

    Keyword Arguments:
        eps {float} -- to avoid dividing by 0 (default: {1e-6})
        mean {None or np array} -- mean for normalization (default: {None})
        std {None or np array} -- std for normalization (default: {None})

    Returns:
        numpy array [H x W x 3] -- RGB numpy array
    """
    X = np.stack([X, X, X], axis=-1)
    mean = mean or X.mean()
    std = std or X.std()
    X = (X - mean) / (std + eps)
    _min, _max = X.min(), X.max()
    if _max - _min > eps:
        V = np.clip(X, _min, _max)
        V = 255 * (V - _min) / (_max - _min)
        V = V.astype(np.uint8)
    else:
        V = np.zeros_like(X, dtype=np.uint8)
    return V

path = train['file_path'][0]
y, sr = sf.read(path, always_2d=True)
y = np.mean(y, 1)
X = compute_melspec(y, AudioParams)
X = mono_to_color(X)
X = X.astype(np.uint8)

path = train['file_path'][0]
y, sr = sf.read(path, always_2d=True)
y = np.mean(y, 1)
y = crop_or_pad(y, AudioParams.duration * AudioParams.sr, sr=AudioParams.sr, train=True, probs=None)
X = compute_melspec(y, AudioParams)
X = mono_to_color(X)
X = X.astype(np.uint8)
plt.imshow(X)
code
88091003/cell_10
[ "text_plain_output_1.png" ]
from joblib import Parallel, delayed
from sklearn.model_selection import StratifiedKFold, GroupKFold
from tqdm import tqdm
import librosa
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import soundfile as sf

SEED = 42
DATA_PATH = '../input/birdclef-2022/'
AUDIO_PATH = '../input/birdclef-2022/train_audio'
MEAN = np.array([0.485, 0.456, 0.406])
STD = np.array([0.229, 0.224, 0.225])
NUM_WORKERS = 4
CLASSES = sorted(os.listdir(AUDIO_PATH))
NUM_CLASSES = len(CLASSES)

class AudioParams:
    """Parameters used for the audio data"""
    sr = 32000
    duration = 5
    n_mels = 224
    fmin = 20
    fmax = 16000

train = pd.read_csv('../input/birdclef-2022/train_metadata.csv')
train['file_path'] = AUDIO_PATH + '/' + train['filename']
paths = train['file_path'].values

Fold = StratifiedKFold(n_splits=5, shuffle=True, random_state=SEED)
for n, (trn_index, val_index) in enumerate(Fold.split(train, train['primary_label'])):
    train.loc[val_index, 'kfold'] = int(n)
train['kfold'] = train['kfold'].astype(int)
train.to_csv('train_folds.csv', index=False)

def compute_melspec(y, params):
    """Computes a mel-spectrogram and converts it to the decibel scale.

    Arguments:
        y {np array} -- signal
        params {AudioParams} -- parameters to use for the spectrogram;
            expected to have the attributes sr, n_mels, fmin, fmax

    Returns:
        np array -- mel-spectrogram
    """
    melspec = librosa.feature.melspectrogram(y=y, sr=params.sr, n_mels=params.n_mels,
                                             fmin=params.fmin, fmax=params.fmax)
    melspec = librosa.power_to_db(melspec).astype(np.float32)
    return melspec

def crop_or_pad(y, length, sr, train=True, probs=None):
    """Crops or zero-pads an array to a chosen length.

    Arguments:
        y {1D np array} -- array to crop
        length {int} -- length of the crop
        sr {int} -- sampling rate

    Keyword Arguments:
        train {bool} -- whether we are at train time; if so, crop randomly,
            else return the beginning of y (default: {True})
        probs {None or numpy array} -- probabilities to use to choose where
            to crop (default: {None})

    Returns:
        1D np array -- cropped array
    """
    if len(y) <= length:
        y = np.concatenate([y, np.zeros(length - len(y))])
    else:
        if not train:
            start = 0
        elif probs is None:
            start = np.random.randint(len(y) - length)
        else:
            # probs is a per-second distribution, so convert the draw to samples
            start = np.random.choice(np.arange(len(probs)), p=probs) + np.random.random()
            start = int(sr * start)
        y = y[start:start + length]
    return y.astype(np.float32)

def mono_to_color(X, eps=1e-06, mean=None, std=None):
    """Converts a one-channel array to a 3-channel one in [0, 255].

    Arguments:
        X {numpy array [H x W]} -- 2D array to convert

    Keyword Arguments:
        eps {float} -- to avoid dividing by 0 (default: {1e-6})
        mean {None or np array} -- mean for normalization (default: {None})
        std {None or np array} -- std for normalization (default: {None})

    Returns:
        numpy array [H x W x 3] -- RGB numpy array
    """
    X = np.stack([X, X, X], axis=-1)
    mean = mean or X.mean()
    std = std or X.std()
    X = (X - mean) / (std + eps)
    _min, _max = X.min(), X.max()
    if _max - _min > eps:
        V = np.clip(X, _min, _max)
        V = 255 * (V - _min) / (_max - _min)
        V = V.astype(np.uint8)
    else:
        V = np.zeros_like(X, dtype=np.uint8)
    return V

path = train['file_path'][0]
y, sr = sf.read(path, always_2d=True)
y = np.mean(y, 1)
X = compute_melspec(y, AudioParams)
X = mono_to_color(X)
X = X.astype(np.uint8)

path = train['file_path'][0]
y, sr = sf.read(path, always_2d=True)
y = np.mean(y, 1)
y = crop_or_pad(y, AudioParams.duration * AudioParams.sr, sr=AudioParams.sr, train=True, probs=None)
X = compute_melspec(y, AudioParams)
X = mono_to_color(X)
X = X.astype(np.uint8)

def Audio_to_Image(path, params):
    y, sr = sf.read(path, always_2d=True)
    y = np.mean(y, 1)
    y = crop_or_pad(y, params.duration * params.sr, sr=params.sr, train=True, probs=None)
    image = compute_melspec(y, params)
    image = mono_to_color(image)
    image = image.astype(np.uint8)
    return image

def save_(path):
    save_path = '../working/' + '/'.join(path.split('/')[-2:])
    np.save(save_path, Audio_to_Image(path, AudioParams))

NUM_WORKERS = 4
for dir_ in CLASSES:
    _ = os.makedirs(dir_, exist_ok=True)
# the original loop variable shadowed the AUDIO_PATH constant; renamed to p
_ = Parallel(n_jobs=NUM_WORKERS)(delayed(save_)(p) for p in tqdm(paths))
code
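Note on the saved files: np.save appends '.npy' when the target name does not already end with it, so each clip should land as '<class>/<filename>.ogg.npy' under ../working. A hypothetical load (the class and file names below are placeholders, not taken from the dataset):

import numpy as np

# placeholder path -- substitute a real '<class>/<file>.ogg.npy' from ../working
img = np.load('../working/some_class/some_file.ogg.npy')
print(img.shape, img.dtype)  # expected roughly (224, n_frames, 3) uint8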
50237786/cell_21
[ "text_plain_output_1.png" ]
alphabet = 'abcdefghijklmnopqrstuvwxyz'
key = 'xznlwebgjhqdyvtkfuompciasr'
secret_message = input('Enter your message: ')
secret_message = secret_message.lower()
for c in secret_message:
    if c.isalpha():
        print(key[alphabet.index(c)], end='')
    else:
        print(c, end='')
code
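Since key is a permutation of alphabet, the substitution cipher above inverts cleanly; a minimal decoder sketch using the same alphabet and key:

alphabet = 'abcdefghijklmnopqrstuvwxyz'
key = 'xznlwebgjhqdyvtkfuompciasr'

def decode(ciphertext):
    # map each enciphered letter back through its position in the key
    return ''.join(alphabet[key.index(c)] if c.isalpha() else c
                   for c in ciphertext.lower())

assert decode('gwddt') == 'hello'  # 'hello' encodes to 'gwddt' under this key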
50237786/cell_13
[ "text_plain_output_1.png" ]
s = ''
for i in range(10):
    t = input('Enter a letter: ')
    if t == 'a' or t == 'e' or t == 'i' or t == 'o' or t == 'u':
        s = s + t
s = input('Enter a string')
s = input('Enter some text: ')
s = input('Enter some text: ')
doubled_s = ''
for c in s:
    doubled_s = doubled_s + c * 2
code
50237786/cell_9
[ "text_plain_output_1.png" ]
print('\n' * 9)
code
50237786/cell_11
[ "text_plain_output_1.png" ]
s = ''
for i in range(10):
    t = input('Enter a letter: ')
    if t == 'a' or t == 'e' or t == 'i' or t == 'o' or t == 'u':
        s = s + t
s = input('Enter a string')
s = input('Enter some text: ')
for i in range(len(s)):
    if s[i] == 'a':
        print(i)
code
50237786/cell_19
[ "text_plain_output_1.png" ]
s = ''
for i in range(10):
    t = input('Enter a letter: ')
    if t == 'a' or t == 'e' or t == 'i' or t == 'o' or t == 'u':
        s = s + t
s = input('Enter a string')
s = input('Enter some text: ')
s = input('Enter some text: ')
doubled_s = ''
for c in s:
    doubled_s = doubled_s + c * 2
s = s.lower()
for c in ',.;:-?!()\'"':
    s = s.replace(c, '')
s = input('Enter your decimal number: ')
print(s[s.index('.') + 1:])
code
50237786/cell_1
[ "text_plain_output_1.png" ]
print('-' * 75)
code
50237786/cell_7
[ "text_plain_output_1.png" ]
print('Hi\n\nthere!')
code
50237786/cell_15
[ "text_plain_output_1.png" ]
name = input('Enter your name: ')
for i in range(len(name)):
    print(name[:i + 1], end=' ')
code
50237786/cell_3
[ "text_plain_output_1.png" ]
s = ''
for i in range(10):
    t = input('Enter a letter: ')
    if t == 'a' or t == 'e' or t == 'i' or t == 'o' or t == 'u':
        s = s + t
print(s)
code
50237786/cell_5
[ "text_plain_output_1.png" ]
s = ''
for i in range(10):
    t = input('Enter a letter: ')
    if t == 'a' or t == 'e' or t == 'i' or t == 'o' or t == 'u':
        s = s + t
s = input('Enter a string')
if s[0].isalpha():
    print('Your string starts with a letter')
if not s.isalpha():
    print('Your string contains a non-letter.')
code
128020888/cell_21
[ "text_plain_output_1.png" ]
from imblearn.over_sampling import SMOTE
import pandas as pd

# the CSV ships without a header row, so pandas promoted the first data row
# to column names; rename those values to the intended columns
DoS = pd.read_csv('/kaggle/input/car-security/DoS_dataset.csv')
DoS.rename(inplace=True, columns={'1478198376.389427': 'Timestamp', '0316': 'CAN_ID',
                                  '8': 'DLC', '05': 'Data0', '21': 'Data1', '68': 'Data2',
                                  '09': 'Data3', '21.1': 'Data4', '21.2': 'Data5',
                                  '00': 'Data6', '6f': 'Data7', 'R': 'flag'})
DoS = DoS.dropna()
DoS['Timestamp'] = pd.to_datetime(DoS['Timestamp'], unit='s')
DoS['Timestamp'] = DoS['Timestamp'].apply(lambda x: int(x.timestamp()))
# the CAN ID and data bytes are hex strings; convert them to integers
for col in ['CAN_ID', 'Data0', 'Data1', 'Data2', 'Data3',
            'Data4', 'Data5', 'Data6', 'Data7']:
    DoS[col] = DoS[col].apply(lambda x: int(x, 16))
DoS['DLC'] = DoS['DLC'].astype(int)
X = DoS[['Timestamp', 'CAN_ID', 'DLC', 'Data0', 'Data1', 'Data2', 'Data3',
         'Data4', 'Data5', 'Data6', 'Data7']]
y = DoS['flag']
smt = SMOTE()
X_sm, y_sm = smt.fit_resample(X, y)
y_sm.value_counts(normalize=True)
DoS1 = pd.concat([X_sm, y_sm], axis=1)
Fuzzy = pd.read_csv('/kaggle/input/car-security/Fuzzy_dataset.csv')
Fuzzy.rename(inplace=True, columns={'1478195721.903877': 'Timestamp', '0545': 'CAN_ID',
                                    '8': 'DLC', 'd8': 'Data0', '00': 'Data1', '00.1': 'Data2',
                                    '8a': 'Data3', '00.2': 'Data4', '00.3': 'Data5',
                                    '00.4': 'Data6', '00.5': 'Data7', 'R': 'flag'})
Fuzzy = Fuzzy.dropna()
print(Fuzzy.dtypes)
code
128020888/cell_13
[ "text_plain_output_1.png" ]
from imblearn.over_sampling import SMOTE
import pandas as pd

DoS = pd.read_csv('/kaggle/input/car-security/DoS_dataset.csv')
DoS.rename(inplace=True, columns={'1478198376.389427': 'Timestamp', '0316': 'CAN_ID',
                                  '8': 'DLC', '05': 'Data0', '21': 'Data1', '68': 'Data2',
                                  '09': 'Data3', '21.1': 'Data4', '21.2': 'Data5',
                                  '00': 'Data6', '6f': 'Data7', 'R': 'flag'})
DoS = DoS.dropna()
DoS['Timestamp'] = pd.to_datetime(DoS['Timestamp'], unit='s')
DoS['Timestamp'] = DoS['Timestamp'].apply(lambda x: int(x.timestamp()))
for col in ['CAN_ID', 'Data0', 'Data1', 'Data2', 'Data3',
            'Data4', 'Data5', 'Data6', 'Data7']:
    DoS[col] = DoS[col].apply(lambda x: int(x, 16))
DoS['DLC'] = DoS['DLC'].astype(int)
X = DoS[['Timestamp', 'CAN_ID', 'DLC', 'Data0', 'Data1', 'Data2', 'Data3',
         'Data4', 'Data5', 'Data6', 'Data7']]
y = DoS['flag']
smt = SMOTE()
X_sm, y_sm = smt.fit_resample(X, y)
y_sm.value_counts(normalize=True)
DoS1 = pd.concat([X_sm, y_sm], axis=1)
print(DoS1)
code
128020888/cell_25
[ "text_plain_output_1.png" ]
from imblearn.over_sampling import SMOTE
import pandas as pd

DoS = pd.read_csv('/kaggle/input/car-security/DoS_dataset.csv')
DoS.rename(inplace=True, columns={'1478198376.389427': 'Timestamp', '0316': 'CAN_ID',
                                  '8': 'DLC', '05': 'Data0', '21': 'Data1', '68': 'Data2',
                                  '09': 'Data3', '21.1': 'Data4', '21.2': 'Data5',
                                  '00': 'Data6', '6f': 'Data7', 'R': 'flag'})
DoS = DoS.dropna()
DoS['Timestamp'] = pd.to_datetime(DoS['Timestamp'], unit='s')
DoS['Timestamp'] = DoS['Timestamp'].apply(lambda x: int(x.timestamp()))
for col in ['CAN_ID', 'Data0', 'Data1', 'Data2', 'Data3',
            'Data4', 'Data5', 'Data6', 'Data7']:
    DoS[col] = DoS[col].apply(lambda x: int(x, 16))
DoS['DLC'] = DoS['DLC'].astype(int)
X = DoS[['Timestamp', 'CAN_ID', 'DLC', 'Data0', 'Data1', 'Data2', 'Data3',
         'Data4', 'Data5', 'Data6', 'Data7']]
y = DoS['flag']
smt = SMOTE()
X_sm, y_sm = smt.fit_resample(X, y)
y_sm.value_counts(normalize=True)
DoS1 = pd.concat([X_sm, y_sm], axis=1)
Fuzzy = pd.read_csv('/kaggle/input/car-security/Fuzzy_dataset.csv')
Fuzzy.rename(inplace=True, columns={'1478195721.903877': 'Timestamp', '0545': 'CAN_ID',
                                    '8': 'DLC', 'd8': 'Data0', '00': 'Data1', '00.1': 'Data2',
                                    '8a': 'Data3', '00.2': 'Data4', '00.3': 'Data5',
                                    '00.4': 'Data6', '00.5': 'Data7', 'R': 'flag'})
Fuzzy = Fuzzy.dropna()
Fuzzy['Timestamp'] = pd.to_datetime(Fuzzy['Timestamp'], unit='s')
Fuzzy['Timestamp'] = Fuzzy['Timestamp'].apply(lambda x: int(x.timestamp()))
for col in ['CAN_ID', 'Data0', 'Data1', 'Data2', 'Data3',
            'Data4', 'Data5', 'Data6', 'Data7']:
    Fuzzy[col] = Fuzzy[col].apply(lambda x: int(x, 16))
Fuzzy['DLC'] = Fuzzy['DLC'].astype(int)
X = Fuzzy[['Timestamp', 'CAN_ID', 'DLC', 'Data0', 'Data1', 'Data2', 'Data3',
           'Data4', 'Data5', 'Data6', 'Data7']]
y = Fuzzy['flag']
smt = SMOTE()
X_sm, y_sm = smt.fit_resample(X, y)
y_sm.value_counts(normalize=True)
print(X_sm, y_sm)
code
128020888/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd

DoS = pd.read_csv('/kaggle/input/car-security/DoS_dataset.csv')
DoS.rename(inplace=True, columns={'1478198376.389427': 'Timestamp', '0316': 'CAN_ID',
                                  '8': 'DLC', '05': 'Data0', '21': 'Data1', '68': 'Data2',
                                  '09': 'Data3', '21.1': 'Data4', '21.2': 'Data5',
                                  '00': 'Data6', '6f': 'Data7', 'R': 'flag'})
DoS['flag'].value_counts(normalize=True)
code
128020888/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd

DoS = pd.read_csv('/kaggle/input/car-security/DoS_dataset.csv')
DoS.rename(inplace=True, columns={'1478198376.389427': 'Timestamp', '0316': 'CAN_ID',
                                  '8': 'DLC', '05': 'Data0', '21': 'Data1', '68': 'Data2',
                                  '09': 'Data3', '21.1': 'Data4', '21.2': 'Data5',
                                  '00': 'Data6', '6f': 'Data7', 'R': 'flag'})
print(DoS)
code
128020888/cell_11
[ "text_plain_output_1.png" ]
from imblearn.over_sampling import SMOTE
import pandas as pd

DoS = pd.read_csv('/kaggle/input/car-security/DoS_dataset.csv')
DoS.rename(inplace=True, columns={'1478198376.389427': 'Timestamp', '0316': 'CAN_ID',
                                  '8': 'DLC', '05': 'Data0', '21': 'Data1', '68': 'Data2',
                                  '09': 'Data3', '21.1': 'Data4', '21.2': 'Data5',
                                  '00': 'Data6', '6f': 'Data7', 'R': 'flag'})
DoS = DoS.dropna()
DoS['Timestamp'] = pd.to_datetime(DoS['Timestamp'], unit='s')
DoS['Timestamp'] = DoS['Timestamp'].apply(lambda x: int(x.timestamp()))
for col in ['CAN_ID', 'Data0', 'Data1', 'Data2', 'Data3',
            'Data4', 'Data5', 'Data6', 'Data7']:
    DoS[col] = DoS[col].apply(lambda x: int(x, 16))
DoS['DLC'] = DoS['DLC'].astype(int)
X = DoS[['Timestamp', 'CAN_ID', 'DLC', 'Data0', 'Data1', 'Data2', 'Data3',
         'Data4', 'Data5', 'Data6', 'Data7']]
y = DoS['flag']
smt = SMOTE()
X_sm, y_sm = smt.fit_resample(X, y)
y_sm.value_counts(normalize=True)
print(X_sm, y_sm)
code
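One caveat worth flagging on the cell above: fitting SMOTE on the full dataset before any train/test split leaks synthetic samples into evaluation. A hedged sketch of the usual fix, oversampling only inside the training fold via imblearn's pipeline (the classifier is an arbitrary stand-in, not one used in this notebook):

from imblearn.over_sampling import SMOTE
from imblearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# X and y as built in the cell above
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)
# inside the pipeline, SMOTE resamples only the data seen by fit()
pipe = Pipeline([('smote', SMOTE(random_state=42)),
                 ('clf', RandomForestClassifier(random_state=42))])
pipe.fit(X_train, y_train)
print(pipe.score(X_test, y_test))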
128020888/cell_19
[ "text_plain_output_1.png" ]
from imblearn.over_sampling import SMOTE
import pandas as pd

DoS = pd.read_csv('/kaggle/input/car-security/DoS_dataset.csv')
DoS.rename(inplace=True, columns={'1478198376.389427': 'Timestamp', '0316': 'CAN_ID',
                                  '8': 'DLC', '05': 'Data0', '21': 'Data1', '68': 'Data2',
                                  '09': 'Data3', '21.1': 'Data4', '21.2': 'Data5',
                                  '00': 'Data6', '6f': 'Data7', 'R': 'flag'})
DoS = DoS.dropna()
DoS['Timestamp'] = pd.to_datetime(DoS['Timestamp'], unit='s')
DoS['Timestamp'] = DoS['Timestamp'].apply(lambda x: int(x.timestamp()))
for col in ['CAN_ID', 'Data0', 'Data1', 'Data2', 'Data3',
            'Data4', 'Data5', 'Data6', 'Data7']:
    DoS[col] = DoS[col].apply(lambda x: int(x, 16))
DoS['DLC'] = DoS['DLC'].astype(int)
X = DoS[['Timestamp', 'CAN_ID', 'DLC', 'Data0', 'Data1', 'Data2', 'Data3',
         'Data4', 'Data5', 'Data6', 'Data7']]
y = DoS['flag']
smt = SMOTE()
X_sm, y_sm = smt.fit_resample(X, y)
y_sm.value_counts(normalize=True)
DoS1 = pd.concat([X_sm, y_sm], axis=1)
Fuzzy = pd.read_csv('/kaggle/input/car-security/Fuzzy_dataset.csv')
Fuzzy.rename(inplace=True, columns={'1478195721.903877': 'Timestamp', '0545': 'CAN_ID',
                                    '8': 'DLC', 'd8': 'Data0', '00': 'Data1', '00.1': 'Data2',
                                    '8a': 'Data3', '00.2': 'Data4', '00.3': 'Data5',
                                    '00.4': 'Data6', '00.5': 'Data7', 'R': 'flag'})
Fuzzy['flag'].isnull().sum()
code
128020888/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

DoS = pd.read_csv('/kaggle/input/car-security/DoS_dataset.csv')
DoS.rename(inplace=True, columns={'1478198376.389427': 'Timestamp', '0316': 'CAN_ID',
                                  '8': 'DLC', '05': 'Data0', '21': 'Data1', '68': 'Data2',
                                  '09': 'Data3', '21.1': 'Data4', '21.2': 'Data5',
                                  '00': 'Data6', '6f': 'Data7', 'R': 'flag'})
DoS = DoS.dropna()
print(DoS.dtypes)
code
128020888/cell_18
[ "text_plain_output_1.png" ]
from imblearn.over_sampling import SMOTE
import pandas as pd

DoS = pd.read_csv('/kaggle/input/car-security/DoS_dataset.csv')
DoS.rename(inplace=True, columns={'1478198376.389427': 'Timestamp', '0316': 'CAN_ID',
                                  '8': 'DLC', '05': 'Data0', '21': 'Data1', '68': 'Data2',
                                  '09': 'Data3', '21.1': 'Data4', '21.2': 'Data5',
                                  '00': 'Data6', '6f': 'Data7', 'R': 'flag'})
DoS = DoS.dropna()
DoS['Timestamp'] = pd.to_datetime(DoS['Timestamp'], unit='s')
DoS['Timestamp'] = DoS['Timestamp'].apply(lambda x: int(x.timestamp()))
for col in ['CAN_ID', 'Data0', 'Data1', 'Data2', 'Data3',
            'Data4', 'Data5', 'Data6', 'Data7']:
    DoS[col] = DoS[col].apply(lambda x: int(x, 16))
DoS['DLC'] = DoS['DLC'].astype(int)
X = DoS[['Timestamp', 'CAN_ID', 'DLC', 'Data0', 'Data1', 'Data2', 'Data3',
         'Data4', 'Data5', 'Data6', 'Data7']]
y = DoS['flag']
smt = SMOTE()
X_sm, y_sm = smt.fit_resample(X, y)
y_sm.value_counts(normalize=True)
DoS1 = pd.concat([X_sm, y_sm], axis=1)
Fuzzy = pd.read_csv('/kaggle/input/car-security/Fuzzy_dataset.csv')
Fuzzy.rename(inplace=True, columns={'1478195721.903877': 'Timestamp', '0545': 'CAN_ID',
                                    '8': 'DLC', 'd8': 'Data0', '00': 'Data1', '00.1': 'Data2',
                                    '8a': 'Data3', '00.2': 'Data4', '00.3': 'Data5',
                                    '00.4': 'Data6', '00.5': 'Data7', 'R': 'flag'})
Fuzzy['flag'].value_counts(normalize=True)
code
128020888/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

DoS = pd.read_csv('/kaggle/input/car-security/DoS_dataset.csv')
DoS.rename(inplace=True, columns={'1478198376.389427': 'Timestamp', '0316': 'CAN_ID',
                                  '8': 'DLC', '05': 'Data0', '21': 'Data1', '68': 'Data2',
                                  '09': 'Data3', '21.1': 'Data4', '21.2': 'Data5',
                                  '00': 'Data6', '6f': 'Data7', 'R': 'flag'})
DoS = DoS.dropna()
DoS['Timestamp'] = pd.to_datetime(DoS['Timestamp'], unit='s')
DoS['Timestamp'] = DoS['Timestamp'].apply(lambda x: int(x.timestamp()))
for col in ['CAN_ID', 'Data0', 'Data1', 'Data2', 'Data3',
            'Data4', 'Data5', 'Data6', 'Data7']:
    DoS[col] = DoS[col].apply(lambda x: int(x, 16))
DoS['DLC'] = DoS['DLC'].astype(int)
print(DoS.head())
code
128020888/cell_15
[ "text_plain_output_1.png" ]
from imblearn.over_sampling import SMOTE
import pandas as pd

DoS = pd.read_csv('/kaggle/input/car-security/DoS_dataset.csv')
DoS.rename(inplace=True, columns={'1478198376.389427': 'Timestamp', '0316': 'CAN_ID',
                                  '8': 'DLC', '05': 'Data0', '21': 'Data1', '68': 'Data2',
                                  '09': 'Data3', '21.1': 'Data4', '21.2': 'Data5',
                                  '00': 'Data6', '6f': 'Data7', 'R': 'flag'})
DoS = DoS.dropna()
DoS['Timestamp'] = pd.to_datetime(DoS['Timestamp'], unit='s')
DoS['Timestamp'] = DoS['Timestamp'].apply(lambda x: int(x.timestamp()))
for col in ['CAN_ID', 'Data0', 'Data1', 'Data2', 'Data3',
            'Data4', 'Data5', 'Data6', 'Data7']:
    DoS[col] = DoS[col].apply(lambda x: int(x, 16))
DoS['DLC'] = DoS['DLC'].astype(int)
X = DoS[['Timestamp', 'CAN_ID', 'DLC', 'Data0', 'Data1', 'Data2', 'Data3',
         'Data4', 'Data5', 'Data6', 'Data7']]
y = DoS['flag']
smt = SMOTE()
X_sm, y_sm = smt.fit_resample(X, y)
y_sm.value_counts(normalize=True)
DoS1 = pd.concat([X_sm, y_sm], axis=1)
DoS1 = DoS1.dropna()
display(DoS1.dtypes)
code
128020888/cell_16
[ "text_plain_output_1.png" ]
from imblearn.over_sampling import SMOTE
import pandas as pd

DoS = pd.read_csv('/kaggle/input/car-security/DoS_dataset.csv')
DoS.rename(inplace=True, columns={'1478198376.389427': 'Timestamp', '0316': 'CAN_ID',
                                  '8': 'DLC', '05': 'Data0', '21': 'Data1', '68': 'Data2',
                                  '09': 'Data3', '21.1': 'Data4', '21.2': 'Data5',
                                  '00': 'Data6', '6f': 'Data7', 'R': 'flag'})
DoS = DoS.dropna()
DoS['Timestamp'] = pd.to_datetime(DoS['Timestamp'], unit='s')
DoS['Timestamp'] = DoS['Timestamp'].apply(lambda x: int(x.timestamp()))
for col in ['CAN_ID', 'Data0', 'Data1', 'Data2', 'Data3',
            'Data4', 'Data5', 'Data6', 'Data7']:
    DoS[col] = DoS[col].apply(lambda x: int(x, 16))
DoS['DLC'] = DoS['DLC'].astype(int)
X = DoS[['Timestamp', 'CAN_ID', 'DLC', 'Data0', 'Data1', 'Data2', 'Data3',
         'Data4', 'Data5', 'Data6', 'Data7']]
y = DoS['flag']
smt = SMOTE()
X_sm, y_sm = smt.fit_resample(X, y)
y_sm.value_counts(normalize=True)
DoS1 = pd.concat([X_sm, y_sm], axis=1)
Fuzzy = pd.read_csv('/kaggle/input/car-security/Fuzzy_dataset.csv')
Fuzzy.rename(inplace=True, columns={'1478195721.903877': 'Timestamp', '0545': 'CAN_ID',
                                    '8': 'DLC', 'd8': 'Data0', '00': 'Data1', '00.1': 'Data2',
                                    '8a': 'Data3', '00.2': 'Data4', '00.3': 'Data5',
                                    '00.4': 'Data6', '00.5': 'Data7', 'R': 'flag'})
print(Fuzzy)
code
128020888/cell_24
[ "text_plain_output_1.png" ]
from imblearn.over_sampling import SMOTE
import pandas as pd

DoS = pd.read_csv('/kaggle/input/car-security/DoS_dataset.csv')
DoS.rename(inplace=True, columns={'1478198376.389427': 'Timestamp', '0316': 'CAN_ID',
                                  '8': 'DLC', '05': 'Data0', '21': 'Data1', '68': 'Data2',
                                  '09': 'Data3', '21.1': 'Data4', '21.2': 'Data5',
                                  '00': 'Data6', '6f': 'Data7', 'R': 'flag'})
DoS = DoS.dropna()
DoS['Timestamp'] = pd.to_datetime(DoS['Timestamp'], unit='s')
DoS['Timestamp'] = DoS['Timestamp'].apply(lambda x: int(x.timestamp()))
for col in ['CAN_ID', 'Data0', 'Data1', 'Data2', 'Data3',
            'Data4', 'Data5', 'Data6', 'Data7']:
    DoS[col] = DoS[col].apply(lambda x: int(x, 16))
DoS['DLC'] = DoS['DLC'].astype(int)
X = DoS[['Timestamp', 'CAN_ID', 'DLC', 'Data0', 'Data1', 'Data2', 'Data3',
         'Data4', 'Data5', 'Data6', 'Data7']]
y = DoS['flag']
smt = SMOTE()
X_sm, y_sm = smt.fit_resample(X, y)
y_sm.value_counts(normalize=True)
DoS1 = pd.concat([X_sm, y_sm], axis=1)
Fuzzy = pd.read_csv('/kaggle/input/car-security/Fuzzy_dataset.csv')
Fuzzy.rename(inplace=True, columns={'1478195721.903877': 'Timestamp', '0545': 'CAN_ID',
                                    '8': 'DLC', 'd8': 'Data0', '00': 'Data1', '00.1': 'Data2',
                                    '8a': 'Data3', '00.2': 'Data4', '00.3': 'Data5',
                                    '00.4': 'Data6', '00.5': 'Data7', 'R': 'flag'})
Fuzzy = Fuzzy.dropna()
Fuzzy['Timestamp'] = pd.to_datetime(Fuzzy['Timestamp'], unit='s')
Fuzzy['Timestamp'] = Fuzzy['Timestamp'].apply(lambda x: int(x.timestamp()))
for col in ['CAN_ID', 'Data0', 'Data1', 'Data2', 'Data3',
            'Data4', 'Data5', 'Data6', 'Data7']:
    Fuzzy[col] = Fuzzy[col].apply(lambda x: int(x, 16))
Fuzzy['DLC'] = Fuzzy['DLC'].astype(int)
X = Fuzzy[['Timestamp', 'CAN_ID', 'DLC', 'Data0', 'Data1', 'Data2', 'Data3',
           'Data4', 'Data5', 'Data6', 'Data7']]
y = Fuzzy['flag']
smt = SMOTE()
X_sm, y_sm = smt.fit_resample(X, y)
y_sm.value_counts(normalize=True)
code
128020888/cell_14
[ "text_plain_output_1.png" ]
from imblearn.over_sampling import SMOTE
import pandas as pd

DoS = pd.read_csv('/kaggle/input/car-security/DoS_dataset.csv')
DoS.rename(inplace=True, columns={'1478198376.389427': 'Timestamp', '0316': 'CAN_ID',
                                  '8': 'DLC', '05': 'Data0', '21': 'Data1', '68': 'Data2',
                                  '09': 'Data3', '21.1': 'Data4', '21.2': 'Data5',
                                  '00': 'Data6', '6f': 'Data7', 'R': 'flag'})
DoS = DoS.dropna()
DoS['Timestamp'] = pd.to_datetime(DoS['Timestamp'], unit='s')
DoS['Timestamp'] = DoS['Timestamp'].apply(lambda x: int(x.timestamp()))
for col in ['CAN_ID', 'Data0', 'Data1', 'Data2', 'Data3',
            'Data4', 'Data5', 'Data6', 'Data7']:
    DoS[col] = DoS[col].apply(lambda x: int(x, 16))
DoS['DLC'] = DoS['DLC'].astype(int)
X = DoS[['Timestamp', 'CAN_ID', 'DLC', 'Data0', 'Data1', 'Data2', 'Data3',
         'Data4', 'Data5', 'Data6', 'Data7']]
y = DoS['flag']
smt = SMOTE()
X_sm, y_sm = smt.fit_resample(X, y)
y_sm.value_counts(normalize=True)
DoS1 = pd.concat([X_sm, y_sm], axis=1)
DoS1 = DoS1.dropna()
print(DoS1)
code
128020888/cell_22
[ "text_plain_output_1.png" ]
from imblearn.over_sampling import SMOTE
import pandas as pd

DoS = pd.read_csv('/kaggle/input/car-security/DoS_dataset.csv')
DoS.rename(inplace=True, columns={'1478198376.389427': 'Timestamp', '0316': 'CAN_ID',
                                  '8': 'DLC', '05': 'Data0', '21': 'Data1', '68': 'Data2',
                                  '09': 'Data3', '21.1': 'Data4', '21.2': 'Data5',
                                  '00': 'Data6', '6f': 'Data7', 'R': 'flag'})
DoS = DoS.dropna()
DoS['Timestamp'] = pd.to_datetime(DoS['Timestamp'], unit='s')
DoS['Timestamp'] = DoS['Timestamp'].apply(lambda x: int(x.timestamp()))
for col in ['CAN_ID', 'Data0', 'Data1', 'Data2', 'Data3',
            'Data4', 'Data5', 'Data6', 'Data7']:
    DoS[col] = DoS[col].apply(lambda x: int(x, 16))
DoS['DLC'] = DoS['DLC'].astype(int)
X = DoS[['Timestamp', 'CAN_ID', 'DLC', 'Data0', 'Data1', 'Data2', 'Data3',
         'Data4', 'Data5', 'Data6', 'Data7']]
y = DoS['flag']
smt = SMOTE()
X_sm, y_sm = smt.fit_resample(X, y)
y_sm.value_counts(normalize=True)
DoS1 = pd.concat([X_sm, y_sm], axis=1)
Fuzzy = pd.read_csv('/kaggle/input/car-security/Fuzzy_dataset.csv')
Fuzzy.rename(inplace=True, columns={'1478195721.903877': 'Timestamp', '0545': 'CAN_ID',
                                    '8': 'DLC', 'd8': 'Data0', '00': 'Data1', '00.1': 'Data2',
                                    '8a': 'Data3', '00.2': 'Data4', '00.3': 'Data5',
                                    '00.4': 'Data6', '00.5': 'Data7', 'R': 'flag'})
Fuzzy = Fuzzy.dropna()
Fuzzy['Timestamp'] = pd.to_datetime(Fuzzy['Timestamp'], unit='s')
Fuzzy['Timestamp'] = Fuzzy['Timestamp'].apply(lambda x: int(x.timestamp()))
for col in ['CAN_ID', 'Data0', 'Data1', 'Data2', 'Data3',
            'Data4', 'Data5', 'Data6', 'Data7']:
    Fuzzy[col] = Fuzzy[col].apply(lambda x: int(x, 16))
Fuzzy['DLC'] = Fuzzy['DLC'].astype(int)
print(Fuzzy.head())
code
128020888/cell_10
[ "text_plain_output_1.png" ]
from imblearn.over_sampling import SMOTE
import pandas as pd

DoS = pd.read_csv('/kaggle/input/car-security/DoS_dataset.csv')
DoS.rename(inplace=True, columns={'1478198376.389427': 'Timestamp', '0316': 'CAN_ID',
                                  '8': 'DLC', '05': 'Data0', '21': 'Data1', '68': 'Data2',
                                  '09': 'Data3', '21.1': 'Data4', '21.2': 'Data5',
                                  '00': 'Data6', '6f': 'Data7', 'R': 'flag'})
DoS = DoS.dropna()
DoS['Timestamp'] = pd.to_datetime(DoS['Timestamp'], unit='s')
DoS['Timestamp'] = DoS['Timestamp'].apply(lambda x: int(x.timestamp()))
for col in ['CAN_ID', 'Data0', 'Data1', 'Data2', 'Data3',
            'Data4', 'Data5', 'Data6', 'Data7']:
    DoS[col] = DoS[col].apply(lambda x: int(x, 16))
DoS['DLC'] = DoS['DLC'].astype(int)
X = DoS[['Timestamp', 'CAN_ID', 'DLC', 'Data0', 'Data1', 'Data2', 'Data3',
         'Data4', 'Data5', 'Data6', 'Data7']]
y = DoS['flag']
smt = SMOTE()
X_sm, y_sm = smt.fit_resample(X, y)
y_sm.value_counts(normalize=True)
code
128020888/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

DoS = pd.read_csv('/kaggle/input/car-security/DoS_dataset.csv')
DoS.rename(inplace=True, columns={'1478198376.389427': 'Timestamp', '0316': 'CAN_ID',
                                  '8': 'DLC', '05': 'Data0', '21': 'Data1', '68': 'Data2',
                                  '09': 'Data3', '21.1': 'Data4', '21.2': 'Data5',
                                  '00': 'Data6', '6f': 'Data7', 'R': 'flag'})
DoS['flag'].isnull().sum()
code
334146/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt  # needed for plt.figure / plt.scatter below
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_head = train[:10000]
plt.figure(figsize=(20, 15))
plt.scatter(x=train_head.x, y=train_head.y, c=train_head.time)
code
334146/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.describe()
code
334146/cell_2
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
from subprocess import check_output

print(check_output(['ls', '../input']).decode('utf8'))
code
334146/cell_7
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.describe()
code
334146/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt  # needed for plt.figure below
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
plt.figure(figsize=(20, 10))
sns.distplot(bins=200, a=train.accuracy)
code
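A small compatibility note: sns.distplot (used above) was deprecated in seaborn 0.11 and has since been removed; on current seaborn releases the rough equivalent would be:

import seaborn as sns

# assumes `train` from the cell above; histplot is the modern replacement
sns.histplot(train.accuracy, bins=200)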
334146/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head()
code
334146/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt  # needed for plt.figure / plt.scatter below
import seaborn as sns

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train_head = train[:10000]
plt.figure(figsize=(20, 10))
plt.scatter(x=train_head.time, y=train_head.accuracy)
code
334146/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
test.head()
code
49117600/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/hr-analytics/HR_comma_sep.csv')
left = data[data.left == 1]
right = data[data.left == 0]
data.shape
data.groupby('left').mean()
pd.crosstab(data.Department, data.left).plot(kind='bar')
code
49117600/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/hr-analytics/HR_comma_sep.csv')
left = data[data.left == 1]
right = data[data.left == 0]
right.shape
code
49117600/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/hr-analytics/HR_comma_sep.csv')
data.info()
code
49117600/cell_30
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegression

# X_train/y_train/X_test are created in an earlier cell (train/test split)
Reg = LogisticRegression()
Reg.fit(X_train, y_train)
Reg.predict(X_test)
code
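The three regression cells in this notebook (cell_29, cell_30, cell_31) reference X_train/X_test/y_train/y_test without defining them, so they presumably come from a split cell not captured here; a sketch of what that cell likely looked like (the test_size and random_state are assumptions):

from sklearn.model_selection import train_test_split

# X and y are the feature frame and 'left' column built in cell_22/cell_24
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)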
49117600/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/hr-analytics/HR_comma_sep.csv')
left = data[data.left == 1]
right = data[data.left == 0]
data.shape
data.groupby('left').mean()
sub_salary = data[['satisfaction_level', 'average_montly_hours', 'promotion_last_5years', 'salary']]
sal_dummies = pd.get_dummies(sub_salary.salary, prefix='salary')
df = pd.concat([sub_salary, sal_dummies], axis='columns')
df.drop(columns='salary', inplace=True)
df.head()
code
49117600/cell_29
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegression

# X_train/y_train are created in an earlier cell (train/test split)
Reg = LogisticRegression()
Reg.fit(X_train, y_train)
code
49117600/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/hr-analytics/HR_comma_sep.csv')
left = data[data.left == 1]
right = data[data.left == 0]
data.shape
data.groupby('left').mean()
code
49117600/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
49117600/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/hr-analytics/HR_comma_sep.csv')
left = data[data.left == 1]
left.shape
code
49117600/cell_18
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/hr-analytics/HR_comma_sep.csv')
left = data[data.left == 1]
right = data[data.left == 0]
data.shape
data.groupby('left').mean()
sub_salary = data[['satisfaction_level', 'average_montly_hours', 'promotion_last_5years', 'salary']]
sal_dummies = pd.get_dummies(sub_salary.salary, prefix='salary')
df = pd.concat([sub_salary, sal_dummies], axis='columns')
df.head()
code
49117600/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/hr-analytics/HR_comma_sep.csv')
left = data[data.left == 1]
right = data[data.left == 0]
data.shape
data.groupby('left').mean()
sub_salary = data[['satisfaction_level', 'average_montly_hours', 'promotion_last_5years', 'salary']]
sub_salary.head()
code
49117600/cell_31
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegression

# X_train/X_test/y_train/y_test are created in an earlier cell (train/test split)
Reg = LogisticRegression()
Reg.fit(X_train, y_train)
Reg.predict(X_test)
Reg.score(X_test, y_test)
code
49117600/cell_24
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/hr-analytics/HR_comma_sep.csv')
left = data[data.left == 1]
right = data[data.left == 0]
data.shape
data.groupby('left').mean()
y = data['left']
y.head()
code
49117600/cell_22
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/hr-analytics/HR_comma_sep.csv')
left = data[data.left == 1]
right = data[data.left == 0]
data.shape
data.groupby('left').mean()
sub_salary = data[['satisfaction_level', 'average_montly_hours', 'promotion_last_5years', 'salary']]
sal_dummies = pd.get_dummies(sub_salary.salary, prefix='salary')
df = pd.concat([sub_salary, sal_dummies], axis='columns')
df.drop(columns='salary', inplace=True)
X = df
X.head()
code
49117600/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/hr-analytics/HR_comma_sep.csv')
left = data[data.left == 1]
right = data[data.left == 0]
data.shape
code
49117600/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/hr-analytics/HR_comma_sep.csv')
left = data[data.left == 1]
right = data[data.left == 0]
data.shape
data.groupby('left').mean()
pd.crosstab(data.salary, data.left).plot(kind='bar')
code
49117600/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/hr-analytics/HR_comma_sep.csv')
data.head()
code
106205052/cell_13
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
b = np.array([1, 1, 0])
c = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
d = np.array([[1], [0], [1]])
e = np.array([1, 1, 0])
f = np.array([[1], [2], [1]])
print(e + f)
code
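For reference, the e + f result above follows numpy's broadcasting rule: shapes (3,) and (3, 1) broadcast to (3, 3), adding e across each row and f down each column:

import numpy as np

e = np.array([1, 1, 0])          # shape (3,)
f = np.array([[1], [2], [1]])    # shape (3, 1)
expected = np.array([[2, 2, 1],
                     [3, 3, 2],
                     [2, 2, 1]])
assert (e + f == expected).all()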
106205052/cell_9
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
b = np.array([1, 1, 0])
c = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
d = np.array([[1], [0], [1]])
print(c + d)
code
106205052/cell_20
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
b = np.array([1, 1, 0])
c = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
d = np.array([[1], [0], [1]])
e = np.array([1, 1, 0])
f = np.array([[1], [2], [1]])
a = np.array([1, 2, 4])
b = np.tile(a, (2, 1))
b
a = np.array([1, 2, 3])
a.shape
b = a[0:3, np.newaxis]  # adds a trailing axis: shape (3,) -> (3, 1)
b
b.ndim
code
106205052/cell_19
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
b = np.array([1, 1, 0])
c = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
d = np.array([[1], [0], [1]])
e = np.array([1, 1, 0])
f = np.array([[1], [2], [1]])
a = np.array([1, 2, 4])
b = np.tile(a, (2, 1))
b
a = np.array([1, 2, 3])
a.shape
b = a[0:3, np.newaxis]  # adds a trailing axis: shape (3,) -> (3, 1)
b
code
106205052/cell_7
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
b = np.array([1, 1, 0])
c = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
d = np.array([[1], [0], [1]])
print('array1', c)
print('\n')
print('array2', d)
code
106205052/cell_18
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
b = np.array([1, 1, 0])
c = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
d = np.array([[1], [0], [1]])
e = np.array([1, 1, 0])
f = np.array([[1], [2], [1]])
a = np.array([1, 2, 4])
b = np.tile(a, (2, 1))
b
a = np.array([1, 2, 3])
a.shape
code
106205052/cell_8
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
b = np.array([1, 1, 0])
c = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
d = np.array([[1], [0], [1]])
print(c.shape, d.shape)
code
106205052/cell_16
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
b = np.array([1, 1, 0])
c = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
d = np.array([[1], [0], [1]])
e = np.array([1, 1, 0])
f = np.array([[1], [2], [1]])
a = np.array([1, 2, 4])
b = np.tile(a, (2, 1))
b
code
106205052/cell_12
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
b = np.array([1, 1, 0])
c = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
d = np.array([[1], [0], [1]])
e = np.array([1, 1, 0])
f = np.array([[1], [2], [1]])
print(e.shape)
print(f.shape)
code
106205052/cell_5
[ "text_plain_output_1.png" ]
import numpy as np

a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
b = np.array([1, 1, 0])
print(a.shape, b.shape)
print(a + b)
code
129003546/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()
types = data['type'].value_counts()
types.to_frame()
code
129003546/cell_4
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
code
129003546/cell_34
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.dtypes
country = data['country'].value_counts().head(10)
country.to_frame()
code
129003546/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.dtypes
code
129003546/cell_33
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.dtypes
data.head(2)
code
129003546/cell_20
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()
types = data['type'].value_counts()
types.to_frame()
x_values, y_values = (types.values, types.index)
director = data['director'].value_counts().head(10)
director.to_frame()
x_values = director.values
y_values = director.index
plt.figure(figsize=(7, 5))
sns.barplot(x=x_values, y=y_values, palette='rainbow')
plt.ylabel('director')
code
129003546/cell_29
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.dtypes
rating = data['rating']
rating.to_frame()
code
129003546/cell_39
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.dtypes
cast = data['cast'].value_counts().head(10)
cast.to_frame()
code
129003546/cell_26
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()
types = data['type'].value_counts()
types.to_frame()
x_values, y_values = (types.values, types.index)
director = data['director'].value_counts().head(10)
director.to_frame()
x_values = director.values
y_values = director.index
data.dtypes
release_year = data['release_year']
release_year.to_frame()
x_values = release_year.values
plt.figure(figsize=(10, 5))
sns.countplot(x=x_values)
plt.xlabel('year')
plt.xticks(rotation=90)
code
129003546/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
data.head(2)
code
129003546/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()
director = data['director'].value_counts().head(10)
director.to_frame()
code
129003546/cell_28
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.dtypes
data.head(2)
code
129003546/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
data.isnull().sum()
code
129003546/cell_15
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()
types = data['type'].value_counts()
types.to_frame()
x_values, y_values = (types.values, types.index)
plt.figure(figsize=(7, 5))
sns.barplot(data=types, x=x_values, y=y_values)
plt.xlabel('TV Show Movies')
code
129003546/cell_38
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.dtypes
data.head(2)
code
129003546/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
code
129003546/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.head(2)
code
129003546/cell_31
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()
types = data['type'].value_counts()
types.to_frame()
x_values, y_values = (types.values, types.index)
director = data['director'].value_counts().head(10)
director.to_frame()
x_values = director.values
y_values = director.index
data.dtypes
release_year = data['release_year']
release_year.to_frame()
x_values = release_year.values
plt.xticks(rotation=90)
rating = data['rating']
rating.to_frame()
x_values = rating.values
y_values = rating.index
plt.figure(figsize=(10, 5))
sns.countplot(x=x_values)
plt.xlabel('year')
plt.xticks(rotation=90)
code
129003546/cell_24
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.dtypes
release_year = data['release_year']
release_year.to_frame()
code
129003546/cell_22
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.head(2)
code
129003546/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()
code
129003546/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()
data.head(2)
code
129003546/cell_5
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
code
129003546/cell_36
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('//kaggle//input//netflix-shows//netflix_titles.csv')
data
data.dtypes
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()
types = data['type'].value_counts()
types.to_frame()
x_values, y_values = (types.values, types.index)
director = data['director'].value_counts().head(10)
director.to_frame()
x_values = director.values
y_values = director.index
data.dtypes
release_year = data['release_year']
release_year.to_frame()
x_values = release_year.values
plt.xticks(rotation=90)
rating = data['rating']
rating.to_frame()
x_values = rating.values
y_values = rating.index
plt.xticks(rotation=90)
country = data['country'].value_counts().head(10)
country.to_frame()
x_values = country.values
y_values = country.index
plt.figure(figsize=(10, 5))
sns.barplot(x=x_values, y=y_values, palette='rainbow')
plt.ylabel('Country')
plt.xticks(rotation=90)
code
2023611/cell_21
[ "text_plain_output_1.png" ]
from matplotlib import cm
import h5py
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
data = pd.read_csv('../input/letters.csv')
files = data['file']
letters = data['letter']
backgrounds = data['background']
f = h5py.File('../input/LetterColorImages.h5', 'r')
keys = list(f.keys())
keys
backgrounds = np.array(f[keys[0]])
tensors = np.array(f[keys[1]])
targets = np.array(f[keys[2]])
# Plot letter images
fig, ax = plt.subplots(figsize=(18, 3), nrows=1, ncols=5, sharex=True, sharey=True)
ax = ax.flatten()
for i in range(5):
    image = tensors[i * 200] / 255
    ax[i].imshow(image)
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.gcf()
ax[2].set_title('Examples of letters', fontsize=25)
fig, ax = plt.subplots(figsize=(18, 3), nrows=1, ncols=5, sharex=True, sharey=True)
ax = ax.flatten()
for i in range(5):
    image = x_test[i * 10].reshape(32, 32)
    ax[i].imshow(image, cmap=cm.bone)
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.gcf()
ax[2].set_title('Examples of original grayscaled letters', fontsize=25)
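# NOTE: x_test is assumed to come from a train/test split performed in a
# notebook cell that is not captured in this record (grayscaled 32x32x1 tensors).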
code
2023611/cell_13
[ "image_output_1.png" ]
from keras.utils import to_categorical
import h5py
import numpy as np
import pandas as pd
data = pd.read_csv('../input/letters.csv')
files = data['file']
letters = data['letter']
backgrounds = data['background']
f = h5py.File('../input/LetterColorImages.h5', 'r')
keys = list(f.keys())
keys
backgrounds = np.array(f[keys[0]])
tensors = np.array(f[keys[1]])
targets = np.array(f[keys[2]])
tensors = tensors.astype('float32') / 255
gray_tensors = np.dot(tensors[..., :3], [0.299, 0.587, 0.114])
gray_tensors = gray_tensors.reshape(-1, 32, 32, 1)
cat_targets = to_categorical(np.array(targets - 1), 33)
cat_targets.shape
backgrounds = to_categorical(backgrounds, 2)
backgrounds.shape
back_targets = np.concatenate((cat_targets, backgrounds), axis=1)
back_targets.shape
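# `targets - 1` shifts the 1-based letter labels to 0-based indices for
# to_categorical; back_targets then concatenates the 33-way letter one-hots
# with the 2-way background one-hots into a single 35-dimensional target.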
code
2023611/cell_6
[ "image_output_1.png" ]
import h5py
import numpy as np
import pandas as pd
data = pd.read_csv('../input/letters.csv')
files = data['file']
letters = data['letter']
backgrounds = data['background']
f = h5py.File('../input/LetterColorImages.h5', 'r')
keys = list(f.keys())
keys
backgrounds = np.array(f[keys[0]])
tensors = np.array(f[keys[1]])
targets = np.array(f[keys[2]])
print('Tensor shape:', tensors.shape)
print('Target shape:', targets.shape)
print('Background shape:', backgrounds.shape)
code
2023611/cell_2
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import math
import tensorflow as tf
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
import h5py
import cv2
from keras.models import Sequential, load_model, Model
from keras.layers import Input, UpSampling2D
from keras.layers import Dense, LSTM, GlobalAveragePooling1D, GlobalAveragePooling2D
from keras.layers import Activation, Flatten, Dropout, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D
import matplotlib.pylab as plt
from matplotlib import cm
code
2023611/cell_11
[ "text_plain_output_1.png" ]
from keras.utils import to_categorical
import h5py
import numpy as np
import pandas as pd
data = pd.read_csv('../input/letters.csv')
files = data['file']
letters = data['letter']
backgrounds = data['background']
f = h5py.File('../input/LetterColorImages.h5', 'r')
keys = list(f.keys())
keys
backgrounds = np.array(f[keys[0]])
tensors = np.array(f[keys[1]])
targets = np.array(f[keys[2]])
tensors = tensors.astype('float32') / 255
gray_tensors = np.dot(tensors[..., :3], [0.299, 0.587, 0.114])
gray_tensors = gray_tensors.reshape(-1, 32, 32, 1)
cat_targets = to_categorical(np.array(targets - 1), 33)
cat_targets.shape
code
2023611/cell_7
[ "image_output_1.png" ]
import h5py
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
data = pd.read_csv('../input/letters.csv')
files = data['file']
letters = data['letter']
backgrounds = data['background']
f = h5py.File('../input/LetterColorImages.h5', 'r')
keys = list(f.keys())
keys
backgrounds = np.array(f[keys[0]])
tensors = np.array(f[keys[1]])
targets = np.array(f[keys[2]])
fig, ax = plt.subplots(figsize=(18, 3), nrows=1, ncols=5, sharex=True, sharey=True)
ax = ax.flatten()
for i in range(5):
    image = tensors[i * 200] / 255
    ax[i].imshow(image)
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.gcf()
ax[2].set_title('Examples of letters', fontsize=25)
code
2023611/cell_18
[ "text_plain_output_1.png" ]
from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D
from keras.layers import Input, UpSampling2D
from keras.models import Sequential, load_model, Model

def autoencoder():
    inputs = Input(shape=(32, 32, 1))
    # Encoder: three Conv2D + MaxPooling2D stages compress 32x32x1 down to 4x4x8
    x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
    x = MaxPooling2D(padding='same')(x)
    x = Conv2D(16, 3, activation='relu', padding='same')(x)
    x = MaxPooling2D(padding='same')(x)
    x = Conv2D(8, 3, activation='relu', padding='same')(x)
    encoded = MaxPooling2D(padding='same')(x)
    # Decoder: mirror the encoder with Conv2D + UpSampling2D stages
    x = Conv2D(8, 3, activation='relu', padding='same')(encoded)
    x = UpSampling2D()(x)
    x = Conv2D(16, 3, activation='relu', padding='same')(x)
    x = UpSampling2D()(x)
    x = Conv2D(32, 3, activation='relu', padding='same')(x)
    x = UpSampling2D()(x)
    # Sigmoid output keeps reconstructed pixel values in [0, 1]
    decoded = Conv2D(1, 3, activation='sigmoid', padding='same')(x)
    autoencoder = Model(inputs, decoded)
    autoencoder.compile(optimizer='nadam', loss='binary_crossentropy')
    return autoencoder

autoencoder = autoencoder()
autoencoder.summary()
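# A minimal training sketch (assumes x_train/x_valid are the grayscaled image
# tensors prepared in earlier preprocessing cells; not part of the original cell):
#     autoencoder_history = autoencoder.fit(x_train, x_train, epochs=200,
#                                           batch_size=64, verbose=0,
#                                           validation_data=(x_valid, x_valid))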
code
2023611/cell_22
[ "text_plain_output_1.png" ]
from keras.layers import Conv2D, MaxPooling2D, GlobalMaxPooling2D
from keras.layers import Input, UpSampling2D
from keras.models import Sequential, load_model, Model
from matplotlib import cm
import h5py
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
data = pd.read_csv('../input/letters.csv')
files = data['file']
letters = data['letter']
backgrounds = data['background']
f = h5py.File('../input/LetterColorImages.h5', 'r')
keys = list(f.keys())
keys
backgrounds = np.array(f[keys[0]])
tensors = np.array(f[keys[1]])
targets = np.array(f[keys[2]])
# Plot letter images
fig, ax = plt.subplots(figsize=(18, 3), nrows=1, ncols=5, sharex=True, sharey=True)
ax = ax.flatten()
for i in range(5):
    image = tensors[i * 200] / 255
    ax[i].imshow(image)
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.gcf()
ax[2].set_title('Examples of letters', fontsize=25)

def autoencoder():
    inputs = Input(shape=(32, 32, 1))
    x = Conv2D(32, 3, activation='relu', padding='same')(inputs)
    x = MaxPooling2D(padding='same')(x)
    x = Conv2D(16, 3, activation='relu', padding='same')(x)
    x = MaxPooling2D(padding='same')(x)
    x = Conv2D(8, 3, activation='relu', padding='same')(x)
    encoded = MaxPooling2D(padding='same')(x)
    x = Conv2D(8, 3, activation='relu', padding='same')(encoded)
    x = UpSampling2D()(x)
    x = Conv2D(16, 3, activation='relu', padding='same')(x)
    x = UpSampling2D()(x)
    x = Conv2D(32, 3, activation='relu', padding='same')(x)
    x = UpSampling2D()(x)
    decoded = Conv2D(1, 3, activation='sigmoid', padding='same')(x)
    autoencoder = Model(inputs, decoded)
    autoencoder.compile(optimizer='nadam', loss='binary_crossentropy')
    return autoencoder

autoencoder = autoencoder()
autoencoder.summary()
autoencoder_history = autoencoder.fit(x_train, x_train, epochs=200, batch_size=64, verbose=0, validation_data=(x_valid, x_valid))
x_test_decoded = autoencoder.predict(x_test)
# Plot original grayscaled images
fig, ax = plt.subplots(figsize=(18, 3), nrows=1, ncols=5, sharex=True, sharey=True)
ax = ax.flatten()
for i in range(5):
    image = x_test[i * 10].reshape(32, 32)
    ax[i].imshow(image, cmap=cm.bone)
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.gcf()
ax[2].set_title('Examples of original grayscaled letters', fontsize=25)
fig, ax = plt.subplots(figsize=(18, 3), nrows=1, ncols=5, sharex=True, sharey=True)
ax = ax.flatten()
for i in range(5):
    image = x_test_decoded[i * 10].reshape(32, 32)
    ax[i].imshow(image, cmap=cm.bone)
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.gcf()
ax[2].set_title('Examples of decoded grayscaled letters', fontsize=25)
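# End-to-end cell: loads the HDF5 letter images, trains the autoencoder for
# 200 epochs (verbose=0 keeps the log silent), then plots original and
# reconstructed test letters side by side for visual comparison.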
code
2023611/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import h5py
import numpy as np
import pandas as pd
data = pd.read_csv('../input/letters.csv')
files = data['file']
letters = data['letter']
backgrounds = data['background']
f = h5py.File('../input/LetterColorImages.h5', 'r')
keys = list(f.keys())
keys
backgrounds = np.array(f[keys[0]])
tensors = np.array(f[keys[1]])
targets = np.array(f[keys[2]])
tensors = tensors.astype('float32') / 255
gray_tensors = np.dot(tensors[..., :3], [0.299, 0.587, 0.114])
gray_tensors = gray_tensors.reshape(-1, 32, 32, 1)
print('Grayscaled Tensor shape:', gray_tensors.shape)
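# The weights [0.299, 0.587, 0.114] are the standard ITU-R BT.601 luma
# coefficients for converting RGB channels to a single grayscale channel.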
code
2023611/cell_12
[ "text_plain_output_1.png" ]
from keras.utils import to_categorical
import h5py
import numpy as np
import pandas as pd
data = pd.read_csv('../input/letters.csv')
files = data['file']
letters = data['letter']
backgrounds = data['background']
f = h5py.File('../input/LetterColorImages.h5', 'r')
keys = list(f.keys())
keys
backgrounds = np.array(f[keys[0]])
tensors = np.array(f[keys[1]])
targets = np.array(f[keys[2]])
backgrounds = to_categorical(backgrounds, 2)
backgrounds.shape
code