Dataset schema (each record below lists path, screenshot_names, code, cell_type in that order):
path: string (length 13 to 17)
screenshot_names: sequence (length 1 to 873)
code: string (length 0 to 40.4k)
cell_type: stringclasses (1 value)
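The records below follow this schema directly. As a rough illustration (not part of the dump itself), a single record could be held in memory as a plain Python dict; the field names come from the schema above and the values are copied from the first record that follows, while the dict layout and the `record` name are only illustrative assumptions.

# A minimal sketch, assuming each record carries the four fields described above.
# Values are copied from the first record below; the dict layout and variable
# name are illustrative, not part of the dataset itself.
record = {
    "path": "128017162/cell_28",
    "screenshot_names": ["image_output_1.png"],
    "code": (
        "import pandas as pd\n"
        "train = pd.read_csv('/kaggle/input/hackerearth/dataset/train.csv')\n"
        "test = pd.read_csv('/kaggle/input/hackerearth/dataset/test.csv')\n"
        "preds_list = test['Image']\n"
        "preds_list"
    ),
    "cell_type": "code",
}
print(record["path"], record["cell_type"], len(record["screenshot_names"]))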
128017162/cell_28
[ "image_output_1.png" ]
import pandas as pd train = pd.read_csv('/kaggle/input/hackerearth/dataset/train.csv') test = pd.read_csv('/kaggle/input/hackerearth/dataset/test.csv') preds_list = test['Image'] preds_list
code
128017162/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd train = pd.read_csv('/kaggle/input/hackerearth/dataset/train.csv') num = len(train['Class'].unique()) print('Total Labels : ', str(num))
code
128017162/cell_15
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import Dropout, Dense, Activation, Flatten, Conv2D, MaxPool2D, BatchNormalization from tensorflow.keras.models import Sequential import pandas as pd import visualkeras as vk train = pd.read_csv('/kaggle/input/hackerearth/dataset/train.csv') num = len(train['Class'].unique()) model = Sequential() model.add(Conv2D(16, kernel_size=(5, 5), activation='relu', input_shape=(150, 150, 3))) model.add(MaxPool2D((2, 2))) model.add(Conv2D(32, kernel_size=(5, 5), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(Conv2D(64, kernel_size=(5, 5), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(Conv2D(128, kernel_size=(3, 3), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(Flatten()) model.add(Dense(512, activation='relu')) model.add(Dense(128, activation='relu')) model.add(Dense(64, activation='relu')) model.add(Dense(num, activation='softmax')) model.summary() vk.layered_view(model, legend=True)
code
128017162/cell_16
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import Dropout, Dense, Activation, Flatten, Conv2D, MaxPool2D, BatchNormalization from tensorflow.keras.models import Sequential from tensorflow.keras.utils import plot_model, load_img, to_categorical import pandas as pd train = pd.read_csv('/kaggle/input/hackerearth/dataset/train.csv') num = len(train['Class'].unique()) model = Sequential() model.add(Conv2D(16, kernel_size=(5, 5), activation='relu', input_shape=(150, 150, 3))) model.add(MaxPool2D((2, 2))) model.add(Conv2D(32, kernel_size=(5, 5), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(Conv2D(64, kernel_size=(5, 5), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(Conv2D(128, kernel_size=(3, 3), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(Flatten()) model.add(Dense(512, activation='relu')) model.add(Dense(128, activation='relu')) model.add(Dense(64, activation='relu')) model.add(Dense(num, activation='softmax')) model.summary() plot_model(model)
code
128017162/cell_38
[ "text_html_output_1.png" ]
import pandas as pd train = pd.read_csv('/kaggle/input/hackerearth/dataset/train.csv') train['Class'].unique()
code
128017162/cell_14
[ "text_html_output_1.png" ]
from tensorflow.keras.layers import Dropout, Dense, Activation, Flatten, Conv2D, MaxPool2D, BatchNormalization from tensorflow.keras.models import Sequential import pandas as pd train = pd.read_csv('/kaggle/input/hackerearth/dataset/train.csv') num = len(train['Class'].unique()) model = Sequential() model.add(Conv2D(16, kernel_size=(5, 5), activation='relu', input_shape=(150, 150, 3))) model.add(MaxPool2D((2, 2))) model.add(Conv2D(32, kernel_size=(5, 5), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(Conv2D(64, kernel_size=(5, 5), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(Conv2D(128, kernel_size=(3, 3), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(Flatten()) model.add(Dense(512, activation='relu')) model.add(Dense(128, activation='relu')) model.add(Dense(64, activation='relu')) model.add(Dense(num, activation='softmax')) model.summary()
code
128017162/cell_22
[ "image_output_1.png" ]
from tensorflow.keras.callbacks import ModelCheckpoint,EarlyStopping, Callback from tensorflow.keras.layers import Dropout, Dense, Activation, Flatten, Conv2D, MaxPool2D, BatchNormalization from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.image import ImageDataGenerator import matplotlib.pyplot as plt import pandas as pd train_dir = '/kaggle/input/hackerearth/dataset/Train Images' test_dir = '/kaggle/input/hackerearth/dataset/Test Images' train = pd.read_csv('/kaggle/input/hackerearth/dataset/train.csv') num = len(train['Class'].unique()) datagen = ImageDataGenerator(rescale=1.0 / 255.0, validation_split=0.2) train_it = datagen.flow_from_dataframe(train, directory=train_dir, x_col='Image', y_col='Class', target_size=(150, 150), class_mode='categorical', batch_size=32, subset='training', shuffle=True) valid_it = datagen.flow_from_dataframe(train, directory=train_dir, x_col='Image', y_col='Class', target_size=(150, 150), class_mode='categorical', subset='validation', batch_size=32, shuffle=True) model = Sequential() model.add(Conv2D(16, kernel_size=(5, 5), activation='relu', input_shape=(150, 150, 3))) model.add(MaxPool2D((2, 2))) model.add(Conv2D(32, kernel_size=(5, 5), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(Conv2D(64, kernel_size=(5, 5), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(Conv2D(128, kernel_size=(3, 3), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(Flatten()) model.add(Dense(512, activation='relu')) model.add(Dense(128, activation='relu')) model.add(Dense(64, activation='relu')) model.add(Dense(num, activation='softmax')) model.summary() model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) class myCallback(Callback): def on_epoch_end(self, epoch, logs={}): if logs.get('accuracy') >= 0.98: self.model.stop_training = True callback = myCallback() hist = model.fit(train_it, epochs=1500, validation_data=valid_it, callbacks=callback) plt.figure() plt.plot(hist.history['loss'], label='Train Loss', color='black') plt.plot(hist.history['val_loss'], label='Validation Loss', color='mediumvioletred', linestyle='dashed', markeredgecolor='purple', markeredgewidth=2) plt.title('Model Loss', color='darkred', size=13) plt.legend() plt.show()
code
128017162/cell_27
[ "image_output_1.png" ]
from tensorflow.keras.callbacks import ModelCheckpoint,EarlyStopping, Callback from tensorflow.keras.layers import Dropout, Dense, Activation, Flatten, Conv2D, MaxPool2D, BatchNormalization from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.image import ImageDataGenerator import numpy as np import pandas as pd train_dir = '/kaggle/input/hackerearth/dataset/Train Images' test_dir = '/kaggle/input/hackerearth/dataset/Test Images' train = pd.read_csv('/kaggle/input/hackerearth/dataset/train.csv') test = pd.read_csv('/kaggle/input/hackerearth/dataset/test.csv') num = len(train['Class'].unique()) datagen = ImageDataGenerator(rescale=1.0 / 255.0, validation_split=0.2) train_it = datagen.flow_from_dataframe(train, directory=train_dir, x_col='Image', y_col='Class', target_size=(150, 150), class_mode='categorical', batch_size=32, subset='training', shuffle=True) valid_it = datagen.flow_from_dataframe(train, directory=train_dir, x_col='Image', y_col='Class', target_size=(150, 150), class_mode='categorical', subset='validation', batch_size=32, shuffle=True) model = Sequential() model.add(Conv2D(16, kernel_size=(5, 5), activation='relu', input_shape=(150, 150, 3))) model.add(MaxPool2D((2, 2))) model.add(Conv2D(32, kernel_size=(5, 5), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(Conv2D(64, kernel_size=(5, 5), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(Conv2D(128, kernel_size=(3, 3), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(Flatten()) model.add(Dense(512, activation='relu')) model.add(Dense(128, activation='relu')) model.add(Dense(64, activation='relu')) model.add(Dense(num, activation='softmax')) model.summary() model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) class myCallback(Callback): def on_epoch_end(self, epoch, logs={}): if logs.get('accuracy') >= 0.98: self.model.stop_training = True callback = myCallback() hist = model.fit(train_it, epochs=1500, validation_data=valid_it, callbacks=callback) test_datagen = ImageDataGenerator(rescale=1.0 / 255.0) SIZE = (150, 150, 3) test_generator = test_datagen.flow_from_dataframe(test, directory=test_dir, x_col='Image', y_col=None, class_mode=None, target_size=(150, 150)) preds = model.predict(test_generator) y_pred = [np.argmax(probas) for probas in preds] len(y_pred)
code
128017162/cell_37
[ "text_plain_output_1.png" ]
from tensorflow.keras.callbacks import ModelCheckpoint,EarlyStopping, Callback from tensorflow.keras.layers import Dropout, Dense, Activation, Flatten, Conv2D, MaxPool2D, BatchNormalization from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.image import ImageDataGenerator import numpy as np import pandas as pd train_dir = '/kaggle/input/hackerearth/dataset/Train Images' test_dir = '/kaggle/input/hackerearth/dataset/Test Images' train = pd.read_csv('/kaggle/input/hackerearth/dataset/train.csv') test = pd.read_csv('/kaggle/input/hackerearth/dataset/test.csv') num = len(train['Class'].unique()) datagen = ImageDataGenerator(rescale=1.0 / 255.0, validation_split=0.2) train_it = datagen.flow_from_dataframe(train, directory=train_dir, x_col='Image', y_col='Class', target_size=(150, 150), class_mode='categorical', batch_size=32, subset='training', shuffle=True) valid_it = datagen.flow_from_dataframe(train, directory=train_dir, x_col='Image', y_col='Class', target_size=(150, 150), class_mode='categorical', subset='validation', batch_size=32, shuffle=True) model = Sequential() model.add(Conv2D(16, kernel_size=(5, 5), activation='relu', input_shape=(150, 150, 3))) model.add(MaxPool2D((2, 2))) model.add(Conv2D(32, kernel_size=(5, 5), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(Conv2D(64, kernel_size=(5, 5), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(Conv2D(128, kernel_size=(3, 3), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(Flatten()) model.add(Dense(512, activation='relu')) model.add(Dense(128, activation='relu')) model.add(Dense(64, activation='relu')) model.add(Dense(num, activation='softmax')) model.summary() model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) class myCallback(Callback): def on_epoch_end(self, epoch, logs={}): if logs.get('accuracy') >= 0.98: self.model.stop_training = True callback = myCallback() hist = model.fit(train_it, epochs=1500, validation_data=valid_it, callbacks=callback) test_datagen = ImageDataGenerator(rescale=1.0 / 255.0) SIZE = (150, 150, 3) test_generator = test_datagen.flow_from_dataframe(test, directory=test_dir, x_col='Image', y_col=None, class_mode=None, target_size=(150, 150)) preds = model.predict(test_generator) y_pred = [np.argmax(probas) for probas in preds] preds_list = test['Image'] preds_list labels = train['Class'].unique() prediction = [] for i in y_pred: if i == 0: prediction.append(labels[0]) elif i == 1: prediction.append(labels[1]) elif i == 2: prediction.append(labels[2]) else: prediction.append(labels[3]) results = pd.DataFrame({'Image': preds_list, 'Class': prediction}) results['Class'].unique()
code
128017162/cell_36
[ "text_plain_output_1.png" ]
from tensorflow.keras.callbacks import ModelCheckpoint,EarlyStopping, Callback from tensorflow.keras.layers import Dropout, Dense, Activation, Flatten, Conv2D, MaxPool2D, BatchNormalization from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing.image import ImageDataGenerator import numpy as np import pandas as pd train_dir = '/kaggle/input/hackerearth/dataset/Train Images' test_dir = '/kaggle/input/hackerearth/dataset/Test Images' train = pd.read_csv('/kaggle/input/hackerearth/dataset/train.csv') test = pd.read_csv('/kaggle/input/hackerearth/dataset/test.csv') num = len(train['Class'].unique()) datagen = ImageDataGenerator(rescale=1.0 / 255.0, validation_split=0.2) train_it = datagen.flow_from_dataframe(train, directory=train_dir, x_col='Image', y_col='Class', target_size=(150, 150), class_mode='categorical', batch_size=32, subset='training', shuffle=True) valid_it = datagen.flow_from_dataframe(train, directory=train_dir, x_col='Image', y_col='Class', target_size=(150, 150), class_mode='categorical', subset='validation', batch_size=32, shuffle=True) model = Sequential() model.add(Conv2D(16, kernel_size=(5, 5), activation='relu', input_shape=(150, 150, 3))) model.add(MaxPool2D((2, 2))) model.add(Conv2D(32, kernel_size=(5, 5), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(Conv2D(64, kernel_size=(5, 5), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(Conv2D(128, kernel_size=(3, 3), activation='relu')) model.add(MaxPool2D((2, 2))) model.add(Flatten()) model.add(Dense(512, activation='relu')) model.add(Dense(128, activation='relu')) model.add(Dense(64, activation='relu')) model.add(Dense(num, activation='softmax')) model.summary() model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) class myCallback(Callback): def on_epoch_end(self, epoch, logs={}): if logs.get('accuracy') >= 0.98: self.model.stop_training = True callback = myCallback() hist = model.fit(train_it, epochs=1500, validation_data=valid_it, callbacks=callback) test_datagen = ImageDataGenerator(rescale=1.0 / 255.0) SIZE = (150, 150, 3) test_generator = test_datagen.flow_from_dataframe(test, directory=test_dir, x_col='Image', y_col=None, class_mode=None, target_size=(150, 150)) preds = model.predict(test_generator) y_pred = [np.argmax(probas) for probas in preds] preds_list = test['Image'] preds_list labels = train['Class'].unique() prediction = [] for i in y_pred: if i == 0: prediction.append(labels[0]) elif i == 1: prediction.append(labels[1]) elif i == 2: prediction.append(labels[2]) else: prediction.append(labels[3]) results = pd.DataFrame({'Image': preds_list, 'Class': prediction}) results
code
122265193/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/air-concentration-for-the-chernobyl-disaster/data.csv') data.info()
code
122265193/cell_11
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/air-concentration-for-the-chernobyl-disaster/data.csv') m = data['I_131_(Bq/m3)'].str.contains('L|\\?', regex=True, na=False) data.loc[m, 'I_131_(Bq/m3)'] = None m = data['Cs_134_(Bq/m3)'].str.contains('N|\\?', regex=True) data.loc[m, 'Cs_134_(Bq/m3)'] = None m = data['Cs_137_(Bq/m3)'].str.contains('N|\\?', regex=True) data.loc[m, 'Cs_137_(Bq/m3)'] = None data.info()
code
122265193/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/air-concentration-for-the-chernobyl-disaster/data.csv') print(sorted(data['I_131_(Bq/m3)'].unique())[-5:]) print(sorted(data['Cs_134_(Bq/m3)'].unique())[-5:]) print(sorted(data['Cs_137_(Bq/m3)'].unique())[-5:])
code
122265193/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/air-concentration-for-the-chernobyl-disaster/data.csv') data
code
122265193/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/air-concentration-for-the-chernobyl-disaster/data.csv') m = data['I_131_(Bq/m3)'].str.contains('L|\\?', regex=True, na=False) data.loc[m, 'I_131_(Bq/m3)'] = None m = data['Cs_134_(Bq/m3)'].str.contains('N|\\?', regex=True) data.loc[m, 'Cs_134_(Bq/m3)'] = None m = data['Cs_137_(Bq/m3)'].str.contains('N|\\?', regex=True) data.loc[m, 'Cs_137_(Bq/m3)'] = None data.Ville.unique()
code
16154606/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from pytorch_pretrained_bert import BertConfig from pytorch_pretrained_bert import BertTokenizer, BertForSequenceClassification, BertAdam from tqdm import tqdm import numpy as np import pandas as pd import torch device = torch.device('cuda') def convert_lines(example, max_seq_length, tokenizer): max_seq_length -= 2 all_tokens = [] longer = 0 for text in tqdm(example): tokens_a = tokenizer.tokenize(text) if len(tokens_a) > max_seq_length: tokens_a = tokens_a[:max_seq_length] longer += 1 one_token = tokenizer.convert_tokens_to_ids(['[CLS]'] + tokens_a + ['[SEP]']) + [0] * (max_seq_length - len(tokens_a)) all_tokens.append(one_token) return np.array(all_tokens) MAX_SEQUENCE_LENGTH = 200 SEED = 42 BATCH_SIZE = 32 BERT_MODEL_PATH = '../input/bert-pretrained-models/uncased_l-12_h-768_a-12/uncased_L-12_H-768_A-12/' np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True bert_config = BertConfig('../input/bertinference/bert_config.json') tokenizer = BertTokenizer.from_pretrained(BERT_MODEL_PATH, cache_dir=None, do_lower_case=True) BERT_SMALL_PATH = '../input/bert-pretrained-models/uncased_l-12_h-768_a-12/uncased_L-12_H-768_A-12/' BERT_LARGE_PATH = '../input/bert-pretrained-models/uncased_l-24_h-1024_a-16/uncased_L-24_H-1024_A-16/' test_df = pd.read_csv('../input/jigsaw-unintended-bias-in-toxicity-classification/test.csv') test_df['comment_text'] = test_df['comment_text'].astype(str) X_test = convert_lines(test_df['comment_text'].fillna('DUMMY_VALUE'), MAX_SEQUENCE_LENGTH, tokenizer) model = BertForSequenceClassification(bert_config, num_labels=1) model.load_state_dict(torch.load('../input/bertinference/pytorch_bert_6.bin')) model.to(device) for param in model.parameters(): param.requires_grad = False model.eval() test_preds = np.zeros(len(X_test)) test = torch.utils.data.TensorDataset(torch.tensor(X_test, dtype=torch.long)) test_loader = torch.utils.data.DataLoader(test, batch_size=32, shuffle=False) tk0 = tqdm(test_loader) for i, (x_batch,) in enumerate(tk0): pred = model(x_batch.to(device), attention_mask=(x_batch > 0).to(device), labels=None) test_preds[i * 32:(i + 1) * 32] = pred[:, 0].detach().cpu().squeeze().numpy() test_pred = torch.sigmoid(torch.tensor(test_preds)).numpy().ravel()
code
16154606/cell_6
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from pytorch_pretrained_bert import BertConfig from pytorch_pretrained_bert import BertTokenizer, BertForSequenceClassification, BertAdam from tqdm import tqdm import numpy as np import pandas as pd import torch device = torch.device('cuda') def convert_lines(example, max_seq_length, tokenizer): max_seq_length -= 2 all_tokens = [] longer = 0 for text in tqdm(example): tokens_a = tokenizer.tokenize(text) if len(tokens_a) > max_seq_length: tokens_a = tokens_a[:max_seq_length] longer += 1 one_token = tokenizer.convert_tokens_to_ids(['[CLS]'] + tokens_a + ['[SEP]']) + [0] * (max_seq_length - len(tokens_a)) all_tokens.append(one_token) return np.array(all_tokens) MAX_SEQUENCE_LENGTH = 200 SEED = 42 BATCH_SIZE = 32 BERT_MODEL_PATH = '../input/bert-pretrained-models/uncased_l-12_h-768_a-12/uncased_L-12_H-768_A-12/' np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True bert_config = BertConfig('../input/bertinference/bert_config.json') tokenizer = BertTokenizer.from_pretrained(BERT_MODEL_PATH, cache_dir=None, do_lower_case=True) BERT_SMALL_PATH = '../input/bert-pretrained-models/uncased_l-12_h-768_a-12/uncased_L-12_H-768_A-12/' BERT_LARGE_PATH = '../input/bert-pretrained-models/uncased_l-24_h-1024_a-16/uncased_L-24_H-1024_A-16/' test_df = pd.read_csv('../input/jigsaw-unintended-bias-in-toxicity-classification/test.csv') test_df['comment_text'] = test_df['comment_text'].astype(str) X_test = convert_lines(test_df['comment_text'].fillna('DUMMY_VALUE'), MAX_SEQUENCE_LENGTH, tokenizer)
code
16154606/cell_7
[ "text_plain_output_1.png" ]
from pytorch_pretrained_bert import BertConfig from pytorch_pretrained_bert import BertTokenizer, BertForSequenceClassification, BertAdam from tqdm import tqdm import numpy as np import torch device = torch.device('cuda') def convert_lines(example, max_seq_length, tokenizer): max_seq_length -= 2 all_tokens = [] longer = 0 for text in tqdm(example): tokens_a = tokenizer.tokenize(text) if len(tokens_a) > max_seq_length: tokens_a = tokens_a[:max_seq_length] longer += 1 one_token = tokenizer.convert_tokens_to_ids(['[CLS]'] + tokens_a + ['[SEP]']) + [0] * (max_seq_length - len(tokens_a)) all_tokens.append(one_token) return np.array(all_tokens) MAX_SEQUENCE_LENGTH = 200 SEED = 42 BATCH_SIZE = 32 BERT_MODEL_PATH = '../input/bert-pretrained-models/uncased_l-12_h-768_a-12/uncased_L-12_H-768_A-12/' np.random.seed(SEED) torch.manual_seed(SEED) torch.cuda.manual_seed(SEED) torch.backends.cudnn.deterministic = True bert_config = BertConfig('../input/bertinference/bert_config.json') tokenizer = BertTokenizer.from_pretrained(BERT_MODEL_PATH, cache_dir=None, do_lower_case=True) BERT_SMALL_PATH = '../input/bert-pretrained-models/uncased_l-12_h-768_a-12/uncased_L-12_H-768_A-12/' BERT_LARGE_PATH = '../input/bert-pretrained-models/uncased_l-24_h-1024_a-16/uncased_L-24_H-1024_A-16/' model = BertForSequenceClassification(bert_config, num_labels=1) model.load_state_dict(torch.load('../input/bertinference/pytorch_bert_6.bin')) model.to(device) for param in model.parameters(): param.requires_grad = False model.eval()
code
73095926/cell_9
[ "text_plain_output_1.png" ]
from sklearn import model_selection import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/adult-census-income/adult.csv') df.income.value_counts() from sklearn import model_selection df['kfold'] = -1 df = df.sample(frac=1).reset_index(drop=True) y = df.income.values kf = model_selection.StratifiedKFold(n_splits=5) for fold, (train_, valid_) in enumerate(kf.split(X=df, y=y)): df.loc[valid_, 'kfold'] = fold df.to_csv('./adult_folds.csv', index=False) df_fold = pd.read_csv('./adult_folds.csv') df_train = df[df.kfold != 0].reset_index(drop=True) df_train.income.isnull().sum()
code
73095926/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/adult-census-income/adult.csv') df.income.value_counts()
code
73095926/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
73095926/cell_15
[ "text_plain_output_1.png" ]
from sklearn import linear_model from sklearn import metrics from sklearn import model_selection from sklearn import preprocessing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import warnings import xgboost as xgb df = pd.read_csv('../input/adult-census-income/adult.csv') df.income.value_counts() from sklearn import model_selection df['kfold'] = -1 df = df.sample(frac=1).reset_index(drop=True) y = df.income.values kf = model_selection.StratifiedKFold(n_splits=5) for fold, (train_, valid_) in enumerate(kf.split(X=df, y=y)): df.loc[valid_, 'kfold'] = fold df.to_csv('./adult_folds.csv', index=False) df_fold = pd.read_csv('./adult_folds.csv') df_train = df[df.kfold != 0].reset_index(drop=True) df_train.income.isnull().sum() from sklearn import linear_model from sklearn import metrics from sklearn import preprocessing def run(fold): df = pd.read_csv('./adult_folds.csv') num_cols = ['fnlwgt', 'age', 'capital.gain', 'capital.loss', 'hours.per.week'] df = df.drop(num_cols, axis=1) target_mapping = {'<=50K': 0, '>50K': 1} df.loc[:, 'income'] = df.income.map(target_mapping) features = [f for f in df.columns if f not in ('kfold', 'income')] for col in features: df.loc[:, col] = df[col].astype(str).fillna('NONE') df_train = df[df.kfold != fold].reset_index(drop=True) df_valid = df[df.kfold == fold].reset_index(drop=True) ohe = preprocessing.OneHotEncoder() All_data = pd.concat([df_train[features], df_valid[features]], axis=0) ohe.fit(All_data[features]) x_train = ohe.transform(df_train[features]) x_valid = ohe.transform(df_valid[features]) model = linear_model.LogisticRegression(solver='liblinear') model.fit(x_train, df_train.income.values) valid_preds = model.predict_proba(x_valid)[:, 1] auc = metrics.roc_auc_score(df_valid.income.values, valid_preds) if __name__ == '__main__': for fold_ in range(5): run(fold_) import warnings warnings.filterwarnings('ignore') import xgboost as xgb def Xgboost_fold(fold): df = pd.read_csv('./adult_folds.csv') num_cols = ['fnlwgt', 'age', 'capital.gain', 'capital.loss', 'hours.per.week'] df = df.drop(num_cols, axis=1) target_mapping = {'<=50K': 0, '>50K': 1} df.loc[:, 'income'] = df.income.map(target_mapping) features = [f for f in df.columns if f not in ('kfold', 'income')] for col in features: df.loc[:, col] = df[col].astype(str).fillna('NONE') for col in features: lbl = preprocessing.LabelEncoder() lbl.fit(df[col]) df.loc[:, col] = lbl.transform(df[col]) df_train = df[df.kfold != fold].reset_index(drop=True) df_valid = df[df.kfold == fold].reset_index(drop=True) x_train = df_train[features].values x_valid = df_valid[features].values model = xgb.XGBClassifier(n_jobs=-1) model.fit(x_train, df_train.income.values) valid_preds = model.predict_proba(x_valid)[:, 1] auc = metrics.roc_auc_score(df_valid.income.values, valid_preds) if __name__ == '__main__': for fold_ in range(5): Xgboost_fold(fold_) def Xgboost_fold(fold): df = pd.read_csv('./adult_folds.csv') num_cols = ['fnlwgt', 'age', 'capital.gain', 'capital.loss', 'hours.per.week'] target_mapping = {'<=50K': 0, '>50K': 1} df.loc[:, 'income'] = df.income.map(target_mapping) features = [f for f in df.columns if f not in ('kfold', 'income')] for col in features: if col not in num_cols: df.loc[:, col] = df[col].astype(str).fillna('NONE') for col in features: if col not in num_cols: lbl = preprocessing.LabelEncoder() lbl.fit(df[col]) df.loc[:, col] = lbl.transform(df[col]) df_train = df[df.kfold != fold].reset_index(drop=True) df_valid = df[df.kfold == fold].reset_index(drop=True) x_train = df_train[features].values x_valid = df_valid[features].values model = xgb.XGBClassifier(n_jobs=-1) model.fit(x_train, df_train.income.values) valid_preds = model.predict_proba(x_valid)[:, 1] auc = metrics.roc_auc_score(df_valid.income.values, valid_preds) print(f'Fold = {fold}, AUC = {auc}') if __name__ == '__main__': for fold_ in range(5): Xgboost_fold(fold_)
code
73095926/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/adult-census-income/adult.csv') df.income.value_counts() df.head()
code
73095926/cell_10
[ "text_plain_output_1.png" ]
from sklearn import linear_model from sklearn import metrics from sklearn import model_selection from sklearn import preprocessing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/adult-census-income/adult.csv') df.income.value_counts() from sklearn import model_selection df['kfold'] = -1 df = df.sample(frac=1).reset_index(drop=True) y = df.income.values kf = model_selection.StratifiedKFold(n_splits=5) for fold, (train_, valid_) in enumerate(kf.split(X=df, y=y)): df.loc[valid_, 'kfold'] = fold df.to_csv('./adult_folds.csv', index=False) df_fold = pd.read_csv('./adult_folds.csv') df_train = df[df.kfold != 0].reset_index(drop=True) df_train.income.isnull().sum() from sklearn import linear_model from sklearn import metrics from sklearn import preprocessing def run(fold): df = pd.read_csv('./adult_folds.csv') num_cols = ['fnlwgt', 'age', 'capital.gain', 'capital.loss', 'hours.per.week'] df = df.drop(num_cols, axis=1) target_mapping = {'<=50K': 0, '>50K': 1} df.loc[:, 'income'] = df.income.map(target_mapping) features = [f for f in df.columns if f not in ('kfold', 'income')] for col in features: df.loc[:, col] = df[col].astype(str).fillna('NONE') df_train = df[df.kfold != fold].reset_index(drop=True) df_valid = df[df.kfold == fold].reset_index(drop=True) ohe = preprocessing.OneHotEncoder() All_data = pd.concat([df_train[features], df_valid[features]], axis=0) ohe.fit(All_data[features]) x_train = ohe.transform(df_train[features]) x_valid = ohe.transform(df_valid[features]) model = linear_model.LogisticRegression(solver='liblinear') model.fit(x_train, df_train.income.values) valid_preds = model.predict_proba(x_valid)[:, 1] auc = metrics.roc_auc_score(df_valid.income.values, valid_preds) print(f'Fold = {fold}, AUC = {auc}') if __name__ == '__main__': for fold_ in range(5): run(fold_)
code
73095926/cell_12
[ "text_html_output_1.png" ]
from sklearn import linear_model from sklearn import metrics from sklearn import model_selection from sklearn import preprocessing import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import warnings import xgboost as xgb df = pd.read_csv('../input/adult-census-income/adult.csv') df.income.value_counts() from sklearn import model_selection df['kfold'] = -1 df = df.sample(frac=1).reset_index(drop=True) y = df.income.values kf = model_selection.StratifiedKFold(n_splits=5) for fold, (train_, valid_) in enumerate(kf.split(X=df, y=y)): df.loc[valid_, 'kfold'] = fold df.to_csv('./adult_folds.csv', index=False) df_fold = pd.read_csv('./adult_folds.csv') df_train = df[df.kfold != 0].reset_index(drop=True) df_train.income.isnull().sum() from sklearn import linear_model from sklearn import metrics from sklearn import preprocessing def run(fold): df = pd.read_csv('./adult_folds.csv') num_cols = ['fnlwgt', 'age', 'capital.gain', 'capital.loss', 'hours.per.week'] df = df.drop(num_cols, axis=1) target_mapping = {'<=50K': 0, '>50K': 1} df.loc[:, 'income'] = df.income.map(target_mapping) features = [f for f in df.columns if f not in ('kfold', 'income')] for col in features: df.loc[:, col] = df[col].astype(str).fillna('NONE') df_train = df[df.kfold != fold].reset_index(drop=True) df_valid = df[df.kfold == fold].reset_index(drop=True) ohe = preprocessing.OneHotEncoder() All_data = pd.concat([df_train[features], df_valid[features]], axis=0) ohe.fit(All_data[features]) x_train = ohe.transform(df_train[features]) x_valid = ohe.transform(df_valid[features]) model = linear_model.LogisticRegression(solver='liblinear') model.fit(x_train, df_train.income.values) valid_preds = model.predict_proba(x_valid)[:, 1] auc = metrics.roc_auc_score(df_valid.income.values, valid_preds) if __name__ == '__main__': for fold_ in range(5): run(fold_) import warnings warnings.filterwarnings('ignore') import xgboost as xgb def Xgboost_fold(fold): df = pd.read_csv('./adult_folds.csv') num_cols = ['fnlwgt', 'age', 'capital.gain', 'capital.loss', 'hours.per.week'] df = df.drop(num_cols, axis=1) target_mapping = {'<=50K': 0, '>50K': 1} df.loc[:, 'income'] = df.income.map(target_mapping) features = [f for f in df.columns if f not in ('kfold', 'income')] for col in features: df.loc[:, col] = df[col].astype(str).fillna('NONE') for col in features: lbl = preprocessing.LabelEncoder() lbl.fit(df[col]) df.loc[:, col] = lbl.transform(df[col]) df_train = df[df.kfold != fold].reset_index(drop=True) df_valid = df[df.kfold == fold].reset_index(drop=True) x_train = df_train[features].values x_valid = df_valid[features].values model = xgb.XGBClassifier(n_jobs=-1) model.fit(x_train, df_train.income.values) valid_preds = model.predict_proba(x_valid)[:, 1] auc = metrics.roc_auc_score(df_valid.income.values, valid_preds) print(f'Fold = {fold}, AUC = {auc}') if __name__ == '__main__': for fold_ in range(5): Xgboost_fold(fold_)
code
73095926/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/adult-census-income/adult.csv') df.income.value_counts() df['income'].isnull().sum()
code
90157865/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd import plotly.express as px vaults = pd.read_csv('../input/psolvaults/output.csv') solPrice = 80 vaults['debt'] = vaults['debtAmount'] / 10 ** vaults['decimal'] vaults['debtValue'] = vaults['debt'] * solPrice vaults['collateral'] = vaults['collateralAmount'] / 10 ** vaults['decimal'] vaults['collateralValue'] = vaults['collateral'] * solPrice vaults
code
73088459/cell_21
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score,auc,classification_report,confusion_matrix,mean_squared_error, precision_score, recall_score,roc_curve from sklearn.model_selection import cross_val_score,cross_val_predict,cross_validate,train_test_split,GridSearchCV,KFold,RepeatedKFold,learning_curve,RandomizedSearchCV,StratifiedKFold from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder from xgboost import XGBRegressor import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import time train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col='id') test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col='id') full_df = train.copy() fulltest_df = test.copy() preprocessing1 = 'none' preprocessing2 = 'none' y = train.target train.drop(['target'], axis=1, inplace=True) preprocessing1 = 'ordinal encoding' useful_features = [c for c in train.columns if c not in ('id', 'target', 'kfold')] Cfeatures = [col for col in useful_features if 'cat' in col] ordinal_encoder = OrdinalEncoder() train[Cfeatures] = ordinal_encoder.fit_transform(train[Cfeatures]) test[Cfeatures] = ordinal_encoder.transform(test[Cfeatures]) preprocessing1 = 'ordinal & one-hot encoding' Nfeatures = [cname for cname in train.columns if train[cname].dtype in ['int64', 'float64']] Cfeatures = [col for col in useful_features if 'cat' in col] low_card_columns = [cname for cname in train.columns if train[cname].nunique() < 10 and train[cname].dtype == 'object'] high_card_columns = [cname for cname in train.columns if train[cname].nunique() >= 10 and train[cname].dtype == 'object'] Xtrain = train.copy() Xtest = test.copy() ordinal_encoder = OrdinalEncoder() train[high_card_columns] = ordinal_encoder.fit_transform(train[high_card_columns]) test[high_card_columns] = ordinal_encoder.transform(test[high_card_columns]) OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False) OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(train[low_card_columns])) OH_cols_test = pd.DataFrame(OH_encoder.transform(test[low_card_columns])) OH_cols_train.index = train.index OH_cols_test.index = test.index all_cols = OH_cols_train.columns new_cols = [i for i in all_cols if isinstance(i, (int, float))] OH_cols_train = OH_cols_train[new_cols].add_prefix('cat_encode_') OH_cols_test = OH_cols_test[new_cols].add_prefix('cat_encode_') num_X_train = train.drop(low_card_columns, axis=1) num_X_test = test.drop(low_card_columns, axis=1) train = pd.concat([num_X_train, OH_cols_train], axis=1) test = pd.concat([num_X_test, OH_cols_test], axis=1) useful_features = [c for c in train.columns if c not in ('id', 'target', 'kfold')] Nfeatures = [cname for cname in train.columns if train[cname].dtype in ['int64', 'float64']] train = train[Nfeatures] test = test[Nfeatures] X_train, X_valid, y_train, y_valid = train_test_split(train, y, train_size=0.025, test_size=0.025, random_state=0) train_size = 0.025 test = test PreprocessPerformanced_df = pd.DataFrame(columns=['modelname', 'preprocessing1', 'preprocessing2', 'datashape', 'trainsize', 'mean_squared_error']) modelname = 'XGBRegressor' preprocessing1 = 'none' preprocessing2 = 'none' modelname = 'XGBRegressor' trainingshape = train.shape model = XGBRegressor(n_estimators=1000, learning_rate=0.03, random_state=1, n_jobs=2) model.fit(X_train, y_train, early_stopping_rounds=20, eval_set=[(X_valid, y_valid)], verbose=False) preds_valid = model.predict(X_valid) mse_score = mean_squared_error(y_valid, preds_valid, squared=False) PreprocessPerformanced_df = PreprocessPerformanced_df.append({'modelname': modelname, 'preprocessing1': preprocessing1, 'preprocessing2': preprocessing2, 'datashape': trainingshape, 'trainsize': train_size, 'mean_squared_error': mse_score}, ignore_index=True) predictions = model.predict(test) output = pd.DataFrame({'Id': test.index, 'target': predictions}) output.to_csv('basic_xgboost_submission.csv', index=False) PreprocessPerformanced_df = PreprocessPerformanced_df.sort_values('mean_squared_error', ascending=True) print(PreprocessPerformanced_df) filename = 'preprocessing_' + time.strftime('%Y_%m_%d_%H_%M') + '.csv' output = PreprocessPerformanced_df print('\nreview saved as', filename)
code
73088459/cell_4
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd from scipy.stats import norm, randint from math import ceil import time import os import warnings warnings.filterwarnings('ignore') import matplotlib.pyplot as plt import seaborn as sns from catboost import CatBoostRegressor from sklearn.compose import ColumnTransformer from sklearn.datasets import make_regression from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier, ExtraTreesClassifier, GradientBoostingClassifier, GradientBoostingRegressor, RandomForestClassifier, RandomForestRegressor, VotingClassifier from sklearn.feature_selection import mutual_info_regression from sklearn.impute import SimpleImputer from sklearn.linear_model import LogisticRegression, Perceptron, SGDClassifier, LogisticRegression, PassiveAggressiveClassifier, RidgeClassifierCV, Ridge from sklearn.metrics import accuracy_score, auc, classification_report, confusion_matrix, mean_squared_error, precision_score, recall_score, roc_curve from sklearn.metrics import mean_squared_error as MSE from sklearn.model_selection import cross_val_score, cross_val_predict, cross_validate, train_test_split, GridSearchCV, KFold, RepeatedKFold, learning_curve, RandomizedSearchCV, StratifiedKFold from sklearn.multiclass import OneVsRestClassifier from sklearn.naive_bayes import GaussianNB from sklearn.pipeline import Pipeline from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder from sklearn.svm import SVC, LinearSVC, SVR from sklearn.tree import DecisionTreeClassifier from sklearn import ensemble, linear_model, neighbors, svm, tree, model_selection, preprocessing from sklearn import utils from xgboost import XGBRegressor from lightgbm import LGBMRegressor import lightgbm as lgbm for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
73088459/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col='id') test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col='id') full_df = train.copy() fulltest_df = test.copy() print('Data Import Complete') preprocessing1 = 'none' preprocessing2 = 'none' y = train.target train.drop(['target'], axis=1, inplace=True) print('Target data separated')
code
73088459/cell_19
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score,auc,classification_report,confusion_matrix,mean_squared_error, precision_score, recall_score,roc_curve from sklearn.model_selection import cross_val_score,cross_val_predict,cross_validate,train_test_split,GridSearchCV,KFold,RepeatedKFold,learning_curve,RandomizedSearchCV,StratifiedKFold from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder from xgboost import XGBRegressor import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col='id') test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col='id') full_df = train.copy() fulltest_df = test.copy() preprocessing1 = 'none' preprocessing2 = 'none' y = train.target train.drop(['target'], axis=1, inplace=True) preprocessing1 = 'ordinal encoding' useful_features = [c for c in train.columns if c not in ('id', 'target', 'kfold')] Cfeatures = [col for col in useful_features if 'cat' in col] ordinal_encoder = OrdinalEncoder() train[Cfeatures] = ordinal_encoder.fit_transform(train[Cfeatures]) test[Cfeatures] = ordinal_encoder.transform(test[Cfeatures]) preprocessing1 = 'ordinal & one-hot encoding' Nfeatures = [cname for cname in train.columns if train[cname].dtype in ['int64', 'float64']] Cfeatures = [col for col in useful_features if 'cat' in col] low_card_columns = [cname for cname in train.columns if train[cname].nunique() < 10 and train[cname].dtype == 'object'] high_card_columns = [cname for cname in train.columns if train[cname].nunique() >= 10 and train[cname].dtype == 'object'] Xtrain = train.copy() Xtest = test.copy() ordinal_encoder = OrdinalEncoder() train[high_card_columns] = ordinal_encoder.fit_transform(train[high_card_columns]) test[high_card_columns] = ordinal_encoder.transform(test[high_card_columns]) OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False) OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(train[low_card_columns])) OH_cols_test = pd.DataFrame(OH_encoder.transform(test[low_card_columns])) OH_cols_train.index = train.index OH_cols_test.index = test.index all_cols = OH_cols_train.columns new_cols = [i for i in all_cols if isinstance(i, (int, float))] OH_cols_train = OH_cols_train[new_cols].add_prefix('cat_encode_') OH_cols_test = OH_cols_test[new_cols].add_prefix('cat_encode_') num_X_train = train.drop(low_card_columns, axis=1) num_X_test = test.drop(low_card_columns, axis=1) train = pd.concat([num_X_train, OH_cols_train], axis=1) test = pd.concat([num_X_test, OH_cols_test], axis=1) useful_features = [c for c in train.columns if c not in ('id', 'target', 'kfold')] Nfeatures = [cname for cname in train.columns if train[cname].dtype in ['int64', 'float64']] train = train[Nfeatures] test = test[Nfeatures] X_train, X_valid, y_train, y_valid = train_test_split(train, y, train_size=0.025, test_size=0.025, random_state=0) train_size = 0.025 test = test PreprocessPerformanced_df = pd.DataFrame(columns=['modelname', 'preprocessing1', 'preprocessing2', 'datashape', 'trainsize', 'mean_squared_error']) modelname = 'XGBRegressor' preprocessing1 = 'none' preprocessing2 = 'none' modelname = 'XGBRegressor' trainingshape = train.shape print(trainingshape) model = XGBRegressor(n_estimators=1000, learning_rate=0.03, random_state=1, n_jobs=2) model.fit(X_train, y_train, early_stopping_rounds=20, eval_set=[(X_valid, y_valid)], verbose=False) preds_valid = model.predict(X_valid) mse_score = mean_squared_error(y_valid, preds_valid, squared=False) print(mse_score) PreprocessPerformanced_df = PreprocessPerformanced_df.append({'modelname': modelname, 'preprocessing1': preprocessing1, 'preprocessing2': preprocessing2, 'datashape': trainingshape, 'trainsize': train_size, 'mean_squared_error': mse_score}, ignore_index=True) print('Results added to comparison file') predictions = model.predict(test) output = pd.DataFrame({'Id': test.index, 'target': predictions}) output.to_csv('basic_xgboost_submission.csv', index=False) print('basic xgboost submission complete')
code
73088459/cell_8
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col='id') test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col='id') full_df = train.copy() fulltest_df = test.copy() preprocessing1 = 'none' preprocessing2 = 'none' y = train.target train.drop(['target'], axis=1, inplace=True) preprocessing1 = 'ordinal encoding' useful_features = [c for c in train.columns if c not in ('id', 'target', 'kfold')] Cfeatures = [col for col in useful_features if 'cat' in col] print(Cfeatures) ordinal_encoder = OrdinalEncoder() train[Cfeatures] = ordinal_encoder.fit_transform(train[Cfeatures]) test[Cfeatures] = ordinal_encoder.transform(test[Cfeatures]) print('All category columns converted to ordinal')
code
73088459/cell_17
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col='id') test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col='id') full_df = train.copy() fulltest_df = test.copy() preprocessing1 = 'none' preprocessing2 = 'none' y = train.target train.drop(['target'], axis=1, inplace=True) preprocessing1 = 'ordinal encoding' Cfeatures = [col for col in useful_features if 'cat' in col] ordinal_encoder = OrdinalEncoder() train[Cfeatures] = ordinal_encoder.fit_transform(train[Cfeatures]) test[Cfeatures] = ordinal_encoder.transform(test[Cfeatures]) preprocessing1 = 'ordinal & ione-hot encoding' Nfeatures = [cname for cname in train.columns if train[cname].dtype in ['int64', 'float64']] Cfeatures = [col for col in useful_features if 'cat' in col] low_card_columns = [cname for cname in train.columns if train[cname].nunique() < 10 and train[cname].dtype == 'object'] high_card_columns = [cname for cname in train.columns if train[cname].nunique() >= 10 and train[cname].dtype == 'object'] Xtrain = train.copy() Xtest = test.copy() ordinal_encoder = OrdinalEncoder() train[high_card_columns] = ordinal_encoder.fit_transform(train[high_card_columns]) test[high_card_columns] = ordinal_encoder.transform(test[high_card_columns]) OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False) OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(train[low_card_columns])) OH_cols_test = pd.DataFrame(OH_encoder.transform(test[low_card_columns])) OH_cols_train.index = train.index OH_cols_test.index = test.index all_cols = OH_cols_train.columns new_cols = [i for i in all_cols if isinstance(i, (int, float))] OH_cols_train = OH_cols_train[new_cols].add_prefix('cat_encode_') OH_cols_test = OH_cols_test[new_cols].add_prefix('cat_encode_') num_X_train = train.drop(low_card_columns, axis=1) num_X_test = test.drop(low_card_columns, axis=1) train = pd.concat([num_X_train, OH_cols_train], axis=1) test = pd.concat([num_X_test, OH_cols_test], axis=1) PreprocessPerformanced_df = pd.DataFrame(columns=['modelname', 'preprocessing1', 'preprocessing2', 'datashape', 'trainsize', 'mean_squared_error']) modelname = 'XGBRegressor' preprocessing1 = 'none' preprocessing2 = 'none' print('Dataframe created')
code
73088459/cell_14
[ "text_plain_output_1.png" ]
from sklearn.model_selection import cross_val_score,cross_val_predict,cross_validate,train_test_split,GridSearchCV,KFold,RepeatedKFold,learning_curve,RandomizedSearchCV,StratifiedKFold from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col='id') test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col='id') full_df = train.copy() fulltest_df = test.copy() preprocessing1 = 'none' preprocessing2 = 'none' y = train.target train.drop(['target'], axis=1, inplace=True) preprocessing1 = 'ordinal encoding' Cfeatures = [col for col in useful_features if 'cat' in col] ordinal_encoder = OrdinalEncoder() train[Cfeatures] = ordinal_encoder.fit_transform(train[Cfeatures]) test[Cfeatures] = ordinal_encoder.transform(test[Cfeatures]) preprocessing1 = 'ordinal & ione-hot encoding' Nfeatures = [cname for cname in train.columns if train[cname].dtype in ['int64', 'float64']] Cfeatures = [col for col in useful_features if 'cat' in col] low_card_columns = [cname for cname in train.columns if train[cname].nunique() < 10 and train[cname].dtype == 'object'] high_card_columns = [cname for cname in train.columns if train[cname].nunique() >= 10 and train[cname].dtype == 'object'] Xtrain = train.copy() Xtest = test.copy() ordinal_encoder = OrdinalEncoder() train[high_card_columns] = ordinal_encoder.fit_transform(train[high_card_columns]) test[high_card_columns] = ordinal_encoder.transform(test[high_card_columns]) OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False) OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(train[low_card_columns])) OH_cols_test = pd.DataFrame(OH_encoder.transform(test[low_card_columns])) OH_cols_train.index = train.index OH_cols_test.index = test.index all_cols = OH_cols_train.columns new_cols = [i for i in all_cols if isinstance(i, (int, float))] OH_cols_train = OH_cols_train[new_cols].add_prefix('cat_encode_') OH_cols_test = OH_cols_test[new_cols].add_prefix('cat_encode_') num_X_train = train.drop(low_card_columns, axis=1) num_X_test = test.drop(low_card_columns, axis=1) train = pd.concat([num_X_train, OH_cols_train], axis=1) test = pd.concat([num_X_test, OH_cols_test], axis=1) useful_features = [c for c in train.columns if c not in ('id', 'target', 'kfold')] Nfeatures = [cname for cname in train.columns if train[cname].dtype in ['int64', 'float64']] train = train[Nfeatures] test = test[Nfeatures] X_train, X_valid, y_train, y_valid = train_test_split(train, y, train_size=0.025, test_size=0.025, random_state=0) train_size = 0.025 test = test print('Data split')
code
73088459/cell_12
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col='id') test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col='id') full_df = train.copy() fulltest_df = test.copy() preprocessing1 = 'none' preprocessing2 = 'none' y = train.target train.drop(['target'], axis=1, inplace=True) preprocessing1 = 'ordinal encoding' Cfeatures = [col for col in useful_features if 'cat' in col] ordinal_encoder = OrdinalEncoder() train[Cfeatures] = ordinal_encoder.fit_transform(train[Cfeatures]) test[Cfeatures] = ordinal_encoder.transform(test[Cfeatures]) preprocessing1 = 'ordinal & ione-hot encoding' Nfeatures = [cname for cname in train.columns if train[cname].dtype in ['int64', 'float64']] Cfeatures = [col for col in useful_features if 'cat' in col] low_card_columns = [cname for cname in train.columns if train[cname].nunique() < 10 and train[cname].dtype == 'object'] high_card_columns = [cname for cname in train.columns if train[cname].nunique() >= 10 and train[cname].dtype == 'object'] Xtrain = train.copy() Xtest = test.copy() ordinal_encoder = OrdinalEncoder() train[high_card_columns] = ordinal_encoder.fit_transform(train[high_card_columns]) test[high_card_columns] = ordinal_encoder.transform(test[high_card_columns]) OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False) OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(train[low_card_columns])) OH_cols_test = pd.DataFrame(OH_encoder.transform(test[low_card_columns])) OH_cols_train.index = train.index OH_cols_test.index = test.index all_cols = OH_cols_train.columns new_cols = [i for i in all_cols if isinstance(i, (int, float))] OH_cols_train = OH_cols_train[new_cols].add_prefix('cat_encode_') OH_cols_test = OH_cols_test[new_cols].add_prefix('cat_encode_') num_X_train = train.drop(low_card_columns, axis=1) num_X_test = test.drop(low_card_columns, axis=1) train = pd.concat([num_X_train, OH_cols_train], axis=1) test = pd.concat([num_X_test, OH_cols_test], axis=1) useful_features = [c for c in train.columns if c not in ('id', 'target', 'kfold')] Nfeatures = [cname for cname in train.columns if train[cname].dtype in ['int64', 'float64']] train = train[Nfeatures] test = test[Nfeatures] print('Categorical data removed')
code
2008154/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt img_size = 64 channel_size = 1 print('Training shape:', X_train.shape) print(X_train.shape[0], 'sample,', X_train.shape[1], 'x', X_train.shape[2], 'size grayscale image.\n') print('Test shape:', X_test.shape) print(X_test.shape[0], 'sample,', X_test.shape[1], 'x', X_test.shape[2], 'size grayscale image.\n') print('Examples:') n = 10 plt.figure(figsize=(20, 4)) for i in range(1, n + 1): ax = plt.subplot(1, n, i) plt.imshow(X_train[i].reshape(img_size, img_size)) plt.gray() plt.axis('off')
code
2008154/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output from subprocess import check_output print(check_output(['ls', '../input/Sign-language-digits-dataset']).decode('utf8'))
code
90108999/cell_21
[ "text_plain_output_1.png" ]
y_test_temp = y_test.reshape(1, len(y_test))[0] print(type(y_test_temp)) print(y_test_temp.shape)
code
90108999/cell_9
[ "image_output_1.png" ]
import cupy as np import cv2 import matplotlib.pyplot as plt import os image_size = 200 labels = ['PNEUMONIA', 'NORMAL'] def data_loader(data_dir): data = list() for label in labels: path = os.path.join(data_dir, label) class_num = labels.index(label) for img in os.listdir(path): img_arr = cv2.imread(os.path.join(path, img), 0) resized_arr = cv2.resize(img_arr, (image_size, image_size)) data.append([resized_arr, class_num]) return data val = data_loader('../input/chest-xray-pneumonia/chest_xray/val') test = data_loader('../input/chest-xray-pneumonia/chest_xray/test') train = data_loader('../input/chest-xray-pneumonia/chest_xray/train') def normalize_list(train): for pair in train: pair[0] = np.array(pair[0]) / 255 return train train = normalize_list(train) test = normalize_list(test) val = normalize_list(val) train[0]
code
90108999/cell_6
[ "image_output_1.png" ]
import cupy as np import cv2 import matplotlib.pyplot as plt import os image_size = 200 labels = ['PNEUMONIA', 'NORMAL'] def data_loader(data_dir): data = list() for label in labels: path = os.path.join(data_dir, label) class_num = labels.index(label) for img in os.listdir(path): img_arr = cv2.imread(os.path.join(path, img), 0) resized_arr = cv2.resize(img_arr, (image_size, image_size)) data.append([resized_arr, class_num]) return data val = data_loader('../input/chest-xray-pneumonia/chest_xray/val') test = data_loader('../input/chest-xray-pneumonia/chest_xray/test') train = data_loader('../input/chest-xray-pneumonia/chest_xray/train') plt.figure(figsize=(6, 6)) plt.imshow(np.asnumpy(train[2][0]))
code
90108999/cell_29
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report import cupy as np import cv2 import matplotlib.pyplot as plt import os image_size = 200 labels = ['PNEUMONIA', 'NORMAL'] def data_loader(data_dir): data = list() for label in labels: path = os.path.join(data_dir, label) class_num = labels.index(label) for img in os.listdir(path): img_arr = cv2.imread(os.path.join(path, img), 0) resized_arr = cv2.resize(img_arr, (image_size, image_size)) data.append([resized_arr, class_num]) return data val = data_loader('../input/chest-xray-pneumonia/chest_xray/val') test = data_loader('../input/chest-xray-pneumonia/chest_xray/test') train = data_loader('../input/chest-xray-pneumonia/chest_xray/train') def normalize_list(train): for pair in train: pair[0] = np.array(pair[0]) / 255 return train train = normalize_list(train) test = normalize_list(test) val = normalize_list(val) def make_x_y(data): X = [] Y = [] for pair in data: X.append(pair[0]) Y.append(pair[1]) return (np.array(X), np.array(Y)) x_train, y_train = make_x_y(train) x_test, y_test = make_x_y(test) x_val, y_val = make_x_y(val) x_train = x_train.reshape(len(x_train), 200 * 200) x_test = x_test.reshape(len(x_test), 200 * 200) x_val = x_val.reshape(len(x_val), 200 * 200) cuml_model = cuRFC(max_features=1.0, n_bins=8, n_estimators=40) cuml_model.fit(x_train.astype('float32'), y_train.astype('float32')) y_rfc_predict = cuml_model.predict(x_test.astype('float32')) y_test_temp = y_test.reshape(1, len(y_test))[0] y_np_test = np.asnumpy(y_test_temp) y_np_rfc_predict = np.asnumpy(y_rfc_predict) print('classification report of model: \n') print(classification_report(y_np_test, y_np_rfc_predict, target_names=labels))
code
90108999/cell_19
[ "text_plain_output_1.png" ]
from cuml.ensemble import RandomForestClassifier as cuRFC x_train = x_train.reshape(len(x_train), 200 * 200) x_test = x_test.reshape(len(x_test), 200 * 200) x_val = x_val.reshape(len(x_val), 200 * 200) cuml_model = cuRFC(max_features=1.0, n_bins=8, n_estimators=40) cuml_model.fit(x_train.astype('float32'), y_train.astype('float32')) y_rfc_predict = cuml_model.predict(x_test.astype('float32')) print(type(y_rfc_predict)) print(y_rfc_predict.shape)
code
90108999/cell_18
[ "text_plain_output_1.png" ]
from cuml.ensemble import RandomForestClassifier as cuRFC x_train = x_train.reshape(len(x_train), 200 * 200) x_test = x_test.reshape(len(x_test), 200 * 200) x_val = x_val.reshape(len(x_val), 200 * 200) cuml_model = cuRFC(max_features=1.0, n_bins=8, n_estimators=40) cuml_model.fit(x_train.astype('float32'), y_train.astype('float32'))
code
90108999/cell_32
[ "text_plain_output_1.png", "image_output_1.png" ]
from cuml.metrics.confusion_matrix import confusion_matrix from sklearn import metrics from sklearn.metrics import accuracy_score, confusion_matrix, classification_report import cupy as np import cv2 import matplotlib.pyplot as plt import os import seaborn as sns image_size = 200 labels = ['PNEUMONIA', 'NORMAL'] def data_loader(data_dir): data = list() for label in labels: path = os.path.join(data_dir, label) class_num = labels.index(label) for img in os.listdir(path): img_arr = cv2.imread(os.path.join(path, img), 0) resized_arr = cv2.resize(img_arr, (image_size, image_size)) data.append([resized_arr, class_num]) return data val = data_loader('../input/chest-xray-pneumonia/chest_xray/val') test = data_loader('../input/chest-xray-pneumonia/chest_xray/test') train = data_loader('../input/chest-xray-pneumonia/chest_xray/train') def normalize_list(train): for pair in train: pair[0] = np.array(pair[0]) / 255 return train train = normalize_list(train) test = normalize_list(test) val = normalize_list(val) def make_x_y(data): X = [] Y = [] for pair in data: X.append(pair[0]) Y.append(pair[1]) return (np.array(X), np.array(Y)) x_train, y_train = make_x_y(train) x_test, y_test = make_x_y(test) x_val, y_val = make_x_y(val) x_train = x_train.reshape(len(x_train), 200 * 200) x_test = x_test.reshape(len(x_test), 200 * 200) x_val = x_val.reshape(len(x_val), 200 * 200) cuml_model = cuRFC(max_features=1.0, n_bins=8, n_estimators=40) cuml_model.fit(x_train.astype('float32'), y_train.astype('float32')) y_rfc_predict = cuml_model.predict(x_test.astype('float32')) y_test_temp = y_test.reshape(1, len(y_test))[0] y_np_test = np.asnumpy(y_test_temp) y_np_rfc_predict = np.asnumpy(y_rfc_predict) print("confusion matrix of model: \n") cmap = confusion_matrix(y_np_test, y_np_rfc_predict) plt.figure(figsize = (4, 4), dpi = 150) hm = sns.heatmap(data=cmap,annot=True,fmt='g') pred = cuml_model.predict(x_test) pred = np.asnumpy(pred) fpr, tpr, threshold = metrics.roc_curve(y_np_test, pred) roc_auc = metrics.auc(fpr, tpr) plt.figure(figsize=(6, 4), dpi=150) plt.title('ROC curve') plt.plot(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc) plt.legend(loc='lower right') plt.plot([0, 1], [0, 1], 'r--') plt.xlim([0, 1]) plt.ylim([0, 1]) plt.ylabel('True Positive Rate') plt.xlabel('False Positive Rate') plt.show()
code
90108999/cell_35
[ "text_plain_output_1.png" ]
from cuml.metrics.confusion_matrix import confusion_matrix from sklearn import metrics from sklearn.metrics import accuracy_score from sklearn.metrics import accuracy_score from sklearn.metrics import accuracy_score, confusion_matrix, classification_report from sklearn.metrics import f1_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import roc_auc_score import cupy as np import cv2 import matplotlib.pyplot as plt import os import seaborn as sns image_size = 200 labels = ['PNEUMONIA', 'NORMAL'] def data_loader(data_dir): data = list() for label in labels: path = os.path.join(data_dir, label) class_num = labels.index(label) for img in os.listdir(path): img_arr = cv2.imread(os.path.join(path, img), 0) resized_arr = cv2.resize(img_arr, (image_size, image_size)) data.append([resized_arr, class_num]) return data val = data_loader('../input/chest-xray-pneumonia/chest_xray/val') test = data_loader('../input/chest-xray-pneumonia/chest_xray/test') train = data_loader('../input/chest-xray-pneumonia/chest_xray/train') def normalize_list(train): for pair in train: pair[0] = np.array(pair[0]) / 255 return train train = normalize_list(train) test = normalize_list(test) val = normalize_list(val) def make_x_y(data): X = [] Y = [] for pair in data: X.append(pair[0]) Y.append(pair[1]) return (np.array(X), np.array(Y)) x_train, y_train = make_x_y(train) x_test, y_test = make_x_y(test) x_val, y_val = make_x_y(val) x_train = x_train.reshape(len(x_train), 200 * 200) x_test = x_test.reshape(len(x_test), 200 * 200) x_val = x_val.reshape(len(x_val), 200 * 200) cuml_model = cuRFC(max_features=1.0, n_bins=8, n_estimators=40) cuml_model.fit(x_train.astype('float32'), y_train.astype('float32')) y_rfc_predict = cuml_model.predict(x_test.astype('float32')) y_test_temp = y_test.reshape(1, len(y_test))[0] y_np_test = np.asnumpy(y_test_temp) y_np_rfc_predict = np.asnumpy(y_rfc_predict) print("confusion matrix of model: \n") cmap = confusion_matrix(y_np_test, y_np_rfc_predict) plt.figure(figsize = (4, 4), dpi = 150) hm = sns.heatmap(data=cmap,annot=True,fmt='g') pred = cuml_model.predict(x_test) pred = np.asnumpy(pred) fpr, tpr, threshold = metrics.roc_curve(y_np_test, pred) roc_auc = metrics.auc(fpr, tpr) plt.xlim([0, 1]) plt.ylim([0, 1]) accuracy = accuracy_score(y_np_test, y_np_rfc_predict) precision = precision_score(y_np_test, y_np_rfc_predict) recall = recall_score(y_np_test, y_np_rfc_predict) f1 = f1_score(y_np_test, y_np_rfc_predict) roc = roc_auc_score(y_np_test, y_np_rfc_predict) value = [accuracy, precision, recall, f1, roc] labels = ['Accuarcy', 'Precision', 'Recall', 'F1', 'ROC Score'] plt.figure(figsize=(6, 4), dpi=150) plt.bar(labels, value) plt.title('Metrics') plt.show()
code
90108999/cell_24
[ "text_plain_output_1.png" ]
from sklearn.metrics import accuracy_score from sklearn.metrics import accuracy_score, confusion_matrix, classification_report import cupy as np import cv2 import matplotlib.pyplot as plt import os image_size = 200 labels = ['PNEUMONIA', 'NORMAL'] def data_loader(data_dir): data = list() for label in labels: path = os.path.join(data_dir, label) class_num = labels.index(label) for img in os.listdir(path): img_arr = cv2.imread(os.path.join(path, img), 0) resized_arr = cv2.resize(img_arr, (image_size, image_size)) data.append([resized_arr, class_num]) return data val = data_loader('../input/chest-xray-pneumonia/chest_xray/val') test = data_loader('../input/chest-xray-pneumonia/chest_xray/test') train = data_loader('../input/chest-xray-pneumonia/chest_xray/train') def normalize_list(train): for pair in train: pair[0] = np.array(pair[0]) / 255 return train train = normalize_list(train) test = normalize_list(test) val = normalize_list(val) def make_x_y(data): X = [] Y = [] for pair in data: X.append(pair[0]) Y.append(pair[1]) return (np.array(X), np.array(Y)) x_train, y_train = make_x_y(train) x_test, y_test = make_x_y(test) x_val, y_val = make_x_y(val) x_train = x_train.reshape(len(x_train), 200 * 200) x_test = x_test.reshape(len(x_test), 200 * 200) x_val = x_val.reshape(len(x_val), 200 * 200) cuml_model = cuRFC(max_features=1.0, n_bins=8, n_estimators=40) cuml_model.fit(x_train.astype('float32'), y_train.astype('float32')) y_rfc_predict = cuml_model.predict(x_test.astype('float32')) y_test_temp = y_test.reshape(1, len(y_test))[0] y_np_test = np.asnumpy(y_test_temp) y_np_rfc_predict = np.asnumpy(y_rfc_predict) accu = accuracy_score(y_np_test, y_np_rfc_predict) print('accuracy of model is: %f' % accu)
code
90108999/cell_14
[ "text_plain_output_1.png" ]
x_train = x_train.reshape(len(x_train), 200 * 200)
x_test = x_test.reshape(len(x_test), 200 * 200)
x_val = x_val.reshape(len(x_val), 200 * 200)
print(x_train.shape)
print(x_test.shape)
print(x_val.shape)
code
90108999/cell_22
[ "text_plain_output_1.png" ]
import cupy as np import cv2 import matplotlib.pyplot as plt import os image_size = 200 labels = ['PNEUMONIA', 'NORMAL'] def data_loader(data_dir): data = list() for label in labels: path = os.path.join(data_dir, label) class_num = labels.index(label) for img in os.listdir(path): img_arr = cv2.imread(os.path.join(path, img), 0) resized_arr = cv2.resize(img_arr, (image_size, image_size)) data.append([resized_arr, class_num]) return data val = data_loader('../input/chest-xray-pneumonia/chest_xray/val') test = data_loader('../input/chest-xray-pneumonia/chest_xray/test') train = data_loader('../input/chest-xray-pneumonia/chest_xray/train') def normalize_list(train): for pair in train: pair[0] = np.array(pair[0]) / 255 return train train = normalize_list(train) test = normalize_list(test) val = normalize_list(val) def make_x_y(data): X = [] Y = [] for pair in data: X.append(pair[0]) Y.append(pair[1]) return (np.array(X), np.array(Y)) x_train, y_train = make_x_y(train) x_test, y_test = make_x_y(test) x_val, y_val = make_x_y(val) x_train = x_train.reshape(len(x_train), 200 * 200) x_test = x_test.reshape(len(x_test), 200 * 200) x_val = x_val.reshape(len(x_val), 200 * 200) cuml_model = cuRFC(max_features=1.0, n_bins=8, n_estimators=40) cuml_model.fit(x_train.astype('float32'), y_train.astype('float32')) y_rfc_predict = cuml_model.predict(x_test.astype('float32')) y_test_temp = y_test.reshape(1, len(y_test))[0] y_np_test = np.asnumpy(y_test_temp) y_np_rfc_predict = np.asnumpy(y_rfc_predict) print(type(y_np_test)) print(type(y_np_rfc_predict))
code
90108999/cell_27
[ "text_plain_output_1.png" ]
from cuml.metrics.confusion_matrix import confusion_matrix from sklearn.metrics import accuracy_score, confusion_matrix, classification_report import cupy as np import cv2 import matplotlib.pyplot as plt import os import seaborn as sns image_size = 200 labels = ['PNEUMONIA', 'NORMAL'] def data_loader(data_dir): data = list() for label in labels: path = os.path.join(data_dir, label) class_num = labels.index(label) for img in os.listdir(path): img_arr = cv2.imread(os.path.join(path, img), 0) resized_arr = cv2.resize(img_arr, (image_size, image_size)) data.append([resized_arr, class_num]) return data val = data_loader('../input/chest-xray-pneumonia/chest_xray/val') test = data_loader('../input/chest-xray-pneumonia/chest_xray/test') train = data_loader('../input/chest-xray-pneumonia/chest_xray/train') def normalize_list(train): for pair in train: pair[0] = np.array(pair[0]) / 255 return train train = normalize_list(train) test = normalize_list(test) val = normalize_list(val) def make_x_y(data): X = [] Y = [] for pair in data: X.append(pair[0]) Y.append(pair[1]) return (np.array(X), np.array(Y)) x_train, y_train = make_x_y(train) x_test, y_test = make_x_y(test) x_val, y_val = make_x_y(val) x_train = x_train.reshape(len(x_train), 200 * 200) x_test = x_test.reshape(len(x_test), 200 * 200) x_val = x_val.reshape(len(x_val), 200 * 200) cuml_model = cuRFC(max_features=1.0, n_bins=8, n_estimators=40) cuml_model.fit(x_train.astype('float32'), y_train.astype('float32')) y_rfc_predict = cuml_model.predict(x_test.astype('float32')) y_test_temp = y_test.reshape(1, len(y_test))[0] y_np_test = np.asnumpy(y_test_temp) y_np_rfc_predict = np.asnumpy(y_rfc_predict) print('confusion matrix of model: \n') cmap = confusion_matrix(y_np_test, y_np_rfc_predict) plt.figure(figsize=(4, 4), dpi=150) hm = sns.heatmap(data=cmap, annot=True, fmt='g')
code
90108999/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
print(x_train.shape)
print(y_train.shape)
print(x_test.shape)
print(y_test.shape)
print(x_val.shape)
print(y_val.shape)
code
18146033/cell_9
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

train_csv = pd.read_csv('../input/recursion-cellular-image-classification/train.csv')
test_csv = pd.read_csv('../input/recursion-cellular-image-classification/test.csv')
sub = pd.read_csv('../input/recursion-cellular-keras-densenet/submission.csv')
np.stack([train_csv.plate.values[train_csv.sirna == i] for i in range(10)]).transpose()
plate_groups = np.zeros((1108, 4), int)
for sirna in range(1108):
    grp = train_csv.loc[train_csv.sirna == sirna, :].plate.value_counts().index.values
    assert len(grp) == 3
    plate_groups[sirna, 0:3] = grp
    plate_groups[sirna, 3] = 10 - grp.sum()
plate_groups[:10, :]
all_test_exp = test_csv.experiment.unique()
group_plate_probs = np.zeros((len(all_test_exp), 4))
for idx in range(len(all_test_exp)):
    print('Experiment', idx)
    preds = sub.loc[test_csv.experiment == all_test_exp[idx], 'sirna'].values
    pp_mult = np.zeros((len(preds), 1108))
    pp_mult[range(len(preds)), preds] = 1
    sub_test = test_csv.loc[test_csv.experiment == all_test_exp[idx], :]
    assert len(pp_mult) == len(sub_test)
    for j in range(4):
        mask = np.repeat(plate_groups[np.newaxis, :, j], len(pp_mult), axis=0) == np.repeat(sub_test.plate.values[:, np.newaxis], 1108, axis=1)
        group_plate_probs[idx, j] = np.array(pp_mult)[mask].sum() / len(pp_mult)
code
18146033/cell_20
[ "text_plain_output_1.png" ]
from keras.applications.densenet import DenseNet121
from keras.layers import (Activation, Dropout, Flatten, Dense, Input, Conv2D, GlobalAveragePooling2D)
from keras.models import Model
from tqdm import tqdm
import cv2
import numpy as np
import os
import pandas as pd
import numpy as np
import pandas as pd
import os
from tqdm import tqdm
import PIL
import cv2
from PIL import Image, ImageOps
from keras.models import Sequential, load_model
from keras.layers import Activation, Dropout, Flatten, Dense, Input, Conv2D, GlobalAveragePooling2D
from keras.applications.densenet import DenseNet121
import keras
from keras.models import Model

SIZE = 224
NUM_CLASSES = 1108
train_csv = pd.read_csv('../input/recursion-cellular-image-classification/train.csv')
test_csv = pd.read_csv('../input/recursion-cellular-image-classification/test.csv')
sub = pd.read_csv('../input/recursion-cellular-keras-densenet/submission.csv')
np.stack([train_csv.plate.values[train_csv.sirna == i] for i in range(10)]).transpose()
plate_groups = np.zeros((1108, 4), int)
for sirna in range(1108):
    grp = train_csv.loc[train_csv.sirna == sirna, :].plate.value_counts().index.values
    assert len(grp) == 3
    plate_groups[sirna, 0:3] = grp
    plate_groups[sirna, 3] = 10 - grp.sum()
plate_groups[:10, :]
all_test_exp = test_csv.experiment.unique()
group_plate_probs = np.zeros((len(all_test_exp), 4))
for idx in range(len(all_test_exp)):
    preds = sub.loc[test_csv.experiment == all_test_exp[idx], 'sirna'].values
    pp_mult = np.zeros((len(preds), 1108))
    pp_mult[range(len(preds)), preds] = 1
    sub_test = test_csv.loc[test_csv.experiment == all_test_exp[idx], :]
    assert len(pp_mult) == len(sub_test)
    for j in range(4):
        mask = np.repeat(plate_groups[np.newaxis, :, j], len(pp_mult), axis=0) == np.repeat(sub_test.plate.values[:, np.newaxis], 1108, axis=1)
        group_plate_probs[idx, j] = np.array(pp_mult)[mask].sum() / len(pp_mult)
pd.DataFrame(group_plate_probs, index=all_test_exp)
exp_to_group = group_plate_probs.argmax(1)

def create_model(input_shape, n_out):
    input_tensor = Input(shape=input_shape)
    base_model = DenseNet121(include_top=False, weights=None, input_tensor=input_tensor)
    x = GlobalAveragePooling2D()(base_model.output)
    x = Dense(1024, activation='relu')(x)
    final_output = Dense(n_out, activation='softmax', name='final_output')(x)
    model = Model(input_tensor, final_output)
    return model

model = create_model(input_shape=(SIZE, SIZE, 3), n_out=NUM_CLASSES)
model.load_weights('../input/recursion-cellular-keras-densenet/Densenet121.h5')
predicted = []
for i, name in tqdm(enumerate(test_csv['id_code'])):
    path1 = os.path.join('../input/recursion-cellular-image-classification-224-jpg/test/test/', name + '_s1.jpeg')
    image1 = cv2.imread(path1)
    score_predict1 = model.predict(image1[np.newaxis] / 255)
    path2 = os.path.join('../input/recursion-cellular-image-classification-224-jpg/test/test/', name + '_s2.jpeg')
    image2 = cv2.imread(path2)
    score_predict2 = model.predict(image2[np.newaxis] / 255)
    predicted.append(0.5 * (score_predict1 + score_predict2))
predicted = np.stack(predicted).squeeze()

def select_plate_group(pp_mult, idx):
    sub_test = test_csv.loc[test_csv.experiment == all_test_exp[idx], :]
    assert len(pp_mult) == len(sub_test)
    mask = np.repeat(plate_groups[np.newaxis, :, exp_to_group[idx]], len(pp_mult), axis=0) != np.repeat(sub_test.plate.values[:, np.newaxis], 1108, axis=1)
    pp_mult[mask] = 0
    return pp_mult

for idx in range(len(all_test_exp)):
    indices = test_csv.experiment == all_test_exp[idx]
    preds = predicted[indices, :].copy()
    preds = select_plate_group(preds, idx)
    sub.loc[indices, 'sirna'] = preds.argmax(1)
(sub.sirna == pd.read_csv('../input/recursion-cellular-keras-densenet/submission.csv').sirna).mean()
code
18146033/cell_6
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

train_csv = pd.read_csv('../input/recursion-cellular-image-classification/train.csv')
test_csv = pd.read_csv('../input/recursion-cellular-image-classification/test.csv')
sub = pd.read_csv('../input/recursion-cellular-keras-densenet/submission.csv')
np.stack([train_csv.plate.values[train_csv.sirna == i] for i in range(10)]).transpose()
plate_groups = np.zeros((1108, 4), int)
for sirna in range(1108):
    grp = train_csv.loc[train_csv.sirna == sirna, :].plate.value_counts().index.values
    assert len(grp) == 3
    plate_groups[sirna, 0:3] = grp
    plate_groups[sirna, 3] = 10 - grp.sum()
plate_groups[:10, :]
code
18146033/cell_2
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import os
from tqdm import tqdm
import PIL
import cv2
from PIL import Image, ImageOps
from keras.models import Sequential, load_model
from keras.layers import Activation, Dropout, Flatten, Dense, Input, Conv2D, GlobalAveragePooling2D
from keras.applications.densenet import DenseNet121
import keras
from keras.models import Model

SIZE = 224
NUM_CLASSES = 1108
code
18146033/cell_11
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd train_csv = pd.read_csv('../input/recursion-cellular-image-classification/train.csv') test_csv = pd.read_csv('../input/recursion-cellular-image-classification/test.csv') sub = pd.read_csv('../input/recursion-cellular-keras-densenet/submission.csv') np.stack([train_csv.plate.values[train_csv.sirna == i] for i in range(10)]).transpose() plate_groups = np.zeros((1108, 4), int) for sirna in range(1108): grp = train_csv.loc[train_csv.sirna == sirna, :].plate.value_counts().index.values assert len(grp) == 3 plate_groups[sirna, 0:3] = grp plate_groups[sirna, 3] = 10 - grp.sum() plate_groups[:10, :] all_test_exp = test_csv.experiment.unique() group_plate_probs = np.zeros((len(all_test_exp), 4)) for idx in range(len(all_test_exp)): preds = sub.loc[test_csv.experiment == all_test_exp[idx], 'sirna'].values pp_mult = np.zeros((len(preds), 1108)) pp_mult[range(len(preds)), preds] = 1 sub_test = test_csv.loc[test_csv.experiment == all_test_exp[idx], :] assert len(pp_mult) == len(sub_test) for j in range(4): mask = np.repeat(plate_groups[np.newaxis, :, j], len(pp_mult), axis=0) == np.repeat(sub_test.plate.values[:, np.newaxis], 1108, axis=1) group_plate_probs[idx, j] = np.array(pp_mult)[mask].sum() / len(pp_mult) exp_to_group = group_plate_probs.argmax(1) print(exp_to_group)
code
18146033/cell_19
[ "text_plain_output_1.png" ]
from keras.applications.densenet import DenseNet121
from keras.layers import (Activation, Dropout, Flatten, Dense, Input, Conv2D, GlobalAveragePooling2D)
from keras.models import Model
from tqdm import tqdm
import cv2
import numpy as np
import os
import pandas as pd
import numpy as np
import pandas as pd
import os
from tqdm import tqdm
import PIL
import cv2
from PIL import Image, ImageOps
from keras.models import Sequential, load_model
from keras.layers import Activation, Dropout, Flatten, Dense, Input, Conv2D, GlobalAveragePooling2D
from keras.applications.densenet import DenseNet121
import keras
from keras.models import Model

SIZE = 224
NUM_CLASSES = 1108
train_csv = pd.read_csv('../input/recursion-cellular-image-classification/train.csv')
test_csv = pd.read_csv('../input/recursion-cellular-image-classification/test.csv')
sub = pd.read_csv('../input/recursion-cellular-keras-densenet/submission.csv')
np.stack([train_csv.plate.values[train_csv.sirna == i] for i in range(10)]).transpose()
plate_groups = np.zeros((1108, 4), int)
for sirna in range(1108):
    grp = train_csv.loc[train_csv.sirna == sirna, :].plate.value_counts().index.values
    assert len(grp) == 3
    plate_groups[sirna, 0:3] = grp
    plate_groups[sirna, 3] = 10 - grp.sum()
plate_groups[:10, :]
all_test_exp = test_csv.experiment.unique()
group_plate_probs = np.zeros((len(all_test_exp), 4))
for idx in range(len(all_test_exp)):
    preds = sub.loc[test_csv.experiment == all_test_exp[idx], 'sirna'].values
    pp_mult = np.zeros((len(preds), 1108))
    pp_mult[range(len(preds)), preds] = 1
    sub_test = test_csv.loc[test_csv.experiment == all_test_exp[idx], :]
    assert len(pp_mult) == len(sub_test)
    for j in range(4):
        mask = np.repeat(plate_groups[np.newaxis, :, j], len(pp_mult), axis=0) == np.repeat(sub_test.plate.values[:, np.newaxis], 1108, axis=1)
        group_plate_probs[idx, j] = np.array(pp_mult)[mask].sum() / len(pp_mult)
exp_to_group = group_plate_probs.argmax(1)

def create_model(input_shape, n_out):
    input_tensor = Input(shape=input_shape)
    base_model = DenseNet121(include_top=False, weights=None, input_tensor=input_tensor)
    x = GlobalAveragePooling2D()(base_model.output)
    x = Dense(1024, activation='relu')(x)
    final_output = Dense(n_out, activation='softmax', name='final_output')(x)
    model = Model(input_tensor, final_output)
    return model

model = create_model(input_shape=(SIZE, SIZE, 3), n_out=NUM_CLASSES)
model.load_weights('../input/recursion-cellular-keras-densenet/Densenet121.h5')
predicted = []
for i, name in tqdm(enumerate(test_csv['id_code'])):
    path1 = os.path.join('../input/recursion-cellular-image-classification-224-jpg/test/test/', name + '_s1.jpeg')
    image1 = cv2.imread(path1)
    score_predict1 = model.predict(image1[np.newaxis] / 255)
    path2 = os.path.join('../input/recursion-cellular-image-classification-224-jpg/test/test/', name + '_s2.jpeg')
    image2 = cv2.imread(path2)
    score_predict2 = model.predict(image2[np.newaxis] / 255)
    predicted.append(0.5 * (score_predict1 + score_predict2))
predicted = np.stack(predicted).squeeze()

def select_plate_group(pp_mult, idx):
    sub_test = test_csv.loc[test_csv.experiment == all_test_exp[idx], :]
    assert len(pp_mult) == len(sub_test)
    mask = np.repeat(plate_groups[np.newaxis, :, exp_to_group[idx]], len(pp_mult), axis=0) != np.repeat(sub_test.plate.values[:, np.newaxis], 1108, axis=1)
    pp_mult[mask] = 0
    return pp_mult

for idx in range(len(all_test_exp)):
    print('Experiment', idx)
    indices = test_csv.experiment == all_test_exp[idx]
    preds = predicted[indices, :].copy()
    preds = select_plate_group(preds, idx)
    sub.loc[indices, 'sirna'] = preds.argmax(1)
code
18146033/cell_16
[ "text_plain_output_1.png" ]
from keras.applications.densenet import DenseNet121 from keras.layers import (Activation, Dropout, Flatten, Dense, Input, Conv2D, GlobalAveragePooling2D) from keras.models import Model from tqdm import tqdm import cv2 import numpy as np import os import pandas as pd import numpy as np import pandas as pd import os from tqdm import tqdm import PIL import cv2 from PIL import Image, ImageOps from keras.models import Sequential, load_model from keras.layers import Activation, Dropout, Flatten, Dense, Input, Conv2D, GlobalAveragePooling2D from keras.applications.densenet import DenseNet121 import keras from keras.models import Model SIZE = 224 NUM_CLASSES = 1108 train_csv = pd.read_csv('../input/recursion-cellular-image-classification/train.csv') test_csv = pd.read_csv('../input/recursion-cellular-image-classification/test.csv') sub = pd.read_csv('../input/recursion-cellular-keras-densenet/submission.csv') np.stack([train_csv.plate.values[train_csv.sirna == i] for i in range(10)]).transpose() plate_groups = np.zeros((1108, 4), int) for sirna in range(1108): grp = train_csv.loc[train_csv.sirna == sirna, :].plate.value_counts().index.values assert len(grp) == 3 plate_groups[sirna, 0:3] = grp plate_groups[sirna, 3] = 10 - grp.sum() plate_groups[:10, :] all_test_exp = test_csv.experiment.unique() group_plate_probs = np.zeros((len(all_test_exp), 4)) for idx in range(len(all_test_exp)): preds = sub.loc[test_csv.experiment == all_test_exp[idx], 'sirna'].values pp_mult = np.zeros((len(preds), 1108)) pp_mult[range(len(preds)), preds] = 1 sub_test = test_csv.loc[test_csv.experiment == all_test_exp[idx], :] assert len(pp_mult) == len(sub_test) for j in range(4): mask = np.repeat(plate_groups[np.newaxis, :, j], len(pp_mult), axis=0) == np.repeat(sub_test.plate.values[:, np.newaxis], 1108, axis=1) group_plate_probs[idx, j] = np.array(pp_mult)[mask].sum() / len(pp_mult) def create_model(input_shape, n_out): input_tensor = Input(shape=input_shape) base_model = DenseNet121(include_top=False, weights=None, input_tensor=input_tensor) x = GlobalAveragePooling2D()(base_model.output) x = Dense(1024, activation='relu')(x) final_output = Dense(n_out, activation='softmax', name='final_output')(x) model = Model(input_tensor, final_output) return model model = create_model(input_shape=(SIZE, SIZE, 3), n_out=NUM_CLASSES) model.load_weights('../input/recursion-cellular-keras-densenet/Densenet121.h5') predicted = [] for i, name in tqdm(enumerate(test_csv['id_code'])): path1 = os.path.join('../input/recursion-cellular-image-classification-224-jpg/test/test/', name + '_s1.jpeg') image1 = cv2.imread(path1) score_predict1 = model.predict(image1[np.newaxis] / 255) path2 = os.path.join('../input/recursion-cellular-image-classification-224-jpg/test/test/', name + '_s2.jpeg') image2 = cv2.imread(path2) score_predict2 = model.predict(image2[np.newaxis] / 255) predicted.append(0.5 * (score_predict1 + score_predict2))
code
18146033/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np import pandas as pd train_csv = pd.read_csv('../input/recursion-cellular-image-classification/train.csv') test_csv = pd.read_csv('../input/recursion-cellular-image-classification/test.csv') sub = pd.read_csv('../input/recursion-cellular-keras-densenet/submission.csv') np.stack([train_csv.plate.values[train_csv.sirna == i] for i in range(10)]).transpose() plate_groups = np.zeros((1108, 4), int) for sirna in range(1108): grp = train_csv.loc[train_csv.sirna == sirna, :].plate.value_counts().index.values assert len(grp) == 3 plate_groups[sirna, 0:3] = grp plate_groups[sirna, 3] = 10 - grp.sum() plate_groups[:10, :] all_test_exp = test_csv.experiment.unique() group_plate_probs = np.zeros((len(all_test_exp), 4)) for idx in range(len(all_test_exp)): preds = sub.loc[test_csv.experiment == all_test_exp[idx], 'sirna'].values pp_mult = np.zeros((len(preds), 1108)) pp_mult[range(len(preds)), preds] = 1 sub_test = test_csv.loc[test_csv.experiment == all_test_exp[idx], :] assert len(pp_mult) == len(sub_test) for j in range(4): mask = np.repeat(plate_groups[np.newaxis, :, j], len(pp_mult), axis=0) == np.repeat(sub_test.plate.values[:, np.newaxis], 1108, axis=1) group_plate_probs[idx, j] = np.array(pp_mult)[mask].sum() / len(pp_mult) pd.DataFrame(group_plate_probs, index=all_test_exp)
code
18146033/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np
import pandas as pd

train_csv = pd.read_csv('../input/recursion-cellular-image-classification/train.csv')
test_csv = pd.read_csv('../input/recursion-cellular-image-classification/test.csv')
sub = pd.read_csv('../input/recursion-cellular-keras-densenet/submission.csv')
np.stack([train_csv.plate.values[train_csv.sirna == i] for i in range(10)]).transpose()
code
72062529/cell_7
[ "text_plain_output_1.png" ]
# @title Install dependencies
# @markdown Download dataset, modules, and files needed for the tutorial from GitHub.
# @markdown Download from OSF. Original repo: https://github.com/colleenjg/neuromatch_ssl_tutorial.git
import os, sys, importlib

REPO_PATH = "neuromatch_ssl_tutorial"
download_str = "Downloading"
if os.path.exists(REPO_PATH):
    download_str = "Redownloading"
    !rm -rf $REPO_PATH

# download from github repo directly
#!git clone git://github.com/colleenjg/neuromatch_ssl_tutorial.git --quiet
from io import BytesIO
from urllib.request import urlopen
from zipfile import ZipFile

zipurl = 'https://osf.io/smqvg/download'
print(f"{download_str} and unzipping the file... Please wait.")
with urlopen(zipurl) as zipresp:
    with ZipFile(BytesIO(zipresp.read())) as zfile:
        zfile.extractall()
print("Download completed!")

# @markdown Import modules designed for use in this tutorials
from neuromatch_ssl_tutorial.modules import data, load, models, plot_util
from neuromatch_ssl_tutorial.modules import data, load, models, plot_util
importlib.reload(data); importlib.reload(load); importlib.reload(models)
importlib.reload(plot_util);

!pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
from evaltools.airtable import AirtableForm

# generate airtable form
atform = AirtableForm('appn7VdPRseSoMXEG','W3D1_T1','https://portal.neuromatchacademy.org/api/redirect/to/47de6f8f-1265-4a74-88c4-7dfa6e64b35a')
code
72062529/cell_17
[ "text_plain_output_1.png" ]
from IPython.display import IFrame from IPython.display import IFrame from IPython.display import YouTubeVideo from IPython.display import display, Image # to visualize images from ipywidgets import widgets import ipywidgets as widgets # interactive display from ipywidgets import widgets out2 = widgets.Output() with out2: from IPython.display import IFrame class BiliVideo(IFrame): def __init__(self, id, page=1, width=400, height=300, **kwargs): self.id = id src = 'https://player.bilibili.com/player.html?bvid={0}&page={1}'.format(id, page) super(BiliVideo, self).__init__(src, width, height, **kwargs) video = BiliVideo(id=f'BV1D64y1s78e', width=854, height=480, fs=1) print('Video available at https://www.bilibili.com/video/{0}'.format(video.id)) display(video) out1 = widgets.Output() with out1: from IPython.display import YouTubeVideo video = YouTubeVideo(id=f'Q3b_EqFUI00', width=854, height=480, fs=1, rel=0) print('Video available at https://youtube.com/watch?v=' + video.id) display(video) out = widgets.Tab([out1, out2]) out.set_title(0, 'Youtube') out.set_title(1, 'Bilibili') atform.add_event('Video 0: Introduction') display(out)
code
72062529/cell_14
[ "text_html_output_1.png" ]
from neuromatch_ssl_tutorial.modules import data, load, models, plot_util from neuromatch_ssl_tutorial.modules import data, load, models, plot_util import matplotlib.pyplot as plt import numpy as np import random import torch import torch # @title Plotting functions # @markdown Function to plot a histogram of RSM values: `plot_rsm_histogram(rsms, colors)` def plot_rsm_histogram(rsms, colors, labels=None, nbins=100): fig, ax = plt.subplots(1) ax.set_title("Histogram of RSM values", y=1.05) min_val = np.min([np.nanmin(rsm) for rsm in rsms]) max_val = np.max([np.nanmax(rsm) for rsm in rsms]) bins = np.linspace(min_val, max_val, nbins+1) if labels is None: labels = [labels] * len(rsms) elif len(labels) != len(rsms): raise ValueError("If providing labels, must provide as many as RSMs.") if len(rsms) != len(colors): raise ValueError("Must provide as may colors as RSMs.") for r, rsm in enumerate(rsms): ax.hist( rsm.reshape(-1), bins, density=True, alpha=0.4, color=colors[r], label=labels[r] ) ax.axvline(x=0, ls="dashed", alpha=0.6, color="k") ax.set_ylabel("Density") ax.set_xlabel("Similarity values") ax.legend() plt.show() from IPython.display import display, Image def test_custom_torch_RSM_fct(custom_torch_RSM_fct): rand_feats = torch.rand(100, 1000) RSM_custom = custom_torch_RSM_fct(rand_feats) RSM_ground_truth = data.calculate_torch_RSM(rand_feats) def test_custom_contrastive_loss_fct(custom_simclr_contrastive_loss): rand_proj_feat1 = torch.rand(100, 1000) rand_proj_feat2 = torch.rand(100, 1000) loss_custom = custom_simclr_contrastive_loss(rand_proj_feat1, rand_proj_feat2) loss_ground_truth = models.contrastive_loss(rand_proj_feat1, rand_proj_feat2) import random import torch def set_seed(seed=None, seed_torch=True): if seed is None: seed = np.random.choice(2 ** 32) random.seed(seed) np.random.seed(seed) if seed_torch: torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.cuda.manual_seed(seed) torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True def seed_worker(worker_id): worker_seed = torch.initial_seed() % 2 ** 32 np.random.seed(worker_seed) random.seed(worker_seed) def set_device(): device = 'cuda' if torch.cuda.is_available() else 'cpu' return device SEED = 2021 set_seed(seed=SEED) DEVICE = set_device()
code
72062529/cell_5
[ "text_plain_output_1.png" ]
from IPython.display import IFrame
from IPython.display import IFrame
IFrame(src=f'https://mfr.ca-1.osf.io/render?url=https://osf.io/wvt34/?direct%26mode=render%26action=download%26mode=render', width=854, height=480)
code
16112056/cell_9
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os

health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
health_df1 = health_df.copy()
health_df1.duplicated().sum()
health_df1.drop_duplicates(inplace=True)
health_df1.duplicated().sum()
health_df1 = health_df1.drop(['Indicator', 'Place', 'BCHC Requested Methodology', 'Source', 'Methods', 'Notes'], axis=1)
plt.figure(figsize=(15, 5))
ax = sns.barplot(x='Indicator Category', y='Value', data=health_df1)
ax.set_title('Indicator Category vs Value')
plt.xlabel('Indicator Category')
plt.ylabel('Value')
plt.show(ax)
code
16112056/cell_4
[ "image_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os

health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
health_df1 = health_df.copy()
health_df1.duplicated().sum()
health_df1.drop_duplicates(inplace=True)
health_df1.duplicated().sum()
health_df1.info()
code
16112056/cell_2
[ "text_plain_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os

health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
health_df1 = health_df.copy()
health_df1.duplicated().sum()
code
16112056/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import os health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv') health_df1 = health_df.copy() health_df1.duplicated().sum() health_df1.drop_duplicates(inplace=True) health_df1.duplicated().sum() health_df1 = health_df1.drop(['Indicator', 'Place', 'BCHC Requested Methodology', 'Source', 'Methods', 'Notes'], axis=1) plt.figure(figsize=(15,5)) ax = sns.barplot(x='Indicator Category',y='Value', data=health_df1) ax.set_title('Indicator Category vs Value') plt.xlabel("Indicator Category") plt.ylabel('Value') plt.show(ax) plt.figure(figsize=(15,5)) ax = sns.barplot(x='Indicator Category',y='Value', hue='Gender',data=health_df1) ax.set_title('Indicator Category vs Value by Gender') plt.xlabel("Indicator Category") plt.ylabel('Value') plt.show(ax) plt.figure(figsize=(15, 5)) ax = sns.barplot(x='State', y='Value', hue='Gender', data=health_df1) ax.set_title('State vs Value by Gender') plt.xlabel('State') plt.ylabel('Value') plt.show(ax)
code
16112056/cell_1
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os

print(os.listdir('../input'))
health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
health_df1 = health_df.copy()
health_df1.head(3)
code
16112056/cell_8
[ "image_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os

health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
health_df1 = health_df.copy()
health_df1.duplicated().sum()
health_df1.drop_duplicates(inplace=True)
health_df1.duplicated().sum()
health_df1 = health_df1.drop(['Indicator', 'Place', 'BCHC Requested Methodology', 'Source', 'Methods', 'Notes'], axis=1)
health_df1.head()
code
16112056/cell_3
[ "text_html_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os

health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv')
health_df1 = health_df.copy()
health_df1.duplicated().sum()
health_df1.drop_duplicates(inplace=True)
health_df1.duplicated().sum()
code
16112056/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import os import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import os health_df = pd.read_csv('../input/Big_Cities_Health_Data_Inventory.csv') health_df1 = health_df.copy() health_df1.duplicated().sum() health_df1.drop_duplicates(inplace=True) health_df1.duplicated().sum() health_df1 = health_df1.drop(['Indicator', 'Place', 'BCHC Requested Methodology', 'Source', 'Methods', 'Notes'], axis=1) plt.figure(figsize=(15,5)) ax = sns.barplot(x='Indicator Category',y='Value', data=health_df1) ax.set_title('Indicator Category vs Value') plt.xlabel("Indicator Category") plt.ylabel('Value') plt.show(ax) plt.figure(figsize=(15, 5)) ax = sns.barplot(x='Indicator Category', y='Value', hue='Gender', data=health_df1) ax.set_title('Indicator Category vs Value by Gender') plt.xlabel('Indicator Category') plt.ylabel('Value') plt.show(ax)
code
128011806/cell_4
[ "text_plain_output_1.png" ]
from keras.models import load_model  # not in the original cell; added so load_model resolves (same import as this notebook's cell_1)
model = load_model('/kaggle/input/ahsan-model-3-testing-implementation/from_kaggle/model_3.h5', compile=False)
model.summary()
code
128011806/cell_34
[ "text_plain_output_1.png" ]
from PIL import Image, ImageOps # Install pillow instead of PIL import numpy as np import tensorflow as tf model = load_model('/kaggle/input/ahsan-model-3-testing-implementation/from_kaggle/model_3.h5', compile=False) model.summary() class_names = ['happy', 'sad'] data = np.ndarray(shape=(48, 48, 1), dtype=np.float32) image = Image.open('/kaggle/input/ahsan-model-3-testing-implementation/from_kaggle/dataset/sad/PrivateTest_568359.jpg').convert('RGB') gray_image = image.convert('L') size = (48, 48) image = ImageOps.fit(gray_image, size, Image.Resampling.LANCZOS) image_array = np.asarray(image) image_array.shape normalized_image_array = image_array.astype(np.float32) / 127.5 - 1 data_tensor = tf.convert_to_tensor(data, dtype=tf.float32) data_tensor = tf.expand_dims(data_tensor, axis=0) data_tensor.shape prediction = model.predict(data_tensor) index = np.argmax(prediction) class_name = class_names[index] confidence_score = prediction[0][index] image2 = Image.open('/kaggle/input/ahsan-model-3-testing-implementation/from_kaggle/dataset/happy/PrivateTest_61167984.jpg').convert('RGB') gray_image = image2.convert('L') size = (48, 48) image2 = ImageOps.fit(gray_image, size, Image.Resampling.LANCZOS) image_array2 = np.asarray(image2) image_array2.shape normalized_image_array2 = image_array2.astype(np.float32) / 127.5 - 1 data_tensor2 = tf.convert_to_tensor(data, dtype=tf.float32) data_tensor2 = tf.expand_dims(data_tensor2, axis=0) data_tensor2.shape prediction = model.predict(data_tensor2) index = np.argmax(prediction) class_name = class_names[index] confidence_score = prediction[0][index] print('Class:', class_name, end=' ') print('Confidence Score:', confidence_score)
code
128011806/cell_33
[ "text_plain_output_1.png" ]
from PIL import Image, ImageOps # Install pillow instead of PIL import numpy as np import tensorflow as tf model = load_model('/kaggle/input/ahsan-model-3-testing-implementation/from_kaggle/model_3.h5', compile=False) model.summary() class_names = ['happy', 'sad'] data = np.ndarray(shape=(48, 48, 1), dtype=np.float32) image = Image.open('/kaggle/input/ahsan-model-3-testing-implementation/from_kaggle/dataset/sad/PrivateTest_568359.jpg').convert('RGB') gray_image = image.convert('L') size = (48, 48) image = ImageOps.fit(gray_image, size, Image.Resampling.LANCZOS) image_array = np.asarray(image) image_array.shape normalized_image_array = image_array.astype(np.float32) / 127.5 - 1 data_tensor = tf.convert_to_tensor(data, dtype=tf.float32) data_tensor = tf.expand_dims(data_tensor, axis=0) data_tensor.shape prediction = model.predict(data_tensor) index = np.argmax(prediction) class_name = class_names[index] confidence_score = prediction[0][index] image2 = Image.open('/kaggle/input/ahsan-model-3-testing-implementation/from_kaggle/dataset/happy/PrivateTest_61167984.jpg').convert('RGB') gray_image = image2.convert('L') size = (48, 48) image2 = ImageOps.fit(gray_image, size, Image.Resampling.LANCZOS) image_array2 = np.asarray(image2) image_array2.shape normalized_image_array2 = image_array2.astype(np.float32) / 127.5 - 1 data_tensor2 = tf.convert_to_tensor(data, dtype=tf.float32) data_tensor2 = tf.expand_dims(data_tensor2, axis=0) data_tensor2.shape prediction = model.predict(data_tensor2) index = np.argmax(prediction) class_name = class_names[index] confidence_score = prediction[0][index]
code
128011806/cell_19
[ "text_plain_output_1.png" ]
from PIL import Image, ImageOps # Install pillow instead of PIL import numpy as np import tensorflow as tf model = load_model('/kaggle/input/ahsan-model-3-testing-implementation/from_kaggle/model_3.h5', compile=False) model.summary() class_names = ['happy', 'sad'] data = np.ndarray(shape=(48, 48, 1), dtype=np.float32) image = Image.open('/kaggle/input/ahsan-model-3-testing-implementation/from_kaggle/dataset/sad/PrivateTest_568359.jpg').convert('RGB') gray_image = image.convert('L') size = (48, 48) image = ImageOps.fit(gray_image, size, Image.Resampling.LANCZOS) image_array = np.asarray(image) image_array.shape normalized_image_array = image_array.astype(np.float32) / 127.5 - 1 data_tensor = tf.convert_to_tensor(data, dtype=tf.float32) data_tensor = tf.expand_dims(data_tensor, axis=0) data_tensor.shape prediction = model.predict(data_tensor) index = np.argmax(prediction) class_name = class_names[index] confidence_score = prediction[0][index] print('Class:', class_name, end=' ') print('Confidence Score:', confidence_score)
code
128011806/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.models import load_model
from PIL import Image, ImageOps
import numpy as np
import tensorflow as tf
code
128011806/cell_18
[ "text_plain_output_1.png" ]
from PIL import Image, ImageOps # Install pillow instead of PIL import numpy as np import tensorflow as tf model = load_model('/kaggle/input/ahsan-model-3-testing-implementation/from_kaggle/model_3.h5', compile=False) model.summary() class_names = ['happy', 'sad'] data = np.ndarray(shape=(48, 48, 1), dtype=np.float32) image = Image.open('/kaggle/input/ahsan-model-3-testing-implementation/from_kaggle/dataset/sad/PrivateTest_568359.jpg').convert('RGB') gray_image = image.convert('L') size = (48, 48) image = ImageOps.fit(gray_image, size, Image.Resampling.LANCZOS) image_array = np.asarray(image) image_array.shape normalized_image_array = image_array.astype(np.float32) / 127.5 - 1 data_tensor = tf.convert_to_tensor(data, dtype=tf.float32) data_tensor = tf.expand_dims(data_tensor, axis=0) data_tensor.shape prediction = model.predict(data_tensor) index = np.argmax(prediction) class_name = class_names[index] confidence_score = prediction[0][index]
code
128011806/cell_32
[ "text_plain_output_1.png" ]
import numpy as np
import tensorflow as tf

data = np.ndarray(shape=(48, 48, 1), dtype=np.float32)
data_tensor = tf.convert_to_tensor(data, dtype=tf.float32)
data_tensor = tf.expand_dims(data_tensor, axis=0)
data_tensor2 = tf.convert_to_tensor(data, dtype=tf.float32)
data_tensor2 = tf.expand_dims(data_tensor2, axis=0)
data_tensor2.shape
code
128011806/cell_17
[ "text_plain_output_1.png" ]
import numpy as np
import tensorflow as tf

data = np.ndarray(shape=(48, 48, 1), dtype=np.float32)
data_tensor = tf.convert_to_tensor(data, dtype=tf.float32)
data_tensor = tf.expand_dims(data_tensor, axis=0)
data_tensor.shape
code
128011806/cell_27
[ "text_plain_output_1.png" ]
from PIL import Image, ImageOps # Install pillow instead of PIL import numpy as np import tensorflow as tf model = load_model('/kaggle/input/ahsan-model-3-testing-implementation/from_kaggle/model_3.h5', compile=False) model.summary() class_names = ['happy', 'sad'] data = np.ndarray(shape=(48, 48, 1), dtype=np.float32) image = Image.open('/kaggle/input/ahsan-model-3-testing-implementation/from_kaggle/dataset/sad/PrivateTest_568359.jpg').convert('RGB') gray_image = image.convert('L') size = (48, 48) image = ImageOps.fit(gray_image, size, Image.Resampling.LANCZOS) image_array = np.asarray(image) image_array.shape normalized_image_array = image_array.astype(np.float32) / 127.5 - 1 data_tensor = tf.convert_to_tensor(data, dtype=tf.float32) data_tensor = tf.expand_dims(data_tensor, axis=0) data_tensor.shape prediction = model.predict(data_tensor) index = np.argmax(prediction) class_name = class_names[index] confidence_score = prediction[0][index] image2 = Image.open('/kaggle/input/ahsan-model-3-testing-implementation/from_kaggle/dataset/happy/PrivateTest_61167984.jpg').convert('RGB') gray_image = image2.convert('L') size = (48, 48) image2 = ImageOps.fit(gray_image, size, Image.Resampling.LANCZOS) image_array2 = np.asarray(image2) image_array2.shape
code
128011806/cell_12
[ "text_plain_output_1.png" ]
from PIL import Image, ImageOps # Install pillow instead of PIL
import numpy as np

data = np.ndarray(shape=(48, 48, 1), dtype=np.float32)
image = Image.open('/kaggle/input/ahsan-model-3-testing-implementation/from_kaggle/dataset/sad/PrivateTest_568359.jpg').convert('RGB')
gray_image = image.convert('L')
size = (48, 48)
image = ImageOps.fit(gray_image, size, Image.Resampling.LANCZOS)
image_array = np.asarray(image)
image_array.shape
code
122253838/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

url = 'https://data.boston.gov/api/3/action/datastore_search?resource_id=c13199bf-49a1-488d-b8e9-55e49523ef81&limit=90000'
js = pd.read_json(url)
df = pd.DataFrame(js['result']['records'])
df = df.set_index('timestamp')
df.columns = df.columns.str.lower()
df = df.drop(['_id'], axis=1)
df.index = df.index.astype('datetime64[ns]')
df.index = df.index - pd.tseries.offsets.Day()
df.index = pd.to_datetime(df.index.date)
df['usage'] = df['usage'].astype('int32')
df.info()
code
73083713/cell_21
[ "text_plain_output_1.png" ]
from datetime import date
import pandas as pd

customer_data = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', sep='\t', index_col='ID')
customer_data.shape
customer_data.isnull().sum()
customer_data_cleaned = customer_data.dropna()
from datetime import date

def get_age(birthyear):
    return date.today().year - birthyear

ages = customer_data_cleaned.Year_Birth.map(get_age)
customer_data_cleaned['Age'] = customer_data_cleaned.Year_Birth.map(get_age)
customer_data_cleaned.sort_values(by='Year_Birth').head()
customer_data_cleaned.drop([11004, 1150, 7829], inplace=True)
customer_data_cleaned['Spending'] = customer_data_cleaned.MntWines + customer_data_cleaned.MntFruits + customer_data_cleaned.MntMeatProducts + customer_data_cleaned.MntFishProducts + customer_data_cleaned.MntSweetProducts + customer_data_cleaned.MntGoldProds
customer_data_cleaned['Time_With_Company'] = pd.to_datetime(customer_data_cleaned.Dt_Customer, dayfirst=True, format='%d-%m-%Y')
customer_data_cleaned['Time_With_Company'] = pd.to_numeric(customer_data_cleaned.Time_With_Company.dt.date.apply(lambda z: date.today() - z).dt.days, downcast='integer') / 30
customer_data_cleaned.Education.unique()
code
73083713/cell_13
[ "text_html_output_1.png" ]
from datetime import date
import pandas as pd

customer_data = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', sep='\t', index_col='ID')
customer_data.shape
customer_data.isnull().sum()
customer_data_cleaned = customer_data.dropna()
from datetime import date

def get_age(birthyear):
    return date.today().year - birthyear

ages = customer_data_cleaned.Year_Birth.map(get_age)
customer_data_cleaned['Age'] = customer_data_cleaned.Year_Birth.map(get_age)
customer_data_cleaned.Age.describe()
code
73083713/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd

customer_data = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', sep='\t', index_col='ID')
customer_data.shape
customer_data.isnull().sum()
code
73083713/cell_23
[ "text_plain_output_1.png" ]
from datetime import date
import pandas as pd

customer_data = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', sep='\t', index_col='ID')
customer_data.shape
customer_data.isnull().sum()
customer_data_cleaned = customer_data.dropna()
from datetime import date

def get_age(birthyear):
    return date.today().year - birthyear

ages = customer_data_cleaned.Year_Birth.map(get_age)
customer_data_cleaned['Age'] = customer_data_cleaned.Year_Birth.map(get_age)
customer_data_cleaned.sort_values(by='Year_Birth').head()
customer_data_cleaned.drop([11004, 1150, 7829], inplace=True)
customer_data_cleaned['Spending'] = customer_data_cleaned.MntWines + customer_data_cleaned.MntFruits + customer_data_cleaned.MntMeatProducts + customer_data_cleaned.MntFishProducts + customer_data_cleaned.MntSweetProducts + customer_data_cleaned.MntGoldProds
customer_data_cleaned['Time_With_Company'] = pd.to_datetime(customer_data_cleaned.Dt_Customer, dayfirst=True, format='%d-%m-%Y')
customer_data_cleaned['Time_With_Company'] = pd.to_numeric(customer_data_cleaned.Time_With_Company.dt.date.apply(lambda z: date.today() - z).dt.days, downcast='integer') / 30
customer_data_cleaned.Education.unique()
customer_data_cleaned.Marital_Status.unique()
code
73083713/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

customer_data = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', sep='\t', index_col='ID')
customer_data.head()
code
73083713/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

customer_data = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', sep='\t', index_col='ID')
customer_data.shape
code
73083713/cell_32
[ "text_html_output_1.png" ]
from datetime import date import numpy as np import pandas as pd customer_data = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', sep='\t', index_col='ID') customer_data.shape customer_data.isnull().sum() customer_data_cleaned = customer_data.dropna() from datetime import date def get_age(birthyear): return date.today().year - birthyear ages = customer_data_cleaned.Year_Birth.map(get_age) customer_data_cleaned['Age'] = customer_data_cleaned.Year_Birth.map(get_age) customer_data_cleaned.sort_values(by='Year_Birth').head() customer_data_cleaned.drop([11004, 1150, 7829], inplace=True) customer_data_cleaned['Spending'] = customer_data_cleaned.MntWines + customer_data_cleaned.MntFruits + customer_data_cleaned.MntMeatProducts + customer_data_cleaned.MntFishProducts + customer_data_cleaned.MntSweetProducts + customer_data_cleaned.MntGoldProds customer_data_cleaned['Time_With_Company'] = pd.to_datetime(customer_data_cleaned.Dt_Customer, dayfirst=True, format='%d-%m-%Y') customer_data_cleaned['Time_With_Company'] = pd.to_numeric(customer_data_cleaned.Time_With_Company.dt.date.apply(lambda z: date.today() - z).dt.days, downcast='integer') / 30 customer_data_cleaned.Education.unique() customer_data_cleaned.Marital_Status.unique() customer_data_cleaned.Marital_Status = customer_data_cleaned.Marital_Status.replace({'Divorced': 'Single', 'Together': 'Partner', 'Married': 'Partner', 'Widow': 'Single', 'Alone': 'Single', 'Absurd': 'Single', 'YOLO': 'Single'}) customer_data_cleaned['Children'] = customer_data_cleaned.Kidhome + customer_data_cleaned.Teenhome customer_data_cleaned['Has_Child'] = np.where(customer_data_cleaned.Children > 0, 'Has Child', 'No Child') customer_data_cleaned = customer_data_cleaned.rename(columns={'MntWines': 'Wine', 'MntFruits': 'Fruit', 'MntMeatProducts': 'Meat', 'MntFishProducts': 'Fish', 'MntSweetProducts': 'Sweets', 'MntGoldProds': 'Gold'}) customer_data_cleaned = customer_data_cleaned.rename(columns={'NumWebPurchases': 'Web', 'NumCatalogPurchases': 'Catalog', 'NumStorePurchases': 'Store', 'NumWebVisitsMonth': 'WebVisits'}) customer_data_cleaned.Web.describe()
code
73083713/cell_15
[ "text_plain_output_1.png" ]
from datetime import date
import pandas as pd

customer_data = pd.read_csv('../input/customer-personality-analysis/marketing_campaign.csv', sep='\t', index_col='ID')
customer_data.shape
customer_data.isnull().sum()
customer_data_cleaned = customer_data.dropna()
from datetime import date

def get_age(birthyear):
    return date.today().year - birthyear

ages = customer_data_cleaned.Year_Birth.map(get_age)
customer_data_cleaned['Age'] = customer_data_cleaned.Year_Birth.map(get_age)
customer_data_cleaned.sort_values(by='Year_Birth').head()
code
33104665/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# The next three imports are not in the original cell; plt, sb and cm are used below.
import matplotlib.pyplot as plt
import seaborn as sb
from matplotlib import cm

Ydata = pd.read_csv('../input/youtube-new/USvideos.csv')
original_data = Ydata.copy()
Ydata.apply(lambda x: sum(x.isnull()))

plt.figure(figsize = (16,8))
#Let's verify the correlation of each value
ax = sb.heatmap(Ydata[['views', 'likes', 'dislikes', 'comment_count']].corr(), \
    annot=True, annot_kws={"size": 20}, cmap=cm.coolwarm, linewidths=0.5, linecolor='black')
plt.yticks(rotation=30, fontsize=20)
plt.xticks(rotation=30, fontsize=20)
plt.title("\nCorrelation between views, likes, dislikes & comments\n", fontsize=25)
plt.show()

colors = ['#FF6600', '#FFCCCC']
labels = ('likes', 'dislikes')
plt.suptitle('Information on data_split', fontsize=20)
# note: `data` is not defined anywhere in this cell in the source notebook
data['Ydata'].value_counts().plot.pie(autopct='%1.2f%%', shadow=True, colors=colors, labels=labels, fontsize=12, startangle=70)
code
33104665/cell_9
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

Ydata = pd.read_csv('../input/youtube-new/USvideos.csv')
original_data = Ydata.copy()
Ydata.apply(lambda x: sum(x.isnull()))
Ydata['trending_date'].head()
code
33104665/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# The next three imports are not in the original cell; plt, sb and cm are used below.
import matplotlib.pyplot as plt
import seaborn as sb
from matplotlib import cm

Ydata = pd.read_csv('../input/youtube-new/USvideos.csv')
original_data = Ydata.copy()
Ydata.apply(lambda x: sum(x.isnull()))
plt.figure(figsize=(16, 8))
ax = sb.heatmap(Ydata[['views', 'likes', 'dislikes', 'comment_count']].corr(), annot=True, annot_kws={'size': 20}, cmap=cm.coolwarm, linewidths=0.5, linecolor='black')
plt.yticks(rotation=30, fontsize=20)
plt.xticks(rotation=30, fontsize=20)
plt.title('\nCorrelation between views, likes, dislikes & comments\n', fontsize=25)
plt.show()
code
33104665/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33104665/cell_7
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

Ydata = pd.read_csv('../input/youtube-new/USvideos.csv')
original_data = Ydata.copy()
Ydata.info()
code
33104665/cell_8
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

Ydata = pd.read_csv('../input/youtube-new/USvideos.csv')
original_data = Ydata.copy()
Ydata.apply(lambda x: sum(x.isnull()))
code
33104665/cell_10
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

Ydata = pd.read_csv('../input/youtube-new/USvideos.csv')
original_data = Ydata.copy()
Ydata.apply(lambda x: sum(x.isnull()))
column_list = ['views', 'likes', 'dislikes', 'comment_count']
corr_matrix = Ydata[column_list].corr()
corr_matrix
code
33104665/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # not in the original cell; sns is used below

Ydata = pd.read_csv('../input/youtube-new/USvideos.csv')
original_data = Ydata.copy()
Ydata.apply(lambda x: sum(x.isnull()))
sns.countplot(x='likes', data=Ydata)
code
33104665/cell_5
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)

Ydata = pd.read_csv('../input/youtube-new/USvideos.csv')
original_data = Ydata.copy()
Ydata.head()
code
50244797/cell_13
[ "text_plain_output_1.png" ]
from nltk.corpus import stopwords
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re

variant = pd.read_csv('/kaggle/input/msk-redefining-cancer-treatment/training_variants.zip')
text_data = pd.read_csv('/kaggle/input/msk-redefining-cancer-treatment/training_text.zip', sep='\\|\\|', engine='python', names=['ID', 'TEXT'], skiprows=1)
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
import re

def cleaning(text, index, column):
    if type(text) is not int:
        string = ''
        text = re.sub('[^a-zA-Z0-9\n]', ' ', text)
        text = re.sub('\\s+', ' ', text)
        text = text.lower()
        for word in text.split():
            if word not in stop_words:
                string += word + ' '
        text_data[column][index] = string

for index, row in text_data.iterrows():
    if type(row['TEXT']) is str:
        cleaning(row['TEXT'], index, 'TEXT')

result = pd.merge(variant, text_data, on='ID', how='left')
result[result.isnull().any(axis=1)]
code