Dataset schema:
- path: string (lengths 13 to 17)
- screenshot_names: sequence of strings (lengths 1 to 873)
- code: string (lengths 0 to 40.4k)
- cell_type: string (1 class: 'code')
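Each record below pairs a notebook cell path with its output screenshot names and the cell's source code. A minimal sketch of reading such records, assuming the dump is stored as JSON Lines with the four fields above (the file name 'cells.jsonl' is hypothetical):

import json

# Assumes one JSON object per line with keys matching the schema above;
# the file name 'cells.jsonl' is an assumption for illustration.
with open('cells.jsonl') as f:
    for line in f:
        record = json.loads(line)
        assert record['cell_type'] == 'code'  # the only class in this dump
        print(record['path'], len(record['screenshot_names']), len(record['code']))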
89130056/cell_9
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import itertools
import re
import zipfile

import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
import torch.nn as nn
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from mpl_toolkits.axes_grid1 import make_axes_locatable
from PIL import Image
from skimage.feature import hog
from sklearn import preprocessing, svm
from sklearn.cluster import KMeans
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, SubsetRandomSampler, DataLoader
from torchvision import transforms, models

path = '../input/painter-by-numbers/'
df = pd.read_csv(path + 'all_data_info.csv')

# Collect the IDs of corrupted files and drop the matching rows.
file_path = '../input/painter-by-numbers/'
archive = zipfile.ZipFile(file_path + 'replacements_for_corrupted_files.zip', 'r')
corrupted_ids = set()
for item in archive.namelist():
    ID = re.sub('[^0-9]', '', item)
    if ID != '':
        corrupted_ids.add(ID)
drop_idx = []
for index, row in df.iterrows():
    id_check = re.sub('[^0-9]', '', row['new_filename'])
    if id_check in corrupted_ids:
        drop_idx.append(index)
df = df.drop(drop_idx)

# Map each short name to the full artist name found in the dataset
# and count that artist's paintings.
painter_dict = {'Kandinsky': '', 'Dali': '', 'Picasso': '', 'Delacroix': '',
                'Rembrandt': '', 'Gogh': '', 'Kuniyoshi': '', 'Dore': '',
                'Steinlen': '', 'Saryan': '', 'Goya': '', 'Lautrec': '',
                'Modigliani': '', 'Beksinski': '', 'Pissarro': '', 'Kirchner': '',
                'Renoir': '', 'Piranesi': '', 'Degas': '', 'Chagall': ''}
paintings_dict = painter_dict.copy()
for artist in painter_dict:
    for painter in df['artist']:
        if artist in painter:
            painter_dict[artist] = painter
            paintings = df[df['artist'] == painter].shape[0]
            paintings_dict[artist] = paintings
            break
for artist in painter_dict:
    print(f'The artist named {painter_dict[artist]} has a total of {paintings_dict[artist]} paintings in this dataset.')
sample_size = min(paintings_dict.values())
min_a = list(paintings_dict.keys())[list(paintings_dict.values()).index(sample_size)]
print(f'\nThe artist with the smallest number of paintings is {min_a} with {sample_size} paintings.')
code
89130056/cell_25
[ "text_plain_output_1.png" ]
nn_data = ImageDataset(file_path, active_df, LabEnc, img_size=224, normalize=True, crop=False)
code
89130056/cell_34
[ "image_output_1.png" ]
hog_data = ImageData(file_path, active_df, LabEnc, hog_mode=[9, (8, 8), (2, 2)], sift_mode=False, img_size=224)
code
89130056/cell_29
[ "text_plain_output_4.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_2.png", "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import itertools
import re
import zipfile

import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
import torch.nn as nn
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from mpl_toolkits.axes_grid1 import make_axes_locatable
from PIL import Image
from skimage.feature import hog
from sklearn import preprocessing, svm
from sklearn.cluster import KMeans
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, SubsetRandomSampler, DataLoader
from torchvision import transforms, models

path = '../input/painter-by-numbers/'
df = pd.read_csv(path + 'all_data_info.csv')
file_path = '../input/painter-by-numbers/'
archive = zipfile.ZipFile(file_path + 'replacements_for_corrupted_files.zip', 'r')
corrupted_ids = set()
for item in archive.namelist():
    ID = re.sub('[^0-9]', '', item)
    if ID != '':
        corrupted_ids.add(ID)
drop_idx = []
for index, row in df.iterrows():
    id_check = re.sub('[^0-9]', '', row['new_filename'])
    if id_check in corrupted_ids:
        drop_idx.append(index)
df = df.drop(drop_idx)

painter_dict = {'Kandinsky': '', 'Dali': '', 'Picasso': '', 'Delacroix': '',
                'Rembrandt': '', 'Gogh': '', 'Kuniyoshi': '', 'Dore': '',
                'Steinlen': '', 'Saryan': '', 'Goya': '', 'Lautrec': '',
                'Modigliani': '', 'Beksinski': '', 'Pissarro': '', 'Kirchner': '',
                'Renoir': '', 'Piranesi': '', 'Degas': '', 'Chagall': ''}
paintings_dict = painter_dict.copy()
for artist in painter_dict:
    for painter in df['artist']:
        if artist in painter:
            painter_dict[artist] = painter
            paintings = df[df['artist'] == painter].shape[0]
            paintings_dict[artist] = paintings
            break
sample_size = min(paintings_dict.values())
min_a = list(paintings_dict.keys())[list(paintings_dict.values()).index(sample_size)]

# Build a class-balanced frame: sample_size paintings per artist,
# preferring training images and smaller files.
active_df = pd.DataFrame({})
for artist in painter_dict.values():
    tr_df = df[df['artist'] == artist].sort_values(by=['in_train', 'size_bytes'], ascending=[False, True])
    active_df = pd.concat([active_df, tr_df.iloc[:sample_size]])
artists = list(painter_dict.values())
LabEnc = preprocessing.LabelEncoder()
LabEnc.fit(artists)

matplotlib.rc_file_defaults()

def image_transformer_nn(image, apply_norm=True, crop_img=True, new_dim=224):
    """
    Args:
        new_dim (int): Dimension (pixels) to resize the image to
        apply_norm (bool): Choose whether to apply the normalization or not
        crop_img (bool): Choose whether to resize the image to new_dim,
                         or crop a square from its center, sized new_dim x new_dim
    """
    if crop_img:
        cropper = transforms.CenterCrop(new_dim)
        image = cropper(image)
    tensoring = transforms.ToTensor()
    image = tensoring(image)
    channels, height, width = image.shape
    if image.shape[0] < 3:
        image = image.expand(3, -1, -1)
    if image.shape[0] > 3:
        image = image[0:3, :, :]
    if apply_norm:
        normalizer = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        image = normalizer(image)
    if not crop_img:
        if width < height:
            image = image.transpose(1, 2)
            channels, height, width = image.shape
        res_percent = float(new_dim / width)
        height = round(height * res_percent)
        resizer = transforms.Resize((height, new_dim))
        image = resizer(image)
        padder = transforms.Pad([0, 0, 0, int(new_dim - height)])
        image = padder(image)
    return image

archive = zipfile.ZipFile(file_path + 'train.zip', 'r')
img_path = 'train/'
imgdata = archive.open(img_path + '69382.jpg')
image = Image.open(imgdata)
image2 = image_transformer_nn(image, apply_norm=False, crop_img=True, new_dim=224)
image3a = image_transformer_nn(image, apply_norm=False, crop_img=False, new_dim=224)
image3b = image_transformer_nn(image, apply_norm=True, crop_img=False, new_dim=224)

class ImageDataset(Dataset):

    def __init__(self, path, dataframe, lab_encoder, img_size=224, normalize=True, crop=False):
        """
        Args:
            path (string): Where to look for the files to extract
            dataframe (pd.DataFrame): dataframe to use for the IDs
            lab_encoder: label encoder to transform artist names into integers
            img_size (int): size to be used
            normalize (bool): perform normalization during transformation or not
            crop (bool): True: crop only a center from the image
                         False: Resize image with respect to aspect ratio and pad
        """
        self.encoder = lab_encoder
        self.img_size = img_size
        self.normalize = normalize
        self.crop = crop
        self.feats, self.labels = self.get_all_items(path, dataframe)

    def get_all_items(self, path, dataframe):
        curr_df = dataframe[dataframe['in_train'] == True]
        archive = zipfile.ZipFile(path + 'train.zip', 'r')
        img_path = 'train/'
        feats = []
        labels = []
        for index, row in curr_df.iterrows():
            file = row['new_filename']
            imgdata = archive.open(img_path + file)
            try:
                image = Image.open(imgdata)
                datum = image_transformer_nn(image, apply_norm=self.normalize, crop_img=self.crop, new_dim=self.img_size)
                feats.append(datum)
                artist = row['artist']
                label = self.encoder.transform([artist])[0]
                labels.append(label)
            except Image.DecompressionBombError:
                continue  # skip oversized images
        curr_df = dataframe[dataframe['in_train'] == False]
        archive = zipfile.ZipFile(path + 'test.zip', 'r')
        img_path = 'test/'
        for index, row in curr_df.iterrows():
            file = row['new_filename']
            imgdata = archive.open(img_path + file)
            try:
                image = Image.open(imgdata)
                datum = image_transformer_nn(image, apply_norm=self.normalize, crop_img=self.crop, new_dim=self.img_size)
                feats.append(datum)
                artist = row['artist']
                label = self.encoder.transform([artist])[0]
                labels.append(label)
            except Image.DecompressionBombError:
                continue  # skip oversized images
        feats = torch.stack(feats)
        labels = torch.LongTensor(labels)
        return (feats, labels)

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, item):
        return (self.feats[item], self.labels[item])

def ImageData(path, dataframe, lab_encoder, hog_mode, sift_mode, img_size=224):
    curr_df = dataframe[dataframe['in_train'] == True]
    archive = zipfile.ZipFile(path + 'train.zip', 'r')
    img_path = 'train/'
    PaintFeats = []
    PaintLabels = []
    for index, row in curr_df.iterrows():
        file = row['new_filename']
        imgdata = archive.open(img_path + file)
        try:
            image = Image.open(imgdata)
            datum = image_transformer_nn(image, apply_norm=False, crop_img=False, new_dim=img_size)
            np_datum = datum.numpy().transpose(1, 2, 0)
            if hog_mode:
                orients, ppc, cpb = (hog_mode[0], hog_mode[1], hog_mode[2])
                datum = hog(np_datum, orientations=orients, pixels_per_cell=ppc, cells_per_block=cpb, feature_vector=True, channel_axis=2)
                PaintFeats.append(datum)
            elif sift_mode:
                np_datum = cv2.normalize(np_datum, None, 0, 255, cv2.NORM_MINMAX).astype('uint8')
                imgtogray = cv2.cvtColor(np_datum, cv2.COLOR_BGR2GRAY)
                PaintFeats.append(imgtogray)
            artist = row['artist']
            label = lab_encoder.transform([artist])[0]
            PaintLabels.append(label)
        except Image.DecompressionBombError:
            continue  # skip oversized images
    curr_df = dataframe[dataframe['in_train'] == False]
    archive = zipfile.ZipFile(path + 'test.zip', 'r')
    img_path = 'test/'
    for index, row in curr_df.iterrows():
        file = row['new_filename']
        imgdata = archive.open(img_path + file)
        try:
            image = Image.open(imgdata)
            datum = image_transformer_nn(image, apply_norm=False, crop_img=False, new_dim=img_size)
            np_datum = datum.numpy().transpose(1, 2, 0)
            if hog_mode:
                orients, ppc, cpb = (hog_mode[0], hog_mode[1], hog_mode[2])
                datum = hog(np_datum, orientations=orients, pixels_per_cell=ppc, cells_per_block=cpb, feature_vector=True, channel_axis=2)
                PaintFeats.append(datum)
            elif sift_mode:
                np_datum = cv2.normalize(np_datum, None, 0, 255, cv2.NORM_MINMAX).astype('uint8')
                imgtogray = cv2.cvtColor(np_datum, cv2.COLOR_BGR2GRAY)
                PaintFeats.append(imgtogray)
            artist = row['artist']
            label = lab_encoder.transform([artist])[0]
            PaintLabels.append(label)
        except Image.DecompressionBombError:
            continue  # skip oversized images
    return (np.asarray(PaintFeats), np.asarray(PaintLabels))

def DataSplitter(data, ratios=[60, 20, 20], need_val=True, batches=None, shuffle=True, seed=None):
    """
    Args:
        data (Dataset or List): In the case of NN, it's the dataset to be loaded into loaders.
            In the case of other models, it's a list containing the Features and Labels lists
        batches (int): batch size for loaders in case of NN
        ratios (list): list of integers, containing the ratios [train,val,test] for splitting
            example: [60,25,15] means 60% train data, 25% val data and 15% test data
        shuffle (bool): option to shuffle data
        seed (None or int): seed for shuffling
    """
    first_ratio = (ratios[1] + ratios[2]) / sum(ratios)
    second_ratio = ratios[2] / (ratios[1] + ratios[2])
    if isinstance(data, ImageDataset):
        labels = data.labels.numpy()
        train_indices, rest_indices = train_test_split(np.arange(len(labels)), test_size=first_ratio, shuffle=shuffle, random_state=seed, stratify=labels)
        rest_labels = data[rest_indices][1]
        val_indices, test_indices = train_test_split(rest_indices, test_size=second_ratio, shuffle=shuffle, random_state=seed, stratify=rest_labels)
        train_sampler = SubsetRandomSampler(train_indices)
        val_sampler = SubsetRandomSampler(val_indices)
        test_sampler = SubsetRandomSampler(test_indices)
        train_loader = DataLoader(data, batch_size=batches, sampler=train_sampler)
        val_loader = DataLoader(data, batch_size=batches, sampler=val_sampler)
        test_loader = DataLoader(data, batch_size=batches, sampler=test_sampler)
        return (train_loader, val_loader, test_loader)
    elif isinstance(data, tuple):
        X_train, X_rest, y_train, y_rest = train_test_split(data[0], data[1], test_size=first_ratio, shuffle=shuffle, random_state=seed, stratify=data[1])
        if need_val:
            X_val, X_test, y_val, y_test = train_test_split(X_rest, y_rest, test_size=second_ratio, shuffle=shuffle, random_state=seed, stratify=y_rest)
            return (X_train, X_val, X_test, y_train, y_val, y_test)
        return (X_train, X_rest, y_train, y_rest)
    else:
        return

sns.set(style="darkgrid")  # Personal preference

def CustomCmap(from_rgb, to_rgb):
    # from color r,g,b
    r1, g1, b1 = from_rgb
    # to color r,g,b
    r2, g2, b2 = to_rgb
    cdict = {'red': ((0, r1, r1), (1, r2, r2)),
             'green': ((0, g1, g1), (1, g2, g2)),
             'blue': ((0, b1, b1), (1, b2, b2))}
    cmap = LinearSegmentedColormap('custom_cmap', cdict)
    return cmap

mycmap = CustomCmap([1.0, 1.0, 1.0], [72/255, 99/255, 147/255])
mycmap_r = CustomCmap([72/255, 99/255, 147/255], [1.0, 1.0, 1.0])
mycol = (72/255, 99/255, 147/255)
mycomplcol = (129/255, 143/255, 163/255)

def plot_cm(cfmatrix, title, classes):
    fig, ax1 = plt.subplots(1, 1)  # , figsize=(5,5)
    for ax, cm in zip([ax1], [cfmatrix]):
        im = ax.imshow(cm, interpolation='nearest', cmap=mycmap)
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=.2)
        plt.colorbar(im, cax=cax)  # , ticks=[-1,-0.5,0,0.5,1]
        ax.set_title(title, fontsize=14)
        tick_marks = np.arange(len(classes))
        ax.set_xticks(tick_marks)
        ax.set_xticklabels(classes, rotation=90)
        ax.set_yticks(tick_marks)
        ax.set_yticklabels(classes)
        fmt = 'd'
        thresh = cm.max() / 2.
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            ax.text(j, i, format(cm[i, j], fmt),
                    horizontalalignment="center",
                    color="white" if cm[i, j] > thresh else "black")
        ax.set_ylabel('True label', fontsize=14)
        ax.set_xlabel('Predicted label', fontsize=14)
    plt.savefig(title + '.pdf', bbox_inches='tight')
    plt.show()

matplotlib.rc_file_defaults()
cfmatrix = confusion_matrix(y_true, y_pred)
plot_cm(cfmatrix, 'CNN Confusion Matrix', artists)
code
89130056/cell_28
[ "text_plain_output_1.png" ]
input_height = nn_data[0][0].shape[1]
input_width = nn_data[0][0].shape[2]
conv_channels = [nn_data[0][0].shape[0], 4, 16, 64, 128]
kernels = [3, 3, 3, 3]
maxpools = [2, 2, 2, 2]
lin_channels = [256, 128, 20]
dropout = 0.25
learning_rate = 1e-05
weight_decay = 1e-06
patience = 10
verbose_ct = 1
epochs = 2500
model = CNNBackbone(input_height=input_height, input_width=input_width, conv_channels=conv_channels,
                    kernels=kernels, maxpools=maxpools, lin_channels=lin_channels,
                    dropout=dropout, batchnorm=True)
model.to(device)
loss_function = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
t_losses, v_losses = train(model, train_loader, val_loader, optimizer, epochs, device=device, patience=patience, verbose_ct=verbose_ct)
plot_losses(t_losses, v_losses, 'CNN_Training_Loss.pdf')
predictions, labels = evaluate(model, test_loader, device=device)
y_true = np.concatenate(labels, axis=0)
y_pred = np.concatenate(predictions, axis=0)
print(classification_report(y_true, y_pred))
code
89130056/cell_15
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import itertools
import re
import zipfile

import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
import torch.nn as nn
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from mpl_toolkits.axes_grid1 import make_axes_locatable
from PIL import Image
from skimage.feature import hog
from sklearn import preprocessing, svm
from sklearn.cluster import KMeans
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, SubsetRandomSampler, DataLoader
from torchvision import transforms, models

path = '../input/painter-by-numbers/'
df = pd.read_csv(path + 'all_data_info.csv')
file_path = '../input/painter-by-numbers/'
archive = zipfile.ZipFile(file_path + 'replacements_for_corrupted_files.zip', 'r')
corrupted_ids = set()
for item in archive.namelist():
    ID = re.sub('[^0-9]', '', item)
    if ID != '':
        corrupted_ids.add(ID)
drop_idx = []
for index, row in df.iterrows():
    id_check = re.sub('[^0-9]', '', row['new_filename'])
    if id_check in corrupted_ids:
        drop_idx.append(index)
df = df.drop(drop_idx)

matplotlib.rc_file_defaults()

def image_transformer_nn(image, apply_norm=True, crop_img=True, new_dim=224):
    """
    Args:
        new_dim (int): Dimension (pixels) to resize the image to
        apply_norm (bool): Choose whether to apply the normalization or not
        crop_img (bool): Choose whether to resize the image to new_dim,
                         or crop a square from its center, sized new_dim x new_dim
    """
    if crop_img:
        cropper = transforms.CenterCrop(new_dim)
        image = cropper(image)
    tensoring = transforms.ToTensor()
    image = tensoring(image)
    channels, height, width = image.shape
    if image.shape[0] < 3:
        image = image.expand(3, -1, -1)
    if image.shape[0] > 3:
        image = image[0:3, :, :]
    if apply_norm:
        normalizer = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        image = normalizer(image)
    if not crop_img:
        if width < height:
            image = image.transpose(1, 2)
            channels, height, width = image.shape
        res_percent = float(new_dim / width)
        height = round(height * res_percent)
        resizer = transforms.Resize((height, new_dim))
        image = resizer(image)
        padder = transforms.Pad([0, 0, 0, int(new_dim - height)])
        image = padder(image)
    return image

archive = zipfile.ZipFile(file_path + 'train.zip', 'r')
img_path = 'train/'
imgdata = archive.open(img_path + '69382.jpg')
image = Image.open(imgdata)
print('This is the original image:\n')
plt.imshow(image)
plt.show()
print('This is the cropped part of the transformed image:\n')
image2 = image_transformer_nn(image, apply_norm=False, crop_img=True, new_dim=224)
plt.imshow(image2.numpy().transpose(1, 2, 0))
plt.show()
print('This is the transformed, resized image:\n')
image3a = image_transformer_nn(image, apply_norm=False, crop_img=False, new_dim=224)
plt.imshow(image3a.numpy().transpose(1, 2, 0))
plt.show()
print('Transformed and resized, but with normalization as well:\n')
image3b = image_transformer_nn(image, apply_norm=True, crop_img=False, new_dim=224)
plt.imshow(image3b.numpy().transpose(1, 2, 0))
plt.show()
code
89130056/cell_3
[ "text_plain_output_1.png" ]
import itertools
import re
import zipfile

import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
import torch.nn as nn
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from mpl_toolkits.axes_grid1 import make_axes_locatable
from PIL import Image
from skimage.feature import hog
from sklearn import preprocessing, svm
from sklearn.cluster import KMeans
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, SubsetRandomSampler, DataLoader
from torchvision import transforms, models

path = '../input/painter-by-numbers/'
df = pd.read_csv(path + 'all_data_info.csv')
df.head()
code
89130056/cell_31
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
learning_rate = 5e-05
weight_decay = 1e-06
patience = 10
verbose_ct = 1
epochs = 2500

model_conv = models.resnet18(pretrained=True)
num_ftrs = model_conv.fc.in_features
model_conv.fc = nn.Linear(num_ftrs, 256)
# Note: fc2 and sfact are registered as submodules, but ResNet's forward()
# never calls them; only the replaced fc layer above is actually applied.
model_conv.fc2 = nn.Linear(256, 20)
model_conv.sfact = nn.Softmax(1)
model_conv = model_conv.to(device)
loss_function = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model_conv.parameters(), lr=learning_rate, weight_decay=weight_decay)
t_losses, v_losses = train(model_conv, train_loader, val_loader, optimizer, epochs, device=device, patience=patience, verbose_ct=verbose_ct)
sns.set(style='darkgrid')
plot_losses(t_losses, v_losses, 'CNN_Training_Loss_transfer.pdf')
torch.save(model_conv.state_dict(), 'ResNet-Trained.pt')
predictions, labels = evaluate(model_conv, test_loader, device=device)
y_true = np.concatenate(labels, axis=0)
y_pred = np.concatenate(predictions, axis=0)
print(classification_report(y_true, y_pred))
matplotlib.rc_file_defaults()
cfmatrix = confusion_matrix(y_true, y_pred)
plot_cm(cfmatrix, 'CNN Confusion Matrix - Transfer', artists)
code
89130056/cell_5
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import itertools
import re
import zipfile

import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
import torch.nn as nn
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from mpl_toolkits.axes_grid1 import make_axes_locatable
from PIL import Image
from skimage.feature import hog
from sklearn import preprocessing, svm
from sklearn.cluster import KMeans
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, SubsetRandomSampler, DataLoader
from torchvision import transforms, models

path = '../input/painter-by-numbers/'
df = pd.read_csv(path + 'all_data_info.csv')
print(f"The full dataset contains a total of {len(df['artist'].unique())} different artists and {len(df['genre'].unique())} unique painting genres.\n")
ash = 5
print(f'The {ash} artists with the most paintings available in the dataset are:')
df['artist'].value_counts().head(ash)
code
89130056/cell_36
[ "text_plain_output_4.png", "text_plain_output_3.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_2.png", "image_output_1.png" ]
hog_classifier = svm.SVC(kernel='rbf', gamma=1.5, C=0.3)
hog_classifier.fit(X_train, y_train)
code
50229480/cell_42
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
a = df[df['Year'] == '2016']
a.shape
b = df[df['Year'] == '2007']
b.shape
c = df[df['Year'] == '2008']
c.shape
plt.figure(figsize=[20, 10])
sns.heatmap(c.corr(), cmap='coolwarm', annot=True)
code
50229480/cell_21
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
a = df[df['Year'] == '2016']
a.shape
a.describe()
code
50229480/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df.describe()
code
50229480/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
code
50229480/cell_30
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
b = df[df['Year'] == '2007']
b.shape
code
50229480/cell_33
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
b = df[df['Year'] == '2007']
b.shape
temp_range_2007 = b['Temperature (C)'].max() - b['Temperature (C)'].min()
temp_range_2007
code
50229480/cell_20
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
a = df[df['Year'] == '2016']
a.shape
a['Wind Speed (km/h)'].mean()
code
50229480/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.info()
code
50229480/cell_40
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
c = df[df['Year'] == '2008']
c.shape
c.info()
code
50229480/cell_39
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
c = df[df['Year'] == '2008']
c.shape
code
50229480/cell_48
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
a = df[df['Year'] == '2016']
a.shape
b = df[df['Year'] == '2007']
b.shape
c = df[df['Year'] == '2008']
c.shape
d = df[df['Year'] == '2009']
d.shape
plt.figure(figsize=[20, 10])
sns.heatmap(d.corr(), cmap='coolwarm', annot=True)
code
50229480/cell_41
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
c = df[df['Year'] == '2008']
c.shape
c.describe()
code
50229480/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df.head()
code
50229480/cell_19
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
a = df[df['Year'] == '2016']
a.shape
a['Temperature (C)'].mean()
code
50229480/cell_52
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
e = df[df['Year'] == '2010']
e.shape
e.info()
code
50229480/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.describe()
code
50229480/cell_45
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
d = df[df['Year'] == '2009']
d.shape
code
50229480/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
a = df[df['Year'] == '2016']
a.shape
a.info()
code
50229480/cell_32
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
b = df[df['Year'] == '2007']
b.shape
b.describe()
code
50229480/cell_51
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
e = df[df['Year'] == '2010']
e.shape
code
50229480/cell_8
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
code
50229480/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
df.head()
code
50229480/cell_47
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
d = df[df['Year'] == '2009']
d.shape
d.describe()
code
50229480/cell_17
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
a = df[df['Year'] == '2016']
a.shape
code
50229480/cell_31
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
b = df[df['Year'] == '2007']
b.shape
b.info()
code
50229480/cell_46
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
d = df[df['Year'] == '2009']
d.shape
d.info()
code
50229480/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
a = df[df['Year'] == '2016']
a.shape
humidity_range_2016 = a['Humidity'].max() - a['Humidity'].min()  # 'a' holds the 2016 subset
humidity_range_2016
code
50229480/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df.head()
code
50229480/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
a = df[df['Year'] == '2016']
a.shape
temp_range_2016 = a['Temperature (C)'].max() - a['Temperature (C)'].min()  # 'a' holds the 2016 subset
temp_range_2016
code
50229480/cell_53
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
e = df[df['Year'] == '2010']
e.shape
e.describe()
code
50229480/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Precip Type'].value_counts()
code
50229480/cell_27
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
a = df[df['Year'] == '2016']
a.shape
plt.figure(figsize=[20, 10])
sns.heatmap(a.corr(), cmap='coolwarm', annot=True)
code
50229480/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df['Formatted Date'].value_counts()
code
50229480/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
50229480/cell_36
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/szeged-weather/weatherHistory.csv')
df.isnull().sum()
df = df.dropna()
df.isnull().sum()
df['Formatted Date'] = df['Formatted Date'].str.split(' ').str[0].str.split('-').str[0]
df = df.rename(columns={'Formatted Date': 'Year'})
df = df.drop(['Loud Cover'], axis=1)
a = df[df['Year'] == '2016']
a.shape
b = df[df['Year'] == '2007']
b.shape
plt.figure(figsize=[20, 10])
sns.heatmap(b.corr(), cmap='coolwarm', annot=True)
code
129022822/cell_13
[ "text_html_output_1.png" ]
import math

import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.metrics import mean_squared_error, mean_absolute_percentage_error

df = pd.read_csv('/kaggle/input/johnson/Johnson.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
train_df = df[df['time'] < 1980]
test_df = df[df['time'] >= 1980]

def Arith_mean(ser):
    mean = ser.mean()
    test = test_df.copy()
    test['value'] = mean
    return test

arith_test = Arith_mean(train_df['value'])
arith_test

def last_record(ser):
    last = ser.tail(1)
    test = test_df.copy()
    test['value'] = float(last)
    return test

last_record_test = last_record(train_df['value'])
last_record_test

def last_4_quarter(ser):
    test = test_df.copy()
    test['value'] = ser.tail(4)['value'].unique()
    return test

last_4_quarter_test = last_4_quarter(train_df)
last_4_quarter_test

arith_test_mape = mean_absolute_percentage_error(test_df['value'], arith_test['value'])
arith_test_mse = mean_squared_error(test_df['value'], arith_test['value'])
arith_test_rmse = math.sqrt(arith_test_mse)
last_record_test_mape = mean_absolute_percentage_error(test_df['value'], last_record_test['value'])
last_record_test_mse = mean_squared_error(test_df['value'], last_record_test['value'])
last_record_test_rmse = math.sqrt(last_record_test_mse)
last_4_quarter_test_mape = mean_absolute_percentage_error(test_df['value'], last_4_quarter_test['value'])
last_4_quarter_test_mse = mean_squared_error(test_df['value'], last_4_quarter_test['value'])
last_4_quarter_test_rmse = math.sqrt(last_4_quarter_test_mse)
print(last_record_test_mape, last_record_test_mse, last_record_test_rmse)
code
129022822/cell_9
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/johnson/Johnson.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
train_df = df[df['time'] < 1980]
test_df = df[df['time'] >= 1980]

def Arith_mean(ser):
    mean = ser.mean()
    test = test_df.copy()
    test['value'] = mean
    return test

arith_test = Arith_mean(train_df['value'])
arith_test

def last_record(ser):
    last = ser.tail(1)
    test = test_df.copy()
    test['value'] = float(last)
    return test

last_record_test = last_record(train_df['value'])
last_record_test

def last_4_quarter(ser):
    test = test_df.copy()
    test['value'] = ser.tail(4)['value'].unique()
    return test

last_4_quarter_test = last_4_quarter(train_df)
last_4_quarter_test
code
129022822/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/johnson/Johnson.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
train_df = df[df['time'] < 1980]
test_df = df[df['time'] >= 1980]
code
129022822/cell_1
[ "text_plain_output_1.png" ]
import math
import os

import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error, mean_absolute_percentage_error

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
129022822/cell_7
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/johnson/Johnson.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
train_df = df[df['time'] < 1980]
test_df = df[df['time'] >= 1980]

def Arith_mean(ser):
    mean = ser.mean()
    test = test_df.copy()
    test['value'] = mean
    return test

arith_test = Arith_mean(train_df['value'])
arith_test

def last_record(ser):
    last = ser.tail(1)
    test = test_df.copy()
    test['value'] = float(last)
    return test

last_record_test = last_record(train_df['value'])
last_record_test
code
129022822/cell_18
[ "text_plain_output_1.png" ]
import math

import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.metrics import mean_squared_error, mean_absolute_percentage_error

df = pd.read_csv('/kaggle/input/johnson/Johnson.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
train_df = df[df['time'] < 1980]
test_df = df[df['time'] >= 1980]

def Arith_mean(ser):
    mean = ser.mean()
    test = test_df.copy()
    test['value'] = mean
    return test

arith_test = Arith_mean(train_df['value'])
arith_test

def last_record(ser):
    last = ser.tail(1)
    test = test_df.copy()
    test['value'] = float(last)
    return test

last_record_test = last_record(train_df['value'])
last_record_test

def last_4_quarter(ser):
    test = test_df.copy()
    test['value'] = ser.tail(4)['value'].unique()
    return test

last_4_quarter_test = last_4_quarter(train_df)
last_4_quarter_test

arith_test_mape = mean_absolute_percentage_error(test_df['value'], arith_test['value'])
arith_test_mse = mean_squared_error(test_df['value'], arith_test['value'])
arith_test_rmse = math.sqrt(arith_test_mse)
last_record_test_mape = mean_absolute_percentage_error(test_df['value'], last_record_test['value'])
last_record_test_mse = mean_squared_error(test_df['value'], last_record_test['value'])
last_record_test_rmse = math.sqrt(last_record_test_mse)
last_4_quarter_test_mape = mean_absolute_percentage_error(test_df['value'], last_4_quarter_test['value'])
last_4_quarter_test_mse = mean_squared_error(test_df['value'], last_4_quarter_test['value'])
last_4_quarter_test_rmse = math.sqrt(last_4_quarter_test_mse)

def create_bar_graph(values, labels=['arith_mean', 'latest_record', 'last_4_record']):
    """
    Create a bar graph for three values.

    Arguments:
    values -- a list of three numerical values
    labels -- a list of labels for each value
    """
    # Create a figure and axis
    fig, ax = plt.subplots()
    # Create a bar plot
    ax.bar(labels, values)
    # Add labels and title
    ax.set_xlabel('Categories')
    ax.set_ylabel('Values')
    ax.set_title('Bar Graph')
    # Display the plot
    plt.show()

def create_list(var1, var2, var3):
    """
    Create a list from three variables.

    Arguments:
    var1 -- the first variable
    var2 -- the second variable
    var3 -- the third variable

    Returns:
    A list containing var1, var2, and var3.
    """
    return [var1, var2, var3]

# RMSE of the three baselines, in the order of the default labels
create_bar_graph(create_list(arith_test_rmse, last_record_test_rmse, last_4_quarter_test_rmse))
code
129022822/cell_16
[ "text_html_output_1.png" ]
import math

import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.metrics import mean_squared_error, mean_absolute_percentage_error

df = pd.read_csv('/kaggle/input/johnson/Johnson.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
train_df = df[df['time'] < 1980]
test_df = df[df['time'] >= 1980]

def Arith_mean(ser):
    mean = ser.mean()
    test = test_df.copy()
    test['value'] = mean
    return test

arith_test = Arith_mean(train_df['value'])
arith_test

def last_record(ser):
    last = ser.tail(1)
    test = test_df.copy()
    test['value'] = float(last)
    return test

last_record_test = last_record(train_df['value'])
last_record_test

def last_4_quarter(ser):
    test = test_df.copy()
    test['value'] = ser.tail(4)['value'].unique()
    return test

last_4_quarter_test = last_4_quarter(train_df)
last_4_quarter_test

arith_test_mape = mean_absolute_percentage_error(test_df['value'], arith_test['value'])
arith_test_mse = mean_squared_error(test_df['value'], arith_test['value'])
arith_test_rmse = math.sqrt(arith_test_mse)
last_record_test_mape = mean_absolute_percentage_error(test_df['value'], last_record_test['value'])
last_record_test_mse = mean_squared_error(test_df['value'], last_record_test['value'])
last_record_test_rmse = math.sqrt(last_record_test_mse)
last_4_quarter_test_mape = mean_absolute_percentage_error(test_df['value'], last_4_quarter_test['value'])
last_4_quarter_test_mse = mean_squared_error(test_df['value'], last_4_quarter_test['value'])
last_4_quarter_test_rmse = math.sqrt(last_4_quarter_test_mse)

def create_bar_graph(values, labels=['arith_mean', 'latest_record', 'last_4_record']):
    """
    Create a bar graph for three values.

    Arguments:
    values -- a list of three numerical values
    labels -- a list of labels for each value
    """
    # Create a figure and axis
    fig, ax = plt.subplots()
    # Create a bar plot
    ax.bar(labels, values)
    # Add labels and title
    ax.set_xlabel('Categories')
    ax.set_ylabel('Values')
    ax.set_title('Bar Graph')
    # Display the plot
    plt.show()

def create_list(var1, var2, var3):
    """
    Create a list from three variables.

    Arguments:
    var1 -- the first variable
    var2 -- the second variable
    var3 -- the third variable

    Returns:
    A list containing var1, var2, and var3.
    """
    return [var1, var2, var3]

create_bar_graph(create_list(arith_test_mape, last_record_test_mape, last_4_quarter_test_mape))
code
129022822/cell_17
[ "text_plain_output_1.png" ]
import math

import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.metrics import mean_squared_error, mean_absolute_percentage_error

df = pd.read_csv('/kaggle/input/johnson/Johnson.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
train_df = df[df['time'] < 1980]
test_df = df[df['time'] >= 1980]

def Arith_mean(ser):
    mean = ser.mean()
    test = test_df.copy()
    test['value'] = mean
    return test

arith_test = Arith_mean(train_df['value'])
arith_test

def last_record(ser):
    last = ser.tail(1)
    test = test_df.copy()
    test['value'] = float(last)
    return test

last_record_test = last_record(train_df['value'])
last_record_test

def last_4_quarter(ser):
    test = test_df.copy()
    test['value'] = ser.tail(4)['value'].unique()
    return test

last_4_quarter_test = last_4_quarter(train_df)
last_4_quarter_test

arith_test_mape = mean_absolute_percentage_error(test_df['value'], arith_test['value'])
arith_test_mse = mean_squared_error(test_df['value'], arith_test['value'])
arith_test_rmse = math.sqrt(arith_test_mse)
last_record_test_mape = mean_absolute_percentage_error(test_df['value'], last_record_test['value'])
last_record_test_mse = mean_squared_error(test_df['value'], last_record_test['value'])
last_record_test_rmse = math.sqrt(last_record_test_mse)
last_4_quarter_test_mape = mean_absolute_percentage_error(test_df['value'], last_4_quarter_test['value'])
last_4_quarter_test_mse = mean_squared_error(test_df['value'], last_4_quarter_test['value'])
last_4_quarter_test_rmse = math.sqrt(last_4_quarter_test_mse)

def create_bar_graph(values, labels=['arith_mean', 'latest_record', 'last_4_record']):
    """
    Create a bar graph for three values.

    Arguments:
    values -- a list of three numerical values
    labels -- a list of labels for each value
    """
    # Create a figure and axis
    fig, ax = plt.subplots()
    # Create a bar plot
    ax.bar(labels, values)
    # Add labels and title
    ax.set_xlabel('Categories')
    ax.set_ylabel('Values')
    ax.set_title('Bar Graph')
    # Display the plot
    plt.show()

def create_list(var1, var2, var3):
    """
    Create a list from three variables.

    Arguments:
    var1 -- the first variable
    var2 -- the second variable
    var3 -- the third variable

    Returns:
    A list containing var1, var2, and var3.
    """
    return [var1, var2, var3]

# MSE of the three baselines, in the order of the default labels
create_bar_graph(create_list(arith_test_mse, last_record_test_mse, last_4_quarter_test_mse))
code
129022822/cell_14
[ "text_html_output_1.png" ]
import math

import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.metrics import mean_squared_error, mean_absolute_percentage_error

df = pd.read_csv('/kaggle/input/johnson/Johnson.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
train_df = df[df['time'] < 1980]
test_df = df[df['time'] >= 1980]

def Arith_mean(ser):
    mean = ser.mean()
    test = test_df.copy()
    test['value'] = mean
    return test

arith_test = Arith_mean(train_df['value'])
arith_test

def last_record(ser):
    last = ser.tail(1)
    test = test_df.copy()
    test['value'] = float(last)
    return test

last_record_test = last_record(train_df['value'])
last_record_test

def last_4_quarter(ser):
    test = test_df.copy()
    test['value'] = ser.tail(4)['value'].unique()
    return test

last_4_quarter_test = last_4_quarter(train_df)
last_4_quarter_test

arith_test_mape = mean_absolute_percentage_error(test_df['value'], arith_test['value'])
arith_test_mse = mean_squared_error(test_df['value'], arith_test['value'])
arith_test_rmse = math.sqrt(arith_test_mse)
last_record_test_mape = mean_absolute_percentage_error(test_df['value'], last_record_test['value'])
last_record_test_mse = mean_squared_error(test_df['value'], last_record_test['value'])
last_record_test_rmse = math.sqrt(last_record_test_mse)
last_4_quarter_test_mape = mean_absolute_percentage_error(test_df['value'], last_4_quarter_test['value'])
last_4_quarter_test_mse = mean_squared_error(test_df['value'], last_4_quarter_test['value'])
last_4_quarter_test_rmse = math.sqrt(last_4_quarter_test_mse)
print(last_4_quarter_test_mape, last_4_quarter_test_mse, last_4_quarter_test_rmse)
code
129022822/cell_12
[ "text_html_output_1.png" ]
import math

import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.metrics import mean_squared_error, mean_absolute_percentage_error

df = pd.read_csv('/kaggle/input/johnson/Johnson.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
train_df = df[df['time'] < 1980]
test_df = df[df['time'] >= 1980]

def Arith_mean(ser):
    mean = ser.mean()
    test = test_df.copy()
    test['value'] = mean
    return test

arith_test = Arith_mean(train_df['value'])
arith_test

def last_record(ser):
    last = ser.tail(1)
    test = test_df.copy()
    test['value'] = float(last)
    return test

last_record_test = last_record(train_df['value'])
last_record_test

def last_4_quarter(ser):
    test = test_df.copy()
    test['value'] = ser.tail(4)['value'].unique()
    return test

last_4_quarter_test = last_4_quarter(train_df)
last_4_quarter_test

arith_test_mape = mean_absolute_percentage_error(test_df['value'], arith_test['value'])
arith_test_mse = mean_squared_error(test_df['value'], arith_test['value'])
arith_test_rmse = math.sqrt(arith_test_mse)
last_record_test_mape = mean_absolute_percentage_error(test_df['value'], last_record_test['value'])
last_record_test_mse = mean_squared_error(test_df['value'], last_record_test['value'])
last_record_test_rmse = math.sqrt(last_record_test_mse)
last_4_quarter_test_mape = mean_absolute_percentage_error(test_df['value'], last_4_quarter_test['value'])
last_4_quarter_test_mse = mean_squared_error(test_df['value'], last_4_quarter_test['value'])
last_4_quarter_test_rmse = math.sqrt(last_4_quarter_test_mse)
print(arith_test_mape, arith_test_mse, arith_test_rmse)
code
129022822/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/johnson/Johnson.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
train_df = df[df['time'] < 1980]
test_df = df[df['time'] >= 1980]

def Arith_mean(ser):
    mean = ser.mean()
    test = test_df.copy()
    test['value'] = mean
    return test

arith_test = Arith_mean(train_df['value'])
arith_test
code
106202262/cell_21
[ "text_html_output_1.png" ]
import re

import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from plotly.offline import init_notebook_mode, iplot

df = pd.read_excel('../input/arabic-companies-reviews-for-sentiment-analysis/Arabic_Reviews.xlsx')
df.drop(inplace=True, columns=['Unnamed: 0'])
df.review_description.duplicated().sum()
df.drop(df[df.review_description.duplicated() == True].index, axis=0, inplace=True)
df = df.rename({'rating(1 postive 0 neutral -1 negative': 'label'}, axis=1)  # original column name in the Excel file
fig = go.Figure(data=[go.Pie(labels=['positive', 'negative', 'neutral'],
                             values=[df.label[df.label == x].count() for x in df.label.unique()],
                             pull=[0, 0.1, 0])])
fig.update_layout(title='Ratings')
df2 = df.copy()
df2.label = df.label.map({0: 'neutral', 1: 'positive', -1: 'negative'})
fig = px.sunburst(df2, path=['company', 'label'], title='Companies and Feedbacks',
                  color_continuous_scale='RdBu', color='label')
fig.update_traces(textinfo='label + percent parent')
for companyName in df.company.unique():
    fig = go.Figure(data=[go.Bar(y=df.label[df['company'] == companyName].value_counts(),
                                 x=df.label[df['company'] == companyName].unique())])
    fig.update_layout(title=companyName + ' Ratings')
df.review_description = df.review_description.astype(str)
df.review_description = df.review_description.apply(lambda x: re.sub('[%s]' % re.escape('!"#$%&\'()*+,،-./:;<=>؟?@[\\]^_`{|}~'), ' ', x))
df.review_description = df.review_description.apply(lambda x: x.replace('؛', ''))
df.head()
code
106202262/cell_25
[ "text_html_output_10.png", "text_html_output_4.png", "text_html_output_6.png", "text_html_output_2.png", "text_html_output_5.png", "text_html_output_9.png", "text_html_output_1.png", "text_html_output_12.png", "text_html_output_11.png", "text_html_output_8.png", "text_html_output_3.png", "text_html_output_7.png" ]
import re

import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from plotly.offline import init_notebook_mode, iplot

df = pd.read_excel('../input/arabic-companies-reviews-for-sentiment-analysis/Arabic_Reviews.xlsx')
df.drop(inplace=True, columns=['Unnamed: 0'])
df.review_description.duplicated().sum()
df.drop(df[df.review_description.duplicated() == True].index, axis=0, inplace=True)
df = df.rename({'rating(1 postive 0 neutral -1 negative': 'label'}, axis=1)  # original column name in the Excel file
fig = go.Figure(data=[go.Pie(labels=['positive', 'negative', 'neutral'],
                             values=[df.label[df.label == x].count() for x in df.label.unique()],
                             pull=[0, 0.1, 0])])
fig.update_layout(title='Ratings')
df2 = df.copy()
df2.label = df.label.map({0: 'neutral', 1: 'positive', -1: 'negative'})
fig = px.sunburst(df2, path=['company', 'label'], title='Companies and Feedbacks',
                  color_continuous_scale='RdBu', color='label')
fig.update_traces(textinfo='label + percent parent')
for companyName in df.company.unique():
    fig = go.Figure(data=[go.Bar(y=df.label[df['company'] == companyName].value_counts(),
                                 x=df.label[df['company'] == companyName].unique())])
    fig.update_layout(title=companyName + ' Ratings')
df.review_description = df.review_description.astype(str)
df.review_description = df.review_description.apply(lambda x: re.sub('[%s]' % re.escape('!"#$%&\'()*+,،-./:;<=>؟?@[\\]^_`{|}~'), ' ', x))
df.review_description = df.review_description.apply(lambda x: x.replace('؛', ''))
df.review_description[5]
code
106202262/cell_4
[ "text_html_output_1.png" ]
import pandas as pd

df = pd.read_excel('../input/arabic-companies-reviews-for-sentiment-analysis/Arabic_Reviews.xlsx')
df
code
106202262/cell_29
[ "text_plain_output_1.png" ]
import re

import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from nltk.corpus import stopwords
from plotly.offline import init_notebook_mode, iplot

df = pd.read_excel('../input/arabic-companies-reviews-for-sentiment-analysis/Arabic_Reviews.xlsx')
df.drop(inplace=True, columns=['Unnamed: 0'])
df.review_description.duplicated().sum()
df.drop(df[df.review_description.duplicated() == True].index, axis=0, inplace=True)
df = df.rename({'rating(1 postive 0 neutral -1 negative': 'label'}, axis=1)  # original column name in the Excel file
fig = go.Figure(data=[go.Pie(labels=['positive', 'negative', 'neutral'],
                             values=[df.label[df.label == x].count() for x in df.label.unique()],
                             pull=[0, 0.1, 0])])
fig.update_layout(title='Ratings')
df2 = df.copy()
df2.label = df.label.map({0: 'neutral', 1: 'positive', -1: 'negative'})
fig = px.sunburst(df2, path=['company', 'label'], title='Companies and Feedbacks',
                  color_continuous_scale='RdBu', color='label')
fig.update_traces(textinfo='label + percent parent')
for companyName in df.company.unique():
    fig = go.Figure(data=[go.Bar(y=df.label[df['company'] == companyName].value_counts(),
                                 x=df.label[df['company'] == companyName].unique())])
    fig.update_layout(title=companyName + ' Ratings')
df.review_description = df.review_description.astype(str)
df.review_description = df.review_description.apply(lambda x: re.sub('[%s]' % re.escape('!"#$%&\'()*+,،-./:;<=>؟?@[\\]^_`{|}~'), ' ', x))
df.review_description = df.review_description.apply(lambda x: x.replace('؛', ''))
stopWords = list(set(stopwords.words('arabic')))
for word in ['لا', 'لكن', 'ولكن']:  # keep the negation/contrast words ("not", "but", "and but")
    stopWords.remove(word)
df.review_description[5]
' '.join([word for word in df.review_description[5].split() if word not in stopWords])
df.review_description = df.review_description.apply(lambda x: ' '.join([word for word in x.split() if word not in stopWords]))
df.head()
code
106202262/cell_26
[ "text_html_output_1.png" ]
from nltk.corpus import stopwords
from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import plotly.express as px
import plotly.express as px
import plotly.graph_objects as go
import re
df = pd.read_excel('../input/arabic-companies-reviews-for-sentiment-analysis/Arabic_Reviews.xlsx')
df.drop(inplace=True, columns=['Unnamed: 0'])
df.review_description.duplicated().sum()
df.drop(df[df.review_description.duplicated() == True].index, axis=0, inplace=True)
df = df.rename({'rating(1 postive 0 neutral -1 negative': 'label'}, axis=1)
fig = go.Figure(data=[go.Pie(labels=['postive', 'negative', 'neutral'], values=[df.label[df.label == x].count() for x in df.label.unique()], pull=[0, 0.1, 0])])
fig.update_layout(title='Ratings')
df2 = df.copy()
df2.label = df.label.map({0: 'neutral', 1: 'postive', -1: 'negative'})
fig = px.sunburst(df2, path=['company', 'label'], title='Companies and Feedbacks', color_continuous_scale='RdBu', color='label')
fig.update_traces(textinfo='label + percent parent')
for companyName in df.company.unique():
    fig = go.Figure(data=[go.Bar(y=df.label[df['company'] == companyName].value_counts(), x=df.label[df['company'] == companyName].unique())])
    fig.update_layout(title=companyName + ' Ratings')
df.review_description = df.review_description.astype(str)
df.review_description = df.review_description.apply(lambda x: re.sub('[%s]' % re.escape('!"#$%&\'()*+,،-./:;<=>؟?@[\\]^_`{|}~'), ' ', x))
df.review_description = df.review_description.apply(lambda x: x.replace('؛', ''))
stopWords = list(set(stopwords.words('arabic')))
for word in ['لا', 'لكن', 'ولكن']:
    stopWords.remove(word)
df.review_description[5]
' '.join([word for word in df.review_description[5].split() if word not in stopWords])
code
106202262/cell_2
[ "text_html_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
import plotly.express as px
from plotly.offline import init_notebook_mode, iplot
from tashaphyne.stemming import ArabicLightStemmer
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report, roc_curve, f1_score, accuracy_score, recall_score, roc_auc_score, make_scorer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score, GridSearchCV
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import confusion_matrix, mean_squared_error, precision_score, recall_score, f1_score
from xgboost import XGBClassifier
import re
import emoji
from nltk.corpus import stopwords
init_notebook_mode(connected=True)
from sklearn.feature_extraction.text import TfidfVectorizer
code
106202262/cell_1
[ "text_plain_output_1.png" ]
!pip install Arabic-Stopwords
!pip install emoji
!pip install Tashaphyne
code
106202262/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd
df = pd.read_excel('../input/arabic-companies-reviews-for-sentiment-analysis/Arabic_Reviews.xlsx')
df.drop(inplace=True, columns=['Unnamed: 0'])
df.review_description.duplicated().sum()
code
106202262/cell_15
[ "text_html_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import plotly.express as px
import plotly.express as px
import plotly.graph_objects as go
df = pd.read_excel('../input/arabic-companies-reviews-for-sentiment-analysis/Arabic_Reviews.xlsx')
df.drop(inplace=True, columns=['Unnamed: 0'])
df.review_description.duplicated().sum()
df.drop(df[df.review_description.duplicated() == True].index, axis=0, inplace=True)
df = df.rename({'rating(1 postive 0 neutral -1 negative': 'label'}, axis=1)
fig = go.Figure(data=[go.Pie(labels=['postive', 'negative', 'neutral'], values=[df.label[df.label == x].count() for x in df.label.unique()], pull=[0, 0.1, 0])])
fig.update_layout(title='Ratings')
df2 = df.copy()
df2.label = df.label.map({0: 'neutral', 1: 'postive', -1: 'negative'})
fig = px.sunburst(df2, path=['company', 'label'], title='Companies and Feedbacks', color_continuous_scale='RdBu', color='label')
fig.update_traces(textinfo='label + percent parent')
fig.show()
code
106202262/cell_17
[ "text_plain_output_1.png" ]
from plotly.offline import init_notebook_mode, iplot
import pandas as pd
import plotly.express as px
import plotly.express as px
import plotly.graph_objects as go
df = pd.read_excel('../input/arabic-companies-reviews-for-sentiment-analysis/Arabic_Reviews.xlsx')
df.drop(inplace=True, columns=['Unnamed: 0'])
df.review_description.duplicated().sum()
df.drop(df[df.review_description.duplicated() == True].index, axis=0, inplace=True)
df = df.rename({'rating(1 postive 0 neutral -1 negative': 'label'}, axis=1)
fig = go.Figure(data=[go.Pie(labels=['postive', 'negative', 'neutral'], values=[df.label[df.label == x].count() for x in df.label.unique()], pull=[0, 0.1, 0])])
fig.update_layout(title='Ratings')
df2 = df.copy()
df2.label = df.label.map({0: 'neutral', 1: 'postive', -1: 'negative'})
fig = px.sunburst(df2, path=['company', 'label'], title='Companies and Feedbacks', color_continuous_scale='RdBu', color='label')
fig.update_traces(textinfo='label + percent parent')
for companyName in df.company.unique():
    fig = go.Figure(data=[go.Bar(y=df.label[df['company'] == companyName].value_counts(), x=df.label[df['company'] == companyName].unique())])
    fig.update_layout(title=companyName + ' Ratings')
    iplot(fig)
code
72088017/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/dont-overfit-ii/train.csv')
test_df = pd.read_csv('/kaggle/input/dont-overfit-ii/test.csv')
display(train_df.shape)
display(train_df.head())
display(train_df.info())
code
72088017/cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/dont-overfit-ii/train.csv')
test_df = pd.read_csv('/kaggle/input/dont-overfit-ii/test.csv')
train_df[train_df.columns[2:]].std().hist()
plt.title('Distribution of stds of all columns')
code
72088017/cell_29
[ "text_html_output_1.png" ]
from sklearn.linear_model import Lasso
from sklearn.linear_model import RidgeClassifier
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/dont-overfit-ii/train.csv')
test_df = pd.read_csv('/kaggle/input/dont-overfit-ii/test.csv')
model = Lasso(alpha=0.0299)
model1 = RidgeClassifier(alpha=0.005)
model.fit(X_train, y_train)
ypred_train = model.predict(X_train)
ypred_val = model.predict(X_val)
model.fit(X_train, y_train)
ypred_train = model.predict(X_train)
ypred_val = model.predict(X_val)
ypred = model.predict(test_df)
ypred
output = pd.DataFrame({'id': test_df.id, 'target': ypred})
output.head()
code
72088017/cell_26
[ "text_plain_output_1.png" ]
from sklearn.linear_model import Lasso
from sklearn.linear_model import RidgeClassifier
from sklearn.metrics import roc_auc_score
y_val.shape
model = Lasso(alpha=0.0299)
model1 = RidgeClassifier(alpha=0.005)
model.fit(X_train, y_train)
ypred_train = model.predict(X_train)
ypred_val = model.predict(X_val)
print('The train score is = {} '.format(roc_auc_score(y_train, ypred_train)))
print('The validation score is = {}'.format(roc_auc_score(y_val, ypred_val)))
code
72088017/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
72088017/cell_18
[ "text_html_output_2.png", "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/dont-overfit-ii/train.csv')
test_df = pd.read_csv('/kaggle/input/dont-overfit-ii/test.csv')
corrs = train_df.corr().abs().unstack().sort_values(kind='quicksort').reset_index()
corrs = corrs[corrs['level_0'] != corrs['level_1']]
corrs.tail(15)
code
72088017/cell_28
[ "text_plain_output_1.png" ]
from sklearn.linear_model import Lasso
from sklearn.linear_model import RidgeClassifier
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/dont-overfit-ii/train.csv')
test_df = pd.read_csv('/kaggle/input/dont-overfit-ii/test.csv')
model = Lasso(alpha=0.0299)
model1 = RidgeClassifier(alpha=0.005)
model.fit(X_train, y_train)
ypred_train = model.predict(X_train)
ypred_val = model.predict(X_val)
model.fit(X_train, y_train)
ypred_train = model.predict(X_train)
ypred_val = model.predict(X_val)
ypred = model.predict(test_df)
ypred
code
72088017/cell_8
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/dont-overfit-ii/train.csv')
test_df = pd.read_csv('/kaggle/input/dont-overfit-ii/test.csv')
train_df[train_df.columns[2:]].mean().hist()
plt.title('Distribution of means of all columns')
code
72088017/cell_16
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/dont-overfit-ii/train.csv')
test_df = pd.read_csv('/kaggle/input/dont-overfit-ii/test.csv')
print(train_df.duplicated().sum())
print(train_df.duplicated().sum())
code
72088017/cell_14
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/dont-overfit-ii/train.csv')
test_df = pd.read_csv('/kaggle/input/dont-overfit-ii/test.csv')
print(train_df.isnull().any().any())
print(test_df.isnull().any().any())
code
72088017/cell_22
[ "text_plain_output_1.png" ]
y_val.shape
code
72088017/cell_10
[ "text_plain_output_3.png", "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/dont-overfit-ii/train.csv')
test_df = pd.read_csv('/kaggle/input/dont-overfit-ii/test.csv')
display(train_df.describe())
display(test_df.describe())
code
72088017/cell_12
[ "text_plain_output_3.png", "text_html_output_1.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/dont-overfit-ii/train.csv')
test_df = pd.read_csv('/kaggle/input/dont-overfit-ii/test.csv')
train_df['target'].value_counts()
code
72088017/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_df = pd.read_csv('/kaggle/input/dont-overfit-ii/train.csv')
test_df = pd.read_csv('/kaggle/input/dont-overfit-ii/test.csv')
display(test_df.shape)
display(test_df.head())
display(test_df.info())
code
129011222/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
train.count()
print(train.isna().sum().sort_values(ascending=False))
code
129011222/cell_25
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
train.count()
test.count()
test.isna().sum().sort_values(ascending=False)
train.drop(columns=['PassengerId'], inplace=True)
test.drop(columns=['PassengerId'], inplace=True)
TARGET = 'Transported'
FEATURES = []
for col in train.columns:
    if col != TARGET:
        FEATURES.append(col)
print(FEATURES)
code
129011222/cell_30
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
train.count()
test.count()
test.isna().sum().sort_values(ascending=False)
train.drop(columns=['PassengerId'], inplace=True)
test.drop(columns=['PassengerId'], inplace=True)
TARGET = 'Transported'
FEATURES = []
for col in train.columns:
    if col != TARGET:
        FEATURES.append(col)
test_null = pd.DataFrame(test.isna().sum())
test_null
code
129011222/cell_33
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
train.count()
test.count()
test.isna().sum().sort_values(ascending=False)
train.drop(columns=['PassengerId'], inplace=True)
test.drop(columns=['PassengerId'], inplace=True)
TARGET = 'Transported'
FEATURES = []
for col in train.columns:
    if col != TARGET:
        FEATURES.append(col)
train.iloc[:, :-1].describe().T.sort_values(by='mean', ascending=False)
test_null = pd.DataFrame(test.isna().sum())
train_null = pd.DataFrame(train.isna().sum())
train_null
code
129011222/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
test.count()
test.isna().sum().sort_values(ascending=False)
test.describe()
code
129011222/cell_40
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
train.count()
test.count()
test.isna().sum().sort_values(ascending=False)
train.drop(columns=['PassengerId'], inplace=True)
test.drop(columns=['PassengerId'], inplace=True)
TARGET = 'Transported'
FEATURES = []
for col in train.columns:
    if col != TARGET:
        FEATURES.append(col)
train.iloc[:, :-1].describe().T.sort_values(by='mean', ascending=False)
test_null = pd.DataFrame(test.isna().sum())
train_null = pd.DataFrame(train.isna().sum())
train.dtypes
code
129011222/cell_29
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
train.count()
test.count()
test.isna().sum().sort_values(ascending=False)
train.drop(columns=['PassengerId'], inplace=True)
test.drop(columns=['PassengerId'], inplace=True)
TARGET = 'Transported'
FEATURES = []
for col in train.columns:
    if col != TARGET:
        FEATURES.append(col)
test_null = pd.DataFrame(test.isna().sum())
test.isna().sum()
code
129011222/cell_39
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
train.count()
test.count()
test.isna().sum().sort_values(ascending=False)
train.drop(columns=['PassengerId'], inplace=True)
test.drop(columns=['PassengerId'], inplace=True)
TARGET = 'Transported'
FEATURES = []
for col in train.columns:
    if col != TARGET:
        FEATURES.append(col)
train.iloc[:, :-1].describe().T.sort_values(by='mean', ascending=False)
test_null = pd.DataFrame(test.isna().sum())
train_null = pd.DataFrame(train.isna().sum())
train.head()
code
129011222/cell_26
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
train.count()
test.count()
test.isna().sum().sort_values(ascending=False)
train.drop(columns=['PassengerId'], inplace=True)
test.drop(columns=['PassengerId'], inplace=True)
TARGET = 'Transported'
FEATURES = []
for col in train.columns:
    if col != TARGET:
        FEATURES.append(col)
train.iloc[:, :-1].describe().T.sort_values(by='mean', ascending=False)
code
129011222/cell_41
[ "text_plain_output_1.png" ]
from plotly.subplots import make_subplots
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objects as go
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
train.count()
test.count()
test.isna().sum().sort_values(ascending=False)
train.drop(columns=['PassengerId'], inplace=True)
test.drop(columns=['PassengerId'], inplace=True)
TARGET = 'Transported'
FEATURES = []
for col in train.columns:
    if col != TARGET:
        FEATURES.append(col)
train.iloc[:, :-1].describe().T.sort_values(by='mean', ascending=False)
test_null = pd.DataFrame(test.isna().sum())
test.isna().sum()
test_null = test_null.sort_values(by=0, ascending=False)
train_null = pd.DataFrame(train.isna().sum())
train_null = train_null.sort_values(by=0, ascending=False)[:-1]
fig = make_subplots(rows=1, cols=2, column_titles=['Train Data', 'Test Data'], x_title='Missing Values')
fig.add_trace(go.Bar(x=train_null[0], y=train_null.index, orientation='h', marker=dict(color=[n for n in range(12)])), 1, 1)
fig.add_trace(go.Bar(x=test_null[0], y=test_null.index, orientation='h', marker=dict(color=[n for n in range(12)])), 1, 2)
fig.update_layout(showlegend=False, title_text='Column wise Null Value Distribution', title_x=0.5)
train.dtypes
df = pd.concat([train[FEATURES], test[FEATURES]], axis=0)
text_features = ['Cabin', 'Name']
cat_features = [col for col in FEATURES if df[col].nunique() < 25 and col not in text_features]
cont_features = [col for col in FEATURES if df[col].nunique() >= 25 and col not in text_features]
del df
print(f'Total number of features: {len(FEATURES)}')
print(f'Number of categorical features: {len(cat_features)}')
print(f'Number of continuous features: {len(cont_features)}')
print(f'Number of text features: {len(text_features)}')
labels = ['Categorical', 'Continuous', 'Text']
values = [len(cat_features), len(cont_features), len(text_features)]
fig = go.Figure(data=[go.Pie(labels=labels, values=values, pull=[0.1, 0, 0])])
fig.show()
code
129011222/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
print(f'train data shape: {train.shape}')
print(f'Number of rows in train data: {train.shape[0]}')
print(f'Number of columns in train data: {train.shape[1]}')
print(f'Number of values in train data: {train.count().sum()}')
print(f'Number of missing values in train data: {sum(train.isna().sum())}')
code
129011222/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
test.count()
test.isna().sum().sort_values(ascending=False)
code
129011222/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
129011222/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
test.count()
code
129011222/cell_16
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
test.head()
code
129011222/cell_17
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
print(f'test data shape: {test.shape}')
print(f'Number of rows in test data: {test.shape[0]}')
print(f'Number of columns in test data: {test.shape[1]}')
print(f'Number of values in test data: {test.count().sum()}')
print(f'Number of missing values in test data: {sum(test.isna().sum())}')
code
129011222/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
train.count()
train.describe()
code
129011222/cell_22
[ "text_html_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
submission.head()
code
129011222/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
train.head()
code
129011222/cell_37
[ "text_html_output_2.png" ]
from plotly.subplots import make_subplots
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import plotly.graph_objects as go
train = pd.read_csv('../input/spaceship-titanic/train.csv')
test = pd.read_csv('../input/spaceship-titanic/test.csv')
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = 'median'
train.count()
test.count()
test.isna().sum().sort_values(ascending=False)
train.drop(columns=['PassengerId'], inplace=True)
test.drop(columns=['PassengerId'], inplace=True)
TARGET = 'Transported'
FEATURES = []
for col in train.columns:
    if col != TARGET:
        FEATURES.append(col)
train.iloc[:, :-1].describe().T.sort_values(by='mean', ascending=False)
test_null = pd.DataFrame(test.isna().sum())
test_null = test_null.sort_values(by=0, ascending=False)
train_null = pd.DataFrame(train.isna().sum())
train_null = train_null.sort_values(by=0, ascending=False)[:-1]
fig = make_subplots(rows=1, cols=2, column_titles=['Train Data', 'Test Data'], x_title='Missing Values')
fig.add_trace(go.Bar(x=train_null[0], y=train_null.index, orientation='h', marker=dict(color=[n for n in range(12)])), 1, 1)
fig.add_trace(go.Bar(x=test_null[0], y=test_null.index, orientation='h', marker=dict(color=[n for n in range(12)])), 1, 2)
fig.update_layout(showlegend=False, title_text='Column wise Null Value Distribution', title_x=0.5)
code