path: stringlengths (13 to 17)
screenshot_names: sequencelengths (1 to 873)
code: stringlengths (0 to 40.4k)
cell_type: stringclasses (1 value)
74046505/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/used-bikes-prices-in-india/Used_Bikes.csv')
df
df.shape
categoric_cols = [categoric for categoric in df.columns if df[categoric].dtype in ['object']]
categoric_cols
numeric_cols = [numeric for numeric in df.columns if df[numeric].dtype in ['float64']]
numeric_cols
df.brand.unique()
df.bike_name.unique()
df.city.nunique()
df_missing = df.isna().sum()
sns.heatmap(data=df.corr(), annot=True)
code
74046505/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/used-bikes-prices-in-india/Used_Bikes.csv')
df
df.shape
categoric_cols = [categoric for categoric in df.columns if df[categoric].dtype in ['object']]
categoric_cols
numeric_cols = [numeric for numeric in df.columns if df[numeric].dtype in ['float64']]
numeric_cols
df.brand.unique()
df.bike_name.unique()
df_unknown = df[df['bike_name'] == 'unknown']
df_unknown
code
74046505/cell_27
[ "text_html_output_1.png" ]
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler, OneHotEncoder, LabelEncoder

numerical_pipeline = Pipeline([('scaling', MinMaxScaler())])
categoric_second_pipeline = Pipeline([('label', OneHotEncoder(sparse=False))])
preprocessing = ColumnTransformer([
    ('numeric', numerical_pipeline, ['kms_driven', 'age', 'power']),
    ('cat_second', categoric_second_pipeline, ['owner', 'bike_name', 'city', 'brand']),
])
pipeline = Pipeline([('algo', preprocessing), ('model', RandomForestRegressor(random_state=42))])
pipeline.fit(X_train, y_train)
pipeline.get_params()
parameter = {'model__n_estimators': [100, 200, 350, 500], 'model__min_samples_leaf': [2, 10, 30]}
model = GridSearchCV(pipeline, parameter, cv=3, n_jobs=-1, verbose=1)
model.fit(X_train, y_train)
code
74046505/cell_12
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/used-bikes-prices-in-india/Used_Bikes.csv')
df
df.shape
categoric_cols = [categoric for categoric in df.columns if df[categoric].dtype in ['object']]
categoric_cols
numeric_cols = [numeric for numeric in df.columns if df[numeric].dtype in ['float64']]
numeric_cols
df.brand.unique()
df.bike_name.unique()
df.city.nunique()
df_missing = df.isna().sum()
print(df_missing[df_missing > 0])
code
74046505/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/used-bikes-prices-in-india/Used_Bikes.csv')
df
df.shape
categoric_cols = [categoric for categoric in df.columns if df[categoric].dtype in ['object']]
categoric_cols
code
18150474/cell_4
[ "text_plain_output_1.png" ]
import sys
!conda install --yes --prefix {sys.prefix} -c rdkit rdkit
code
18150474/cell_11
[ "text_plain_output_1.png" ]
import os

import pybel
from tqdm import tqdm  # used in the list comprehension below; missing from the original cell

file_dir = '../input/champs-scalar-coupling/structures/'
mols_files = os.listdir(file_dir)
mols_index = dict(map(reversed, enumerate(mols_files)))
mol_name = list(mols_index.keys())

def xyz_to_smiles(fname: str) -> str:
    mol = next(pybel.readfile('xyz', fname))
    smi = mol.write(format='smi')
    return smi.split()[0].strip()

smiles = [xyz_to_smiles(file_dir + i) for i in tqdm(mol_name)]
code
18150474/cell_3
[ "text_html_output_1.png" ]
!conda install openbabel -c openbabel -y
code
18150474/cell_14
[ "text_plain_output_1.png" ]
from mol2vec.features import mol2alt_sentence
from rdkit import Chem
from tqdm import tqdm  # used in the list comprehension below; missing from the original cell
import os
import pandas as pd
import pybel

file_dir = '../input/champs-scalar-coupling/structures/'
mols_files = os.listdir(file_dir)
mols_index = dict(map(reversed, enumerate(mols_files)))
mol_name = list(mols_index.keys())

def xyz_to_smiles(fname: str) -> str:
    mol = next(pybel.readfile('xyz', fname))
    smi = mol.write(format='smi')
    return smi.split()[0].strip()

smiles = [xyz_to_smiles(file_dir + i) for i in tqdm(mol_name)]
df_smiles = pd.DataFrame({'molecule_name': mol_name, 'smiles': smiles})
sentence = mol2alt_sentence(Chem.MolFromSmiles(df_smiles.smiles[33]), 1)
print('SMILE:', df_smiles.smiles[33])
print(sentence)
code
18150474/cell_12
[ "text_plain_output_1.png" ]
import os
import pandas as pd
import pybel
from tqdm import tqdm  # used in the list comprehension below; missing from the original cell

file_dir = '../input/champs-scalar-coupling/structures/'
mols_files = os.listdir(file_dir)
mols_index = dict(map(reversed, enumerate(mols_files)))
mol_name = list(mols_index.keys())

def xyz_to_smiles(fname: str) -> str:
    mol = next(pybel.readfile('xyz', fname))
    smi = mol.write(format='smi')
    return smi.split()[0].strip()

smiles = [xyz_to_smiles(file_dir + i) for i in tqdm(mol_name)]
df_smiles = pd.DataFrame({'molecule_name': mol_name, 'smiles': smiles})
df_smiles.head(11)
code
18150474/cell_5
[ "text_plain_output_1.png" ]
!pip install git+https://github.com/samoturk/mol2vec
code
106196488/cell_9
[ "text_plain_output_1.png" ]
from pyspark.sql import SparkSession

walmart_spark = SparkSession.builder.appName('Walmart_Stock_Price').getOrCreate()
walmart_spark
df_wmt = walmart_spark.read.csv('../input/wmtdata/WMT.csv', header=True, inferSchema=True)
df_wmt.columns
code
106196488/cell_2
[ "text_plain_output_1.png" ]
pip install pyspark
code
106196488/cell_8
[ "text_plain_output_1.png" ]
from pyspark.sql import SparkSession

walmart_spark = SparkSession.builder.appName('Walmart_Stock_Price').getOrCreate()
walmart_spark
df_wmt = walmart_spark.read.csv('../input/wmtdata/WMT.csv', header=True, inferSchema=True)
df_wmt.show(1, vertical=True)
code
106196488/cell_3
[ "text_html_output_1.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from pyspark.sql import SparkSession

walmart_spark = SparkSession.builder.appName('Walmart_Stock_Price').getOrCreate()
walmart_spark
code
106196488/cell_5
[ "text_plain_output_1.png" ]
from pyspark.sql import SparkSession

walmart_spark = SparkSession.builder.appName('Walmart_Stock_Price').getOrCreate()
walmart_spark
df_wmt = walmart_spark.read.csv('../input/wmtdata/WMT.csv', header=True, inferSchema=True)
df_wmt.show()
code
1009103/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
from skimage.util import montage as montage2d  # assumption: montage2d came from older scikit-image; aliased for newer releases

# fossil_data is loaded in the previous cell (1009103/cell_3)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
ax1.imshow(montage2d(fossil_data), cmap='bone')
ax1.set_title('Axial Slices')
_ = ax2.hist(fossil_data.ravel(), 20)
ax2.set_title('Overall Histogram')
code
1009103/cell_3
[ "text_plain_output_1.png" ]
from skimage.io import imread
import numpy as np  # linear algebra

fossil_path = '../input/Gut-PhilElvCropped.tif'
fossil_data_rgb = imread(fossil_path)
fossil_data = np.mean(fossil_data_rgb, -1)
print('Loading Fossil Data sized {}'.format(fossil_data.shape))
code
1009103/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
from skimage.util import montage as montage2d  # assumption: montage2d came from older scikit-image; aliased for newer releases

# fossil_data is loaded in cell 1009103/cell_3
skip_slices = 30
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(14, 5))
ax1.imshow(montage2d(fossil_data[skip_slices:-skip_slices]), cmap='bone')
ax1.set_title('Axial Slices')
ax2.imshow(montage2d(fossil_data.transpose(1, 2, 0)[skip_slices:-skip_slices]), cmap='bone')
ax2.set_title('Sagittal Slices')
ax3.imshow(montage2d(fossil_data.transpose(2, 0, 1)[skip_slices:-skip_slices]), cmap='bone')
ax3.set_title('Coronal Slices')
code
16157745/cell_6
[ "application_vnd.jupyter.stderr_output_9.png", "application_vnd.jupyter.stderr_output_7.png", "application_vnd.jupyter.stderr_output_11.png", "text_plain_output_4.png", "text_plain_output_14.png", "text_plain_output_10.png", "text_plain_output_6.png", "application_vnd.jupyter.stderr_output_13.png", "application_vnd.jupyter.stderr_output_3.png", "application_vnd.jupyter.stderr_output_5.png", "text_plain_output_16.png", "application_vnd.jupyter.stderr_output_15.png", "text_plain_output_8.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "text_plain_output_12.png" ]
import pandas as pd

df = pd.read_csv('../input/train.csv')
df.head()
code
16157745/cell_32
[ "text_plain_output_1.png" ]
from copy import deepcopy
from sklearn.metrics import f1_score, accuracy_score
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
df = df.set_index('ID_code')
y = df['target']
X = df.drop(['target'], axis=1)
# X_train_df / X_test_df / y_train_df / y_test_df come from a train/test split cell
# that is not captured in this row
X_train, X_test = (np.matrix(X_train_df.values), np.matrix(X_test_df.values))
y_train, y_test = (np.matrix(y_train_df).T, np.matrix(y_test_df).T)

def get_w_hat(X, t):
    xt_x_minus_one = np.linalg.inv(np.dot(X.T, X))
    xt_t = np.dot(X.T, t)
    w_hat = np.dot(xt_x_minus_one, xt_t)
    return w_hat

def get_sigma_hat(w, X, t, N):
    X_w = np.dot(X, w)
    t_minus_X_w = t - X_w
    sigma_hat = 1.0 / N * np.dot(t_minus_X_w.T, t_minus_X_w)
    return np.float(sigma_hat)

def get_Sigma_w(sigma_square, X, S):
    return np.linalg.inv(1.0 / sigma_square * np.dot(X.T, X) + np.linalg.inv(S))

def get_mean_w(sigma_square, Sigma, X, t):
    return 1.0 / sigma_square * np.dot(np.dot(Sigma, X.T), t)

class BayesianLinearRegressor:

    def __init__(self, n=1, sigma_unif=1.0):
        self.n = n
        self.sigma_unif = sigma_unif

    def lrfit(self, X_, t):
        X = get_X_powered(X_, self.n)
        X = get_X_w_bias(X)
        _, S = generate_prior(X.shape[1], sigma_unif=self.sigma_unif)
        w_hat = get_w_hat(X, t)
        sigma_square = get_sigma_hat(w_hat, X, t, X.shape[0])
        Sigma = get_Sigma_w(sigma_square, X, S)
        mean_w = get_mean_w(sigma_square, Sigma, X, t)
        self.mean_w, self.sigma_square, self.Sigma = (mean_w, sigma_square, Sigma)

    def predict(self, X):
        X_pow = get_X(X, self.n)
        return np.dot(X_pow, self.mean_w)

def get_X_powered(X, n):
    """
    Return the X matrix concat with all the samples powered to 2..n

    Args:
        X (numpy.matrix): Matrix (N, D)
            - N : number of rows
            - D : number of features
        n (str): The power of the matrix.

    Returns:
        X_result: Matrix X with all the n powers
    """
    if n <= 1:
        return X
    X_result = deepcopy(X)
    X_powered = deepcopy(X)
    for i in range(2, n + 1):
        X_powered = np.multiply(X_powered, X)
        X_result = np.concatenate((X_result, X_powered), axis=1)
    return X_result

def get_X_w_bias(X):
    """
    Return The X matrix with the bias term

    Args:
        X (numpy.matrix): Matrix X (N, D)

    Returns:
        X_result (numpy.matrix): Matrix X (N, D + 1) with bias term put on the right column
    """
    X_result = np.concatenate((X, np.matrix(np.ones(X.shape[0])).T), axis=1)
    return X_result

def get_X(X, n=1):
    """ Put together the 2 functions above since they are often used together """
    return get_X_w_bias(get_X_powered(X, n))

def generate_prior(features, sigma_unif=3.0):
    S = np.matrix(np.diag(sigma_unif * np.ones(features)))
    return (np.matrix(np.zeros(features)), S)

def print_err(y_pred, y, total=False):
    """ Display the error of the Regression predictions """
    err = y - y_pred
    tot_err = np.sum(np.multiply(err, err))

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

@np.vectorize
def discretize(pred, threshold=0.5):
    """ The decorator vectorize allows to apply the function to each element """
    return 1 if pred > threshold else 0

blr = BayesianLinearRegressor(n=3)
blr.lrfit(X_train, y_train)
y_test_und = blr.predict(X_test)

def print_metrics(y_test, y_pred):
    """ Utility function to display the most important metrics """

y_pred = discretize(y_test_und, 0.5)
print_metrics(y_pred, y_test)
code
16157745/cell_28
[ "text_plain_output_1.png" ]
from copy import deepcopy
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
df = df.set_index('ID_code')
y = df['target']
X = df.drop(['target'], axis=1)
# X_train_df / X_test_df / y_train_df / y_test_df come from a train/test split cell
# that is not captured in this row
X_train, X_test = (np.matrix(X_train_df.values), np.matrix(X_test_df.values))
y_train, y_test = (np.matrix(y_train_df).T, np.matrix(y_test_df).T)

def get_w_hat(X, t):
    xt_x_minus_one = np.linalg.inv(np.dot(X.T, X))
    xt_t = np.dot(X.T, t)
    w_hat = np.dot(xt_x_minus_one, xt_t)
    return w_hat

def get_sigma_hat(w, X, t, N):
    X_w = np.dot(X, w)
    t_minus_X_w = t - X_w
    sigma_hat = 1.0 / N * np.dot(t_minus_X_w.T, t_minus_X_w)
    return np.float(sigma_hat)

def get_Sigma_w(sigma_square, X, S):
    return np.linalg.inv(1.0 / sigma_square * np.dot(X.T, X) + np.linalg.inv(S))

def get_mean_w(sigma_square, Sigma, X, t):
    return 1.0 / sigma_square * np.dot(np.dot(Sigma, X.T), t)

class BayesianLinearRegressor:

    def __init__(self, n=1, sigma_unif=1.0):
        self.n = n
        self.sigma_unif = sigma_unif

    def lrfit(self, X_, t):
        X = get_X_powered(X_, self.n)
        X = get_X_w_bias(X)
        _, S = generate_prior(X.shape[1], sigma_unif=self.sigma_unif)
        w_hat = get_w_hat(X, t)
        sigma_square = get_sigma_hat(w_hat, X, t, X.shape[0])
        Sigma = get_Sigma_w(sigma_square, X, S)
        mean_w = get_mean_w(sigma_square, Sigma, X, t)
        self.mean_w, self.sigma_square, self.Sigma = (mean_w, sigma_square, Sigma)

    def predict(self, X):
        X_pow = get_X(X, self.n)
        return np.dot(X_pow, self.mean_w)

def get_X_powered(X, n):
    """
    Return the X matrix concat with all the samples powered to 2..n

    Args:
        X (numpy.matrix): Matrix (N, D)
            - N : number of rows
            - D : number of features
        n (str): The power of the matrix.

    Returns:
        X_result: Matrix X with all the n powers
    """
    if n <= 1:
        return X
    X_result = deepcopy(X)
    X_powered = deepcopy(X)
    for i in range(2, n + 1):
        X_powered = np.multiply(X_powered, X)
        X_result = np.concatenate((X_result, X_powered), axis=1)
    return X_result

def get_X_w_bias(X):
    """
    Return The X matrix with the bias term

    Args:
        X (numpy.matrix): Matrix X (N, D)

    Returns:
        X_result (numpy.matrix): Matrix X (N, D + 1) with bias term put on the right column
    """
    X_result = np.concatenate((X, np.matrix(np.ones(X.shape[0])).T), axis=1)
    return X_result

def get_X(X, n=1):
    """ Put together the 2 functions above since they are often used together """
    return get_X_w_bias(get_X_powered(X, n))

def generate_prior(features, sigma_unif=3.0):
    S = np.matrix(np.diag(sigma_unif * np.ones(features)))
    return (np.matrix(np.zeros(features)), S)

def print_err(y_pred, y, total=False):
    """ Display the error of the Regression predictions """
    err = y - y_pred
    tot_err = np.sum(np.multiply(err, err))

blr = BayesianLinearRegressor(n=3)
blr.lrfit(X_train, y_train)
y_test_und = blr.predict(X_test)
posterior_variance = blr.Sigma
print('Posterior variance matrix with shape: ', posterior_variance.shape)
code
16157745/cell_24
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/train.csv')
df = df.set_index('ID_code')
y = df['target']
X = df.drop(['target'], axis=1)

fig, ax = plt.subplots(figsize=(13, 3))
# the count is computed automatically
g = sns.countplot(x='target', data=df)
plt.show()

fig, ax = plt.subplots(ncols=3, figsize=(20, 4))
sns.distplot(df['var_0'], ax=ax[0], color='red')
sns.distplot(df['var_1'], ax=ax[1], color='green')
sns.distplot(df['var_3'], ax=ax[2], color='blue')
plt.show()
code
16157745/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

df = pd.read_csv('../input/train.csv')
df = df.set_index('ID_code')
y = df['target']
X = df.drop(['target'], axis=1)
fig, ax = plt.subplots(figsize=(13, 3))
g = sns.countplot(x='target', data=df)
plt.show()
code
16157745/cell_27
[ "image_output_1.png" ]
from copy import deepcopy
import numpy as np
import pandas as pd

df = pd.read_csv('../input/train.csv')
df = df.set_index('ID_code')
y = df['target']
X = df.drop(['target'], axis=1)
# X_train_df / X_test_df / y_train_df / y_test_df come from a train/test split cell
# that is not captured in this row
X_train, X_test = (np.matrix(X_train_df.values), np.matrix(X_test_df.values))
y_train, y_test = (np.matrix(y_train_df).T, np.matrix(y_test_df).T)

def get_w_hat(X, t):
    xt_x_minus_one = np.linalg.inv(np.dot(X.T, X))
    xt_t = np.dot(X.T, t)
    w_hat = np.dot(xt_x_minus_one, xt_t)
    return w_hat

def get_sigma_hat(w, X, t, N):
    X_w = np.dot(X, w)
    t_minus_X_w = t - X_w
    sigma_hat = 1.0 / N * np.dot(t_minus_X_w.T, t_minus_X_w)
    return np.float(sigma_hat)

def get_Sigma_w(sigma_square, X, S):
    return np.linalg.inv(1.0 / sigma_square * np.dot(X.T, X) + np.linalg.inv(S))

def get_mean_w(sigma_square, Sigma, X, t):
    return 1.0 / sigma_square * np.dot(np.dot(Sigma, X.T), t)

class BayesianLinearRegressor:

    def __init__(self, n=1, sigma_unif=1.0):
        self.n = n
        self.sigma_unif = sigma_unif

    def lrfit(self, X_, t):
        X = get_X_powered(X_, self.n)
        X = get_X_w_bias(X)
        _, S = generate_prior(X.shape[1], sigma_unif=self.sigma_unif)
        w_hat = get_w_hat(X, t)
        sigma_square = get_sigma_hat(w_hat, X, t, X.shape[0])
        Sigma = get_Sigma_w(sigma_square, X, S)
        mean_w = get_mean_w(sigma_square, Sigma, X, t)
        self.mean_w, self.sigma_square, self.Sigma = (mean_w, sigma_square, Sigma)

    def predict(self, X):
        X_pow = get_X(X, self.n)
        return np.dot(X_pow, self.mean_w)

def get_X_powered(X, n):
    """
    Return the X matrix concat with all the samples powered to 2..n

    Args:
        X (numpy.matrix): Matrix (N, D)
            - N : number of rows
            - D : number of features
        n (str): The power of the matrix.

    Returns:
        X_result: Matrix X with all the n powers
    """
    if n <= 1:
        return X
    X_result = deepcopy(X)
    X_powered = deepcopy(X)
    for i in range(2, n + 1):
        X_powered = np.multiply(X_powered, X)
        X_result = np.concatenate((X_result, X_powered), axis=1)
    return X_result

def get_X_w_bias(X):
    """
    Return The X matrix with the bias term

    Args:
        X (numpy.matrix): Matrix X (N, D)

    Returns:
        X_result (numpy.matrix): Matrix X (N, D + 1) with bias term put on the right column
    """
    X_result = np.concatenate((X, np.matrix(np.ones(X.shape[0])).T), axis=1)
    return X_result

def get_X(X, n=1):
    """ Put together the 2 functions above since they are often used together """
    return get_X_w_bias(get_X_powered(X, n))

def generate_prior(features, sigma_unif=3.0):
    S = np.matrix(np.diag(sigma_unif * np.ones(features)))
    return (np.matrix(np.zeros(features)), S)

def print_err(y_pred, y, total=False):
    """ Display the error of the Regression predictions """
    err = y - y_pred
    tot_err = np.sum(np.multiply(err, err))

blr = BayesianLinearRegressor(n=3)
blr.lrfit(X_train, y_train)
y_test_und = blr.predict(X_test)
print_err(y_test_und, y_test, True)
code
122258955/cell_2
[ "text_plain_output_5.png", "application_vnd.jupyter.stderr_output_7.png", "application_vnd.jupyter.stderr_output_6.png", "text_plain_output_4.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
!pip install pytorch-lightning
code
122258955/cell_11
[ "text_plain_output_1.png" ]
from torch import nn
from torch.utils.data import DataLoader, random_split
from torchvision import transforms
from torchvision.datasets import MNIST
import os
import pytorch_lightning as L
import torch
import torch.nn.functional as F
import numpy as np
import pandas as pd

class TinyModel(torch.nn.Module):

    def __init__(self):
        super(TinyModel, self).__init__()
        self.linear1 = torch.nn.Linear(100, 20)
        self.activation1 = torch.nn.ReLU()
        self.linear2 = torch.nn.Linear(20, 5)
        self.activation2 = torch.nn.ReLU()
        self.linear3 = torch.nn.Linear(5, 2)

    def forward(self, x):
        x = self.linear1(x)
        x = self.activation1(x)
        x = self.linear2(x)
        x = self.activation2(x)
        x = self.linear3(x)
        return x

class LitAutoEncoder(L.LightningModule):

    def __init__(self):
        super().__init__()
        self.encoder = nn.Sequential(nn.Linear(28 * 28, 128), nn.ReLU(), nn.Linear(128, 3))
        self.decoder = nn.Sequential(nn.Linear(3, 128), nn.ReLU(), nn.Linear(128, 28 * 28))

    def forward(self, x):
        embedding = self.encoder(x)
        return embedding

    def training_step(self, batch, batch_idx):
        x, y = batch
        x = x.view(x.size(0), -1)
        z = self.encoder(x)
        x_hat = self.decoder(z)
        loss = F.mse_loss(x_hat, x)
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        x = x.view(x.size(0), -1)
        z = self.encoder(x)
        x_hat = self.decoder(z)
        loss = F.mse_loss(x_hat, x)
        self.log('val_loss', loss, prog_bar=True)
        return loss

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=0.001)
        return optimizer

dataset = MNIST(os.getcwd(), download=True, transform=transforms.ToTensor())
train, val = random_split(dataset, [55000, 5000])
train_dataloader = DataLoader(train, batch_size=64, num_workers=4)
val_dataloader = DataLoader(val, batch_size=64, num_workers=4)
autoencoder = LitAutoEncoder()
trainer = L.Trainer(val_check_interval=250, accelerator='gpu', devices=2, max_epochs=20, precision=16)
trainer.fit(autoencoder, train_dataloader, val_dataloader)
code
89135962/cell_21
[ "text_plain_output_1.png" ]
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
import numpy as np
import seaborn as sns
from sklearn import metrics

plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (10, 10)

class CFG:
    n_roadways = 65
    seed = 42

def create_roadways(df):
    roads = list(range(CFG.n_roadways))
    return roads * int(len(df) / CFG.n_roadways)

def preprocess(df):
    df_ = df.copy()
    df_['roadway'] = create_roadways(df_)
    df_['time'] = pd.to_datetime(df_['time'])
    return df_

train = pd.read_csv(PATH / 'train.csv')
train = preprocess(train)
test = pd.read_csv(PATH / 'test.csv')
test = preprocess(test)
train
df_roadway = train[train['roadway'] == 0].reset_index(drop=True)
ax = train.groupby('direction')['congestion'].mean().plot.bar()
ax.bar_label(ax.containers[0])
plt.xticks(rotation=0);
val_cutoff = train['time'].max() - dt.timedelta(hours=12)
X_val = train[train['time'] > val_cutoff].reset_index(drop=True)
print(X_val['time'].min(), X_val['time'].max())
print(X_val['time'].max() - X_val['time'].min())
code
89135962/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd

class CFG:
    n_roadways = 65
    seed = 42

def create_roadways(df):
    roads = list(range(CFG.n_roadways))
    return roads * int(len(df) / CFG.n_roadways)

def preprocess(df):
    df_ = df.copy()
    df_['roadway'] = create_roadways(df_)
    df_['time'] = pd.to_datetime(df_['time'])
    return df_

train = pd.read_csv(PATH / 'train.csv')
train = preprocess(train)
test = pd.read_csv(PATH / 'test.csv')
test = preprocess(test)
train
train.groupby('roadway')['congestion'].agg('mean').plot.bar()
code
89135962/cell_25
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
import datetime as dt
import numpy as np
import seaborn as sns
from sklearn import metrics

plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (10, 10)

class CFG:
    n_roadways = 65
    seed = 42

def create_roadways(df):
    roads = list(range(CFG.n_roadways))
    return roads * int(len(df) / CFG.n_roadways)

def preprocess(df):
    df_ = df.copy()
    df_['roadway'] = create_roadways(df_)
    df_['time'] = pd.to_datetime(df_['time'])
    return df_

train = pd.read_csv(PATH / 'train.csv')
train = preprocess(train)
test = pd.read_csv(PATH / 'test.csv')
test = preprocess(test)
train
df_roadway = train[train['roadway'] == 0].reset_index(drop=True)
ax = train.groupby('direction')['congestion'].mean().plot.bar()
ax.bar_label(ax.containers[0])
plt.xticks(rotation=0);
df_sub = test[['row_id', 'roadway']].copy()
preds_test = train.groupby('roadway')['congestion'].median().rename('congestion').reset_index().round(0).astype(int)
df_sub = df_sub.merge(preds_test, on='roadway', how='left')
df_sub
code
89135962/cell_4
[ "text_html_output_1.png" ]
import pandas as pd

class CFG:
    n_roadways = 65
    seed = 42

def create_roadways(df):
    roads = list(range(CFG.n_roadways))
    return roads * int(len(df) / CFG.n_roadways)

def preprocess(df):
    df_ = df.copy()
    df_['roadway'] = create_roadways(df_)
    df_['time'] = pd.to_datetime(df_['time'])
    return df_

train = pd.read_csv(PATH / 'train.csv')
train = preprocess(train)
test = pd.read_csv(PATH / 'test.csv')
test = preprocess(test)
train
code
89135962/cell_23
[ "text_plain_output_1.png" ]
from sklearn import metrics
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
import numpy as np
import seaborn as sns

plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (10, 10)

class CFG:
    n_roadways = 65
    seed = 42

def create_roadways(df):
    roads = list(range(CFG.n_roadways))
    return roads * int(len(df) / CFG.n_roadways)

def preprocess(df):
    df_ = df.copy()
    df_['roadway'] = create_roadways(df_)
    df_['time'] = pd.to_datetime(df_['time'])
    return df_

train = pd.read_csv(PATH / 'train.csv')
train = preprocess(train)
test = pd.read_csv(PATH / 'test.csv')
test = preprocess(test)
train
df_roadway = train[train['roadway'] == 0].reset_index(drop=True)
ax = train.groupby('direction')['congestion'].mean().plot.bar()
ax.bar_label(ax.containers[0])
plt.xticks(rotation=0);
val_cutoff = train['time'].max() - dt.timedelta(hours=12)
X_train = train[train['time'] <= val_cutoff].reset_index(drop=True)
X_val = train[train['time'] > val_cutoff].reset_index(drop=True)
preds = X_train.groupby('roadway')['congestion'].median().rename('y_pred').reset_index().round(0).astype(int)
df_preds = X_val.merge(preds, on='roadway', how='left')
mae = metrics.mean_absolute_error(df_preds['congestion'], df_preds['y_pred'])
print('MAE:', mae)
code
89135962/cell_20
[ "text_plain_output_1.png" ]
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
import numpy as np
import seaborn as sns
from sklearn import metrics

plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (10, 10)

class CFG:
    n_roadways = 65
    seed = 42

def create_roadways(df):
    roads = list(range(CFG.n_roadways))
    return roads * int(len(df) / CFG.n_roadways)

def preprocess(df):
    df_ = df.copy()
    df_['roadway'] = create_roadways(df_)
    df_['time'] = pd.to_datetime(df_['time'])
    return df_

train = pd.read_csv(PATH / 'train.csv')
train = preprocess(train)
test = pd.read_csv(PATH / 'test.csv')
test = preprocess(test)
train
df_roadway = train[train['roadway'] == 0].reset_index(drop=True)
ax = train.groupby('direction')['congestion'].mean().plot.bar()
ax.bar_label(ax.containers[0])
plt.xticks(rotation=0);
val_cutoff = train['time'].max() - dt.timedelta(hours=12)
X_train = train[train['time'] <= val_cutoff].reset_index(drop=True)
print(X_train['time'].min(), X_train['time'].max())
print(X_train['time'].max() - X_train['time'].min())
code
89135962/cell_29
[ "text_plain_output_1.png" ]
!head submission.csv
code
89135962/cell_26
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
import datetime as dt
import numpy as np
import seaborn as sns
from sklearn import metrics

plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (10, 10)

class CFG:
    n_roadways = 65
    seed = 42

def create_roadways(df):
    roads = list(range(CFG.n_roadways))
    return roads * int(len(df) / CFG.n_roadways)

def preprocess(df):
    df_ = df.copy()
    df_['roadway'] = create_roadways(df_)
    df_['time'] = pd.to_datetime(df_['time'])
    return df_

train = pd.read_csv(PATH / 'train.csv')
train = preprocess(train)
test = pd.read_csv(PATH / 'test.csv')
test = preprocess(test)
train
df_roadway = train[train['roadway'] == 0].reset_index(drop=True)
ax = train.groupby('direction')['congestion'].mean().plot.bar()
ax.bar_label(ax.containers[0])
plt.xticks(rotation=0);
df_sub = test[['row_id', 'roadway']].copy()
preds_test = train.groupby('roadway')['congestion'].median().rename('congestion').reset_index().round(0).astype(int)
df_sub = df_sub.merge(preds_test, on='roadway', how='left')
df_sub
plt.hist(train['congestion'], density=True, alpha=0.5, bins=30, label='Observed')
plt.hist(df_sub['congestion'], density=True, alpha=0.5, bins=30, label='Predicted')
plt.legend()
code
89135962/cell_2
[ "text_plain_output_1.png" ]
from pathlib import Path  # Path is used below; the notebook imports it in a cell not captured here

PATH = Path('../input/tabular-playground-series-mar-2022')
!ls {PATH}
code
89135962/cell_19
[ "text_plain_output_1.png" ]
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
import numpy as np
import seaborn as sns
from sklearn import metrics

plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (10, 10)

class CFG:
    n_roadways = 65
    seed = 42

def create_roadways(df):
    roads = list(range(CFG.n_roadways))
    return roads * int(len(df) / CFG.n_roadways)

def preprocess(df):
    df_ = df.copy()
    df_['roadway'] = create_roadways(df_)
    df_['time'] = pd.to_datetime(df_['time'])
    return df_

train = pd.read_csv(PATH / 'train.csv')
train = preprocess(train)
test = pd.read_csv(PATH / 'test.csv')
test = preprocess(test)
train
df_roadway = train[train['roadway'] == 0].reset_index(drop=True)
ax = train.groupby('direction')['congestion'].mean().plot.bar()
ax.bar_label(ax.containers[0])
plt.xticks(rotation=0);
val_cutoff = train['time'].max() - dt.timedelta(hours=12)
print('val_cutoff:', val_cutoff)
code
89135962/cell_7
[ "image_output_1.png" ]
import pandas as pd

class CFG:
    n_roadways = 65
    seed = 42

def create_roadways(df):
    roads = list(range(CFG.n_roadways))
    return roads * int(len(df) / CFG.n_roadways)

def preprocess(df):
    df_ = df.copy()
    df_['roadway'] = create_roadways(df_)
    df_['time'] = pd.to_datetime(df_['time'])
    return df_

train = pd.read_csv(PATH / 'train.csv')
train = preprocess(train)
test = pd.read_csv(PATH / 'test.csv')
test = preprocess(test)
train
train['congestion'].plot.hist()
code
89135962/cell_18
[ "image_output_1.png" ]
import pandas as pd

class CFG:
    n_roadways = 65
    seed = 42

def create_roadways(df):
    roads = list(range(CFG.n_roadways))
    return roads * int(len(df) / CFG.n_roadways)

def preprocess(df):
    df_ = df.copy()
    df_['roadway'] = create_roadways(df_)
    df_['time'] = pd.to_datetime(df_['time'])
    return df_

train = pd.read_csv(PATH / 'train.csv')
train = preprocess(train)
test = pd.read_csv(PATH / 'test.csv')
test = preprocess(test)
train
print(test['time'].min(), test['time'].max())
print(test['time'].max() - test['time'].min())
code
89135962/cell_15
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
import datetime as dt
import numpy as np
import seaborn as sns
from sklearn import metrics

plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (10, 10)

class CFG:
    n_roadways = 65
    seed = 42

def create_roadways(df):
    roads = list(range(CFG.n_roadways))
    return roads * int(len(df) / CFG.n_roadways)

def preprocess(df):
    df_ = df.copy()
    df_['roadway'] = create_roadways(df_)
    df_['time'] = pd.to_datetime(df_['time'])
    return df_

train = pd.read_csv(PATH / 'train.csv')
train = preprocess(train)
test = pd.read_csv(PATH / 'test.csv')
test = preprocess(test)
train
df_roadway = train[train['roadway'] == 0].reset_index(drop=True)
ax = train.groupby('direction')['congestion'].mean().plot.bar()
ax.bar_label(ax.containers[0])
plt.xticks(rotation=0);
train['y'].value_counts().sort_index()
code
89135962/cell_17
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
import datetime as dt
import numpy as np
import seaborn as sns
from sklearn import metrics

plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (10, 10)

class CFG:
    n_roadways = 65
    seed = 42

def create_roadways(df):
    roads = list(range(CFG.n_roadways))
    return roads * int(len(df) / CFG.n_roadways)

def preprocess(df):
    df_ = df.copy()
    df_['roadway'] = create_roadways(df_)
    df_['time'] = pd.to_datetime(df_['time'])
    return df_

train = pd.read_csv(PATH / 'train.csv')
train = preprocess(train)
test = pd.read_csv(PATH / 'test.csv')
test = preprocess(test)
train
df_roadway = train[train['roadway'] == 0].reset_index(drop=True)
ax = train.groupby('direction')['congestion'].mean().plot.bar()
ax.bar_label(ax.containers[0])
plt.xticks(rotation=0);
print(train['time'].min(), train['time'].max())
print(train['time'].max() - train['time'].min())
code
89135962/cell_14
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
import datetime as dt
import numpy as np
import seaborn as sns
from sklearn import metrics

plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (10, 10)

class CFG:
    n_roadways = 65
    seed = 42

def create_roadways(df):
    roads = list(range(CFG.n_roadways))
    return roads * int(len(df) / CFG.n_roadways)

def preprocess(df):
    df_ = df.copy()
    df_['roadway'] = create_roadways(df_)
    df_['time'] = pd.to_datetime(df_['time'])
    return df_

train = pd.read_csv(PATH / 'train.csv')
train = preprocess(train)
test = pd.read_csv(PATH / 'test.csv')
test = preprocess(test)
train
df_roadway = train[train['roadway'] == 0].reset_index(drop=True)
ax = train.groupby('direction')['congestion'].mean().plot.bar()
ax.bar_label(ax.containers[0])
plt.xticks(rotation=0);
train['x'].value_counts().sort_index()
code
89135962/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
import datetime as dt
import numpy as np
import seaborn as sns
from sklearn import metrics

plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (10, 10)

class CFG:
    n_roadways = 65
    seed = 42

def create_roadways(df):
    roads = list(range(CFG.n_roadways))
    return roads * int(len(df) / CFG.n_roadways)

def preprocess(df):
    df_ = df.copy()
    df_['roadway'] = create_roadways(df_)
    df_['time'] = pd.to_datetime(df_['time'])
    return df_

train = pd.read_csv(PATH / 'train.csv')
train = preprocess(train)
test = pd.read_csv(PATH / 'test.csv')
test = preprocess(test)
train
df_roadway = train[train['roadway'] == 0].reset_index(drop=True)
plt.plot(df_roadway['congestion'])
code
89135962/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
import datetime as dt
import numpy as np
import seaborn as sns
from sklearn import metrics

plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (10, 10)

class CFG:
    n_roadways = 65
    seed = 42

def create_roadways(df):
    roads = list(range(CFG.n_roadways))
    return roads * int(len(df) / CFG.n_roadways)

def preprocess(df):
    df_ = df.copy()
    df_['roadway'] = create_roadways(df_)
    df_['time'] = pd.to_datetime(df_['time'])
    return df_

train = pd.read_csv(PATH / 'train.csv')
train = preprocess(train)
test = pd.read_csv(PATH / 'test.csv')
test = preprocess(test)
train
df_roadway = train[train['roadway'] == 0].reset_index(drop=True)
ax = train.groupby('direction')['congestion'].mean().plot.bar()
ax.bar_label(ax.containers[0])
plt.xticks(rotation=0)
code
128011626/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/car-specification-dataset-1945-2020/Car Dataset 1945-2020.csv', low_memory=False)
dfcolumns = df.columns
dfcolumns_indexes = {i: dfcolumns[i] for i in range(len(dfcolumns))}
bad_data_indexes = [9, 11, 12, 13, 14, 15, 16, 17, 19, 20, 24, 27, 29, 33, 43, 44, 47, 49, 51, 69, 75, 77]
bad_cols = df.columns[bad_data_indexes]
bad_cols
code
128011626/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd
from functools import reduce
import operator

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
128011626/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/car-specification-dataset-1945-2020/Car Dataset 1945-2020.csv', low_memory=False)
dfcolumns = df.columns
dfcolumns_indexes = {i: dfcolumns[i] for i in range(len(dfcolumns))}
for c in dfcolumns_indexes:
    col_name = dfcolumns_indexes[c]
bad_data_indexes = [9, 11, 12, 13, 14, 15, 16, 17, 19, 20, 24, 27, 29, 33, 43, 44, 47, 49, 51, 69, 75, 77]
bad_cols = df.columns[bad_data_indexes]
bad_cols
for col_name in bad_cols:
    print(f'{col_name}: {df[col_name].nunique()}')
code
128011626/cell_8
[ "text_html_output_1.png" ]
import operator
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from functools import reduce  # used by prod_lambda below; the notebook imports it in its first cell

df = pd.read_csv('/kaggle/input/car-specification-dataset-1945-2020/Car Dataset 1945-2020.csv', low_memory=False)
dfcolumns = df.columns
dfcolumns_indexes = {i: dfcolumns[i] for i in range(len(dfcolumns))}
bad_data_indexes = [9, 11, 12, 13, 14, 15, 16, 17, 19, 20, 24, 27, 29, 33, 43, 44, 47, 49, 51, 69, 75, 77]
bad_cols = df.columns[bad_data_indexes]
bad_cols
df = pd.read_csv('/kaggle/input/car-specification-dataset-1945-2020/Car Dataset 1945-2020.csv', low_memory=False)
bad_float_types = set(df.columns)
categorical_columns = ['Make', 'drive_wheels', 'Series', 'boost_type', 'back_suspension', 'country_of_origin', 'presence_of_intercooler', 'Generation', 'car_class', 'Body_type', 'rear_brakes', 'rating_name', 'emission_standards', 'front_suspension', 'steering_type', 'injection_type', 'transmission', 'fuel_grade', 'cylinder_layout', 'front_brakes', 'Modle', 'engine_type', 'overhead_camshaft']
cateforical_columns_check = ['engine_placement', 'Trim', 'number_of_seats', 'wheel_size_r14', 'front_rear_axle_load_kg', 'maximum_torque_n_m']
weird_columns = ['turnover_of_maximum_torque_rpm', 'range_km']
time_coumns = ['safety_assessment']
for c in time_coumns:
    bad_float_types.discard(c)
for c in cateforical_columns_check:
    bad_float_types.discard(c)
for c in categorical_columns:
    bad_float_types.discard(c)
for c in weird_columns:
    bad_float_types.discard(c)

def is_numeric(value):
    try:
        float(value)
        return True
    except ValueError:
        return False

def cast_float_comma(value: str):
    return value.replace(',', '.') if ',' in value else value

def cast_float_space(value: str):
    return value.replace(' ', '') if ' ' in value else value

def cast_cylinder_bore_and_stroke_cycle_mm(value: str):
    if value == 'nan':
        return 'nan'
    if value:
        return 'x'.join([str(round(float(x.replace(',', '.')), 2)) for x in value.split('x')])
    else:
        return None

bad_comma_data = ['load_height_mm']
bad_space_data = ['clearance_mm']
bad_cylinder_bore_and_stroke_cycle_mm = ['cylinder_bore_and_stroke_cycle_mm']
bad_max_speed_km_per_h = ['max_speed_km_per_h']
bad_overhead_camshaft = ['overhead_camshaft']
bad_engine_hp_rpm = ['engine_hp_rpm']
bad_cargo_compartment_length_width_height_mm = ['cargo_compartment_length_width_height_mm']
for bad_col_name in bad_cargo_compartment_length_width_height_mm:
    df[bad_col_name] = df[bad_col_name].astype(str)
    prod_lambda = lambda x: str(reduce(operator.mul, (float(i) for i in x.split('x')), 1)) if x and len(x.split('x')) == 3 else 'nan'
    df[bad_col_name] = df[bad_col_name].apply(lambda x: prod_lambda(x))
for bad_col_name in bad_engine_hp_rpm:
    df[bad_col_name] = df[bad_col_name].astype(str)
    df[bad_col_name] = df[bad_col_name].apply(lambda x: 'nan' if '-' in x else x)
for bad_col_name in bad_max_speed_km_per_h:
    df[bad_col_name] = df[bad_col_name].astype(str)
    df[bad_col_name] = df[bad_col_name].apply(lambda x: 'nan' if x == 'km/h' else x)
for bad_col_name in bad_cylinder_bore_and_stroke_cycle_mm:
    df[bad_col_name] = df[bad_col_name].astype(str)
    df[bad_col_name] = df[bad_col_name].apply(cast_cylinder_bore_and_stroke_cycle_mm)
for bad_col_name in bad_space_data:
    df[bad_col_name] = df[bad_col_name].astype(str)
    df[bad_col_name] = df[bad_col_name].apply(cast_float_space)
for bad_col_name in bad_float_types:
    df[bad_col_name] = df[bad_col_name].astype(str)
    df[bad_col_name] = df[bad_col_name].apply(cast_float_comma)
non_float_ignore = set(['cylinder_bore_and_stroke_cycle_mm'])
for ii, bad_col_name in enumerate(bad_float_types):
    if bad_col_name in non_float_ignore:
        continue
    df[bad_col_name] = df[bad_col_name].astype(str)
    mask = df[bad_col_name].apply(is_numeric)
    non_numeric_values = df.loc[~mask, bad_col_name]
    if len(non_numeric_values) > 0:
        print(bad_col_name, ii)
        print(non_numeric_values)
        break
    df[bad_col_name] = df[bad_col_name].astype(float)
print('run without errors!')
code
128011626/cell_10
[ "text_plain_output_1.png" ]
import operator
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from functools import reduce  # used by prod_lambda below; the notebook imports it in its first cell

df = pd.read_csv('/kaggle/input/car-specification-dataset-1945-2020/Car Dataset 1945-2020.csv', low_memory=False)
dfcolumns = df.columns
dfcolumns_indexes = {i: dfcolumns[i] for i in range(len(dfcolumns))}
bad_data_indexes = [9, 11, 12, 13, 14, 15, 16, 17, 19, 20, 24, 27, 29, 33, 43, 44, 47, 49, 51, 69, 75, 77]
bad_cols = df.columns[bad_data_indexes]
bad_cols
df = pd.read_csv('/kaggle/input/car-specification-dataset-1945-2020/Car Dataset 1945-2020.csv', low_memory=False)
bad_float_types = set(df.columns)
categorical_columns = ['Make', 'drive_wheels', 'Series', 'boost_type', 'back_suspension', 'country_of_origin', 'presence_of_intercooler', 'Generation', 'car_class', 'Body_type', 'rear_brakes', 'rating_name', 'emission_standards', 'front_suspension', 'steering_type', 'injection_type', 'transmission', 'fuel_grade', 'cylinder_layout', 'front_brakes', 'Modle', 'engine_type', 'overhead_camshaft']
cateforical_columns_check = ['engine_placement', 'Trim', 'number_of_seats', 'wheel_size_r14', 'front_rear_axle_load_kg', 'maximum_torque_n_m']
weird_columns = ['turnover_of_maximum_torque_rpm', 'range_km']
time_coumns = ['safety_assessment']
for c in time_coumns:
    bad_float_types.discard(c)
for c in cateforical_columns_check:
    bad_float_types.discard(c)
for c in categorical_columns:
    bad_float_types.discard(c)
for c in weird_columns:
    bad_float_types.discard(c)

def is_numeric(value):
    try:
        float(value)
        return True
    except ValueError:
        return False

def cast_float_comma(value: str):
    return value.replace(',', '.') if ',' in value else value

def cast_float_space(value: str):
    return value.replace(' ', '') if ' ' in value else value

def cast_cylinder_bore_and_stroke_cycle_mm(value: str):
    if value == 'nan':
        return 'nan'
    if value:
        return 'x'.join([str(round(float(x.replace(',', '.')), 2)) for x in value.split('x')])
    else:
        return None

bad_comma_data = ['load_height_mm']
bad_space_data = ['clearance_mm']
bad_cylinder_bore_and_stroke_cycle_mm = ['cylinder_bore_and_stroke_cycle_mm']
bad_max_speed_km_per_h = ['max_speed_km_per_h']
bad_overhead_camshaft = ['overhead_camshaft']
bad_engine_hp_rpm = ['engine_hp_rpm']
bad_cargo_compartment_length_width_height_mm = ['cargo_compartment_length_width_height_mm']
for bad_col_name in bad_cargo_compartment_length_width_height_mm:
    df[bad_col_name] = df[bad_col_name].astype(str)
    prod_lambda = lambda x: str(reduce(operator.mul, (float(i) for i in x.split('x')), 1)) if x and len(x.split('x')) == 3 else 'nan'
    df[bad_col_name] = df[bad_col_name].apply(lambda x: prod_lambda(x))
for bad_col_name in bad_engine_hp_rpm:
    df[bad_col_name] = df[bad_col_name].astype(str)
    df[bad_col_name] = df[bad_col_name].apply(lambda x: 'nan' if '-' in x else x)
for bad_col_name in bad_max_speed_km_per_h:
    df[bad_col_name] = df[bad_col_name].astype(str)
    df[bad_col_name] = df[bad_col_name].apply(lambda x: 'nan' if x == 'km/h' else x)
for bad_col_name in bad_cylinder_bore_and_stroke_cycle_mm:
    df[bad_col_name] = df[bad_col_name].astype(str)
    df[bad_col_name] = df[bad_col_name].apply(cast_cylinder_bore_and_stroke_cycle_mm)
for bad_col_name in bad_space_data:
    df[bad_col_name] = df[bad_col_name].astype(str)
    df[bad_col_name] = df[bad_col_name].apply(cast_float_space)
for bad_col_name in bad_float_types:
    df[bad_col_name] = df[bad_col_name].astype(str)
    df[bad_col_name] = df[bad_col_name].apply(cast_float_comma)
non_float_ignore = set(['cylinder_bore_and_stroke_cycle_mm'])
for ii, bad_col_name in enumerate(bad_float_types):
    if bad_col_name in non_float_ignore:
        continue
    df[bad_col_name] = df[bad_col_name].astype(str)
    mask = df[bad_col_name].apply(is_numeric)
    non_numeric_values = df.loc[~mask, bad_col_name]
    if len(non_numeric_values) > 0:
        break
    df[bad_col_name] = df[bad_col_name].astype(float)
df = df.rename(columns={'Modle': 'model', 'cargo_compartment_length_width_height_mm': 'cargo_compartment_volume_mm3'})
df = df.rename(columns={c: c.lower() for c in df.columns})
df.head()
code
128011626/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/car-specification-dataset-1945-2020/Car Dataset 1945-2020.csv', low_memory=False)
dfcolumns = df.columns
dfcolumns_indexes = {i: dfcolumns[i] for i in range(len(dfcolumns))}
for c in dfcolumns_indexes:
    col_name = dfcolumns_indexes[c]
    print(f'{col_name}: {df[col_name].nunique()}')
code
88088751/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('../input/bmidataset/bmi.csv')
X = data.iloc[:, [0, 1, 2]]
y = data.iloc[:, [3]]
sns.catplot(x='Index', y='Weight', hue='Gender', kind='box', data=data)
code
88088751/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('../input/bmidataset/bmi.csv')
X = data.iloc[:, [0, 1, 2]]
y = data.iloc[:, [3]]
sns.lmplot(x='Height', y='Weight', hue='Gender', data=data)
code
88088751/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/bmidataset/bmi.csv')
data.head()
code
88088751/cell_11
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('../input/bmidataset/bmi.csv')
X = data.iloc[:, [0, 1, 2]]
y = data.iloc[:, [3]]
sns.catplot(x='Gender', y='Weight', data=data)
code
88088751/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
from matplotlib import rcParams
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
88088751/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('../input/bmidataset/bmi.csv')
X = data.iloc[:, [0, 1, 2]]
y = data.iloc[:, [3]]
sns.scatterplot(x='Height', y='Weight', hue='Gender', data=data)
code
88088751/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('../input/bmidataset/bmi.csv')
X = data.iloc[:, [0, 1, 2]]
y = data.iloc[:, [3]]
sns.countplot(x='Index', data=data)
code
88088751/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('../input/bmidataset/bmi.csv')
X = data.iloc[:, [0, 1, 2]]
y = data.iloc[:, [3]]
plt.figure(figsize=(40, 16))
sns.barplot(x=data['Height'], y=data['Weight'])
code
88088751/cell_16
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('../input/bmidataset/bmi.csv')
X = data.iloc[:, [0, 1, 2]]
y = data.iloc[:, [3]]
sns.barplot(x='Index', y='Height', hue='Gender', data=data)
code
88088751/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('../input/bmidataset/bmi.csv')
X = data.iloc[:, [0, 1, 2]]
y = data.iloc[:, [3]]
sns.barplot(x='Index', y='Weight', hue='Gender', data=data)
code
88088751/cell_10
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('../input/bmidataset/bmi.csv')
X = data.iloc[:, [0, 1, 2]]
y = data.iloc[:, [3]]
sns.catplot(x='Gender', y='Height', data=data)
code
88088751/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

data = pd.read_csv('../input/bmidataset/bmi.csv')
X = data.iloc[:, [0, 1, 2]]
y = data.iloc[:, [3]]
sns.catplot(x='Index', y='Height', hue='Gender', kind='box', data=data)
code
16117222/cell_25
[ "text_plain_output_1.png" ]
import numpy as np

train = np.loadtxt('../input/train.csv', delimiter=',', skiprows=1)
train_label = train[:, 0]
train_img = np.resize(train[:, 1:], (train.shape[0], 28, 28))
train.shape
data_for_svd = train[:, 1:]
data_for_svd.shape
code
16117222/cell_30
[ "text_plain_output_1.png" ]
import cv2
import numpy as np

train = np.loadtxt('../input/train.csv', delimiter=',', skiprows=1)
train_label = train[:, 0]
train_img = np.resize(train[:, 1:], (train.shape[0], 28, 28))
train_img.shape
train_sobel_x = np.zeros_like(train_img)
train_sobel_y = np.zeros_like(train_img)
for i in range(len(train_img)):
    train_sobel_x[i] = cv2.Sobel(train_img[i], cv2.CV_64F, dx=1, dy=0, ksize=3)
    train_sobel_y[i] = cv2.Sobel(train_img[i], cv2.CV_64F, dx=0, dy=1, ksize=3)

# Assumption: train_g (gradient magnitude) and train_theta (gradient direction) were
# computed in a notebook cell not captured in this row; reconstructed with plain numpy
# so the weighted histogram below runs (angles wrapped to [0, 2*pi) to match its range)
train_g = np.sqrt(train_sobel_x ** 2 + train_sobel_y ** 2)
train_theta = np.arctan2(train_sobel_y, train_sobel_x) % (2 * np.pi)

# Histograms are weighted by the gradient vector magnitude
train_hist = np.zeros((len(train_img), 16))
for i in range(len(train_img)):
    hist, borders = np.histogram(train_theta[i], bins=16, range=(0., 2. * np.pi), weights=train_g[i])
    train_hist[i] = hist
train_hist = train_hist / np.linalg.norm(train_hist, axis=1)[:, None]

train.shape
data_for_svd = train[:, 1:]
data_for_svd.shape
data_mean = np.mean(data_for_svd, axis=0)
data_for_svd -= data_mean
cov_matrix = np.dot(data_for_svd.T, data_for_svd) / data_for_svd.shape[0]
U, S, _ = np.linalg.svd(cov_matrix)
S_thr = 0.83
S_cumsum = 0
for i in range(S.shape[0]):
    S_cumsum += S[i] / np.sum(S)
    if S_cumsum >= S_thr:
        n_comp = i + 1
        break
data_reduced = np.dot(data_for_svd, U[:, :n_comp])
data_reduced.shape
code
16117222/cell_33
[ "text_plain_output_1.png" ]
import cv2
import numpy as np

train = np.loadtxt('../input/train.csv', delimiter=',', skiprows=1)
train_label = train[:, 0]
train_img = np.resize(train[:, 1:], (train.shape[0], 28, 28))
train_img.shape
train_sobel_x = np.zeros_like(train_img)
train_sobel_y = np.zeros_like(train_img)
for i in range(len(train_img)):
    train_sobel_x[i] = cv2.Sobel(train_img[i], cv2.CV_64F, dx=1, dy=0, ksize=3)
    train_sobel_y[i] = cv2.Sobel(train_img[i], cv2.CV_64F, dx=0, dy=1, ksize=3)

# Assumption: train_g (gradient magnitude) and train_theta (gradient direction) were
# computed in a notebook cell not captured in this row; reconstructed with plain numpy
# so the weighted histogram below runs (angles wrapped to [0, 2*pi) to match its range)
train_g = np.sqrt(train_sobel_x ** 2 + train_sobel_y ** 2)
train_theta = np.arctan2(train_sobel_y, train_sobel_x) % (2 * np.pi)

# Histograms are weighted by the gradient vector magnitude
train_hist = np.zeros((len(train_img), 16))
for i in range(len(train_img)):
    hist, borders = np.histogram(train_theta[i], bins=16, range=(0., 2. * np.pi), weights=train_g[i])
    train_hist[i] = hist
train_hist = train_hist / np.linalg.norm(train_hist, axis=1)[:, None]
train_hist.shape

train.shape
data_for_svd = train[:, 1:]
data_for_svd.shape
data_mean = np.mean(data_for_svd, axis=0)
data_for_svd -= data_mean
cov_matrix = np.dot(data_for_svd.T, data_for_svd) / data_for_svd.shape[0]
U, S, _ = np.linalg.svd(cov_matrix)
S_thr = 0.83
S_cumsum = 0
for i in range(S.shape[0]):
    S_cumsum += S[i] / np.sum(S)
    if S_cumsum >= S_thr:
        n_comp = i + 1
        break
data_reduced = np.dot(data_for_svd, U[:, :n_comp])
data_reduced.shape
train_data_svd = data_reduced[:42000]
test_data_svd = data_reduced[42000:]
(train_data_svd.shape, test_data_svd.shape)
train_data = np.hstack((train_hist, train_data_svd))
train_data.shape
code
16117222/cell_6
[ "image_output_1.png" ]
import numpy as np

train = np.loadtxt('../input/train.csv', delimiter=',', skiprows=1)
train_label = train[:, 0]
train_img = np.resize(train[:, 1:], (train.shape[0], 28, 28))
train_img.shape
code
16117222/cell_29
[ "text_plain_output_1.png" ]
import cv2
import numpy as np

train = np.loadtxt('../input/train.csv', delimiter=',', skiprows=1)
train_label = train[:, 0]
train_img = np.resize(train[:, 1:], (train.shape[0], 28, 28))
train_img.shape
train_sobel_x = np.zeros_like(train_img)
train_sobel_y = np.zeros_like(train_img)
for i in range(len(train_img)):
    train_sobel_x[i] = cv2.Sobel(train_img[i], cv2.CV_64F, dx=1, dy=0, ksize=3)
    train_sobel_y[i] = cv2.Sobel(train_img[i], cv2.CV_64F, dx=0, dy=1, ksize=3)

# Assumption: train_g (gradient magnitude) and train_theta (gradient direction) were
# computed in a notebook cell not captured in this row; reconstructed with plain numpy
# so the weighted histogram below runs (angles wrapped to [0, 2*pi) to match its range)
train_g = np.sqrt(train_sobel_x ** 2 + train_sobel_y ** 2)
train_theta = np.arctan2(train_sobel_y, train_sobel_x) % (2 * np.pi)

# Histograms are weighted by the gradient vector magnitude
train_hist = np.zeros((len(train_img), 16))
for i in range(len(train_img)):
    hist, borders = np.histogram(train_theta[i], bins=16, range=(0., 2. * np.pi), weights=train_g[i])
    train_hist[i] = hist
train_hist = train_hist / np.linalg.norm(train_hist, axis=1)[:, None]

train.shape
data_for_svd = train[:, 1:]
data_for_svd.shape
data_mean = np.mean(data_for_svd, axis=0)
data_for_svd -= data_mean
cov_matrix = np.dot(data_for_svd.T, data_for_svd) / data_for_svd.shape[0]
U, S, _ = np.linalg.svd(cov_matrix)
S_thr = 0.83
S_cumsum = 0
for i in range(S.shape[0]):
    S_cumsum += S[i] / np.sum(S)
    if S_cumsum >= S_thr:
        n_comp = i + 1
        print('n_comp:', n_comp, '\t', 'cumsum:', S_cumsum)
        break
code
16117222/cell_32
[ "text_plain_output_1.png" ]
import cv2
import numpy as np

train = np.loadtxt('../input/train.csv', delimiter=',', skiprows=1)
train_label = train[:, 0]
train_img = np.resize(train[:, 1:], (train.shape[0], 28, 28))
train_img.shape
train_sobel_x = np.zeros_like(train_img)
train_sobel_y = np.zeros_like(train_img)
for i in range(len(train_img)):
    train_sobel_x[i] = cv2.Sobel(train_img[i], cv2.CV_64F, dx=1, dy=0, ksize=3)
    train_sobel_y[i] = cv2.Sobel(train_img[i], cv2.CV_64F, dx=0, dy=1, ksize=3)

# Assumption: train_g (gradient magnitude) and train_theta (gradient direction) were
# computed in a notebook cell not captured in this row; reconstructed with plain numpy
# so the weighted histogram below runs (angles wrapped to [0, 2*pi) to match its range)
train_g = np.sqrt(train_sobel_x ** 2 + train_sobel_y ** 2)
train_theta = np.arctan2(train_sobel_y, train_sobel_x) % (2 * np.pi)

# Histograms are weighted by the gradient vector magnitude
train_hist = np.zeros((len(train_img), 16))
for i in range(len(train_img)):
    hist, borders = np.histogram(train_theta[i], bins=16, range=(0., 2. * np.pi), weights=train_g[i])
    train_hist[i] = hist
train_hist = train_hist / np.linalg.norm(train_hist, axis=1)[:, None]

train.shape
data_for_svd = train[:, 1:]
data_for_svd.shape
data_mean = np.mean(data_for_svd, axis=0)
data_for_svd -= data_mean
cov_matrix = np.dot(data_for_svd.T, data_for_svd) / data_for_svd.shape[0]
U, S, _ = np.linalg.svd(cov_matrix)
S_thr = 0.83
S_cumsum = 0
for i in range(S.shape[0]):
    S_cumsum += S[i] / np.sum(S)
    if S_cumsum >= S_thr:
        n_comp = i + 1
        break
data_reduced = np.dot(data_for_svd, U[:, :n_comp])
data_reduced.shape
train_data_svd = data_reduced[:42000]
test_data_svd = data_reduced[42000:]
(train_data_svd.shape, test_data_svd.shape)
code
16117222/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np

train = np.loadtxt('../input/train.csv', delimiter=',', skiprows=1)
train_label = train[:, 0]
train_img = np.resize(train[:, 1:], (train.shape[0], 28, 28))
train_img.shape
fig = plt.figure(figsize=(20, 10))
for i, img in enumerate(train_img[0:5], 1):
    subplot = fig.add_subplot(1, 7, i)
    plt.imshow(img, cmap='gray')
    subplot.set_title('%s' % train_label[i - 1])
code
16117222/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np

train = np.loadtxt('../input/train.csv', delimiter=',', skiprows=1)
train_label = train[:, 0]
train_img = np.resize(train[:, 1:], (train.shape[0], 28, 28))
train_img.shape
fig = plt.figure(figsize=(20, 10))
for i, img in enumerate(train_img[0:5], 1):
    subplot = fig.add_subplot(1, 7, i)
    plt.imshow(img, cmap='gray')
    subplot.set_title('%s' % train_label[i - 1])

# train_g (gradient magnitude) and train_theta (gradient direction) are defined in a
# notebook cell that is not captured in this row
fig = plt.figure(figsize=(20, 10))
for i, img in enumerate(train_g[:5], 1):
    subplot = fig.add_subplot(1, 7, i)
    plt.imshow(img, cmap='gray')
    subplot.set_title('%s' % train_label[i - 1])
    subplot = fig.add_subplot(3, 7, i)
    plt.hist(train_theta[i - 1].flatten(), bins=16, weights=train_g[i - 1].flatten())
code
16117222/cell_24
[ "image_output_1.png" ]
import numpy as np

train = np.loadtxt('../input/train.csv', delimiter=',', skiprows=1)
train_label = train[:, 0]
train_img = np.resize(train[:, 1:], (train.shape[0], 28, 28))
train.shape
code
16117222/cell_22
[ "image_output_1.png" ]
import cv2
import numpy as np

train = np.loadtxt('../input/train.csv', delimiter=',', skiprows=1)
train_label = train[:, 0]
train_img = np.resize(train[:, 1:], (train.shape[0], 28, 28))
train_img.shape
train_sobel_x = np.zeros_like(train_img)
train_sobel_y = np.zeros_like(train_img)
for i in range(len(train_img)):
    train_sobel_x[i] = cv2.Sobel(train_img[i], cv2.CV_64F, dx=1, dy=0, ksize=3)
    train_sobel_y[i] = cv2.Sobel(train_img[i], cv2.CV_64F, dx=0, dy=1, ksize=3)

# Assumption: train_g (gradient magnitude) and train_theta (gradient direction) were
# computed in a notebook cell not captured in this row; reconstructed with plain numpy
# so the weighted histogram below runs (angles wrapped to [0, 2*pi) to match its range)
train_g = np.sqrt(train_sobel_x ** 2 + train_sobel_y ** 2)
train_theta = np.arctan2(train_sobel_y, train_sobel_x) % (2 * np.pi)

# Histograms are weighted by the gradient vector magnitude
train_hist = np.zeros((len(train_img), 16))
for i in range(len(train_img)):
    hist, borders = np.histogram(train_theta[i], bins=16, range=(0., 2. * np.pi), weights=train_g[i])
    train_hist[i] = hist
train_hist = train_hist / np.linalg.norm(train_hist, axis=1)[:, None]
train_hist.shape
code
16117222/cell_36
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np

train = np.loadtxt('../input/train.csv', delimiter=',', skiprows=1)
train_label = train[:, 0]
train_img = np.resize(train[:, 1:], (train.shape[0], 28, 28))
train_img.shape
fig = plt.figure(figsize=(20, 10))
for i, img in enumerate(train_img[0:5], 1):
    subplot = fig.add_subplot(1, 7, i)
    plt.imshow(img, cmap='gray')
    subplot.set_title('%s' % train_label[i - 1])

# train_g (gradient magnitude) and train_theta (gradient direction) are defined in a
# notebook cell that is not captured in this row
fig = plt.figure(figsize=(20, 10))
for i, img in enumerate(train_g[:5], 1):
    subplot = fig.add_subplot(1, 7, i)
    plt.imshow(img, cmap='gray')
    subplot.set_title('%s' % train_label[i - 1])
    subplot = fig.add_subplot(3, 7, i)
    plt.hist(train_theta[i - 1].flatten(), bins=16, weights=train_g[i - 1].flatten())

h, w = train_img.shape[1:]
cX, cY = (int(w * 0.5), int(h * 0.5))
segments = [(0, w, 0, cY), (0, w, cY, h), (0, cX, 0, h), (cX, w, 0, h)]
fig = plt.figure(figsize=(16, 4))
for num, i in enumerate(segments, 1):
    subplot = fig.add_subplot(1, 4, num)
    plt.imshow(train_img[1, i[0]:i[1], i[2]:i[3]], cmap='gray')
code
128011726/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df1 = pd.read_csv('train_users_2.csv') df1
code
128011726/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
18129647/cell_30
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import logging
import os
import tensorflow as tf
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
zip_dir = tf.keras.utils.get_file('cats_and_dogs_filterted.zip', origin=_URL, extract=True)
base_dir = os.path.join(os.path.dirname(zip_dir), 'cats_and_dogs_filtered')
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, 'dogs')
validation_cats_dir = os.path.join(validation_dir, 'cats')
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
BATCH_SIZE = 100
IMG_SHAPE = 150
train_image_generator = ImageDataGenerator(rescale=1.0 / 255)
validation_image_generator = ImageDataGenerator(rescale=1.0 / 255)
train_data_gen = train_image_generator.flow_from_directory(batch_size=BATCH_SIZE, directory=train_dir, shuffle=True, target_size=(IMG_SHAPE, IMG_SHAPE), class_mode='binary')
code
18129647/cell_44
[ "text_plain_output_1.png" ]
import logging
import tensorflow as tf
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
zip_dir = tf.keras.utils.get_file('cats_and_dogs_filterted.zip', origin=_URL, extract=True)
model = tf.keras.models.Sequential([tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(64, (3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(128, (3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(128, (3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(512, activation='relu'), tf.keras.layers.Dense(2, activation='softmax')])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
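# Design note (added): for this two-class problem, a 2-unit softmax trained
# with sparse_categorical_crossentropy is mathematically equivalent to a
# single sigmoid unit with binary cross-entropy; the two-logit form simply
# keeps one output per class.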
code
18129647/cell_50
[ "image_output_1.png" ]
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import logging
import matplotlib.pyplot as plt
import numpy as np
import os
import tensorflow as tf
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
zip_dir = tf.keras.utils.get_file('cats_and_dogs_filterted.zip', origin=_URL, extract=True)
base_dir = os.path.join(os.path.dirname(zip_dir), 'cats_and_dogs_filtered')
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, 'dogs')
validation_cats_dir = os.path.join(validation_dir, 'cats')
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
num_cats_tr = len(os.listdir(train_cats_dir))
num_dogs_tr = len(os.listdir(train_dogs_dir))
num_cats_val = len(os.listdir(validation_cats_dir))
num_dogs_val = len(os.listdir(validation_dogs_dir))
total_train = num_cats_tr + num_dogs_tr
total_val = num_cats_val + num_dogs_val
BATCH_SIZE = 100
IMG_SHAPE = 150
train_image_generator = ImageDataGenerator(rescale=1.0 / 255)
validation_image_generator = ImageDataGenerator(rescale=1.0 / 255)
train_data_gen = train_image_generator.flow_from_directory(batch_size=BATCH_SIZE, directory=train_dir, shuffle=True, target_size=(IMG_SHAPE, IMG_SHAPE), class_mode='binary')
val_data_gen = validation_image_generator.flow_from_directory(batch_size=BATCH_SIZE, directory=validation_dir, shuffle=False, target_size=(IMG_SHAPE, IMG_SHAPE), class_mode='binary')

# Plot images in a grid with 1 row and 5 columns, one image per column.
def plotImages(images_arr):
    fig, axes = plt.subplots(1, 5, figsize=(20, 20))
    axes = axes.flatten()
    for img, ax in zip(images_arr, axes):
        ax.imshow(img)
    plt.tight_layout()
    plt.show()

model = tf.keras.models.Sequential([tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(64, (3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(128, (3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(128, (3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(512, activation='relu'), tf.keras.layers.Dense(2, activation='softmax')])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
EPOCHS = 100
history = model.fit_generator(train_data_gen, steps_per_epoch=int(np.ceil(total_train / float(BATCH_SIZE))), epochs=EPOCHS, validation_data=val_data_gen, validation_steps=int(np.ceil(total_val / float(BATCH_SIZE))))
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(EPOCHS)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.savefig('./foo.png')
plt.show()
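# Compatibility note (added): the 'acc'/'val_acc' history keys above are the
# TF 1.x names; newer tf.keras releases record 'accuracy'/'val_accuracy' and
# replace fit_generator with model.fit. A version-tolerant lookup could be:
# acc = history.history.get('acc') or history.history.get('accuracy')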
code
18129647/cell_16
[ "text_plain_output_1.png" ]
zip_dir_base = os.path.dirname(zip_dir) !find $zip_dir_base -type d -print
code
18129647/cell_47
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import logging
import numpy as np
import os
import tensorflow as tf
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
zip_dir = tf.keras.utils.get_file('cats_and_dogs_filterted.zip', origin=_URL, extract=True)
base_dir = os.path.join(os.path.dirname(zip_dir), 'cats_and_dogs_filtered')
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, 'dogs')
validation_cats_dir = os.path.join(validation_dir, 'cats')
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
num_cats_tr = len(os.listdir(train_cats_dir))
num_dogs_tr = len(os.listdir(train_dogs_dir))
num_cats_val = len(os.listdir(validation_cats_dir))
num_dogs_val = len(os.listdir(validation_dogs_dir))
total_train = num_cats_tr + num_dogs_tr
total_val = num_cats_val + num_dogs_val
BATCH_SIZE = 100
IMG_SHAPE = 150
train_image_generator = ImageDataGenerator(rescale=1.0 / 255)
validation_image_generator = ImageDataGenerator(rescale=1.0 / 255)
train_data_gen = train_image_generator.flow_from_directory(batch_size=BATCH_SIZE, directory=train_dir, shuffle=True, target_size=(IMG_SHAPE, IMG_SHAPE), class_mode='binary')
val_data_gen = validation_image_generator.flow_from_directory(batch_size=BATCH_SIZE, directory=validation_dir, shuffle=False, target_size=(IMG_SHAPE, IMG_SHAPE), class_mode='binary')
model = tf.keras.models.Sequential([tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(64, (3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(128, (3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Conv2D(128, (3, 3), activation='relu'), tf.keras.layers.MaxPooling2D(2, 2), tf.keras.layers.Flatten(), tf.keras.layers.Dense(512, activation='relu'), tf.keras.layers.Dense(2, activation='softmax')])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
EPOCHS = 100
history = model.fit_generator(train_data_gen, steps_per_epoch=int(np.ceil(total_train / float(BATCH_SIZE))), epochs=EPOCHS, validation_data=val_data_gen, validation_steps=int(np.ceil(total_val / float(BATCH_SIZE))))
code
18129647/cell_31
[ "text_plain_output_1.png" ]
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import logging
import os
import tensorflow as tf
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
zip_dir = tf.keras.utils.get_file('cats_and_dogs_filterted.zip', origin=_URL, extract=True)
base_dir = os.path.join(os.path.dirname(zip_dir), 'cats_and_dogs_filtered')
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, 'dogs')
validation_cats_dir = os.path.join(validation_dir, 'cats')
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
BATCH_SIZE = 100
IMG_SHAPE = 150
train_image_generator = ImageDataGenerator(rescale=1.0 / 255)
validation_image_generator = ImageDataGenerator(rescale=1.0 / 255)
val_data_gen = validation_image_generator.flow_from_directory(batch_size=BATCH_SIZE, directory=validation_dir, shuffle=False, target_size=(IMG_SHAPE, IMG_SHAPE), class_mode='binary')
code
18129647/cell_14
[ "text_plain_output_1.png" ]
import logging
import tensorflow as tf
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
zip_dir = tf.keras.utils.get_file('cats_and_dogs_filterted.zip', origin=_URL, extract=True)
code
18129647/cell_22
[ "text_plain_output_1.png" ]
import logging
import os
import tensorflow as tf
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
zip_dir = tf.keras.utils.get_file('cats_and_dogs_filterted.zip', origin=_URL, extract=True)
base_dir = os.path.join(os.path.dirname(zip_dir), 'cats_and_dogs_filtered')
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, 'dogs')
validation_cats_dir = os.path.join(validation_dir, 'cats')
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
num_cats_tr = len(os.listdir(train_cats_dir))
num_dogs_tr = len(os.listdir(train_dogs_dir))
num_cats_val = len(os.listdir(validation_cats_dir))
num_dogs_val = len(os.listdir(validation_dogs_dir))
total_train = num_cats_tr + num_dogs_tr
total_val = num_cats_val + num_dogs_val
print('total training cat images:', num_cats_tr)
print('total training dog images:', num_dogs_tr)
print('total validation cat images:', num_cats_val)
print('total validation dog images:', num_dogs_val)
print('--')
print('Total training images:', total_train)
print('Total validation images:', total_val)
code
18129647/cell_37
[ "image_output_1.png" ]
import matplotlib.pyplot as plt

# Plot images in a grid with 1 row and 5 columns, one image per column.
def plotImages(images_arr):
    fig, axes = plt.subplots(1, 5, figsize=(20, 20))
    axes = axes.flatten()
    for img, ax in zip(images_arr, axes):
        ax.imshow(img)
    plt.tight_layout()
    plt.show()

# sample_training_images is assumed to come from the training generator in an
# earlier cell, e.g. sample_training_images, _ = next(train_data_gen).
plotImages(sample_training_images[:5])
code
2000944/cell_4
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd

def get_xyz_data(filename):
    pos_data = []
    lat_data = []
    with open(filename) as f:
        for line in f.readlines():
            x = line.split()
            if x[0] == 'atom':
                # np.float was removed in NumPy 1.24; the builtin float is equivalent
                pos_data.append([np.array(x[1:4], dtype=float), x[4]])
            elif x[0] == 'lattice_vector':
                lat_data.append(np.array(x[1:4], dtype=float))
    A = np.transpose(lat_data)
    B = np.linalg.inv(A)
    R = pos_data[0][0]
    return np.matmul(B, R)
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
for c in train.columns:
    if c.find('angle') != -1:
        print(c)
        train[c] = np.radians(train[c])
        test[c] = np.radians(test[c])
traindata = np.zeros((train.shape[0], 3))
for i, idx in enumerate(train.id.values):
    fn = '../input/train/{}/geometry.xyz'.format(idx)
    data = get_xyz_data(fn)
    traindata[i, :] = data
testdata = np.zeros((test.shape[0], 3))
for i, idx in enumerate(test.id.values):
    fn = '../input/test/{}/geometry.xyz'.format(idx)
    data = get_xyz_data(fn)
    testdata[i, :] = data
train['a0'] = 0
train['a1'] = 0
train['a2'] = 0
train[['a0', 'a1', 'a2']] = traindata
test['a0'] = 0
test['a1'] = 0
test['a2'] = 0
test[['a0', 'a1', 'a2']] = testdata
train.number_of_total_atoms = np.log(train.number_of_total_atoms)
test.number_of_total_atoms = np.log(test.number_of_total_atoms)
alldata = pd.concat([train, test])
alldata = pd.concat([alldata.drop(['spacegroup'], axis=1), pd.get_dummies(alldata['spacegroup'], prefix='SG')], axis=1)
train = alldata[:train.shape[0]].copy()
test = alldata[train.shape[0]:].copy()
target_fe = np.log1p(train.formation_energy_ev_natom)
target_be = np.log1p(train.bandgap_energy_ev)
del train['formation_energy_ev_natom'], train['bandgap_energy_ev'], train['id'], test['id']
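# Note on get_xyz_data (added): with the lattice vectors stacked as the
# columns of A and B = inv(A), np.matmul(B, R) expresses the first atom's
# Cartesian position R in fractional (lattice) coordinates, which is what
# the a0/a1/a2 features above hold.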
code
105190994/cell_42
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import warnings
import pandas_profiling as pp
import os
warnings.filterwarnings(action='ignore')
CSV_PATH = '../input/iris-files/iris.csv'
iris = pd.read_csv(CSV_PATH)
nrow = 5
n_features = 4
dates = pd.date_range('20220101', periods=nrow, freq='s')
df = pd.DataFrame(np.random.randn(nrow, n_features), index=dates, columns=list('ABCD'))
df
t = np.arange(5)
sin_t = np.sin(t)
cos_t = np.cos(t)
exp_t = np.exp(t)
df2 = pd.DataFrame({'t': t, 'sin': sin_t, 'cos': cos_t, 'exp': exp_t})
df2
(df.head(3), df.tail(2), df.columns, df.index, df.shape)
df.loc[:, 'A']
df.iloc[0:2, 3:]
df[df.A < 0]
df1 = pd.DataFrame(np.random.rand(2, 4))
df2 = pd.DataFrame(np.random.rand(1, 4))
df3 = pd.DataFrame(np.random.rand(3, 5))
df_list = [df1, df2, df3]
df4 = pd.concat(df_list, axis=0)
df4
df5 = pd.concat(df_list, axis=1)
df5
iris.groupby('Species').apply(np.mean)
iris = pd.read_csv(CSV_PATH)
iris.to_csv('iris.csv', index=False)
df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df
missing = df.isnull().sum()
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing_df = pd.DataFrame({'col': missing.index, 'num_missing': missing.values})
plt.figure(figsize=(14, 7))
plt.title('Missing Map')
f = sns.barplot(y='col', x='num_missing', data=missing_df)
f.figure
code
105190994/cell_21
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
import pandas_profiling as pp
import seaborn as sns
import os
warnings.filterwarnings(action='ignore')
CSV_PATH = '../input/iris-files/iris.csv'
iris = pd.read_csv(CSV_PATH)
nrow = 5
n_features = 4
dates = pd.date_range('20220101', periods=nrow, freq='s')
df = pd.DataFrame(np.random.randn(nrow, n_features), index=dates, columns=list('ABCD'))
df
(df.head(3), df.tail(2), df.columns, df.index, df.shape)
df[0:3]
code
105190994/cell_13
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
import pandas_profiling as pp
import seaborn as sns
import os
warnings.filterwarnings(action='ignore')
CSV_PATH = '../input/iris-files/iris.csv'
iris = pd.read_csv(CSV_PATH)
nrow = 5
n_features = 4
dates = pd.date_range('20220101', periods=nrow, freq='s')
df = pd.DataFrame(np.random.randn(nrow, n_features), index=dates, columns=list('ABCD'))
df
t = np.arange(5)
sin_t = np.sin(t)
cos_t = np.cos(t)
exp_t = np.exp(t)
df2 = pd.DataFrame({'t': t, 'sin': sin_t, 'cos': cos_t, 'exp': exp_t})
df2
code
105190994/cell_23
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
import pandas_profiling as pp
import seaborn as sns
import os
warnings.filterwarnings(action='ignore')
CSV_PATH = '../input/iris-files/iris.csv'
iris = pd.read_csv(CSV_PATH)
nrow = 5
n_features = 4
dates = pd.date_range('20220101', periods=nrow, freq='s')
df = pd.DataFrame(np.random.randn(nrow, n_features), index=dates, columns=list('ABCD'))
df
(df.head(3), df.tail(2), df.columns, df.index, df.shape)
df.loc[:, 'A']
df.iloc[0:2, 3:]
code
105190994/cell_30
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
import pandas_profiling as pp
import seaborn as sns
import os
warnings.filterwarnings(action='ignore')
CSV_PATH = '../input/iris-files/iris.csv'
iris = pd.read_csv(CSV_PATH)
nrow = 5
n_features = 4
dates = pd.date_range('20220101', periods=nrow, freq='s')
df = pd.DataFrame(np.random.randn(nrow, n_features), index=dates, columns=list('ABCD'))
df
t = np.arange(5)
sin_t = np.sin(t)
cos_t = np.cos(t)
exp_t = np.exp(t)
df2 = pd.DataFrame({'t': t, 'sin': sin_t, 'cos': cos_t, 'exp': exp_t})
df2
df1 = pd.DataFrame(np.random.rand(2, 4))
df2 = pd.DataFrame(np.random.rand(1, 4))
df3 = pd.DataFrame(np.random.rand(3, 5))
df_list = [df1, df2, df3]
iris.groupby('Species').apply(np.mean)
code
105190994/cell_33
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
import pandas_profiling as pp
import seaborn as sns
import os
warnings.filterwarnings(action='ignore')
CSV_PATH = '../input/iris-files/iris.csv'
iris = pd.read_csv(CSV_PATH)
nrow = 5
n_features = 4
dates = pd.date_range('20220101', periods=nrow, freq='s')
df = pd.DataFrame(np.random.randn(nrow, n_features), index=dates, columns=list('ABCD'))
df
t = np.arange(5)
sin_t = np.sin(t)
cos_t = np.cos(t)
exp_t = np.exp(t)
df2 = pd.DataFrame({'t': t, 'sin': sin_t, 'cos': cos_t, 'exp': exp_t})
df2
df1 = pd.DataFrame(np.random.rand(2, 4))
df2 = pd.DataFrame(np.random.rand(1, 4))
df3 = pd.DataFrame(np.random.rand(3, 5))
df_list = [df1, df2, df3]
df4 = pd.concat(df_list, axis=0)
df4
df5 = pd.concat(df_list, axis=1)
df5
iris.groupby('Species').apply(np.mean)
iris = pd.read_csv(CSV_PATH)
iris.to_csv('iris.csv', index=False)
iris.to_numpy()
code
105190994/cell_44
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import warnings
import pandas_profiling as pp
import os
warnings.filterwarnings(action='ignore')
CSV_PATH = '../input/iris-files/iris.csv'
iris = pd.read_csv(CSV_PATH)
nrow = 5
n_features = 4
dates = pd.date_range('20220101', periods=nrow, freq='s')
df = pd.DataFrame(np.random.randn(nrow, n_features), index=dates, columns=list('ABCD'))
df
t = np.arange(5)
sin_t = np.sin(t)
cos_t = np.cos(t)
exp_t = np.exp(t)
df2 = pd.DataFrame({'t': t, 'sin': sin_t, 'cos': cos_t, 'exp': exp_t})
df2
(df.head(3), df.tail(2), df.columns, df.index, df.shape)
df.loc[:, 'A']
df.iloc[0:2, 3:]
df[df.A < 0]
df1 = pd.DataFrame(np.random.rand(2, 4))
df2 = pd.DataFrame(np.random.rand(1, 4))
df3 = pd.DataFrame(np.random.rand(3, 5))
df_list = [df1, df2, df3]
df4 = pd.concat(df_list, axis=0)
df4
df5 = pd.concat(df_list, axis=1)
df5
iris.groupby('Species').apply(np.mean)
iris = pd.read_csv(CSV_PATH)
iris.to_csv('iris.csv', index=False)
df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df
missing = df.isnull().sum()
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing_df = pd.DataFrame({'col': missing.index, 'num_missing': missing.values})
plt.figure(figsize=(14, 7))
plt.title('Missing Map')
f = sns.barplot(y='col', x='num_missing', data=missing_df)
f.figure
corr = df.drop(['Id'], axis=1).corr()
mask = np.zeros_like(corr, dtype=bool)  # np.bool was removed in NumPy 1.24; builtin bool is equivalent
mask[np.triu_indices_from(mask)] = True  # hide the upper triangle so each correlation pair appears once
f, ax = plt.subplots(figsize=(11, 11))
cmap = sns.diverging_palette(220, 10, as_cmap=True)
f = sns.heatmap(corr, mask=mask, cmap=cmap, vmax=0.3, center=0, square=True, linewidths=0.5, cbar_kws={'shrink': 0.5})
f.figure
code
105190994/cell_6
[ "image_output_1.png" ]
import pandas as pd
import warnings
import numpy as np
import matplotlib.pyplot as plt
import pandas_profiling as pp
import seaborn as sns
import os
warnings.filterwarnings(action='ignore')
CSV_PATH = '../input/iris-files/iris.csv'
iris = pd.read_csv(CSV_PATH)
iris.info()
code
105190994/cell_29
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
import pandas_profiling as pp
import seaborn as sns
import os
warnings.filterwarnings(action='ignore')
CSV_PATH = '../input/iris-files/iris.csv'
iris = pd.read_csv(CSV_PATH)
nrow = 5
n_features = 4
dates = pd.date_range('20220101', periods=nrow, freq='s')
df = pd.DataFrame(np.random.randn(nrow, n_features), index=dates, columns=list('ABCD'))
df
t = np.arange(5)
sin_t = np.sin(t)
cos_t = np.cos(t)
exp_t = np.exp(t)
df2 = pd.DataFrame({'t': t, 'sin': sin_t, 'cos': cos_t, 'exp': exp_t})
df2
df1 = pd.DataFrame(np.random.rand(2, 4))
df2 = pd.DataFrame(np.random.rand(1, 4))
df3 = pd.DataFrame(np.random.rand(3, 5))
df_list = [df1, df2, df3]
df4 = pd.concat(df_list, axis=0)
df4
df5 = pd.concat(df_list, axis=1)
df5
df5.fillna(0)
code
105190994/cell_39
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
import pandas_profiling as pp
import seaborn as sns
import os
warnings.filterwarnings(action='ignore')
CSV_PATH = '../input/iris-files/iris.csv'
iris = pd.read_csv(CSV_PATH)
nrow = 5
n_features = 4
dates = pd.date_range('20220101', periods=nrow, freq='s')
df = pd.DataFrame(np.random.randn(nrow, n_features), index=dates, columns=list('ABCD'))
df
t = np.arange(5)
sin_t = np.sin(t)
cos_t = np.cos(t)
exp_t = np.exp(t)
df2 = pd.DataFrame({'t': t, 'sin': sin_t, 'cos': cos_t, 'exp': exp_t})
df2
(df.head(3), df.tail(2), df.columns, df.index, df.shape)
df.loc[:, 'A']
df.iloc[0:2, 3:]
df[df.A < 0]
df1 = pd.DataFrame(np.random.rand(2, 4))
df2 = pd.DataFrame(np.random.rand(1, 4))
df3 = pd.DataFrame(np.random.rand(3, 5))
df_list = [df1, df2, df3]
df4 = pd.concat(df_list, axis=0)
df4
df5 = pd.concat(df_list, axis=1)
df5
iris.groupby('Species').apply(np.mean)
iris = pd.read_csv(CSV_PATH)
iris.to_csv('iris.csv', index=False)
df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df
code
105190994/cell_41
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import seaborn as sns
import warnings
import matplotlib.pyplot as plt
import pandas_profiling as pp
import os
warnings.filterwarnings(action='ignore')
CSV_PATH = '../input/iris-files/iris.csv'
iris = pd.read_csv(CSV_PATH)
nrow = 5
n_features = 4
dates = pd.date_range('20220101', periods=nrow, freq='s')
df = pd.DataFrame(np.random.randn(nrow, n_features), index=dates, columns=list('ABCD'))
df
t = np.arange(5)
sin_t = np.sin(t)
cos_t = np.cos(t)
exp_t = np.exp(t)
df2 = pd.DataFrame({'t': t, 'sin': sin_t, 'cos': cos_t, 'exp': exp_t})
df2
(df.head(3), df.tail(2), df.columns, df.index, df.shape)
df.loc[:, 'A']
df.iloc[0:2, 3:]
df[df.A < 0]
df1 = pd.DataFrame(np.random.rand(2, 4))
df2 = pd.DataFrame(np.random.rand(1, 4))
df3 = pd.DataFrame(np.random.rand(3, 5))
df_list = [df1, df2, df3]
df4 = pd.concat(df_list, axis=0)
df4
df5 = pd.concat(df_list, axis=1)
df5
iris.groupby('Species').apply(np.mean)
iris = pd.read_csv(CSV_PATH)
iris.to_csv('iris.csv', index=False)
df = pd.read_csv('../input/house-prices-advanced-regression-techniques/train.csv')
df
missing = df.isnull().sum()
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing_df = pd.DataFrame({'col': missing.index, 'num_missing': missing.values})
sns.barplot(x='col', y='num_missing', data=missing_df)
code
105190994/cell_7
[ "image_output_1.png" ]
import pandas as pd
import warnings
import numpy as np
import matplotlib.pyplot as plt
import pandas_profiling as pp
import seaborn as sns
import os
warnings.filterwarnings(action='ignore')
CSV_PATH = '../input/iris-files/iris.csv'
iris = pd.read_csv(CSV_PATH)
iris.describe()
code
105190994/cell_18
[ "text_html_output_1.png" ]
import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
import pandas_profiling as pp
import seaborn as sns
import os
warnings.filterwarnings(action='ignore')
CSV_PATH = '../input/iris-files/iris.csv'
iris = pd.read_csv(CSV_PATH)
nrow = 5
n_features = 4
dates = pd.date_range('20220101', periods=nrow, freq='s')
df = pd.DataFrame(np.random.randn(nrow, n_features), index=dates, columns=list('ABCD'))
df
(df.head(3), df.tail(2), df.columns, df.index, df.shape)
df.describe()
code
105190994/cell_28
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import warnings
import matplotlib.pyplot as plt
import pandas_profiling as pp
import seaborn as sns
import os
warnings.filterwarnings(action='ignore')
CSV_PATH = '../input/iris-files/iris.csv'
iris = pd.read_csv(CSV_PATH)
nrow = 5
n_features = 4
dates = pd.date_range('20220101', periods=nrow, freq='s')
df = pd.DataFrame(np.random.randn(nrow, n_features), index=dates, columns=list('ABCD'))
df
t = np.arange(5)
sin_t = np.sin(t)
cos_t = np.cos(t)
exp_t = np.exp(t)
df2 = pd.DataFrame({'t': t, 'sin': sin_t, 'cos': cos_t, 'exp': exp_t})
df2
df1 = pd.DataFrame(np.random.rand(2, 4))
df2 = pd.DataFrame(np.random.rand(1, 4))
df3 = pd.DataFrame(np.random.rand(3, 5))
df_list = [df1, df2, df3]
df4 = pd.concat(df_list, axis=0)
df4
df5 = pd.concat(df_list, axis=1)
df5
code
105190994/cell_8
[ "text_html_output_1.png" ]
import pandas as pd
import pandas_profiling as pp
import warnings
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
warnings.filterwarnings(action='ignore')
CSV_PATH = '../input/iris-files/iris.csv'
iris = pd.read_csv(CSV_PATH)
pp.ProfileReport(iris)
code