path: string (lengths 13–17)
screenshot_names: sequence (lengths 1–873)
code: string (lengths 0–40.4k)
cell_type: string (1 distinct value)
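Each record below carries these four fields in order (path, screenshot_names, code, cell_type). A minimal loading sketch, assuming the dump is hosted as a Hugging Face dataset; the repository id 'owner/kaggle-notebook-cells' is a hypothetical placeholder, not a real repo:

# Minimal loading sketch (assumption: the dump is hosted on the Hugging Face Hub;
# 'owner/kaggle-notebook-cells' is a hypothetical placeholder id).
from datasets import load_dataset

ds = load_dataset('owner/kaggle-notebook-cells', split='train')
row = ds[0]
print(row['path'])              # e.g. '72107386/cell_5'
print(row['screenshot_names'])  # names of the cell's rendered outputs
print(row['code'][:200])        # the cell's source code
print(row['cell_type'])         # always 'code' in this dump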
72107386/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('../input/30-days-of-ml/train.csv')
test = pd.read_csv('../input/30-days-of-ml/test.csv')
print('The training set is {}.'.format(train.shape))
print('The test set is {}.'.format(test.shape))
code
17141799/cell_21
[ "text_html_output_1.png" ]
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import LabelEncoder
from tensorflow.python.framework import ops
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pylab
import seaborn as sns
import tensorflow as tf

dataset = pd.read_csv('../input/census-data-set/Census Income Dataset.csv', na_values=[' ?'])
test_dataset = pd.read_csv('../input/census-test-dataset/Census Income Testset.csv', na_values=[' ?'])

def Income_bracket_binarization(feat_val):
    if feat_val == '<=50K':
        return 0
    else:
        return 1

dataset['Income_bracket'] = dataset['Income_bracket'].apply(Income_bracket_binarization)
test_dataset['Income_bracket'] = test_dataset['Income_bracket'].apply(Income_bracket_binarization)

class CategoricalImputer:

    def __init__(self, columns=None, strategy='most_frequent'):
        self.columns = columns
        self.strategy = strategy

    def fit(self, X, y=None):
        if self.columns is None:
            self.columns = X.columns
        # '==' instead of the original `is`: identity comparison with a string literal is unreliable
        if self.strategy == 'most_frequent':
            self.fill = {column: X[column].value_counts().index[0] for column in self.columns}
        else:
            self.fill = {column: '0' for column in self.columns}
        return self

    def transform(self, X):
        for column in self.columns:
            X[column] = X[column].fillna(self.fill[column])
        return X

def Plot():
    # Find indices where Income is >50K and <=50K
    fig = plt.figure(figsize=(15, 15))
    fig.subplots_adjust(hspace=0.7, wspace=0.7)
    pylab.suptitle("Analyzing the dataset", fontsize="xx-large")
    plt.subplot(3, 2, 1)
    ax = sns.countplot(x='Age', hue='Income_bracket', data=dataset)
    plt.subplot(3, 2, 2)
    ax = sns.countplot(x='workclass', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
    plt.subplot(3, 2, 3)
    ax = sns.countplot(x='Education', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
    plt.subplot(3, 2, 4)
    ax = sns.countplot(x='Occupation', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
    plt.subplot(3, 2, 5)
    ax = sns.countplot(x='Gender', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
    plt.subplot(3, 2, 6)
    ax = sns.countplot(x='hours_per_week', hue='Income_bracket', data=dataset)
    return None

Plot()

X = dataset.drop('Income_bracket', axis=1)
y = dataset['Income_bracket']

class Categorical_Encoder(BaseEstimator, TransformerMixin):

    def __init__(self, columns=None):
        self.columns = columns
        self.encoders = None

    def fit(self, data, target=None):
        """Expects a data frame with named columns to encode."""
        if self.columns is None:
            self.columns = data.columns
        self.encoders = {column: LabelEncoder().fit(data[column]) for column in self.columns}
        return self

    def transform(self, data):
        """Uses the encoders to transform a data frame."""
        output = data.copy()
        for column, encoder in self.encoders.items():
            output[column] = encoder.transform(data[column])
        return output

categorical_features = {column: list(dataset[column].unique())
                        for column in dataset.columns if dataset[column].dtype == 'object'}
encoder = Categorical_Encoder(categorical_features.keys())
dataset = encoder.fit_transform(dataset)
data = dataset.values
X_train = np.float32(data[:, [0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12]])
Y_train = data[:, [13]]

def computeCost(A, y):
    """
    Computes the cost using the sigmoid cross entropy

    Arguments:
    logits -- vector containing z, output of the last linear unit (before the final sigmoid activation)
    labels -- vector of labels y (1 or 0)

    Note: what this notebook calls "z" and "y" are respectively called "logits" and
    "labels" in the TensorFlow documentation, so logits feeds into z and labels into y.

    Returns:
    cost -- the sigmoid cross-entropy cost
    """
    A = tf.cast(A, tf.float32)
    y = tf.cast(y, tf.float32)
    cross_entropy_cost = tf.nn.sigmoid_cross_entropy_with_logits(logits=A, labels=y)
    return cross_entropy_cost

def Xavier_Intializer(dim):
    tf.set_random_seed(1)
    w = tf.get_variable('w', [dim, 1], initializer=tf.contrib.layers.xavier_initializer(seed=1))
    b = 0
    assert w.shape == (dim, 1)
    assert isinstance(b, float) or isinstance(b, int)
    return (w, b)

def propagate(X, y, w, b):
    Z = tf.add(tf.matmul(X, w), b)
    A = tf.sigmoid(Z)
    return A

def random_mini_batches(X, Y, mini_batch_size, seed=0):
    m = X.shape[1]
    mini_batches = []
    np.random.seed(seed)
    permutation = list(np.random.permutation(m))
    shuffled_X = X[:, permutation]
    shuffled_Y = Y[:, permutation].reshape((Y.shape[0], m))
    num_complete_minibatches = math.floor(m / mini_batch_size)
    for k in range(0, num_complete_minibatches):
        mini_batch_X = shuffled_X[:, k * mini_batch_size:k * mini_batch_size + mini_batch_size]
        mini_batch_Y = shuffled_Y[:, k * mini_batch_size:k * mini_batch_size + mini_batch_size]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)
    if m % mini_batch_size != 0:
        mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size:m]
        mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size:m]
        mini_batch = (mini_batch_X, mini_batch_Y)
        mini_batches.append(mini_batch)
    return mini_batches

def model(X_train, Y_train, minibatch_size, learning_rate=0.005, num_iterations=1500, num_epochs=1500, print_cost=True):
    ops.reset_default_graph()
    m, n_x = X_train.shape
    n_y = Y_train.shape[0]
    costs = []
    seed = 3
    w, b = Xavier_Intializer(n_x)
    A = propagate(X_train, Y_train, w, b)
    cost = computeCost(A, Y_train)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(num_epochs):
            epoch_cost = 0.0
            num_minibatches = int(m / minibatch_size)
            seed = seed + 1
            minibatches = random_mini_batches(X_train.T, Y_train, minibatch_size, seed)
            for minibatch in minibatches:
                minibatch_X, minibatch_Y = minibatch
                # NOTE (kept as in the original cell): the graph above is built directly on
                # the X_train/Y_train constants rather than tf.placeholder tensors, so this
                # feed_dict cannot work as written; `X` here is the pandas frame from above.
                _, minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y})
                epoch_cost += minibatch_cost / num_minibatches
            if print_cost == True and epoch % 5 == 0:
                costs.append(epoch_cost)
        w = sess.run(w)
        b = sess.run(b)
    return (w, b)

w, b = model(X_train, Y_train, minibatch_size=256)
code
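The cell above trains logistic regression with tf.nn.sigmoid_cross_entropy_with_logits. Two details are worth spelling out: the op expects pre-activation logits, so passing the sigmoid activation A (as computeCost receives here) applies the sigmoid twice; and the loss it computes is the numerically stable form max(z, 0) - z*y + log(1 + exp(-|z|)). A small NumPy check of that identity, for illustration only (not part of the notebook):

import numpy as np

# Stable sigmoid cross-entropy, the formula documented for
# tf.nn.sigmoid_cross_entropy_with_logits.
def sigmoid_xent(z, y):
    return np.maximum(z, 0) - z * y + np.log1p(np.exp(-np.abs(z)))

z = np.array([-2.0, 0.5, 3.0])   # logits (pre-sigmoid)
y = np.array([0.0, 1.0, 1.0])    # binary labels
p = 1 / (1 + np.exp(-z))
naive = -y * np.log(p) - (1 - y) * np.log(1 - p)
assert np.allclose(sigmoid_xent(z, y), naive)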
17141799/cell_9
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pylab
import seaborn as sns

dataset = pd.read_csv('../input/census-data-set/Census Income Dataset.csv', na_values=[' ?'])
test_dataset = pd.read_csv('../input/census-test-dataset/Census Income Testset.csv', na_values=[' ?'])

def Income_bracket_binarization(feat_val):
    if feat_val == '<=50K':
        return 0
    else:
        return 1

dataset['Income_bracket'] = dataset['Income_bracket'].apply(Income_bracket_binarization)
test_dataset['Income_bracket'] = test_dataset['Income_bracket'].apply(Income_bracket_binarization)

def Plot():
    # Find indices where Income is >50K and <=50K
    fig = plt.figure(figsize=(15, 15))
    fig.subplots_adjust(hspace=0.7, wspace=0.7)
    pylab.suptitle("Analyzing the dataset", fontsize="xx-large")
    plt.subplot(3, 2, 1)
    ax = sns.countplot(x='Age', hue='Income_bracket', data=dataset)
    plt.subplot(3, 2, 2)
    ax = sns.countplot(x='workclass', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
    plt.subplot(3, 2, 3)
    ax = sns.countplot(x='Education', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
    plt.subplot(3, 2, 4)
    ax = sns.countplot(x='Occupation', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
    plt.subplot(3, 2, 5)
    ax = sns.countplot(x='Gender', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
    plt.subplot(3, 2, 6)
    ax = sns.countplot(x='hours_per_week', hue='Income_bracket', data=dataset)
    return None

dataset.hist(column=['Age', 'Education', 'hours_per_week'], figsize=(6, 5))
pylab.suptitle('Analyzing distribution for the dataset', fontsize='xx-large')
Plot()
X = dataset.drop('Income_bracket', axis=1)
y = dataset['Income_bracket']
code
17141799/cell_4
[ "image_output_2.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/census-data-set/Census Income Dataset.csv', na_values=[' ?'])
test_dataset = pd.read_csv('../input/census-test-dataset/Census Income Testset.csv', na_values=[' ?'])

def Income_bracket_binarization(feat_val):
    if feat_val == '<=50K':
        return 0
    else:
        return 1

dataset['Income_bracket'] = dataset['Income_bracket'].apply(Income_bracket_binarization)
test_dataset['Income_bracket'] = test_dataset['Income_bracket'].apply(Income_bracket_binarization)
dataset.head()
code
17141799/cell_2
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/census-data-set/Census Income Dataset.csv', na_values=[' ?'])
dataset.head()
code
17141799/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.python.framework import ops
import sklearn
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pylab
from sklearn.preprocessing import LabelEncoder
from sklearn.base import BaseEstimator, TransformerMixin
import seaborn as sns
import math
import os

print(os.listdir('../input'))
code
17141799/cell_7
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/census-data-set/Census Income Dataset.csv', na_values=[' ?'])
test_dataset = pd.read_csv('../input/census-test-dataset/Census Income Testset.csv', na_values=[' ?'])

def Income_bracket_binarization(feat_val):
    if feat_val == '<=50K':
        return 0
    else:
        return 1

dataset['Income_bracket'] = dataset['Income_bracket'].apply(Income_bracket_binarization)
test_dataset['Income_bracket'] = test_dataset['Income_bracket'].apply(Income_bracket_binarization)

# CategoricalImputer is defined in other cells of this notebook (see 17141799/cell_21
# and 17141799/cell_15 in this dump); this cell assumes it is already in scope.
obj = CategoricalImputer(columns=['workclass', 'Occupation', 'Native_Country'])
train_result = obj.fit(dataset[['workclass', 'Occupation', 'Native_Country']])
dataset[['workclass', 'Occupation', 'Native_Country']] = train_result.transform(dataset[['workclass', 'Occupation', 'Native_Country']])
test_obj = CategoricalImputer(columns=['workclass', 'Occupation', 'Native_Country'])
test_result = test_obj.fit(test_dataset[['workclass', 'Occupation', 'Native_Country']])
test_dataset[['workclass', 'Occupation', 'Native_Country']] = test_result.transform(test_dataset[['workclass', 'Occupation', 'Native_Country']])
code
17141799/cell_15
[ "text_html_output_1.png" ]
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pylab
import seaborn as sns

dataset = pd.read_csv('../input/census-data-set/Census Income Dataset.csv', na_values=[' ?'])
test_dataset = pd.read_csv('../input/census-test-dataset/Census Income Testset.csv', na_values=[' ?'])

def Income_bracket_binarization(feat_val):
    if feat_val == '<=50K':
        return 0
    else:
        return 1

dataset['Income_bracket'] = dataset['Income_bracket'].apply(Income_bracket_binarization)
test_dataset['Income_bracket'] = test_dataset['Income_bracket'].apply(Income_bracket_binarization)

class CategoricalImputer:

    def __init__(self, columns=None, strategy='most_frequent'):
        self.columns = columns
        self.strategy = strategy

    def fit(self, X, y=None):
        if self.columns is None:
            self.columns = X.columns
        # '==' instead of the original `is`: identity comparison with a string literal is unreliable
        if self.strategy == 'most_frequent':
            self.fill = {column: X[column].value_counts().index[0] for column in self.columns}
        else:
            self.fill = {column: '0' for column in self.columns}
        return self

    def transform(self, X):
        for column in self.columns:
            X[column] = X[column].fillna(self.fill[column])
        return X

def Plot():
    # Find indices where Income is >50K and <=50K
    fig = plt.figure(figsize=(15, 15))
    fig.subplots_adjust(hspace=0.7, wspace=0.7)
    pylab.suptitle("Analyzing the dataset", fontsize="xx-large")
    plt.subplot(3, 2, 1)
    ax = sns.countplot(x='Age', hue='Income_bracket', data=dataset)
    plt.subplot(3, 2, 2)
    ax = sns.countplot(x='workclass', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
    plt.subplot(3, 2, 3)
    ax = sns.countplot(x='Education', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
    plt.subplot(3, 2, 4)
    ax = sns.countplot(x='Occupation', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
    plt.subplot(3, 2, 5)
    ax = sns.countplot(x='Gender', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
    plt.subplot(3, 2, 6)
    ax = sns.countplot(x='hours_per_week', hue='Income_bracket', data=dataset)
    return None

Plot()

X = dataset.drop('Income_bracket', axis=1)
y = dataset['Income_bracket']

class Categorical_Encoder(BaseEstimator, TransformerMixin):

    def __init__(self, columns=None):
        self.columns = columns
        self.encoders = None

    def fit(self, data, target=None):
        """Expects a data frame with named columns to encode."""
        if self.columns is None:
            self.columns = data.columns
        self.encoders = {column: LabelEncoder().fit(data[column]) for column in self.columns}
        return self

    def transform(self, data):
        """Uses the encoders to transform a data frame."""
        output = data.copy()
        for column, encoder in self.encoders.items():
            output[column] = encoder.transform(data[column])
        return output

categorical_features = {column: list(dataset[column].unique())
                        for column in dataset.columns if dataset[column].dtype == 'object'}
encoder = Categorical_Encoder(categorical_features.keys())
dataset = encoder.fit_transform(dataset)
categorical_features = {column: list(test_dataset[column].unique())
                        for column in test_dataset.columns if test_dataset[column].dtype == 'object'}
encoder = Categorical_Encoder(categorical_features.keys())
test_dataset = encoder.fit_transform(test_dataset)
test_dataset.head()
code
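A pitfall worth flagging in the encoding step above: cell_15 fits a fresh LabelEncoder per column on the training set and then fits new encoders on the test set, so the same category can receive different integer codes in the two frames. A minimal sketch of the problem, with toy values not taken from the dataset:

from sklearn.preprocessing import LabelEncoder

train_col = ['Private', 'State-gov', 'Private']
test_col = ['State-gov', 'Private', 'Self-emp']
enc_train = LabelEncoder().fit(train_col)
enc_test = LabelEncoder().fit(test_col)
print(enc_train.transform(['State-gov']))  # [1]
print(enc_test.transform(['State-gov']))   # [2] -- same category, different code

# The usual fix is to fit once on train and reuse that encoder on test
# (after deciding how to handle categories unseen in training).
shared = LabelEncoder().fit(train_col)
print(shared.transform(['State-gov']))     # [1] in both frames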
17141799/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/census-data-set/Census Income Dataset.csv', na_values=[' ?'])
test_dataset = pd.read_csv('../input/census-test-dataset/Census Income Testset.csv', na_values=[' ?'])
test_dataset.head()
code
17141799/cell_12
[ "text_html_output_1.png" ]
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pylab
import seaborn as sns

dataset = pd.read_csv('../input/census-data-set/Census Income Dataset.csv', na_values=[' ?'])
test_dataset = pd.read_csv('../input/census-test-dataset/Census Income Testset.csv', na_values=[' ?'])

def Income_bracket_binarization(feat_val):
    if feat_val == '<=50K':
        return 0
    else:
        return 1

dataset['Income_bracket'] = dataset['Income_bracket'].apply(Income_bracket_binarization)
test_dataset['Income_bracket'] = test_dataset['Income_bracket'].apply(Income_bracket_binarization)

class CategoricalImputer:

    def __init__(self, columns=None, strategy='most_frequent'):
        self.columns = columns
        self.strategy = strategy

    def fit(self, X, y=None):
        if self.columns is None:
            self.columns = X.columns
        # '==' instead of the original `is`: identity comparison with a string literal is unreliable
        if self.strategy == 'most_frequent':
            self.fill = {column: X[column].value_counts().index[0] for column in self.columns}
        else:
            self.fill = {column: '0' for column in self.columns}
        return self

    def transform(self, X):
        for column in self.columns:
            X[column] = X[column].fillna(self.fill[column])
        return X

def Plot():
    # Find indices where Income is >50K and <=50K
    fig = plt.figure(figsize=(15, 15))
    fig.subplots_adjust(hspace=0.7, wspace=0.7)
    pylab.suptitle("Analyzing the dataset", fontsize="xx-large")
    plt.subplot(3, 2, 1)
    ax = sns.countplot(x='Age', hue='Income_bracket', data=dataset)
    plt.subplot(3, 2, 2)
    ax = sns.countplot(x='workclass', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
    plt.subplot(3, 2, 3)
    ax = sns.countplot(x='Education', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
    plt.subplot(3, 2, 4)
    ax = sns.countplot(x='Occupation', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
    plt.subplot(3, 2, 5)
    ax = sns.countplot(x='Gender', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
    plt.subplot(3, 2, 6)
    ax = sns.countplot(x='hours_per_week', hue='Income_bracket', data=dataset)
    return None

Plot()

X = dataset.drop('Income_bracket', axis=1)
y = dataset['Income_bracket']

class Categorical_Encoder(BaseEstimator, TransformerMixin):

    def __init__(self, columns=None):
        self.columns = columns
        self.encoders = None

    def fit(self, data, target=None):
        """Expects a data frame with named columns to encode."""
        if self.columns is None:
            self.columns = data.columns
        self.encoders = {column: LabelEncoder().fit(data[column]) for column in self.columns}
        return self

    def transform(self, data):
        """Uses the encoders to transform a data frame."""
        output = data.copy()
        for column, encoder in self.encoders.items():
            output[column] = encoder.transform(data[column])
        return output

categorical_features = {column: list(dataset[column].unique())
                        for column in dataset.columns if dataset[column].dtype == 'object'}
encoder = Categorical_Encoder(categorical_features.keys())
dataset = encoder.fit_transform(dataset)
dataset.head()
code
17141799/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('../input/census-data-set/Census Income Dataset.csv', na_values=[' ?'])
test_dataset = pd.read_csv('../input/census-test-dataset/Census Income Testset.csv', na_values=[' ?'])
test_dataset.head()
code
16145643/cell_21
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import missingno

pd.set_option('display.max_columns', 1000)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'

df = pd.read_csv('../input/train.csv')
d_test = pd.read_csv('../input/test.csv')
df.isna().sum().sort_values(ascending=False)[:20]
for c in df.columns:
    if df[c].dtypes == 'O':
        df[c].fillna(value='none', inplace=True)
    else:
        df[c].fillna(value=0, inplace=True)

objects_list = ['MSSubClass']
linkert_list = ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'HeatingQC',
                'KitchenQual', 'Functional', 'FireplaceQu', 'GarageQual', 'GarageCond',
                'PavedDrive', 'Fence']
for c in df.columns:
    if df[c].dtypes == 'O':
        d = df.set_index(c)['SalePrice'].groupby(axis=0, level=0).mean()
        d = round(d / df['SalePrice'].mean() * 100, 1)
        d.name = 'Mean'
        b = df.set_index(c)['SalePrice'].groupby(axis=0, level=0).std()
        b = round(b / df['SalePrice'].std() * 100, 1)
        b.name = 'std'

a = []
for c in linkert_list:
    a.append(df[c].unique().tolist())
label_values = pd.DataFrame(a).T
label_values.columns = linkert_list
label_values
label_loc = [4, 7, 8, 11, 12]
for i in label_loc:
    label_values.iloc[:, i].dropna().tolist()
code
16145643/cell_25
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import missingno
from collections import defaultdict             # needed for labeldict below (missing in the original cell)
from sklearn.preprocessing import LabelEncoder  # needed for the encoders (missing in the original cell)

pd.set_option('display.max_columns', 1000)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'

df = pd.read_csv('../input/train.csv')
d_test = pd.read_csv('../input/test.csv')
df.isna().sum().sort_values(ascending=False)[:20]
for c in df.columns:
    if df[c].dtypes == 'O':
        df[c].fillna(value='none', inplace=True)
    else:
        df[c].fillna(value=0, inplace=True)
for c in df.columns:
    if df[c].dtypes == 'O':
        d = df.set_index(c)['SalePrice'].groupby(axis=0, level=0).mean()
        d = round(d / df['SalePrice'].mean() * 100, 1)
        d.name = 'Mean'
        b = df.set_index(c)['SalePrice'].groupby(axis=0, level=0).std()
        b = round(b / df['SalePrice'].std() * 100, 1)
        b.name = 'std'

# Label-encode every categorical column, keeping one fitted encoder per column.
categorical_cols = df.columns[df.dtypes == 'object']
labeldict = defaultdict(LabelEncoder)
labelfit = df[categorical_cols].apply(lambda x: labeldict[x.name].fit_transform(x))
df[categorical_cols] = labelfit
code
16145643/cell_20
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import missingno

pd.set_option('display.max_columns', 1000)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'

df = pd.read_csv('../input/train.csv')
d_test = pd.read_csv('../input/test.csv')
df.isna().sum().sort_values(ascending=False)[:20]
for c in df.columns:
    if df[c].dtypes == 'O':
        df[c].fillna(value='none', inplace=True)
    else:
        df[c].fillna(value=0, inplace=True)

objects_list = ['MSSubClass']
linkert_list = ['ExterQual', 'ExterCond', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'HeatingQC',
                'KitchenQual', 'Functional', 'FireplaceQu', 'GarageQual', 'GarageCond',
                'PavedDrive', 'Fence']
for c in df.columns:
    if df[c].dtypes == 'O':
        d = df.set_index(c)['SalePrice'].groupby(axis=0, level=0).mean()
        d = round(d / df['SalePrice'].mean() * 100, 1)
        d.name = 'Mean'
        b = df.set_index(c)['SalePrice'].groupby(axis=0, level=0).std()
        b = round(b / df['SalePrice'].std() * 100, 1)
        b.name = 'std'

a = []
for c in linkert_list:
    a.append(df[c].unique().tolist())
label_values = pd.DataFrame(a).T
label_values.columns = linkert_list
label_values
code
16145643/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import missingno

pd.set_option('display.max_columns', 1000)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'

df = pd.read_csv('../input/train.csv')
d_test = pd.read_csv('../input/test.csv')
missingno.matrix(df, figsize=(30, 5))
code
16145643/cell_11
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import missingno

pd.set_option('display.max_columns', 1000)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'

df = pd.read_csv('../input/train.csv')
d_test = pd.read_csv('../input/test.csv')
df.isna().sum().sort_values(ascending=False)[:20]
for c in df.columns:
    if df[c].dtypes == 'O':
        df[c].fillna(value='none', inplace=True)
    else:
        df[c].fillna(value=0, inplace=True)
df.head()
code
16145643/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

print(os.listdir('../input'))

import matplotlib.pyplot as plt
import seaborn as sns
import missingno

pd.set_option('display.max_columns', 1000)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
code
16145643/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import missingno

pd.set_option('display.max_columns', 1000)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'

df = pd.read_csv('../input/train.csv')
d_test = pd.read_csv('../input/test.csv')
df.isna().sum().sort_values(ascending=False)[:20]
code
16145643/cell_38
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier

# x_train and y_train come from earlier cells of this notebook that are not part of this dump.
clf = RandomForestClassifier(n_estimators=100)
clf.fit(x_train, y_train)
clf.score(x_train, y_train)
code
16145643/cell_3
[ "text_plain_output_5.png", "text_plain_output_4.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import missingno

pd.set_option('display.max_columns', 1000)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'

df = pd.read_csv('../input/train.csv')
d_test = pd.read_csv('../input/test.csv')
df.head()
code
16145643/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
import os
import numpy as np
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import missingno

pd.set_option('display.max_columns', 1000)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'

df = pd.read_csv('../input/train.csv')
d_test = pd.read_csv('../input/test.csv')
df.isna().sum().sort_values(ascending=False)[:20]
for c in df.columns:
    if df[c].dtypes == 'O':
        df[c].fillna(value='none', inplace=True)
    else:
        df[c].fillna(value=0, inplace=True)
df.iloc[:, -1].head()
code
16145643/cell_22
[ "text_plain_output_1.png" ]
from collections import defaultdict  # missing in the original cell

sortlabels = defaultdict()
# Raw label values alongside their manually rank-prefixed counterparts; with
# InteractiveShell set to 'all' above, each bare list is echoed as cell output.
['No', 'Gd', 'Mn', 'Av', 'none']
['1.No', '4.Gd', '2.Mn', '3.Av', '0.none']
['Typ', 'Min1', 'Maj1', 'Min2', 'Mod', 'Maj2', 'Sev']
['none', 'TA', 'Gd', 'Fa', 'Ex', 'Po']
['0.none', '3.TA', '4.Gd', '2.Fa', '5.Ex', '1.Po']
['Y', 'N', 'P']
['3.Y', '1.N', '2.P']
['none', 'MnPrv', 'GdWo', 'GdPrv', 'MnWw']
['0.none', '3.MnPrv', '2.GdWo', '4.GdPrv', '1.MnWw']
code
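The paired lists above suggest the notebook's trick for ordinal columns: prefixing each label with its rank so that lexicographic sorting, which LabelEncoder relies on, reproduces the intended quality order. A hedged sketch of that idea; the mapping mirrors the '0.none' through '5.Ex' lists above:

from sklearn.preprocessing import LabelEncoder

quality_rank = {'none': 0, 'Po': 1, 'Fa': 2, 'TA': 3, 'Gd': 4, 'Ex': 5}
raw = ['TA', 'Gd', 'none', 'Ex', 'Fa']
prefixed = ['{}.{}'.format(quality_rank[v], v) for v in raw]  # e.g. '3.TA'
codes = LabelEncoder().fit_transform(prefixed)
print(list(zip(raw, codes)))  # codes now respect none < Po < Fa < TA < Gd < Ex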
17115300/cell_9
[ "text_plain_output_1.png" ]
from sklearn import model_selection
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn.tree import DecisionTreeClassifier
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

url = '../input/data.csv'
df = pd.read_csv(url, index_col=0)
df.replace('?', 0, inplace=True)
df.diagnosis = pd.get_dummies(df.diagnosis)
df = df.drop(labels='Unnamed: 32', axis=1)
scaler = MinMaxScaler(feature_range=(0, 1))
normalizedData = scaler.fit_transform(df.values)
names = list(df.columns)
normalizedData = pd.DataFrame(normalizedData)
normalizedData.columns = names
x = normalizedData.loc[:, normalizedData.columns != 'diagnosis']
y = normalizedData.diagnosis

# Bagged decision trees. Note: recent scikit-learn versions require shuffle=True
# whenever random_state is passed to KFold.
kfold = model_selection.KFold(n_splits=10, random_state=17)
cart = DecisionTreeClassifier()
num_trees = 100
model = BaggingClassifier(base_estimator=cart, n_estimators=num_trees, random_state=17)
results = model_selection.cross_val_score(model, x, y, cv=kfold)

# AdaBoost with the same cross-validation setup.
seed = 17
num_trees = 100
kfold = model_selection.KFold(n_splits=10, random_state=seed)
model = AdaBoostClassifier(n_estimators=num_trees, random_state=seed)
results = model_selection.cross_val_score(model, x, y, cv=kfold)
print(results.mean())
code
17115300/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

url = '../input/data.csv'
df = pd.read_csv(url, index_col=0)
df.head(3)
code
17115300/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

print(os.listdir('../input'))
code
17115300/cell_8
[ "text_plain_output_1.png" ]
from sklearn import model_selection
from sklearn.ensemble import BaggingClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn.tree import DecisionTreeClassifier
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

url = '../input/data.csv'
df = pd.read_csv(url, index_col=0)
df.replace('?', 0, inplace=True)
df.diagnosis = pd.get_dummies(df.diagnosis)
df = df.drop(labels='Unnamed: 32', axis=1)
scaler = MinMaxScaler(feature_range=(0, 1))
normalizedData = scaler.fit_transform(df.values)
names = list(df.columns)
normalizedData = pd.DataFrame(normalizedData)
normalizedData.columns = names
x = normalizedData.loc[:, normalizedData.columns != 'diagnosis']
y = normalizedData.diagnosis
kfold = model_selection.KFold(n_splits=10, random_state=17)
cart = DecisionTreeClassifier()
num_trees = 100
model = BaggingClassifier(base_estimator=cart, n_estimators=num_trees, random_state=17)
results = model_selection.cross_val_score(model, x, y, cv=kfold)
print(results.mean())
code
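Cells 8 and 9 of this notebook report only the mean of the ten fold scores. A short hedged sketch that reports the spread as well, which makes the bagging-vs-AdaBoost comparison more informative; it assumes x and y from the cells above are in scope, and uses shuffle=True, which newer scikit-learn requires whenever random_state is set on KFold:

from sklearn import model_selection
from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier

kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=17)
candidates = {
    'bagged trees': BaggingClassifier(n_estimators=100, random_state=17),
    'adaboost': AdaBoostClassifier(n_estimators=100, random_state=17),
}
for name, est in candidates.items():
    scores = model_selection.cross_val_score(est, x, y, cv=kfold)
    print('{}: {:.3f} +/- {:.3f}'.format(name, scores.mean(), scores.std()))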
88099839/cell_25
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/world-happiness/2019.csv')
correlation_matrix_data = data.corr()

# Correlation Matrix function in Matplotlib 👩‍🚒
fig, ax = plt.subplots(figsize=(6, 6))
cp = ax.matshow(correlation_matrix_data)
ax.set_title('Correlation Matrix Plot')
# Annotate the values into correlation plot
for (i, j), z in np.ndenumerate(correlation_matrix_data):
    ax.text(j, i, '{:0.2f}'.format(z), ha='center', va='center')
fig.colorbar(cp, ax=ax, fraction=0.046, pad=0.04);
# fraction=0.046, pad=0.04 seem like a magical combination to keep the colorbar scaled to the plot all the time 🙄

# Correlation Matrix function in Seaborn 👨‍🚀
fig, ax = plt.subplots(figsize=(8, 8))
cmap = plt.get_cmap('magma', 30)
ax = sns.heatmap(correlation_matrix_data, annot=True, fmt="0.3f", square=True, cmap=cmap, cbar_kws={"shrink": 0.8})
ax.set_title('Correlation Matrix Plot', fontsize=13);
# annot=True: annotate values into the corr matrix
# cbar_kws={"shrink": 0.8}: change the size of the colorbar to 0.8 of the plot 🙆‍♀️

Group = np.repeat(np.array(['Q1', 'Q2', 'Q3']), [52, 52, 52], axis=0)
Group = pd.Series(Group)
pair_plot_data = data[['Social support', 'Healthy life expectancy', 'Score', 'GDP per capita']]
pair_plot_data['Group'] = Group
pair_plot_data.iloc[[0, 52, 104]]
sns.pairplot(data=pair_plot_data, kind='reg',
             diag_kws={'bins': 20, 'color': 'darkblue', 'edgecolor': 'black'},
             plot_kws={'marker': '.', 'color': 'darkred'}, size=2.5)
code
88099839/cell_57
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/world-happiness/2019.csv')
correlation_matrix_data = data.corr()

# Correlation Matrix function in Matplotlib 👩‍🚒
fig, ax = plt.subplots(figsize=(6, 6))
cp = ax.matshow(correlation_matrix_data)
ax.set_title('Correlation Matrix Plot')
# Annotate the values into correlation plot
for (i, j), z in np.ndenumerate(correlation_matrix_data):
    ax.text(j, i, '{:0.2f}'.format(z), ha='center', va='center')
fig.colorbar(cp, ax=ax, fraction=0.046, pad=0.04);
# fraction=0.046, pad=0.04 seem like a magical combination to keep the colorbar scaled to the plot all the time 🙄

# Correlation Matrix function in Seaborn 👨‍🚀
fig, ax = plt.subplots(figsize=(8, 8))
cmap = plt.get_cmap('magma', 30)
ax = sns.heatmap(correlation_matrix_data, annot=True, fmt="0.3f", square=True, cmap=cmap, cbar_kws={"shrink": 0.8})
ax.set_title('Correlation Matrix Plot', fontsize=13);
# annot=True: annotate values into the corr matrix
# cbar_kws={"shrink": 0.8}: change the size of the colorbar to 0.8 of the plot 🙆‍♀️

Group = np.repeat(np.array(['Q1', 'Q2', 'Q3']), [52, 52, 52], axis=0)
Group = pd.Series(Group)
pair_plot_data = data[['Social support', 'Healthy life expectancy', 'Score', 'GDP per capita']]
pair_plot_data['Group'] = Group
pair_plot_data.iloc[[0, 52, 104]]
grid = sns.PairGrid(data=pair_plot_data, size=2.5, hue='Group')
grid = grid.map_upper(plt.scatter, linewidths=0.3, edgecolor='black', alpha=0.7)
grid = grid.map_diag(sns.kdeplot, fill=True, bw=0.3)
grid = grid.map_lower(plt.hist2d, bins=30, lw=0.0)

scatter_matrix_data = data[['Social support', 'Healthy life expectancy', 'Score', 'GDP per capita']]
Group = np.repeat(np.array(['H1', 'H2', 'H3']), [30, 50, 76], axis=0)
Group = pd.Series(Group)
countplot_data = data['Country or region']
countplot_data['Group'] = Group
boxplot_data = data.drop(['Overall rank', 'Country or region', 'Score'], axis=1)
props = dict(boxes='darkblue', whiskers='black', medians='red', caps='black')
boxplot_data.plot.box(color=props, patch_artist=True, rot=45, figsize=(10, 5))
group = np.repeat(np.array(['T1', 'T2', 'T3', 'T4']), [39, 39, 39, 39], axis=0).tolist()
df = boxplot_data.copy()
df['Group'] = group
props = dict(boxes='darkblue', whiskers='black', medians='red', caps='black')
df.groupby(by='Group').boxplot(column='GDP per capita', patch_artist=True, color=props,
                               layout=(1, 4), fontsize=15, figsize=(15, 5))
code
88099839/cell_56
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/world-happiness/2019.csv')
correlation_matrix_data = data.corr()

# Correlation Matrix function in Matplotlib 👩‍🚒
fig, ax = plt.subplots(figsize=(6, 6))
cp = ax.matshow(correlation_matrix_data)
ax.set_title('Correlation Matrix Plot')
# Annotate the values into correlation plot
for (i, j), z in np.ndenumerate(correlation_matrix_data):
    ax.text(j, i, '{:0.2f}'.format(z), ha='center', va='center')
fig.colorbar(cp, ax=ax, fraction=0.046, pad=0.04);
# fraction=0.046, pad=0.04 seem like a magical combination to keep the colorbar scaled to the plot all the time 🙄

# Correlation Matrix function in Seaborn 👨‍🚀
fig, ax = plt.subplots(figsize=(8, 8))
cmap = plt.get_cmap('magma', 30)
ax = sns.heatmap(correlation_matrix_data, annot=True, fmt="0.3f", square=True, cmap=cmap, cbar_kws={"shrink": 0.8})
ax.set_title('Correlation Matrix Plot', fontsize=13);
# annot=True: annotate values into the corr matrix
# cbar_kws={"shrink": 0.8}: change the size of the colorbar to 0.8 of the plot 🙆‍♀️

Group = np.repeat(np.array(['Q1', 'Q2', 'Q3']), [52, 52, 52], axis=0)
Group = pd.Series(Group)
pair_plot_data = data[['Social support', 'Healthy life expectancy', 'Score', 'GDP per capita']]
pair_plot_data['Group'] = Group
pair_plot_data.iloc[[0, 52, 104]]
grid = sns.PairGrid(data=pair_plot_data, size=2.5, hue='Group')
grid = grid.map_upper(plt.scatter, linewidths=0.3, edgecolor='black', alpha=0.7)
grid = grid.map_diag(sns.kdeplot, fill=True, bw=0.3)
grid = grid.map_lower(plt.hist2d, bins=30, lw=0.0)

scatter_matrix_data = data[['Social support', 'Healthy life expectancy', 'Score', 'GDP per capita']]
Group = np.repeat(np.array(['H1', 'H2', 'H3']), [30, 50, 76], axis=0)
Group = pd.Series(Group)
countplot_data = data['Country or region']
countplot_data['Group'] = Group
boxplot_data = data.drop(['Overall rank', 'Country or region', 'Score'], axis=1)
props = dict(boxes='darkblue', whiskers='black', medians='red', caps='black')
boxplot_data.plot.box(color=props, patch_artist=True, rot=45, figsize=(10, 5))
group = np.repeat(np.array(['T1', 'T2', 'T3', 'T4']), [39, 39, 39, 39], axis=0).tolist()
df = boxplot_data.copy()
df['Group'] = group
df.head(3)
code
88099839/cell_34
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/world-happiness/2019.csv')
correlation_matrix_data = data.corr()

# Correlation Matrix function in Matplotlib 👩‍🚒
fig, ax = plt.subplots(figsize=(6, 6))
cp = ax.matshow(correlation_matrix_data)
ax.set_title('Correlation Matrix Plot')
# Annotate the values into correlation plot
for (i, j), z in np.ndenumerate(correlation_matrix_data):
    ax.text(j, i, '{:0.2f}'.format(z), ha='center', va='center')
fig.colorbar(cp, ax=ax, fraction=0.046, pad=0.04);
# fraction=0.046, pad=0.04 seem like a magical combination to keep the colorbar scaled to the plot all the time 🙄

# Correlation Matrix function in Seaborn 👨‍🚀
fig, ax = plt.subplots(figsize=(8, 8))
cmap = plt.get_cmap('magma', 30)
ax = sns.heatmap(correlation_matrix_data, annot=True, fmt="0.3f", square=True, cmap=cmap, cbar_kws={"shrink": 0.8})
ax.set_title('Correlation Matrix Plot', fontsize=13);
# annot=True: annotate values into the corr matrix
# cbar_kws={"shrink": 0.8}: change the size of the colorbar to 0.8 of the plot 🙆‍♀️

Group = np.repeat(np.array(['Q1', 'Q2', 'Q3']), [52, 52, 52], axis=0)
Group = pd.Series(Group)
pair_plot_data = data[['Social support', 'Healthy life expectancy', 'Score', 'GDP per capita']]
pair_plot_data['Group'] = Group
pair_plot_data.iloc[[0, 52, 104]]
grid = sns.PairGrid(data=pair_plot_data, size=2.5, hue='Group')
grid = grid.map_upper(plt.scatter, linewidths=0.3, edgecolor='black', alpha=0.7)
grid = grid.map_diag(sns.kdeplot, fill=True, bw=0.3)
grid = grid.map_lower(plt.hist2d, bins=30, lw=0.0)

scatter_matrix_data = data[['Social support', 'Healthy life expectancy', 'Score', 'GDP per capita']]
pd.plotting.scatter_matrix(scatter_matrix_data, diagonal='hist', figsize=(8, 8),
                           color='darkgreen', marker='.', alpha=0.8, s=60,
                           hist_kwds={'bins': 15, 'color': 'darkblue', 'edgecolor': 'black'})
plt.suptitle('Scatter Matrix of Data', fontsize=15)
code
88099839/cell_23
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

data = pd.read_csv('../input/world-happiness/2019.csv')
correlation_matrix_data = data.corr()

# Correlation Matrix function in Matplotlib 👩‍🚒
fig, ax = plt.subplots(figsize=(6, 6))
cp = ax.matshow(correlation_matrix_data)
ax.set_title('Correlation Matrix Plot')
# Annotate the values into correlation plot
for (i, j), z in np.ndenumerate(correlation_matrix_data):
    ax.text(j, i, '{:0.2f}'.format(z), ha='center', va='center')
fig.colorbar(cp, ax=ax, fraction=0.046, pad=0.04);
# fraction=0.046, pad=0.04 seem like a magical combination to keep the colorbar scaled to the plot all the time 🙄

Group = np.repeat(np.array(['Q1', 'Q2', 'Q3']), [52, 52, 52], axis=0)
Group = pd.Series(Group)
pair_plot_data = data[['Social support', 'Healthy life expectancy', 'Score', 'GDP per capita']]
pair_plot_data['Group'] = Group
pair_plot_data.iloc[[0, 52, 104]]
code
88099839/cell_30
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/world-happiness/2019.csv')
correlation_matrix_data = data.corr()

# Correlation Matrix function in Matplotlib 👩‍🚒
fig, ax = plt.subplots(figsize=(6, 6))
cp = ax.matshow(correlation_matrix_data)
ax.set_title('Correlation Matrix Plot')
# Annotate the values into correlation plot
for (i, j), z in np.ndenumerate(correlation_matrix_data):
    ax.text(j, i, '{:0.2f}'.format(z), ha='center', va='center')
fig.colorbar(cp, ax=ax, fraction=0.046, pad=0.04);
# fraction=0.046, pad=0.04 seem like a magical combination to keep the colorbar scaled to the plot all the time 🙄

# Correlation Matrix function in Seaborn 👨‍🚀
fig, ax = plt.subplots(figsize=(8, 8))
cmap = plt.get_cmap('magma', 30)
ax = sns.heatmap(correlation_matrix_data, annot=True, fmt="0.3f", square=True, cmap=cmap, cbar_kws={"shrink": 0.8})
ax.set_title('Correlation Matrix Plot', fontsize=13);
# annot=True: annotate values into the corr matrix
# cbar_kws={"shrink": 0.8}: change the size of the colorbar to 0.8 of the plot 🙆‍♀️

Group = np.repeat(np.array(['Q1', 'Q2', 'Q3']), [52, 52, 52], axis=0)
Group = pd.Series(Group)
pair_plot_data = data[['Social support', 'Healthy life expectancy', 'Score', 'GDP per capita']]
pair_plot_data['Group'] = Group
pair_plot_data.iloc[[0, 52, 104]]
grid = sns.PairGrid(data=pair_plot_data, size=2.5, hue='Group')
grid = grid.map_upper(plt.scatter, linewidths=0.3, edgecolor='black', alpha=0.7)
grid = grid.map_diag(sns.kdeplot, fill=True, bw=0.3)
grid = grid.map_lower(plt.hist2d, bins=30, lw=0.0)
code
88099839/cell_20
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/world-happiness/2019.csv')
correlation_matrix_data = data.corr()

# Correlation Matrix function in Matplotlib 👩‍🚒
fig, ax = plt.subplots(figsize=(6, 6))
cp = ax.matshow(correlation_matrix_data)
ax.set_title('Correlation Matrix Plot')
# Annotate the values into correlation plot
for (i, j), z in np.ndenumerate(correlation_matrix_data):
    ax.text(j, i, '{:0.2f}'.format(z), ha='center', va='center')
fig.colorbar(cp, ax=ax, fraction=0.046, pad=0.04);
# fraction=0.046, pad=0.04 seem like a magical combination to keep the colorbar scaled to the plot all the time 🙄

fig, ax = plt.subplots(figsize=(8, 8))
cmap = plt.get_cmap('magma', 30)
ax = sns.heatmap(correlation_matrix_data, annot=True, fmt='0.3f', square=True, cmap=cmap, cbar_kws={'shrink': 0.8})
ax.set_title('Correlation Matrix Plot', fontsize=13)
code
88099839/cell_55
[ "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/world-happiness/2019.csv')
correlation_matrix_data = data.corr()
boxplot_data = data.drop(['Overall rank', 'Country or region', 'Score'], axis=1)
props = dict(boxes='darkblue', whiskers='black', medians='red', caps='black')
boxplot_data.plot.box(color=props, patch_artist=True, rot=45, figsize=(10, 5))
code
88099839/cell_39
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/world-happiness/2019.csv')
correlation_matrix_data = data.corr()
histogram_data = data[['Score', 'GDP per capita']]
histogram_data.head(3)
code
88099839/cell_41
[ "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/world-happiness/2019.csv')
correlation_matrix_data = data.corr()
histogram_data = data[['Score', 'GDP per capita']]
histogram_data.plot.hist(bins=40, color=['darkblue', 'darkgreen'], figsize=(10, 4), edgecolor='black', lw=1)
code
88099839/cell_11
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/world-happiness/2019.csv')
data.head()
code
88099839/cell_50
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/world-happiness/2019.csv')
correlation_matrix_data = data.corr()

# Correlation Matrix function in Matplotlib 👩‍🚒
fig, ax = plt.subplots(figsize=(6, 6))
cp = ax.matshow(correlation_matrix_data)
ax.set_title('Correlation Matrix Plot')
# Annotate the values into correlation plot
for (i, j), z in np.ndenumerate(correlation_matrix_data):
    ax.text(j, i, '{:0.2f}'.format(z), ha='center', va='center')
fig.colorbar(cp, ax=ax, fraction=0.046, pad=0.04);
# fraction=0.046, pad=0.04 seem like a magical combination to keep the colorbar scaled to the plot all the time 🙄

# Correlation Matrix function in Seaborn 👨‍🚀
fig, ax = plt.subplots(figsize=(8, 8))
cmap = plt.get_cmap('magma', 30)
ax = sns.heatmap(correlation_matrix_data, annot=True, fmt="0.3f", square=True, cmap=cmap, cbar_kws={"shrink": 0.8})
ax.set_title('Correlation Matrix Plot', fontsize=13);
# annot=True: annotate values into the corr matrix
# cbar_kws={"shrink": 0.8}: change the size of the colorbar to 0.8 of the plot 🙆‍♀️

Group = np.repeat(np.array(['Q1', 'Q2', 'Q3']), [52, 52, 52], axis=0)
Group = pd.Series(Group)
pair_plot_data = data[['Social support', 'Healthy life expectancy', 'Score', 'GDP per capita']]
pair_plot_data['Group'] = Group
pair_plot_data.iloc[[0, 52, 104]]
grid = sns.PairGrid(data=pair_plot_data, size=2.5, hue='Group')
grid = grid.map_upper(plt.scatter, linewidths=0.3, edgecolor='black', alpha=0.7)
grid = grid.map_diag(sns.kdeplot, fill=True, bw=0.3)
grid = grid.map_lower(plt.hist2d, bins=30, lw=0.0)

scatter_matrix_data = data[['Social support', 'Healthy life expectancy', 'Score', 'GDP per capita']]
histogram_data = data[['Score', 'GDP per capita']]

# Histogram function with matplotlib 🍊
fig, ax = plt.subplots(figsize=(10, 4))
ax.hist(histogram_data['Score'], bins=30, color='darkblue', histtype='stepfilled', edgecolor='black', label='Happiness Score')
ax.hist(histogram_data['GDP per capita'], bins=10, color='darkgreen', histtype='barstacked', edgecolor='black', label='GDP')
ax.legend(prop={'size': 10});

# Histogram function with seaborn 🍋
sns.set(rc={'figure.figsize': (10, 4)})     # set figure size
sns.set_theme(style='whitegrid')            # theme of the graph
ax = sns.histplot(x='Score', data=histogram_data, bins=30, color='darkgreen', edgecolor='black', kde=True)
ax = sns.histplot(x='GDP per capita', data=histogram_data, bins=10, color='darkblue', edgecolor='black', kde=True)

Group = np.repeat(np.array(['H1', 'H2', 'H3']), [30, 50, 76], axis=0)
Group = pd.Series(Group)
countplot_data = data['Country or region']
countplot_data['Group'] = Group
sns.set(rc={'figure.figsize': (8, 6)})
sns.set_theme(style='whitegrid')
ax = sns.countplot(x='Group', data=countplot_data, palette='viridis', edgecolor='black')
ax.set_title('Count Plot of Groups', fontsize=15)
ax.set_xlabel('Groups of Countries')
code
88099839/cell_45
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/world-happiness/2019.csv')
correlation_matrix_data = data.corr()

# Correlation Matrix function in Matplotlib 👩‍🚒
fig, ax = plt.subplots(figsize=(6, 6))
cp = ax.matshow(correlation_matrix_data)
ax.set_title('Correlation Matrix Plot')
# Annotate the values into correlation plot
for (i, j), z in np.ndenumerate(correlation_matrix_data):
    ax.text(j, i, '{:0.2f}'.format(z), ha='center', va='center')
fig.colorbar(cp, ax=ax, fraction=0.046, pad=0.04);
# fraction=0.046, pad=0.04 seem like a magical combination to keep the colorbar scaled to the plot all the time 🙄

# Correlation Matrix function in Seaborn 👨‍🚀
fig, ax = plt.subplots(figsize=(8, 8))
cmap = plt.get_cmap('magma', 30)
ax = sns.heatmap(correlation_matrix_data, annot=True, fmt="0.3f", square=True, cmap=cmap, cbar_kws={"shrink": 0.8})
ax.set_title('Correlation Matrix Plot', fontsize=13);
# annot=True: annotate values into the corr matrix
# cbar_kws={"shrink": 0.8}: change the size of the colorbar to 0.8 of the plot 🙆‍♀️

Group = np.repeat(np.array(['Q1', 'Q2', 'Q3']), [52, 52, 52], axis=0)
Group = pd.Series(Group)
pair_plot_data = data[['Social support', 'Healthy life expectancy', 'Score', 'GDP per capita']]
pair_plot_data['Group'] = Group
pair_plot_data.iloc[[0, 52, 104]]
grid = sns.PairGrid(data=pair_plot_data, size=2.5, hue='Group')
grid = grid.map_upper(plt.scatter, linewidths=0.3, edgecolor='black', alpha=0.7)
grid = grid.map_diag(sns.kdeplot, fill=True, bw=0.3)
grid = grid.map_lower(plt.hist2d, bins=30, lw=0.0)

histogram_data = data[['Score', 'GDP per capita']]
sns.set(rc={'figure.figsize': (10, 4)})
sns.set_theme(style='whitegrid')
ax = sns.histplot(x='Score', data=histogram_data, bins=30, color='darkgreen', edgecolor='black', kde=True)
ax = sns.histplot(x='GDP per capita', data=histogram_data, bins=10, color='darkblue', edgecolor='black', kde=True)
code
88099839/cell_18
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

data = pd.read_csv('../input/world-happiness/2019.csv')
correlation_matrix_data = data.corr()
fig, ax = plt.subplots(figsize=(6, 6))
cp = ax.matshow(correlation_matrix_data)
ax.set_title('Correlation Matrix Plot')
for (i, j), z in np.ndenumerate(correlation_matrix_data):
    ax.text(j, i, '{:0.2f}'.format(z), ha='center', va='center')
fig.colorbar(cp, ax=ax, fraction=0.046, pad=0.04)
code
88099839/cell_59
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/world-happiness/2019.csv')
correlation_matrix_data = data.corr()

# Correlation Matrix function in Matplotlib 👩‍🚒
fig, ax = plt.subplots(figsize=(6, 6))
cp = ax.matshow(correlation_matrix_data)
ax.set_title('Correlation Matrix Plot')
# Annotate the values into correlation plot
for (i, j), z in np.ndenumerate(correlation_matrix_data):
    ax.text(j, i, '{:0.2f}'.format(z), ha='center', va='center')
fig.colorbar(cp, ax=ax, fraction=0.046, pad=0.04);
# fraction=0.046, pad=0.04 seem like a magical combination to keep the colorbar scaled to the plot all the time 🙄

# Correlation Matrix function in Seaborn 👨‍🚀
fig, ax = plt.subplots(figsize=(8, 8))
cmap = plt.get_cmap('magma', 30)
ax = sns.heatmap(correlation_matrix_data, annot=True, fmt="0.3f", square=True, cmap=cmap, cbar_kws={"shrink": 0.8})
ax.set_title('Correlation Matrix Plot', fontsize=13);
# annot=True: annotate values into the corr matrix
# cbar_kws={"shrink": 0.8}: change the size of the colorbar to 0.8 of the plot 🙆‍♀️

Group = np.repeat(np.array(['Q1', 'Q2', 'Q3']), [52, 52, 52], axis=0)
Group = pd.Series(Group)
pair_plot_data = data[['Social support', 'Healthy life expectancy', 'Score', 'GDP per capita']]
pair_plot_data['Group'] = Group
pair_plot_data.iloc[[0, 52, 104]]
grid = sns.PairGrid(data=pair_plot_data, size=2.5, hue='Group')
grid = grid.map_upper(plt.scatter, linewidths=0.3, edgecolor='black', alpha=0.7)
grid = grid.map_diag(sns.kdeplot, fill=True, bw=0.3)
grid = grid.map_lower(plt.hist2d, bins=30, lw=0.0)

scatter_matrix_data = data[['Social support', 'Healthy life expectancy', 'Score', 'GDP per capita']]
histogram_data = data[['Score', 'GDP per capita']]

# Histogram function with matplotlib 🍊
fig, ax = plt.subplots(figsize=(10, 4))
ax.hist(histogram_data['Score'], bins=30, color='darkblue', histtype='stepfilled', edgecolor='black', label='Happiness Score')
ax.hist(histogram_data['GDP per capita'], bins=10, color='darkgreen', histtype='barstacked', edgecolor='black', label='GDP')
ax.legend(prop={'size': 10});

# Histogram function with seaborn 🍋
sns.set(rc={'figure.figsize': (10, 4)})     # set figure size
sns.set_theme(style='whitegrid')            # theme of the graph
ax = sns.histplot(x='Score', data=histogram_data, bins=30, color='darkgreen', edgecolor='black', kde=True)
ax = sns.histplot(x='GDP per capita', data=histogram_data, bins=10, color='darkblue', edgecolor='black', kde=True)

Group = np.repeat(np.array(['H1', 'H2', 'H3']), [30, 50, 76], axis=0)
Group = pd.Series(Group)
countplot_data = data['Country or region']
countplot_data['Group'] = Group
sns.set(rc={'figure.figsize': (8, 6)})      # set figure size
sns.set_theme(style='whitegrid')
# Count plot function in Seaborn
ax = sns.countplot(x='Group', data=countplot_data, palette='viridis', edgecolor='black')
ax.set_title('Count Plot of Groups', fontsize=15)
ax.set_xlabel('Groups of Countries');

boxplot_data = data.drop(['Overall rank', 'Country or region', 'Score'], axis=1)
props = dict(boxes='darkblue', whiskers='black', medians='red', caps='black')
boxplot_data.plot.box(color=props, patch_artist=True, rot=45, figsize=(10, 5))
group = np.repeat(np.array(['T1', 'T2', 'T3', 'T4']), [39, 39, 39, 39], axis=0).tolist()
df = boxplot_data.copy()
df['Group'] = group
fig, ax = plt.subplots(figsize=(10, 5))
ax.boxplot(boxplot_data.to_numpy(), widths=0.55, patch_artist=True, notch=True)
ax.set_title('Boxplot of Data', fontsize=17)
ax.set_xlabel('numeric variables', fontsize=12)
ax.set_ylabel('quantiles of values', fontsize=12)
code
88099839/cell_43
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/world-happiness/2019.csv')
correlation_matrix_data = data.corr()

# Correlation Matrix function in Matplotlib 👩‍🚒
fig, ax = plt.subplots(figsize=(6, 6))
cp = ax.matshow(correlation_matrix_data)
ax.set_title('Correlation Matrix Plot')
# Annotate the values into correlation plot
for (i, j), z in np.ndenumerate(correlation_matrix_data):
    ax.text(j, i, '{:0.2f}'.format(z), ha='center', va='center')
fig.colorbar(cp, ax=ax, fraction=0.046, pad=0.04);
# fraction=0.046, pad=0.04 seem like a magical combination to keep the colorbar scaled to the plot all the time 🙄

# Correlation Matrix function in Seaborn 👨‍🚀
fig, ax = plt.subplots(figsize=(8, 8))
cmap = plt.get_cmap('magma', 30)
ax = sns.heatmap(correlation_matrix_data, annot=True, fmt="0.3f", square=True, cmap=cmap, cbar_kws={"shrink": 0.8})
ax.set_title('Correlation Matrix Plot', fontsize=13);
# annot=True: annotate values into the corr matrix
# cbar_kws={"shrink": 0.8}: change the size of the colorbar to 0.8 of the plot 🙆‍♀️

Group = np.repeat(np.array(['Q1', 'Q2', 'Q3']), [52, 52, 52], axis=0)
Group = pd.Series(Group)
pair_plot_data = data[['Social support', 'Healthy life expectancy', 'Score', 'GDP per capita']]
pair_plot_data['Group'] = Group
pair_plot_data.iloc[[0, 52, 104]]
grid = sns.PairGrid(data=pair_plot_data, size=2.5, hue='Group')
grid = grid.map_upper(plt.scatter, linewidths=0.3, edgecolor='black', alpha=0.7)
grid = grid.map_diag(sns.kdeplot, fill=True, bw=0.3)
grid = grid.map_lower(plt.hist2d, bins=30, lw=0.0)

scatter_matrix_data = data[['Social support', 'Healthy life expectancy', 'Score', 'GDP per capita']]
histogram_data = data[['Score', 'GDP per capita']]
fig, ax = plt.subplots(figsize=(10, 4))
ax.hist(histogram_data['Score'], bins=30, color='darkblue', histtype='stepfilled', edgecolor='black', label='Happiness Score')
ax.hist(histogram_data['GDP per capita'], bins=10, color='darkgreen', histtype='barstacked', edgecolor='black', label='GDP')
ax.legend(prop={'size': 10})
code
88099839/cell_53
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/world-happiness/2019.csv')
correlation_matrix_data = data.corr()
boxplot_data = data.drop(['Overall rank', 'Country or region', 'Score'], axis=1)
boxplot_data.head(3)
code
88099839/cell_27
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/world-happiness/2019.csv')
correlation_matrix_data = data.corr()

# Correlation Matrix function in Matplotlib 👩‍🚒
fig, ax = plt.subplots(figsize=(6, 6))
cp = ax.matshow(correlation_matrix_data)
ax.set_title('Correlation Matrix Plot')
# Annotate the values into correlation plot
for (i, j), z in np.ndenumerate(correlation_matrix_data):
    ax.text(j, i, '{:0.2f}'.format(z), ha='center', va='center')
fig.colorbar(cp, ax=ax, fraction=0.046, pad=0.04);
# fraction=0.046, pad=0.04 seem like a magical combination to keep the colorbar scaled to the plot all the time 🙄

# Correlation Matrix function in Seaborn 👨‍🚀
fig, ax = plt.subplots(figsize=(8, 8))
cmap = plt.get_cmap('magma', 30)
ax = sns.heatmap(correlation_matrix_data, annot=True, fmt="0.3f", square=True, cmap=cmap, cbar_kws={"shrink": 0.8})
ax.set_title('Correlation Matrix Plot', fontsize=13);
# annot=True: annotate values into the corr matrix
# cbar_kws={"shrink": 0.8}: change the size of the colorbar to 0.8 of the plot 🙆‍♀️

Group = np.repeat(np.array(['Q1', 'Q2', 'Q3']), [52, 52, 52], axis=0)
Group = pd.Series(Group)
pair_plot_data = data[['Social support', 'Healthy life expectancy', 'Score', 'GDP per capita']]
pair_plot_data['Group'] = Group
pair_plot_data.iloc[[0, 52, 104]]
sns.pairplot(data=pair_plot_data, hue='Group', palette='magma', size=2.5)
code
129028814/cell_4
[ "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.read_csv('/kaggle/input/covid19-dataset/Covid Data.csv')
df.loc[df.DATE_DIED != '9999-99-99', 'DATE_DIED'] = 1
df.loc[df.DATE_DIED == '9999-99-99', 'DATE_DIED'] = 2
age_range = [-float('inf'), 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, float('inf')]
df['AGE'] = pd.cut(df['AGE'], age_range, labels=False)
sns.catplot(x='AGE', y='CLASIFFICATION_FINAL', data=df, kind='bar')
sns.catplot(x='AGE', data=df, kind='count')
code
129028814/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.read_csv('/kaggle/input/covid19-dataset/Covid Data.csv')
df.loc[df.DATE_DIED != '9999-99-99', 'DATE_DIED'] = 1
df.loc[df.DATE_DIED == '9999-99-99', 'DATE_DIED'] = 2
columns = ['USMER', 'MEDICAL_UNIT', 'SEX', 'PATIENT_TYPE', 'DATE_DIED', 'INTUBED', 'PNEUMONIA',
           'PREGNANT', 'DIABETES', 'COPD', 'ASTHMA', 'INMSUPR', 'HIPERTENSION', 'OTHER_DISEASE',
           'CARDIOVASCULAR', 'OBESITY', 'RENAL_CHRONIC', 'TOBACCO', 'ICU']
for col in columns:
    print(col, '= 1')
    # label fixed: the original printed 'mean:' although the value is a count
    print('count:', df[df[col] == 1]['CLASIFFICATION_FINAL'].count())
    print(col, '= 2')
    print('count:', df[df[col] == 2]['CLASIFFICATION_FINAL'].count())
    print('--------------------')
code
129028814/cell_2
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.read_csv('/kaggle/input/covid19-dataset/Covid Data.csv')
df.head()
code
129028814/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
129028814/cell_8
[ "image_output_4.png", "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.read_csv('/kaggle/input/covid19-dataset/Covid Data.csv')
df.loc[df.DATE_DIED != '9999-99-99', 'DATE_DIED'] = 1
df.loc[df.DATE_DIED == '9999-99-99', 'DATE_DIED'] = 2
age_range = [-float('inf'), 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, float('inf')]
# Split into positive (< 4) and negative (>= 4) classifications before binning,
# so AGE is cut exactly once per frame; .copy() avoids SettingWithCopyWarning
df_pos = df[df.CLASIFFICATION_FINAL < 4].copy()
df_neg = df[df.CLASIFFICATION_FINAL >= 4].copy()
df_pos['AGE'] = pd.cut(df_pos['AGE'], age_range, labels=False)
sns.catplot(x='AGE', y='CLASIFFICATION_FINAL', data=df_pos, kind='bar')
sns.catplot(x='AGE', data=df_pos, kind='count')
df_neg['AGE'] = pd.cut(df_neg['AGE'], age_range, labels=False)
sns.catplot(x='AGE', y='CLASIFFICATION_FINAL', data=df_neg, kind='bar')
sns.catplot(x='AGE', data=df_neg, kind='count')
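# Compact alternative sketch (not part of the original cell): a single crosstab
# gives the same per-age-bin breakdown of classifications as the bar plots above.
print(pd.crosstab(df_pos['AGE'], df_pos['CLASIFFICATION_FINAL']))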
code
129028814/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.read_csv('/kaggle/input/covid19-dataset/Covid Data.csv')
df.loc[df.DATE_DIED != '9999-99-99', 'DATE_DIED'] = 1
df.loc[df.DATE_DIED == '9999-99-99', 'DATE_DIED'] = 2
df.tail()
code
129028814/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns

df = pd.read_csv('/kaggle/input/covid19-dataset/Covid Data.csv')
df.loc[df.DATE_DIED != '9999-99-99', 'DATE_DIED'] = 1
df.loc[df.DATE_DIED == '9999-99-99', 'DATE_DIED'] = 2
columns = ['USMER', 'MEDICAL_UNIT', 'SEX', 'PATIENT_TYPE', 'DATE_DIED', 'INTUBED', 'PNEUMONIA', 'PREGNANT', 'DIABETES', 'COPD', 'ASTHMA', 'INMSUPR', 'HIPERTENSION', 'OTHER_DISEASE', 'CARDIOVASCULAR', 'OBESITY', 'RENAL_CHRONIC', 'TOBACCO', 'ICU']
for col in columns:
    print(col, '= 1')
    print('mean:', df[df[col] == 1]['CLASIFFICATION_FINAL'].mean())
    print(col, '= 2')
    print('mean:', df[df[col] == 2]['CLASIFFICATION_FINAL'].mean())
    print('--------------------')
code
90129087/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train.isnull()
train['Age'].hist(bins=30, color='orange', alpha=0.7)
code
90129087/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train.isnull()

fig, ax = plt.subplots(1, 2, figsize=(12, 6))
train['Survived'].value_counts().plot.pie(explode=[0, 0.1], autopct='%1.1f%%', ax=ax[0], shadow=True, startangle=90, colormap='OrRd_r')
sns.countplot(x='Survived', data=train, ax=ax[1], palette='OrRd')
ax[0].set_title('Pie plot - Survived')
ax[1].set_title('Count plot - Survived')
ax[0].set_ylabel('Survival')
plt.show()
sns.countplot(x='Survived', data=train, hue='Sex', palette='OrRd_r')
plt.legend(loc='upper right')
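# Numeric companion sketch (not in the original cell): the mean of the 0/1
# Survived column per group is exactly the survival rate shown in the plot.
print(train.groupby('Sex')['Survived'].mean())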
code
90129087/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train.isnull()
code
90129087/cell_25
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train.isnull()

fig, ax = plt.subplots(1, 2, figsize=(12, 6))
train['Survived'].value_counts().plot.pie(explode=[0, 0.1], autopct='%1.1f%%', ax=ax[0], shadow=True, startangle=90, colormap='OrRd_r')
sns.countplot(x='Survived', data=train, ax=ax[1], palette='OrRd')
ax[0].set_title('Pie plot - Survived')
ax[1].set_title('Count plot - Survived')
ax[0].set_ylabel('Survival')
plt.show()

fig, axes = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
fig.suptitle('Age distribution vs Survival')
sns.histplot(train[train['Survived'] == 0]['Age'], ax=axes[0], color='red')
axes[0].set_title('Not survived distribution')
sns.histplot(train[train['Survived'] == 1]['Age'], ax=axes[1], color='darkturquoise')
axes[1].set_title('Survived distribution')
sns.histplot(train[train['Survived'] == 0]['Age'], ax=axes[2], label='Not survived', color='red')
sns.histplot(train[train['Survived'] == 1]['Age'], ax=axes[2], label='Survived', color='darkturquoise')
axes[2].set_title('Survived and not Survived')
plt.legend()
sns.countplot(x='SibSp', hue='Survived', data=train, palette='OrRd_r')
plt.legend(loc='upper right')
code
90129087/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train.isnull()

fig, ax = plt.subplots(1, 2, figsize=(12, 6))
train['Survived'].value_counts().plot.pie(explode=[0, 0.1], autopct='%1.1f%%', ax=ax[0], shadow=True, startangle=90, colormap='OrRd_r')
sns.countplot(x='Survived', data=train, ax=ax[1], palette='OrRd')
ax[0].set_title('Pie plot - Survived')
ax[1].set_title('Count plot - Survived')
ax[0].set_ylabel('Survival')
plt.show()

fig, axes = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
fig.suptitle('Age distribution vs Survival')
sns.histplot(train[train['Survived'] == 0]['Age'], ax=axes[0], color='red')
axes[0].set_title('Not survived distribution')
sns.histplot(train[train['Survived'] == 1]['Age'], ax=axes[1], color='darkturquoise')
axes[1].set_title('Survived distribution')
sns.histplot(train[train['Survived'] == 0]['Age'], ax=axes[2], label='Not survived', color='red')
sns.histplot(train[train['Survived'] == 1]['Age'], ax=axes[2], label='Survived', color='darkturquoise')
axes[2].set_title('Survived and not Survived')
plt.legend()
code
90129087/cell_30
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train.isnull()

fig, ax = plt.subplots(1, 2, figsize=(12, 6))
train['Survived'].value_counts().plot.pie(explode=[0, 0.1], autopct='%1.1f%%', ax=ax[0], shadow=True, startangle=90, colormap='OrRd_r')
sns.countplot(x='Survived', data=train, ax=ax[1], palette='OrRd')
ax[0].set_title('Pie plot - Survived')
ax[1].set_title('Count plot - Survived')
ax[0].set_ylabel('Survival')
plt.show()

fig, axes = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
fig.suptitle('Age distribution vs Survival')
sns.histplot(train[train['Survived'] == 0]['Age'], ax=axes[0], color='red')
axes[0].set_title('Not survived distribution')
sns.histplot(train[train['Survived'] == 1]['Age'], ax=axes[1], color='darkturquoise')
axes[1].set_title('Survived distribution')
sns.histplot(train[train['Survived'] == 0]['Age'], ax=axes[2], label='Not survived', color='red')
sns.histplot(train[train['Survived'] == 1]['Age'], ax=axes[2], label='Survived', color='darkturquoise')
axes[2].set_title('Survived and not Survived')
plt.legend()
sns.countplot(x='Parch', hue='Survived', data=train, palette='OrRd_r')
plt.legend(loc='upper right')
code
90129087/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.head(3)
code
90129087/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train.isnull()

fig, ax = plt.subplots(1, 2, figsize=(12, 6))
train['Survived'].value_counts().plot.pie(explode=[0, 0.1], autopct='%1.1f%%', ax=ax[0], shadow=True, startangle=90, colormap='OrRd_r')
sns.countplot(x='Survived', data=train, ax=ax[1], palette='OrRd')
ax[0].set_title('Pie plot - Survived')
ax[1].set_title('Count plot - Survived')
ax[0].set_ylabel('Survival')
plt.show()
code
90129087/cell_19
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train.isnull()

fig, ax = plt.subplots(1, 2, figsize=(12, 6))
train['Survived'].value_counts().plot.pie(explode=[0, 0.1], autopct='%1.1f%%', ax=ax[0], shadow=True, startangle=90, colormap='OrRd_r')
sns.countplot(x='Survived', data=train, ax=ax[1], palette='OrRd')
ax[0].set_title('Pie plot - Survived')
ax[1].set_title('Count plot - Survived')
ax[0].set_ylabel('Survival')
plt.show()
sns.countplot(x='Parch', hue='Survived', data=train, palette='OrRd_r')
plt.legend(loc='upper right')
code
90129087/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90129087/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
code
90129087/cell_32
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train.isnull()

fig, ax = plt.subplots(1, 2, figsize=(12, 6))
train['Survived'].value_counts().plot.pie(explode=[0, 0.1], autopct='%1.1f%%', ax=ax[0], shadow=True, startangle=90, colormap='OrRd_r')
sns.countplot(x='Survived', data=train, ax=ax[1], palette='OrRd')
ax[0].set_title('Pie plot - Survived')
ax[1].set_title('Count plot - Survived')
ax[0].set_ylabel('Survival')
plt.show()

fig, axes = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
fig.suptitle('Age distribution vs Survival')
sns.histplot(train[train['Survived'] == 0]['Age'], ax=axes[0], color='red')
axes[0].set_title('Not survived distribution')
sns.histplot(train[train['Survived'] == 1]['Age'], ax=axes[1], color='darkturquoise')
axes[1].set_title('Survived distribution')
sns.histplot(train[train['Survived'] == 0]['Age'], ax=axes[2], label='Not survived', color='red')
sns.histplot(train[train['Survived'] == 1]['Age'], ax=axes[2], label='Survived', color='darkturquoise')
axes[2].set_title('Survived and not Survived')
plt.legend()
sns.countplot(x='Embarked', hue='Survived', data=train, palette='OrRd_r')
plt.legend(loc='upper right')
code
90129087/cell_28
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train.isnull()

fig, ax = plt.subplots(1, 2, figsize=(12, 6))
train['Survived'].value_counts().plot.pie(explode=[0, 0.1], autopct='%1.1f%%', ax=ax[0], shadow=True, startangle=90, colormap='OrRd_r')
sns.countplot(x='Survived', data=train, ax=ax[1], palette='OrRd')
ax[0].set_title('Pie plot - Survived')
ax[1].set_title('Count plot - Survived')
ax[0].set_ylabel('Survival')
plt.show()

fig, axes = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
fig.suptitle('Age distribution vs Survival')
sns.histplot(train[train['Survived'] == 0]['Age'], ax=axes[0], color='red')
axes[0].set_title('Not survived distribution')
sns.histplot(train[train['Survived'] == 1]['Age'], ax=axes[1], color='darkturquoise')
axes[1].set_title('Survived distribution')
sns.histplot(train[train['Survived'] == 0]['Age'], ax=axes[2], label='Not survived', color='red')
sns.histplot(train[train['Survived'] == 1]['Age'], ax=axes[2], label='Survived', color='darkturquoise')
axes[2].set_title('Survived and not Survived')
plt.legend()
train[train['Survived'] == 1]['Fare'].hist(bins=40, color='darkturquoise', alpha=0.7, figsize=(10, 4), label='Survived')
train[train['Survived'] == 0]['Fare'].hist(bins=40, color='red', alpha=0.7, figsize=(10, 4), label='Not survived')
plt.xlabel('Fare')
plt.legend()
code
90129087/cell_15
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train.isnull()

fig, ax = plt.subplots(1, 2, figsize=(12, 6))
train['Survived'].value_counts().plot.pie(explode=[0, 0.1], autopct='%1.1f%%', ax=ax[0], shadow=True, startangle=90, colormap='OrRd_r')
sns.countplot(x='Survived', data=train, ax=ax[1], palette='OrRd')
ax[0].set_title('Pie plot - Survived')
ax[1].set_title('Count plot - Survived')
ax[0].set_ylabel('Survival')
plt.show()
sns.countplot(x='Survived', data=train, hue='Pclass', palette='OrRd_r')
plt.legend(loc='upper right')
code
90129087/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train = pd.read_csv('../input/titanic/train.csv')
test = pd.read_csv('../input/titanic/test.csv')
train.columns
train.isnull()
sns.heatmap(train.isnull(), yticklabels=False, cbar=False, cmap='OrRd_r')
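# Quick numeric cross-check sketch (not part of the original cell) for the
# missing-value heatmap above: count the nulls per column and rank them.
print(train.isnull().sum().sort_values(ascending=False).head())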
code
90138335/cell_21
[ "application_vnd.jupyter.stderr_output_1.png" ]
from kaggle_datasets import KaggleDatasets
import matplotlib.pyplot as plt
import numpy as np
import re
import tensorflow as tf

HEIGHT = 256
WIDTH = 256
BATCH_SIZE = 1
CHANNELS = 3
LAMBDA = 10
EPOCHS = 250
GCS_PATH = KaggleDatasets().get_gcs_path('gan-getting-started')

# Detect a TPU if one is attached; otherwise fall back to the default strategy
try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
    tpu = None
if tpu:
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
    strategy = tf.distribute.get_strategy()
REPLICAS = strategy.num_replicas_in_sync
AUTOTUNE = tf.data.AUTOTUNE

MONET_FILENAMES = tf.io.gfile.glob(str(GCS_PATH + '/monet_tfrec/*.tfrec'))
PHOTO_FILENAMES = tf.io.gfile.glob(str(GCS_PATH + '/photo_tfrec/*.tfrec'))

def count_data_items(filenames):
    # Each TFRecord file name encodes its item count, e.g. 'monet00-60.tfrec'
    n = [int(re.compile('-([0-9]*)\\.').search(filename).group(1)) for filename in filenames]
    return np.sum(n)

n_monet_samples = count_data_items(MONET_FILENAMES)
n_photo_samples = count_data_items(PHOTO_FILENAMES)

def decode_image(image):
    # Decode a JPEG and scale pixel values from [0, 255] to [-1, 1]
    image = tf.image.decode_jpeg(image, channels=CHANNELS)
    image = tf.cast(image, tf.float32) / 127.5 - 1
    image = tf.reshape(image, [HEIGHT, WIDTH, CHANNELS])
    return image

def read_tfrecord(example):
    tfrecord_format = {'image_name': tf.io.FixedLenFeature([], tf.string), 'image': tf.io.FixedLenFeature([], tf.string), 'target': tf.io.FixedLenFeature([], tf.string)}
    example = tf.io.parse_single_example(example, tfrecord_format)
    image = decode_image(example['image'])
    return image

def load_dataset(filenames, labeled=True, ordered=False):
    dataset = tf.data.TFRecordDataset(filenames)
    dataset = dataset.map(read_tfrecord, num_parallel_calls=AUTOTUNE)
    return dataset

def display_samples(dataset, row, col):
    dataset_iter = iter(dataset)
    plt.figure(figsize=(col * 3, row * 3))
    for j in range(row * col):
        example_sample = next(dataset_iter)
        plt.subplot(row, col, j + 1)
        # rendering body assumed: show the batched sample, rescaled from [-1, 1] to [0, 1]
        plt.imshow(example_sample[0] * 0.5 + 0.5)
        plt.axis('off')
    plt.show()

display_samples(load_dataset(PHOTO_FILENAMES).shuffle(30).batch(1), 4, 4)
code
90138335/cell_20
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from kaggle_datasets import KaggleDatasets
import matplotlib.pyplot as plt
import numpy as np
import re
import tensorflow as tf

HEIGHT = 256
WIDTH = 256
BATCH_SIZE = 1
CHANNELS = 3
LAMBDA = 10
EPOCHS = 250
GCS_PATH = KaggleDatasets().get_gcs_path('gan-getting-started')
try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
    tpu = None
if tpu:
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
    strategy = tf.distribute.get_strategy()
REPLICAS = strategy.num_replicas_in_sync
AUTOTUNE = tf.data.AUTOTUNE
MONET_FILENAMES = tf.io.gfile.glob(str(GCS_PATH + '/monet_tfrec/*.tfrec'))
PHOTO_FILENAMES = tf.io.gfile.glob(str(GCS_PATH + '/photo_tfrec/*.tfrec'))

def count_data_items(filenames):
    n = [int(re.compile('-([0-9]*)\\.').search(filename).group(1)) for filename in filenames]
    return np.sum(n)

n_monet_samples = count_data_items(MONET_FILENAMES)
n_photo_samples = count_data_items(PHOTO_FILENAMES)

def decode_image(image):
    image = tf.image.decode_jpeg(image, channels=CHANNELS)
    image = tf.cast(image, tf.float32) / 127.5 - 1
    image = tf.reshape(image, [HEIGHT, WIDTH, CHANNELS])
    return image

def read_tfrecord(example):
    tfrecord_format = {'image_name': tf.io.FixedLenFeature([], tf.string), 'image': tf.io.FixedLenFeature([], tf.string), 'target': tf.io.FixedLenFeature([], tf.string)}
    example = tf.io.parse_single_example(example, tfrecord_format)
    image = decode_image(example['image'])
    return image

def load_dataset(filenames, labeled=True, ordered=False):
    dataset = tf.data.TFRecordDataset(filenames)
    dataset = dataset.map(read_tfrecord, num_parallel_calls=AUTOTUNE)
    return dataset

def display_samples(dataset, row, col):
    dataset_iter = iter(dataset)
    plt.figure(figsize=(col * 3, row * 3))
    for j in range(row * col):
        example_sample = next(dataset_iter)
        plt.subplot(row, col, j + 1)
        # rendering body assumed: show the batched sample, rescaled from [-1, 1] to [0, 1]
        plt.imshow(example_sample[0] * 0.5 + 0.5)
        plt.axis('off')
    plt.show()

display_samples(load_dataset(MONET_FILENAMES).shuffle(30).batch(1), 4, 4)
code
90138335/cell_6
[ "image_output_1.png" ]
import tensorflow as tf

try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    print(f'Running on TPU {tpu.master()}')
except ValueError:
    tpu = None
if tpu:
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
    strategy = tf.distribute.get_strategy()
REPLICAS = strategy.num_replicas_in_sync
print(f'REPLICAS: {REPLICAS}')
AUTOTUNE = tf.data.AUTOTUNE
code
90138335/cell_8
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_4.png", "text_plain_output_3.png", "text_plain_output_1.png" ]
from kaggle_datasets import KaggleDatasets
import numpy as np
import re
import tensorflow as tf

HEIGHT = 256
WIDTH = 256
BATCH_SIZE = 1
CHANNELS = 3
LAMBDA = 10
EPOCHS = 250
GCS_PATH = KaggleDatasets().get_gcs_path('gan-getting-started')
try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
    tpu = None
if tpu:
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
    strategy = tf.distribute.get_strategy()
REPLICAS = strategy.num_replicas_in_sync
AUTOTUNE = tf.data.AUTOTUNE
MONET_FILENAMES = tf.io.gfile.glob(str(GCS_PATH + '/monet_tfrec/*.tfrec'))
PHOTO_FILENAMES = tf.io.gfile.glob(str(GCS_PATH + '/photo_tfrec/*.tfrec'))

def count_data_items(filenames):
    n = [int(re.compile('-([0-9]*)\\.').search(filename).group(1)) for filename in filenames]
    return np.sum(n)

n_monet_samples = count_data_items(MONET_FILENAMES)
n_photo_samples = count_data_items(PHOTO_FILENAMES)
print(f'Monet TFRecord files: {len(MONET_FILENAMES)}')
print(f'Monet image files: {n_monet_samples}')
print(f'Photo TFRecord files: {len(PHOTO_FILENAMES)}')
print(f'Photo image files: {n_photo_samples}')
code
90138335/cell_16
[ "text_plain_output_1.png" ]
from kaggle_datasets import KaggleDatasets
from tensorflow.keras import layers, Model, losses, optimizers
import numpy as np
import re
import tensorflow as tf

HEIGHT = 256
WIDTH = 256
BATCH_SIZE = 1
CHANNELS = 3
LAMBDA = 10
EPOCHS = 250
GCS_PATH = KaggleDatasets().get_gcs_path('gan-getting-started')
try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
    tpu = None
if tpu:
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
    strategy = tf.distribute.get_strategy()
REPLICAS = strategy.num_replicas_in_sync
AUTOTUNE = tf.data.AUTOTUNE
MONET_FILENAMES = tf.io.gfile.glob(str(GCS_PATH + '/monet_tfrec/*.tfrec'))
PHOTO_FILENAMES = tf.io.gfile.glob(str(GCS_PATH + '/photo_tfrec/*.tfrec'))

def count_data_items(filenames):
    n = [int(re.compile('-([0-9]*)\\.').search(filename).group(1)) for filename in filenames]
    return np.sum(n)

n_monet_samples = count_data_items(MONET_FILENAMES)
n_photo_samples = count_data_items(PHOTO_FILENAMES)

def decode_image(image):
    image = tf.image.decode_jpeg(image, channels=CHANNELS)
    image = tf.cast(image, tf.float32) / 127.5 - 1
    image = tf.reshape(image, [HEIGHT, WIDTH, CHANNELS])
    return image

def read_tfrecord(example):
    tfrecord_format = {'image_name': tf.io.FixedLenFeature([], tf.string), 'image': tf.io.FixedLenFeature([], tf.string), 'target': tf.io.FixedLenFeature([], tf.string)}
    example = tf.io.parse_single_example(example, tfrecord_format)
    image = decode_image(example['image'])
    return image

def load_dataset(filenames, labeled=True, ordered=False):
    dataset = tf.data.TFRecordDataset(filenames)
    dataset = dataset.map(read_tfrecord, num_parallel_calls=AUTOTUNE)
    return dataset

data_augmentation = tf.keras.Sequential([layers.RandomFlip('horizontal'), layers.RandomZoom(0.1)])
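# Hedged sketch (the wiring below is not part of this cell): this is how the
# augmentation layer would typically be applied inside the tf.data pipeline;
# RandomFlip/RandomZoom only transform when called with training=True.
augmented_monet_ds = (load_dataset(MONET_FILENAMES)
                      .map(lambda img: data_augmentation(img, training=True), num_parallel_calls=AUTOTUNE)
                      .shuffle(256)
                      .batch(BATCH_SIZE)
                      .prefetch(AUTOTUNE))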
code
122255609/cell_21
[ "text_html_output_1.png" ]
import pandas as pd

db = 'new_db'
user = 'postgres'
passwd = '1234'
port = 5432
host = 'ec2-44-201-58-213.compute-1.amazonaws.com'
credentials = 'postgresql://{}:{}@{}:{}/{}'.format(user, passwd, host, port, db)
credentials

def schemaGen(dataframe, schemaName):
    # Build a Postgres-friendly CREATE TABLE statement from a DataFrame
    localSchema = pd.io.sql.get_schema(dataframe, schemaName)
    localSchema = localSchema.replace('TEXT', 'VARCHAR(255)').replace('INTEGER', 'NUMERIC').replace('\n', '').replace('"', '')
    return ''.join(localSchema)

def getSchema(tableName, credentials):
    schema = pd.read_sql("SELECT * FROM information_schema.columns where table_name='{}'".format(tableName), con=credentials)
    return schema

def queryCreate(query):
    # Run DDL-style statements that return no result set
    try:
        requiredTable = pd.read_sql(query, con=credentials)
    except Exception as e:
        print(e)  # handler body assumed: DDL run through read_sql raises because it returns no rows

def queryBase(query):
    requiredTable = pd.read_sql(query, con=credentials)
    return requiredTable

introCTE = '''
with cte_review as(
    select rd.listing_id, rd."date" as review_date,
           rd.reviewer_name, rd.reviewer_id
    from reviews_details rd
)
select cr.listing_id, cr.review_date
from cte_review as cr
limit 5
'''
queryBase(introCTE)
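# Safety aside (a sketch, not from the original notebook; assumes SQLAlchemy is
# installed, which pandas already relies on for the URL-string connection):
# interpolating values into SQL with .format() invites injection, while bound
# parameters keep the query text and the values separate.
from sqlalchemy import create_engine, text
engine = create_engine(credentials)
safe_schema = pd.read_sql(text('SELECT * FROM information_schema.columns WHERE table_name = :tname'), con=engine, params={'tname': 'reviews_details'})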
code
122255609/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

db = 'new_db'
user = 'postgres'
passwd = '1234'
port = 5432
host = 'ec2-44-201-58-213.compute-1.amazonaws.com'
credentials = 'postgresql://{}:{}@{}:{}/{}'.format(user, passwd, host, port, db)
credentials

def schemaGen(dataframe, schemaName):
    localSchema = pd.io.sql.get_schema(dataframe, schemaName)
    localSchema = localSchema.replace('TEXT', 'VARCHAR(255)').replace('INTEGER', 'NUMERIC').replace('\n', '').replace('"', '')
    return ''.join(localSchema)

def getSchema(tableName, credentials):
    schema = pd.read_sql("SELECT * FROM information_schema.columns where table_name='{}'".format(tableName), con=credentials)
    return schema

def queryCreate(query):
    try:
        requiredTable = pd.read_sql(query, con=credentials)
    except Exception as e:
        print(e)  # handler body assumed: DDL run through read_sql raises because it returns no rows

def queryBase(query):
    requiredTable = pd.read_sql(query, con=credentials)
    return requiredTable

table_list = '''
select *
from information_schema.tables
where table_schema = 'public'
'''
queryBase(table_list)
code
122255609/cell_25
[ "text_html_output_1.png" ]
import pandas as pd

db = 'new_db'
user = 'postgres'
passwd = '1234'
port = 5432
host = 'ec2-44-201-58-213.compute-1.amazonaws.com'
credentials = 'postgresql://{}:{}@{}:{}/{}'.format(user, passwd, host, port, db)
credentials

def schemaGen(dataframe, schemaName):
    localSchema = pd.io.sql.get_schema(dataframe, schemaName)
    localSchema = localSchema.replace('TEXT', 'VARCHAR(255)').replace('INTEGER', 'NUMERIC').replace('\n', '').replace('"', '')
    return ''.join(localSchema)

def getSchema(tableName, credentials):
    schema = pd.read_sql("SELECT * FROM information_schema.columns where table_name='{}'".format(tableName), con=credentials)
    return schema

def queryCreate(query):
    try:
        requiredTable = pd.read_sql(query, con=credentials)
    except Exception as e:
        print(e)  # handler body assumed: DDL run through read_sql raises because it returns no rows

def queryBase(query):
    requiredTable = pd.read_sql(query, con=credentials)
    return requiredTable

selectRD = '''
select rd.listing_id, rd.reviewer_id, rd.reviewer_name
from reviews_details rd
limit 5
'''
selectLD = '''
select ld.id, ld.host_id, ld."name" as listing_name
from listing_details ld
limit 5
'''
queryBase(selectRD)
code
122255609/cell_4
[ "text_html_output_1.png" ]
db = 'new_db'
user = 'postgres'
passwd = '1234'
port = 5432
host = 'ec2-44-201-58-213.compute-1.amazonaws.com'
credentials = 'postgresql://{}:{}@{}:{}/{}'.format(user, passwd, host, port, db)
credentials
code
122255609/cell_23
[ "text_html_output_1.png" ]
import pandas as pd

db = 'new_db'
user = 'postgres'
passwd = '1234'
port = 5432
host = 'ec2-44-201-58-213.compute-1.amazonaws.com'
credentials = 'postgresql://{}:{}@{}:{}/{}'.format(user, passwd, host, port, db)
credentials

def schemaGen(dataframe, schemaName):
    localSchema = pd.io.sql.get_schema(dataframe, schemaName)
    localSchema = localSchema.replace('TEXT', 'VARCHAR(255)').replace('INTEGER', 'NUMERIC').replace('\n', '').replace('"', '')
    return ''.join(localSchema)

def getSchema(tableName, credentials):
    schema = pd.read_sql("SELECT * FROM information_schema.columns where table_name='{}'".format(tableName), con=credentials)
    return schema

def queryCreate(query):
    try:
        requiredTable = pd.read_sql(query, con=credentials)
    except Exception as e:
        print(e)  # handler body assumed: DDL run through read_sql raises because it returns no rows

def queryBase(query):
    requiredTable = pd.read_sql(query, con=credentials)
    return requiredTable

anotherSQ = '''
select count(*) as daily_booking, cq.day_date
from (
    select to_date(c."date", 'YYYY-MM-DD') as day_date, c.listing_id,
           c.available, c.price
    from calendar c
    where c.available = 'f'
) cq
group by cq.day_date
order by count(*) desc
limit 5
'''
queryBase(anotherSQ)
code
122255609/cell_20
[ "text_html_output_1.png" ]
import pandas as pd

db = 'new_db'
user = 'postgres'
passwd = '1234'
port = 5432
host = 'ec2-44-201-58-213.compute-1.amazonaws.com'
credentials = 'postgresql://{}:{}@{}:{}/{}'.format(user, passwd, host, port, db)
credentials

def schemaGen(dataframe, schemaName):
    localSchema = pd.io.sql.get_schema(dataframe, schemaName)
    localSchema = localSchema.replace('TEXT', 'VARCHAR(255)').replace('INTEGER', 'NUMERIC').replace('\n', '').replace('"', '')
    return ''.join(localSchema)

def getSchema(tableName, credentials):
    schema = pd.read_sql("SELECT * FROM information_schema.columns where table_name='{}'".format(tableName), con=credentials)
    return schema

def queryCreate(query):
    try:
        requiredTable = pd.read_sql(query, con=credentials)
    except Exception as e:
        print(e)  # handler body assumed: DDL run through read_sql raises because it returns no rows

def queryBase(query):
    requiredTable = pd.read_sql(query, con=credentials)
    return requiredTable

ctasSelect = '''
select ln2.id, ln2.description
from listing_neigh ln2
limit 5
'''
queryBase(ctasSelect)
code
122255609/cell_26
[ "text_html_output_1.png" ]
import pandas as pd

db = 'new_db'
user = 'postgres'
passwd = '1234'
port = 5432
host = 'ec2-44-201-58-213.compute-1.amazonaws.com'
credentials = 'postgresql://{}:{}@{}:{}/{}'.format(user, passwd, host, port, db)
credentials

def schemaGen(dataframe, schemaName):
    localSchema = pd.io.sql.get_schema(dataframe, schemaName)
    localSchema = localSchema.replace('TEXT', 'VARCHAR(255)').replace('INTEGER', 'NUMERIC').replace('\n', '').replace('"', '')
    return ''.join(localSchema)

def getSchema(tableName, credentials):
    schema = pd.read_sql("SELECT * FROM information_schema.columns where table_name='{}'".format(tableName), con=credentials)
    return schema

def queryCreate(query):
    try:
        requiredTable = pd.read_sql(query, con=credentials)
    except Exception as e:
        print(e)  # handler body assumed: DDL run through read_sql raises because it returns no rows

def queryBase(query):
    requiredTable = pd.read_sql(query, con=credentials)
    return requiredTable

selectRD = '''
select rd.listing_id, rd.reviewer_id, rd.reviewer_name
from reviews_details rd
limit 5
'''
selectLD = '''
select ld.id, ld.host_id, ld."name" as listing_name
from listing_details ld
limit 5
'''
queryBase(selectLD)
code
122255609/cell_2
[ "text_html_output_1.png" ]
!pip install psycopg2-binary
code
122255609/cell_19
[ "text_plain_output_1.png" ]
import pandas as pd

db = 'new_db'
user = 'postgres'
passwd = '1234'
port = 5432
host = 'ec2-44-201-58-213.compute-1.amazonaws.com'
credentials = 'postgresql://{}:{}@{}:{}/{}'.format(user, passwd, host, port, db)
credentials

def schemaGen(dataframe, schemaName):
    localSchema = pd.io.sql.get_schema(dataframe, schemaName)
    localSchema = localSchema.replace('TEXT', 'VARCHAR(255)').replace('INTEGER', 'NUMERIC').replace('\n', '').replace('"', '')
    return ''.join(localSchema)

def getSchema(tableName, credentials):
    schema = pd.read_sql("SELECT * FROM information_schema.columns where table_name='{}'".format(tableName), con=credentials)
    return schema

def queryCreate(query):
    try:
        requiredTable = pd.read_sql(query, con=credentials)
    except Exception as e:
        print(e)  # handler body assumed: DDL run through read_sql raises because it returns no rows

def queryBase(query):
    requiredTable = pd.read_sql(query, con=credentials)
    return requiredTable

roundingData = '''
select round(avg(rl.review_scores_rating)::numeric, 1) as avg_rating,
       count(rl.review_scores_accuracy) as num_reviews,
       rl.host_id
from review_listing rl
group by rl.host_id
having count(rl.review_scores_accuracy) > 10
limit 5
'''
queryBase(roundingData)
code
122255609/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd

db = 'new_db'
user = 'postgres'
passwd = '1234'
port = 5432
host = 'ec2-44-201-58-213.compute-1.amazonaws.com'
credentials = 'postgresql://{}:{}@{}:{}/{}'.format(user, passwd, host, port, db)
credentials

def schemaGen(dataframe, schemaName):
    localSchema = pd.io.sql.get_schema(dataframe, schemaName)
    localSchema = localSchema.replace('TEXT', 'VARCHAR(255)').replace('INTEGER', 'NUMERIC').replace('\n', '').replace('"', '')
    return ''.join(localSchema)

def getSchema(tableName, credentials):
    schema = pd.read_sql("SELECT * FROM information_schema.columns where table_name='{}'".format(tableName), con=credentials)
    return schema

def queryCreate(query):
    try:
        requiredTable = pd.read_sql(query, con=credentials)
    except Exception as e:
        print(e)  # handler body assumed: DDL run through read_sql raises because it returns no rows

def queryBase(query):
    requiredTable = pd.read_sql(query, con=credentials)
    return requiredTable

selectView = '''
select avg(rl.review_scores_rating) as avg_rating,
       count(rl.review_scores_accuracy) as num_reviews,
       rl.host_id
from review_listing rl
group by rl.host_id
having count(rl.review_scores_accuracy) > 10
limit 5
'''
queryBase(selectView)
code
122255609/cell_28
[ "text_html_output_1.png" ]
import pandas as pd

db = 'new_db'
user = 'postgres'
passwd = '1234'
port = 5432
host = 'ec2-44-201-58-213.compute-1.amazonaws.com'
credentials = 'postgresql://{}:{}@{}:{}/{}'.format(user, passwd, host, port, db)
credentials

def schemaGen(dataframe, schemaName):
    localSchema = pd.io.sql.get_schema(dataframe, schemaName)
    localSchema = localSchema.replace('TEXT', 'VARCHAR(255)').replace('INTEGER', 'NUMERIC').replace('\n', '').replace('"', '')
    return ''.join(localSchema)

def getSchema(tableName, credentials):
    schema = pd.read_sql("SELECT * FROM information_schema.columns where table_name='{}'".format(tableName), con=credentials)
    return schema

def queryCreate(query):
    try:
        requiredTable = pd.read_sql(query, con=credentials)
    except Exception as e:
        print(e)  # handler body assumed: DDL run through read_sql raises because it returns no rows

def queryBase(query):
    requiredTable = pd.read_sql(query, con=credentials)
    return requiredTable

introJoin = '''
select rd.listing_id, rd.reviewer_id, rd.reviewer_name,
       ld.host_id, ld."name" as listing_name
from reviews_details rd
inner join listing_details ld
on rd.listing_id = ld.id
limit 5
'''
queryBase(introJoin)
code
122255609/cell_15
[ "text_html_output_1.png" ]
import pandas as pd

db = 'new_db'
user = 'postgres'
passwd = '1234'
port = 5432
host = 'ec2-44-201-58-213.compute-1.amazonaws.com'
credentials = 'postgresql://{}:{}@{}:{}/{}'.format(user, passwd, host, port, db)
credentials

def schemaGen(dataframe, schemaName):
    localSchema = pd.io.sql.get_schema(dataframe, schemaName)
    localSchema = localSchema.replace('TEXT', 'VARCHAR(255)').replace('INTEGER', 'NUMERIC').replace('\n', '').replace('"', '')
    return ''.join(localSchema)

def getSchema(tableName, credentials):
    schema = pd.read_sql("SELECT * FROM information_schema.columns where table_name='{}'".format(tableName), con=credentials)
    return schema

def queryCreate(query):
    try:
        requiredTable = pd.read_sql(query, con=credentials)
    except Exception as e:
        print(e)  # handler body assumed: DDL run through read_sql raises because it returns no rows

introCTAS = '''
create table if not exists listing_neigh as
select ld.id, ld.summary, ld.description, ld.neighborhood_overview
from listing_details ld
'''
queryCreate(introCTAS)
code
122255609/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd

db = 'new_db'
user = 'postgres'
passwd = '1234'
port = 5432
host = 'ec2-44-201-58-213.compute-1.amazonaws.com'
credentials = 'postgresql://{}:{}@{}:{}/{}'.format(user, passwd, host, port, db)
credentials

def schemaGen(dataframe, schemaName):
    localSchema = pd.io.sql.get_schema(dataframe, schemaName)
    localSchema = localSchema.replace('TEXT', 'VARCHAR(255)').replace('INTEGER', 'NUMERIC').replace('\n', '').replace('"', '')
    return ''.join(localSchema)

def getSchema(tableName, credentials):
    schema = pd.read_sql("SELECT * FROM information_schema.columns where table_name='{}'".format(tableName), con=credentials)
    return schema

def queryCreate(query):
    try:
        requiredTable = pd.read_sql(query, con=credentials)
    except Exception as e:
        print(e)  # handler body assumed: DDL run through read_sql raises because it returns no rows

dropview = 'DROP VIEW IF EXISTS review_listing'
queryCreate(dropview)
code
122255609/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd

db = 'new_db'
user = 'postgres'
passwd = '1234'
port = 5432
host = 'ec2-44-201-58-213.compute-1.amazonaws.com'
credentials = 'postgresql://{}:{}@{}:{}/{}'.format(user, passwd, host, port, db)
credentials

def schemaGen(dataframe, schemaName):
    localSchema = pd.io.sql.get_schema(dataframe, schemaName)
    localSchema = localSchema.replace('TEXT', 'VARCHAR(255)').replace('INTEGER', 'NUMERIC').replace('\n', '').replace('"', '')
    return ''.join(localSchema)

def getSchema(tableName, credentials):
    schema = pd.read_sql("SELECT * FROM information_schema.columns where table_name='{}'".format(tableName), con=credentials)
    return schema

def queryCreate(query):
    try:
        requiredTable = pd.read_sql(query, con=credentials)
    except Exception as e:
        print(e)  # handler body assumed: DDL run through read_sql raises because it returns no rows

introView = '''
create view review_listing as
select ld.id, ld.host_id, ld.review_scores_rating, ld.review_scores_accuracy,
       ld.review_scores_checkin, ld.review_scores_value,
       ld.review_scores_communication, ld.review_scores_location
from listing_details ld
'''
queryCreate(introView)
code
122255609/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd

db = 'new_db'
user = 'postgres'
passwd = '1234'
port = 5432
host = 'ec2-44-201-58-213.compute-1.amazonaws.com'
credentials = 'postgresql://{}:{}@{}:{}/{}'.format(user, passwd, host, port, db)
credentials

def schemaGen(dataframe, schemaName):
    localSchema = pd.io.sql.get_schema(dataframe, schemaName)
    localSchema = localSchema.replace('TEXT', 'VARCHAR(255)').replace('INTEGER', 'NUMERIC').replace('\n', '').replace('"', '')
    return ''.join(localSchema)

def getSchema(tableName, credentials):
    schema = pd.read_sql("SELECT * FROM information_schema.columns where table_name='{}'".format(tableName), con=credentials)
    return schema

def queryCreate(query):
    try:
        requiredTable = pd.read_sql(query, con=credentials)
    except Exception as e:
        print(e)  # handler body assumed: DDL run through read_sql raises because it returns no rows

droptab = 'DROP TABLE IF EXISTS listing_neigh'
queryCreate(droptab)
code
122255609/cell_22
[ "text_html_output_1.png" ]
import pandas as pd

db = 'new_db'
user = 'postgres'
passwd = '1234'
port = 5432
host = 'ec2-44-201-58-213.compute-1.amazonaws.com'
credentials = 'postgresql://{}:{}@{}:{}/{}'.format(user, passwd, host, port, db)
credentials

def schemaGen(dataframe, schemaName):
    localSchema = pd.io.sql.get_schema(dataframe, schemaName)
    localSchema = localSchema.replace('TEXT', 'VARCHAR(255)').replace('INTEGER', 'NUMERIC').replace('\n', '').replace('"', '')
    return ''.join(localSchema)

def getSchema(tableName, credentials):
    schema = pd.read_sql("SELECT * FROM information_schema.columns where table_name='{}'".format(tableName), con=credentials)
    return schema

def queryCreate(query):
    try:
        requiredTable = pd.read_sql(query, con=credentials)
    except Exception as e:
        print(e)  # handler body assumed: DDL run through read_sql raises because it returns no rows

def queryBase(query):
    requiredTable = pd.read_sql(query, con=credentials)
    return requiredTable

introSQ = '''
select * from (
    select l.id, l.host_id, l.host_name, l.price
    from listings l
    where l.id > 2800
) as sq
where sq.host_id = 59484
'''
queryBase(introSQ)
code
73080198/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', low_memory=False)
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', low_memory=False)
cutoff = 5
print(f"{(train['target'] < cutoff).sum() / len(train) * 100:.3f}% of the target values are less than {cutoff}")
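# Small extension sketch (not in the original cell): the same share computed for
# several cutoffs at once shows how thin the left tail of the target is.
for c in [4, 5, 6, 7]:
    share = (train['target'] < c).mean() * 100
    print(f'{share:.3f}% of the target values are less than {c}')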
code
73080198/cell_9
[ "image_output_1.png" ]
from scipy import stats
import pandas as pd

train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', low_memory=False)
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', low_memory=False)
stats.mstats.skew(train['target']).data
stats.mstats.kurtosis(train['target'])
code
73080198/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', low_memory=False)
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', low_memory=False)
predictions_base = pd.read_csv('/kaggle/input/submissionstevenferrercsv/submissionStevenFerrer.csv', low_memory=False)
predictions_base.head()
code
73080198/cell_23
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', low_memory=False)
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', low_memory=False)

fig, ax = plt.subplots(figsize=(12, 6))
bars = ax.hist(train['target'], bins=40, range=(0, 11), color='orange', edgecolor='black')
ax.set_title('Target distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Target value', fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis='y')
plt.show()

fig, ax = plt.subplots(figsize=(24, 12))
bars = ax.hist(train['target'], bins=3500, range=(6.9, 10.4), color='orange', edgecolor='orange')
ax.set_title('Target distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Target value', fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis='y')
plt.show()

cuts = [i / 1000 for i in range(6940, 10400)]
train['target'].value_counts(bins=cuts)

fig, ax = plt.subplots(figsize=(24, 12))
bars = ax.hist(train['target'], bins=100, range=(8.05, 8.15), color='orange', edgecolor='black')
ax.set_title('Target distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Target value', fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis='y')
plt.show()

cuts = [i / 1000 for i in range(8050, 8150)]
train['target'].value_counts(bins=cuts).sort_index()[60:75]

# inverse_log = np.power(10, train['target'])
inverse_log = np.exp(train['target'])  # comment this out, uncomment line above to see 10^train["target"] instead of e^train["target"]
fig, ax = plt.subplots(figsize=(24, 12))
bars = ax.hist(inverse_log, range=(0, 40000), bins=4000, color='orange', edgecolor='orange')
ax.set_title('Target distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Target value', fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis='y')
plt.show()

cuts = [i for i in range(0, 40000, 10)]
inverse_log.value_counts(bins=cuts)
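# Hedged sketch (not part of the original cell): with bin counts spanning orders
# of magnitude, a logarithmic y-axis makes the sparse tails visible in one plot.
fig, ax = plt.subplots(figsize=(12, 6))
ax.hist(train['target'], bins=100, color='orange', edgecolor='black')
ax.set_yscale('log')
ax.set_title('Target distribution (log-scaled counts)')
plt.show()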
code
73080198/cell_20
[ "image_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', low_memory=False)
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', low_memory=False)
cuts = [i / 1000 for i in range(6940, 10400)]
train['target'].value_counts(bins=cuts)
cuts = [i / 1000 for i in range(8050, 8150)]
train['target'].value_counts(bins=cuts).sort_index()[60:75]
code
73080198/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', low_memory=False)
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', low_memory=False)
predictions_base = pd.read_csv('/kaggle/input/submissionstevenferrercsv/submissionStevenFerrer.csv', low_memory=False)
predictions_base.head()
code
73080198/cell_29
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', low_memory=False)
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', low_memory=False)

fig, ax = plt.subplots(figsize=(12, 6))
bars = ax.hist(train['target'], bins=40, range=(0, 11), color='orange', edgecolor='black')
ax.set_title('Target distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Target value', fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis='y')
plt.show()

fig, ax = plt.subplots(figsize=(24, 12))
bars = ax.hist(train['target'], bins=3500, range=(6.9, 10.4), color='orange', edgecolor='orange')
ax.set_title('Target distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Target value', fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis='y')
plt.show()

cuts = [i / 1000 for i in range(6940, 10400)]
train['target'].value_counts(bins=cuts)

fig, ax = plt.subplots(figsize=(24, 12))
bars = ax.hist(train['target'], bins=100, range=(8.05, 8.15), color='orange', edgecolor='black')
ax.set_title('Target distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Target value', fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis='y')
plt.show()

cuts = [i / 1000 for i in range(8050, 8150)]
train['target'].value_counts(bins=cuts).sort_index()[60:75]

# inverse_log = np.power(10, train['target'])
inverse_log = np.exp(train['target'])  # comment this out, uncomment line above to see 10^train["target"] instead of e^train["target"]
fig, ax = plt.subplots(figsize=(24, 12))
bars = ax.hist(inverse_log, range=(0, 40000), bins=4000, color='orange', edgecolor='orange')
ax.set_title('Target distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Target value', fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis='y')
plt.show()

cuts = [i for i in range(0, 40000, 10)]
inverse_log.value_counts(bins=cuts)
cuts = [i / 1000 for i in range(8050, 8150)]
train['target'].value_counts(bins=cuts).sort_index()[35:95]
code
73080198/cell_2
[ "text_plain_output_1.png" ]
import os
import random

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import OrdinalEncoder
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from xgboost import XGBRegressor

for dirname, _, filenames in os.walk('/kaggle/input/'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
73080198/cell_19
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', low_memory=False)
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', low_memory=False)

fig, ax = plt.subplots(figsize=(12, 6))
bars = ax.hist(train['target'], bins=40, range=(0, 11), color='orange', edgecolor='black')
ax.set_title('Target distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Target value', fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis='y')
plt.show()

fig, ax = plt.subplots(figsize=(24, 12))
bars = ax.hist(train['target'], bins=3500, range=(6.9, 10.4), color='orange', edgecolor='orange')
ax.set_title('Target distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Target value', fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis='y')
plt.show()

fig, ax = plt.subplots(figsize=(24, 12))
bars = ax.hist(train['target'], bins=100, range=(8.05, 8.15), color='orange', edgecolor='black')
ax.set_title('Target distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Target value', fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis='y')
plt.show()
code
73080198/cell_8
[ "text_plain_output_1.png" ]
from scipy import stats
import pandas as pd

train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', low_memory=False)
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', low_memory=False)
stats.mstats.skew(train['target']).data
code
73080198/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd

train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', low_memory=False)
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', low_memory=False)

fig, ax = plt.subplots(figsize=(12, 6))
bars = ax.hist(train['target'], bins=40, range=(0, 11), color='orange', edgecolor='black')
ax.set_title('Target distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Target value', fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis='y')
plt.show()

fig, ax = plt.subplots(figsize=(24, 12))
bars = ax.hist(train['target'], bins=3500, range=(6.9, 10.4), color='orange', edgecolor='orange')
ax.set_title('Target distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Target value', fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis='y')
plt.show()
code
73080198/cell_17
[ "text_html_output_1.png" ]
import pandas as pd

train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', low_memory=False)
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', low_memory=False)
cuts = [i / 1000 for i in range(6940, 10400)]
train['target'].value_counts(bins=cuts)
code
73080198/cell_31
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv', low_memory=False)
test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv', low_memory=False)
predictions_base = pd.read_csv('/kaggle/input/submissionstevenferrercsv/submissionStevenFerrer.csv', low_memory=False)

fig, ax = plt.subplots(figsize=(12, 6))
bars = ax.hist(train['target'], bins=40, range=(0, 11), color='orange', edgecolor='black')
ax.set_title('Target distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Target value', fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis='y')
plt.show()

fig, ax = plt.subplots(figsize=(24, 12))
bars = ax.hist(train['target'], bins=3500, range=(6.9, 10.4), color='orange', edgecolor='orange')
ax.set_title('Target distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Target value', fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis='y')
plt.show()

cuts = [i / 1000 for i in range(6940, 10400)]
train['target'].value_counts(bins=cuts)

fig, ax = plt.subplots(figsize=(24, 12))
bars = ax.hist(train['target'], bins=100, range=(8.05, 8.15), color='orange', edgecolor='black')
ax.set_title('Target distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Target value', fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis='y')
plt.show()

cuts = [i / 1000 for i in range(8050, 8150)]
train['target'].value_counts(bins=cuts).sort_index()[60:75]

# inverse_log = np.power(10, train['target'])
inverse_log = np.exp(train['target'])  # comment this out, uncomment line above to see 10^train["target"] instead of e^train["target"]
fig, ax = plt.subplots(figsize=(24, 12))
bars = ax.hist(inverse_log, range=(0, 40000), bins=4000, color='orange', edgecolor='orange')
ax.set_title('Target distribution', fontsize=20, pad=15)
ax.set_ylabel('Amount of values', fontsize=14, labelpad=15)
ax.set_xlabel('Target value', fontsize=14, labelpad=10)
ax.margins(0.025, 0.12)
ax.grid(axis='y')
plt.show()

cuts = [i for i in range(0, 40000, 10)]
inverse_log.value_counts(bins=cuts)
cuts = [i / 1000 for i in range(8050, 8150)]
train['target'].value_counts(bins=cuts).sort_index()[35:95]
predictions_base['target'].value_counts(bins=cuts).sort_index().tail(50)
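# Comparison sketch (not in the original cell): overlaying the training target and
# the submission's predictions on shared bins shows how much narrower the
# predicted distribution is than the target it approximates.
fig, ax = plt.subplots(figsize=(12, 6))
ax.hist(train['target'], bins=100, range=(6.9, 10.4), alpha=0.5, label='train target')
ax.hist(predictions_base['target'], bins=100, range=(6.9, 10.4), alpha=0.5, label='predictions')
ax.legend()
plt.show()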
code