Dataset schema (per record):

  path              string    lengths 13 to 17
  screenshot_names  sequence  lengths 1 to 873
  code              string    lengths 0 to 40.4k
  cell_type         string    1 class ("code")
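Each record pairs a notebook cell's source ("code") with the rendered outputs captured for it ("screenshot_names"); "path" is "<kernel_id>/<cell_id>". A minimal sketch of iterating such records, assuming they are exported as JSON Lines (the filename cells.jsonl is hypothetical):

import json

with open('cells.jsonl') as f:
    for line in f:
        record = json.loads(line)  # keys: path, screenshot_names, code, cell_type
        kernel_id, cell_id = record['path'].split('/')
        print(kernel_id, cell_id, len(record['code']), record['screenshot_names'])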
104114403/cell_53
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, roc_auc_score, auc
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

pima = pd.read_csv('../input/pimacsv/pima.csv')
pima.shape
# Your code (first create a correlation matrix and store it in 'corm' variable, and uncomment lines below)
corm = pima.iloc[:, :-1].corr()
masko = np.zeros_like(corm, dtype=bool)  # np.bool was removed from NumPy; use the builtin bool
masko[np.triu_indices_from(masko)] = True
fig, ax = plt.subplots(figsize=(10, 5))
sns.heatmap(corm, mask=masko, cmap='coolwarm', annot=True)
X = pima.loc[:, pima.columns != 'Outcome']
y = pima.loc[:, 'Outcome']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=10)
logr = LogisticRegression(random_state=0)
logr.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score
ypred_train_logr = logr.predict(X_train)
ypred_test_logr = logr.predict(X_test)
yprob_test_logr = logr.predict_proba(X_test)
yprob_test_logr[0:5, :].round(3)
fpr_logr, tpr_logr, _ = roc_curve(y_test, yprob_test_logr[:, 1])
auc_logr = auc(fpr_logr, tpr_logr)
fig = plt.figure(figsize=(8, 5))
plt.plot(fpr_logr, tpr_logr, label='AUC score is : ' + str(auc_logr))
plt.xlabel('fpr', fontsize=10)
plt.ylabel('tpr', fontsize=10)
plt.xlim([-0.01, 1])
plt.ylim([0, 1.01])
plt.legend()
plt.plot([0, 1], [0, 1], 'r--')
plt.show()
print('AUC Score for logistic regression is', roc_auc_score(y_test, yprob_test_logr[:, 1]))
code
104114403/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

pima = pd.read_csv('../input/pimacsv/pima.csv')
pima.shape
code
104114403/cell_37
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns

pima = pd.read_csv('../input/pimacsv/pima.csv')
pima.shape
# Your code (first create a correlation matrix and store it in 'corm' variable, and uncomment lines below)
corm = pima.iloc[:, :-1].corr()
masko = np.zeros_like(corm, dtype=bool)  # np.bool was removed from NumPy; use the builtin bool
masko[np.triu_indices_from(masko)] = True
fig, ax = plt.subplots(figsize=(10, 5))
sns.heatmap(corm, mask=masko, cmap='coolwarm', annot=True)
X = pima.loc[:, pima.columns != 'Outcome']
y = pima.loc[:, 'Outcome']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=10)
logr = LogisticRegression(random_state=0)
logr.fit(X_train, y_train)
code
104114403/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd

pima = pd.read_csv('../input/pimacsv/pima.csv')
pima.shape
pima['Outcome'].value_counts()
code
104114403/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd

pima = pd.read_csv('../input/pimacsv/pima.csv')
pima.head(3)
code
130027731/cell_42
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
# keep only the continuous columns (exclude the categoricals and the target;
# the captured loop appended every column, which breaks the float stack later)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
df.shape
cat_dims = [len(df[col].unique()) for col in ['MSSubClass', 'MSZoning', 'Street', 'LotShape']]
embedding_dim = [(x, min(50, (x + 1) // 2)) for x in cat_dims]
embedding_dim
code
130027731/cell_21
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
lbl_encoders = {}
lbl_encoders['MSSubClass'] = LabelEncoder()
lbl_encoders['MSSubClass'].fit_transform(df['MSSubClass'])
lbl_encoders
code
130027731/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
for i in df.columns:
    print('Column name {} and unique values are {} '.format(i, len(df[i].unique())))
code
130027731/cell_25
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
cat_features = ['MSSubClass', 'MSZoning', 'Street', 'LotShape']
out_features = 'SalePrice'
from sklearn.preprocessing import LabelEncoder
for col in cat_features:  # assumed: label-encoded in an uncaptured cell; needed so the stack below is numeric
    df[col] = LabelEncoder().fit_transform(df[col])
cat_features = np.stack([df['MSSubClass'], df['MSZoning'], df['Street'], df['LotShape']], 1)
cat_features
cat_features = torch.tensor(cat_features, dtype=torch.int64)
cat_features
code
130027731/cell_57
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch.nn as nn
import torch.nn.functional as F

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
cat_features = ['MSSubClass', 'MSZoning', 'Street', 'LotShape']
out_features = 'SalePrice'
from sklearn.preprocessing import LabelEncoder
for col in cat_features:  # assumed: label-encoded in an uncaptured cell; needed so the stack below is numeric
    df[col] = LabelEncoder().fit_transform(df[col])
cat_features = np.stack([df['MSSubClass'], df['MSZoning'], df['Street'], df['LotShape']], 1)
cat_features
cat_features = torch.tensor(cat_features, dtype=torch.int64)
cat_features
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
cont_values = np.stack([df[i].values for i in cont_features], axis=1)
cont_values = torch.tensor(cont_values, dtype=torch.float)
cont_values
cont_values.dtype
y = torch.tensor(df['SalePrice'].values, dtype=torch.float).reshape(-1, 1)
y
(cat_features.shape, cont_values.shape, y.shape)
df.shape
cat_dims = [len(df[col].unique()) for col in ['MSSubClass', 'MSZoning', 'Street', 'LotShape']]
embedding_dim = [(x, min(50, (x + 1) // 2)) for x in cat_dims]
embed_representation = nn.ModuleList([nn.Embedding(inp, out) for inp, out in embedding_dim])
embed_representation
pd.set_option('display.max_rows', 500)
embedding_val = []
for i, e in enumerate(embed_representation):
    embedding_val.append(e(cat_features[:, i]))
z = torch.cat(embedding_val, 1)
z
dropout = nn.Dropout(0.4)

class FeedForwardNN(nn.Module):

    def __init__(self, embedding_dim, n_cont, out_sz, layers, p=0.5):
        super().__init__()
        self.embeds = nn.ModuleList([nn.Embedding(inp, out) for inp, out in embedding_dim])
        self.emb_drop = nn.Dropout(p)
        self.bn_cont = nn.BatchNorm1d(n_cont)
        layerlist = []
        n_emb = sum(out for inp, out in embedding_dim)
        n_in = n_emb + n_cont
        for i in layers:
            layerlist.append(nn.Linear(n_in, i))
            layerlist.append(nn.ReLU(inplace=True))
            layerlist.append(nn.BatchNorm1d(i))
            layerlist.append(nn.Dropout(p))
            n_in = i
        layerlist.append(nn.Linear(layers[-1], out_sz))
        self.layers = nn.Sequential(*layerlist)

    def forward(self, x_cat, x_cont):
        embeddings = []
        for i, e in enumerate(self.embeds):
            embeddings.append(e(x_cat[:, i]))
        x = torch.cat(embeddings, 1)
        x = self.emb_drop(x)
        x_cont = self.bn_cont(x_cont)
        x = torch.cat([x, x_cont], 1)
        x = self.layers(x)
        return x

torch.manual_seed(100)
model = FeedForwardNN(embedding_dim, len(cont_features), 1, [100, 50], p=0.4)
model.parameters
code
130027731/cell_34
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
df.shape
code
130027731/cell_23
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
df
code
130027731/cell_30
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
cat_features = ['MSSubClass', 'MSZoning', 'Street', 'LotShape']
out_features = 'SalePrice'
from sklearn.preprocessing import LabelEncoder
for col in cat_features:  # assumed: label-encoded in an uncaptured cell; needed so the stack below is numeric
    df[col] = LabelEncoder().fit_transform(df[col])
cat_features = np.stack([df['MSSubClass'], df['MSZoning'], df['Street'], df['LotShape']], 1)
cat_features
cat_features = torch.tensor(cat_features, dtype=torch.int64)
cat_features
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
cont_values = np.stack([df[i].values for i in cont_features], axis=1)
cont_values = torch.tensor(cont_values, dtype=torch.float)
cont_values
y = torch.tensor(df['SalePrice'].values, dtype=torch.float).reshape(-1, 1)
y
code
130027731/cell_33
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
len(df['MSSubClass'].unique())
code
130027731/cell_44
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
cat_features = ['MSSubClass', 'MSZoning', 'Street', 'LotShape']
out_features = 'SalePrice'
from sklearn.preprocessing import LabelEncoder
for col in cat_features:  # assumed: label-encoded in an uncaptured cell; needed so the stack below is numeric
    df[col] = LabelEncoder().fit_transform(df[col])
cat_features = np.stack([df['MSSubClass'], df['MSZoning'], df['Street'], df['LotShape']], 1)
cat_features
cat_features = torch.tensor(cat_features, dtype=torch.int64)
cat_features
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
cont_values = np.stack([df[i].values for i in cont_features], axis=1)
cont_values = torch.tensor(cont_values, dtype=torch.float)
cont_values
cont_values.dtype
y = torch.tensor(df['SalePrice'].values, dtype=torch.float).reshape(-1, 1)
y
(cat_features.shape, cont_values.shape, y.shape)
cat_features
code
130027731/cell_20
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
lbl_encoders = {}
lbl_encoders['MSSubClass'] = LabelEncoder()
lbl_encoders['MSSubClass'].fit_transform(df['MSSubClass'])
code
130027731/cell_55
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch.nn as nn
import torch.nn.functional as F

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
cat_features = ['MSSubClass', 'MSZoning', 'Street', 'LotShape']
out_features = 'SalePrice'
from sklearn.preprocessing import LabelEncoder
for col in cat_features:  # assumed: label-encoded in an uncaptured cell; needed so the stack below is numeric
    df[col] = LabelEncoder().fit_transform(df[col])
cat_features = np.stack([df['MSSubClass'], df['MSZoning'], df['Street'], df['LotShape']], 1)
cat_features
cat_features = torch.tensor(cat_features, dtype=torch.int64)
cat_features
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
cont_values = np.stack([df[i].values for i in cont_features], axis=1)
cont_values = torch.tensor(cont_values, dtype=torch.float)
cont_values
cont_values.dtype
y = torch.tensor(df['SalePrice'].values, dtype=torch.float).reshape(-1, 1)
y
(cat_features.shape, cont_values.shape, y.shape)
df.shape
cat_dims = [len(df[col].unique()) for col in ['MSSubClass', 'MSZoning', 'Street', 'LotShape']]
embedding_dim = [(x, min(50, (x + 1) // 2)) for x in cat_dims]
embed_representation = nn.ModuleList([nn.Embedding(inp, out) for inp, out in embedding_dim])
embed_representation
pd.set_option('display.max_rows', 500)
embedding_val = []
for i, e in enumerate(embed_representation):
    embedding_val.append(e(cat_features[:, i]))
z = torch.cat(embedding_val, 1)
z
dropout = nn.Dropout(0.4)

class FeedForwardNN(nn.Module):

    def __init__(self, embedding_dim, n_cont, out_sz, layers, p=0.5):
        super().__init__()
        self.embeds = nn.ModuleList([nn.Embedding(inp, out) for inp, out in embedding_dim])
        self.emb_drop = nn.Dropout(p)
        self.bn_cont = nn.BatchNorm1d(n_cont)
        layerlist = []
        n_emb = sum(out for inp, out in embedding_dim)
        n_in = n_emb + n_cont
        for i in layers:
            layerlist.append(nn.Linear(n_in, i))
            layerlist.append(nn.ReLU(inplace=True))
            layerlist.append(nn.BatchNorm1d(i))
            layerlist.append(nn.Dropout(p))
            n_in = i
        layerlist.append(nn.Linear(layers[-1], out_sz))
        self.layers = nn.Sequential(*layerlist)

    def forward(self, x_cat, x_cont):
        embeddings = []
        for i, e in enumerate(self.embeds):
            embeddings.append(e(x_cat[:, i]))
        x = torch.cat(embeddings, 1)
        x = self.emb_drop(x)
        x_cont = self.bn_cont(x_cont)
        x = torch.cat([x, x_cont], 1)
        x = self.layers(x)
        return x

torch.manual_seed(100)
model = FeedForwardNN(embedding_dim, len(cont_features), 1, [100, 50], p=0.4)
model
code
130027731/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.head()
code
130027731/cell_40
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
df.shape
cat_dims = [len(df[col].unique()) for col in ['MSSubClass', 'MSZoning', 'Street', 'LotShape']]
cat_dims
code
130027731/cell_29
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
cat_features = ['MSSubClass', 'MSZoning', 'Street', 'LotShape']
out_features = 'SalePrice'
from sklearn.preprocessing import LabelEncoder
for col in cat_features:  # assumed: label-encoded in an uncaptured cell; needed so the stack below is numeric
    df[col] = LabelEncoder().fit_transform(df[col])
cat_features = np.stack([df['MSSubClass'], df['MSZoning'], df['Street'], df['LotShape']], 1)
cat_features
cat_features = torch.tensor(cat_features, dtype=torch.int64)
cat_features
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
cont_values = np.stack([df[i].values for i in cont_features], axis=1)
cont_values = torch.tensor(cont_values, dtype=torch.float)
cont_values
cont_values.dtype
code
130027731/cell_39
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
df.shape
len(df['MSSubClass'].unique())
code
130027731/cell_48
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch.nn as nn
import torch.nn.functional as F

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
cat_features = ['MSSubClass', 'MSZoning', 'Street', 'LotShape']
out_features = 'SalePrice'
from sklearn.preprocessing import LabelEncoder
for col in cat_features:  # assumed: label-encoded in an uncaptured cell; needed so the stack below is numeric
    df[col] = LabelEncoder().fit_transform(df[col])
cat_features = np.stack([df['MSSubClass'], df['MSZoning'], df['Street'], df['LotShape']], 1)
cat_features
cat_features = torch.tensor(cat_features, dtype=torch.int64)
cat_features
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
cont_values = np.stack([df[i].values for i in cont_features], axis=1)
cont_values = torch.tensor(cont_values, dtype=torch.float)
cont_values
cont_values.dtype
y = torch.tensor(df['SalePrice'].values, dtype=torch.float).reshape(-1, 1)
y
(cat_features.shape, cont_values.shape, y.shape)
df.shape
cat_dims = [len(df[col].unique()) for col in ['MSSubClass', 'MSZoning', 'Street', 'LotShape']]
embedding_dim = [(x, min(50, (x + 1) // 2)) for x in cat_dims]
embed_representation = nn.ModuleList([nn.Embedding(inp, out) for inp, out in embedding_dim])
embed_representation
pd.set_option('display.max_rows', 500)
embedding_val = []
for i, e in enumerate(embed_representation):
    embedding_val.append(e(cat_features[:, i]))
z = torch.cat(embedding_val, 1)
z
code
130027731/cell_61
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
cat_features = ['MSSubClass', 'MSZoning', 'Street', 'LotShape']
out_features = 'SalePrice'
from sklearn.preprocessing import LabelEncoder
for col in cat_features:  # assumed: label-encoded in an uncaptured cell; needed so the stack below is numeric
    df[col] = LabelEncoder().fit_transform(df[col])
cat_features = np.stack([df['MSSubClass'], df['MSZoning'], df['Street'], df['LotShape']], 1)
cat_features
cat_features = torch.tensor(cat_features, dtype=torch.int64)
cat_features
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
cont_values = np.stack([df[i].values for i in cont_features], axis=1)
cont_values = torch.tensor(cont_values, dtype=torch.float)
cont_values
cont_values.dtype
y = torch.tensor(df['SalePrice'].values, dtype=torch.float).reshape(-1, 1)
y
(cat_features.shape, cont_values.shape, y.shape)
cont_values.shape
code
130027731/cell_60
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
cat_features = ['MSSubClass', 'MSZoning', 'Street', 'LotShape']
out_features = 'SalePrice'
from sklearn.preprocessing import LabelEncoder
for col in cat_features:  # assumed: label-encoded in an uncaptured cell; needed so the stack below is numeric
    df[col] = LabelEncoder().fit_transform(df[col])
cat_features = np.stack([df['MSSubClass'], df['MSZoning'], df['Street'], df['LotShape']], 1)
cat_features
cat_features = torch.tensor(cat_features, dtype=torch.int64)
cat_features
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
cont_values = np.stack([df[i].values for i in cont_features], axis=1)
cont_values = torch.tensor(cont_values, dtype=torch.float)
cont_values
cont_values.dtype
y = torch.tensor(df['SalePrice'].values, dtype=torch.float).reshape(-1, 1)
y
(cat_features.shape, cont_values.shape, y.shape)
cont_values
code
130027731/cell_50
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch.nn as nn
import torch.nn.functional as F

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
cat_features = ['MSSubClass', 'MSZoning', 'Street', 'LotShape']
out_features = 'SalePrice'
from sklearn.preprocessing import LabelEncoder
for col in cat_features:  # assumed: label-encoded in an uncaptured cell; needed so the stack below is numeric
    df[col] = LabelEncoder().fit_transform(df[col])
cat_features = np.stack([df['MSSubClass'], df['MSZoning'], df['Street'], df['LotShape']], 1)
cat_features
cat_features = torch.tensor(cat_features, dtype=torch.int64)
cat_features
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
cont_values = np.stack([df[i].values for i in cont_features], axis=1)
cont_values = torch.tensor(cont_values, dtype=torch.float)
cont_values
cont_values.dtype
y = torch.tensor(df['SalePrice'].values, dtype=torch.float).reshape(-1, 1)
y
(cat_features.shape, cont_values.shape, y.shape)
df.shape
cat_dims = [len(df[col].unique()) for col in ['MSSubClass', 'MSZoning', 'Street', 'LotShape']]
embedding_dim = [(x, min(50, (x + 1) // 2)) for x in cat_dims]
embed_representation = nn.ModuleList([nn.Embedding(inp, out) for inp, out in embedding_dim])
embed_representation
pd.set_option('display.max_rows', 500)
embedding_val = []
for i, e in enumerate(embed_representation):
    embedding_val.append(e(cat_features[:, i]))
z = torch.cat(embedding_val, 1)
z
dropout = nn.Dropout(0.4)
final_embed = dropout(z)
final_embed
code
130027731/cell_52
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
len(cont_features)
code
130027731/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
130027731/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.info()
code
130027731/cell_45
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
cat_features = ['MSSubClass', 'MSZoning', 'Street', 'LotShape']
out_features = 'SalePrice'
from sklearn.preprocessing import LabelEncoder
for col in cat_features:  # assumed: label-encoded in an uncaptured cell; needed so the stack below is numeric
    df[col] = LabelEncoder().fit_transform(df[col])
cat_features = np.stack([df['MSSubClass'], df['MSZoning'], df['Street'], df['LotShape']], 1)
cat_features
cat_features = torch.tensor(cat_features, dtype=torch.int64)
cat_features
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
cont_values = np.stack([df[i].values for i in cont_features], axis=1)
cont_values = torch.tensor(cont_values, dtype=torch.float)
cont_values
cont_values.dtype
y = torch.tensor(df['SalePrice'].values, dtype=torch.float).reshape(-1, 1)
y
(cat_features.shape, cont_values.shape, y.shape)
cat_featuresz = cat_features[:4]
cat_featuresz
code
130027731/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
df['MSSubClass'].unique()
code
130027731/cell_32
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
cat_features = ['MSSubClass', 'MSZoning', 'Street', 'LotShape']
out_features = 'SalePrice'
from sklearn.preprocessing import LabelEncoder
for col in cat_features:  # assumed: label-encoded in an uncaptured cell; needed so the stack below is numeric
    df[col] = LabelEncoder().fit_transform(df[col])
cat_features = np.stack([df['MSSubClass'], df['MSZoning'], df['Street'], df['LotShape']], 1)
cat_features
cat_features = torch.tensor(cat_features, dtype=torch.int64)
cat_features
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
cont_values = np.stack([df[i].values for i in cont_features], axis=1)
cont_values = torch.tensor(cont_values, dtype=torch.float)
cont_values
cont_values.dtype
y = torch.tensor(df['SalePrice'].values, dtype=torch.float).reshape(-1, 1)
y
(cat_features.shape, cont_values.shape, y.shape)
code
130027731/cell_59
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
df.shape
df.shape
code
130027731/cell_28
[ "text_html_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
cat_features = ['MSSubClass', 'MSZoning', 'Street', 'LotShape']
out_features = 'SalePrice'
from sklearn.preprocessing import LabelEncoder
for col in cat_features:  # assumed: label-encoded in an uncaptured cell; needed so the stack below is numeric
    df[col] = LabelEncoder().fit_transform(df[col])
cat_features = np.stack([df['MSSubClass'], df['MSZoning'], df['Street'], df['LotShape']], 1)
cat_features
cat_features = torch.tensor(cat_features, dtype=torch.int64)
cat_features
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
cont_values = np.stack([df[i].values for i in cont_features], axis=1)
cont_values = torch.tensor(cont_values, dtype=torch.float)
cont_values
code
130027731/cell_15
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
code
130027731/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
df.head()
code
130027731/cell_47
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch.nn as nn
import torch.nn.functional as F

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
cat_features = ['MSSubClass', 'MSZoning', 'Street', 'LotShape']
out_features = 'SalePrice'
from sklearn.preprocessing import LabelEncoder
for col in cat_features:  # assumed: label-encoded in an uncaptured cell; needed so the stack below is numeric
    df[col] = LabelEncoder().fit_transform(df[col])
cat_features = np.stack([df['MSSubClass'], df['MSZoning'], df['Street'], df['LotShape']], 1)
cat_features
cat_features = torch.tensor(cat_features, dtype=torch.int64)
cat_features
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
cont_values = np.stack([df[i].values for i in cont_features], axis=1)
cont_values = torch.tensor(cont_values, dtype=torch.float)
cont_values
cont_values.dtype
y = torch.tensor(df['SalePrice'].values, dtype=torch.float).reshape(-1, 1)
y
(cat_features.shape, cont_values.shape, y.shape)
df.shape
cat_dims = [len(df[col].unique()) for col in ['MSSubClass', 'MSZoning', 'Street', 'LotShape']]
embedding_dim = [(x, min(50, (x + 1) // 2)) for x in cat_dims]
embed_representation = nn.ModuleList([nn.Embedding(inp, out) for inp, out in embedding_dim])
embed_representation
pd.set_option('display.max_rows', 500)
embedding_val = []
for i, e in enumerate(embed_representation):
    embedding_val.append(e(cat_features[:, i]))
embedding_val
code
130027731/cell_43
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch.nn as nn
import torch.nn.functional as F

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
df.shape
cat_dims = [len(df[col].unique()) for col in ['MSSubClass', 'MSZoning', 'Street', 'LotShape']]
embedding_dim = [(x, min(50, (x + 1) // 2)) for x in cat_dims]
embed_representation = nn.ModuleList([nn.Embedding(inp, out) for inp, out in embedding_dim])
embed_representation
code
130027731/cell_31
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
df.info()
code
130027731/cell_24
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
cat_features = ['MSSubClass', 'MSZoning', 'Street', 'LotShape']
out_features = 'SalePrice'
from sklearn.preprocessing import LabelEncoder
for col in cat_features:  # assumed: label-encoded in an uncaptured cell, as in the cells that tensorize this stack
    df[col] = LabelEncoder().fit_transform(df[col])
cat_features = np.stack([df['MSSubClass'], df['MSZoning'], df['Street'], df['LotShape']], 1)
cat_features
code
130027731/cell_27
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
cont_features
code
130027731/cell_37
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
df.drop('YearBuilt', axis=1, inplace=True)
df.columns
# keep only the continuous columns (exclude the categoricals and the target)
cont_features = []
for i in df.columns:
    if i not in ['MSSubClass', 'MSZoning', 'Street', 'LotShape', 'SalePrice']:
        cont_features.append(i)
df.shape
cat_dims = [len(df[col].unique()) for col in ['MSSubClass', 'MSZoning', 'Street', 'LotShape']]
cat_dims
code
130027731/cell_12
[ "text_plain_output_1.png" ]
import datetime

datetime.datetime.now().year
code
130027731/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/advancehousepriceprediction/train.csv', usecols=['SalePrice', 'MSSubClass', 'MSZoning', 'LotFrontage', 'LotArea', 'Street', 'YearBuilt', 'LotShape', '1stFlrSF', '2ndFlrSF']).dropna()
df.shape
code
17115578/cell_13
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_levels = pd.read_csv('../input/chennai_reservoir_levels.csv')
df_levels['Index'] = pd.to_datetime(df_levels['Date'])
df_levels.index = df_levels['Index']
del df_levels['Date']
del df_levels['Index']
df_levels.shape
df_levels.index.nunique()  # the 'Index' column was deleted above; use the DatetimeIndex
plt.tight_layout()
df_levels['CHEMBARAMBAKKAM'].plot()
plt.xlabel('CHEMBARAMBAKKAM')
plt.tight_layout()
code
17115578/cell_9
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_levels = pd.read_csv('../input/chennai_reservoir_levels.csv')
df_levels['Index'] = pd.to_datetime(df_levels['Date'])
df_levels.index = df_levels['Index']
del df_levels['Date']
del df_levels['Index']
df_levels.shape
df_levels.index.nunique()  # the 'Index' column was deleted above; the original raised a KeyError here
code
17115578/cell_25
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_levels = pd.read_csv('../input/chennai_reservoir_levels.csv')
df_levels['Index'] = pd.to_datetime(df_levels['Date'])
df_levels.index = df_levels['Index']
del df_levels['Date']
del df_levels['Index']
df_levels.shape
df_levels.index.nunique()  # the 'Index' column was deleted above; use the DatetimeIndex
plt.tight_layout()
df_rainfall = pd.read_csv('../input/chennai_reservoir_rainfall.csv')
df_rainfall.index = pd.to_datetime(df_rainfall['Date'])
df_rain = pd.read_csv('../input/chennai_reservoir_rainfall.csv')  # assumed: df_rain is used below but never defined in the captured cells
df_rain['Index'] = pd.to_datetime(df_rain['Date'])
df_rain.index = df_rain['Index']
del df_rain['Date']
plt.subplot(411)
plt.plot(df_rain['Index'], df_rain['POONDI'])
plt.xlabel('Poondi')
plt.tight_layout()
plt.subplot(412)
plt.plot(df_rain['CHOLAVARAM'])
plt.xlabel('CHOLAVARAM')
plt.tight_layout()
plt.subplot(413)
plt.plot(df_rain['REDHILLS'])
plt.xlabel('REDHILLS')
plt.tight_layout()
plt.subplot(414)
plt.plot(df_rain['CHEMBARAMBAKKAM'])
plt.xlabel('CHEMBARAMBAKKAM')
plt.tight_layout()
code
17115578/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_levels = pd.read_csv('../input/chennai_reservoir_levels.csv')
df_levels.head(2)
code
17115578/cell_23
[ "text_html_output_1.png" ]
df_rain.describe()
code
17115578/cell_20
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_levels = pd.read_csv('../input/chennai_reservoir_levels.csv')
df_levels['Index'] = pd.to_datetime(df_levels['Date'])
df_levels.index = df_levels['Index']
del df_levels['Date']
del df_levels['Index']
df_rainfall = pd.read_csv('../input/chennai_reservoir_rainfall.csv')
df_rainfall.index = pd.to_datetime(df_rainfall['Date'])
df_rainfall['Year'] = df_rainfall.index.year
df_rainfall['Month'] = df_rainfall.index.month
df_rainfall['Weekday Name'] = df_rainfall.index.day_name()  # .weekday_name was removed from pandas; day_name() is the replacement
df_rainfall.sample(5, random_state=0)
code
17115578/cell_6
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_levels = pd.read_csv('../input/chennai_reservoir_levels.csv')
df_levels['Index'] = pd.to_datetime(df_levels['Date'])
df_levels.index = df_levels['Index']
del df_levels['Date']
del df_levels['Index']
df_levels.head(2)
code
17115578/cell_2
[ "image_output_1.png" ]
import numpy as np
import pandas as pd
import os

print(os.listdir('../input'))
code
17115578/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt

"""
plt.subplot(411)
plt.plot(df_rain['POONDI'])
plt.xlabel('Poondi')
plt.tight_layout()
plt.subplot(412)
plt.plot(df_levels['CHOLAVARAM'])
plt.xlabel('CHOLAVARAM')
plt.tight_layout()
plt.subplot(413)
plt.plot(df_levels['REDHILLS'])
plt.xlabel('REDHILLS')
plt.tight_layout()
plt.subplot(414)
plt.plot(df_levels['CHEMBARAMBAKKAM'])
plt.xlabel('CHEMBARAMBAKKAM')
plt.tight_layout()
"""
code
17115578/cell_19
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_levels = pd.read_csv('../input/chennai_reservoir_levels.csv')
df_levels['Index'] = pd.to_datetime(df_levels['Date'])
df_levels.index = df_levels['Index']
del df_levels['Date']
del df_levels['Index']
df_levels.shape
df_levels.index.nunique()  # the 'Index' column was deleted above; use the DatetimeIndex
plt.tight_layout()
df_rainfall = pd.read_csv('../input/chennai_reservoir_rainfall.csv')
df_rainfall.index = pd.to_datetime(df_rainfall['Date'])
plt.figure(figsize=(10, 5))
df_rainfall['POONDI'].plot()
code
17115578/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_levels = pd.read_csv('../input/chennai_reservoir_levels.csv')
df_levels['Index'] = pd.to_datetime(df_levels['Date'])
df_levels.index = df_levels['Index']
del df_levels['Date']
del df_levels['Index']
df_levels.head(2)
code
17115578/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_levels = pd.read_csv('../input/chennai_reservoir_levels.csv')
df_levels['Index'] = pd.to_datetime(df_levels['Date'])
df_levels.index = df_levels['Index']
del df_levels['Date']
del df_levels['Index']
df_rainfall = pd.read_csv('../input/chennai_reservoir_rainfall.csv')
df_rainfall.index = pd.to_datetime(df_rainfall['Date'])
df_rainfall.head(2)
code
17115578/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_levels = pd.read_csv('../input/chennai_reservoir_levels.csv')
df_levels['Index'] = pd.to_datetime(df_levels['Date'])
df_levels.index = df_levels['Index']
del df_levels['Date']
del df_levels['Index']
df_levels.tail(2)
code
17115578/cell_15
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_levels = pd.read_csv('../input/chennai_reservoir_levels.csv')
df_levels['Index'] = pd.to_datetime(df_levels['Date'])
df_levels.index = df_levels['Index']
del df_levels['Date']
del df_levels['Index']
df_rainfall = pd.read_csv('../input/chennai_reservoir_rainfall.csv')
df_rainfall.head()
code
17115578/cell_16
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_levels = pd.read_csv('../input/chennai_reservoir_levels.csv')
df_levels['Index'] = pd.to_datetime(df_levels['Date'])
df_levels.index = df_levels['Index']
del df_levels['Date']
del df_levels['Index']
df_rainfall = pd.read_csv('../input/chennai_reservoir_rainfall.csv')
df_rainfall.index = pd.to_datetime(df_rainfall['Date'])
df_rainfall.head(2)
code
17115578/cell_24
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_levels = pd.read_csv('../input/chennai_reservoir_levels.csv')
df_levels['Index'] = pd.to_datetime(df_levels['Date'])
df_levels.index = df_levels['Index']
del df_levels['Date']
del df_levels['Index']
df_rainfall = pd.read_csv('../input/chennai_reservoir_rainfall.csv')
df_rainfall.index = pd.to_datetime(df_rainfall['Date'])
df_rain = pd.read_csv('../input/chennai_reservoir_rainfall.csv')  # assumed: df_rain is used below but never defined in the captured cells
df_rain['Index'] = pd.to_datetime(df_rain['Date'])
df_rain.index = df_rain['Index']
del df_rain['Date']
df_rain.head(2)
code
17115578/cell_14
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_levels = pd.read_csv('../input/chennai_reservoir_levels.csv')
df_levels['Index'] = pd.to_datetime(df_levels['Date'])
df_levels.index = df_levels['Index']
del df_levels['Date']
del df_levels['Index']
df_levels.shape
df_levels.index.nunique()  # the 'Index' column was deleted above; use the DatetimeIndex
plt.tight_layout()
df_levels['CHOLAVARAM'].plot()
plt.xlabel('CHOLAVARAM')
plt.tight_layout()
code
17115578/cell_22
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_levels = pd.read_csv('../input/chennai_reservoir_levels.csv')
df_levels['Index'] = pd.to_datetime(df_levels['Date'])
df_levels.index = df_levels['Index']
del df_levels['Date']
del df_levels['Index']
df_rainfall = pd.read_csv('../input/chennai_reservoir_rainfall.csv')
df_rainfall.index = pd.to_datetime(df_rainfall['Date'])
df_rainfall['Year'] = df_rainfall.index.year
df_rainfall['Month'] = df_rainfall.index.month
df_rainfall['Weekday Name'] = df_rainfall.index.day_name()  # .weekday_name was removed from pandas; day_name() is the replacement
df_rainfall.sample(5, random_state=0)
col_plt = ['POONDI', 'REDHILLS', 'CHOLAVARAM', 'CHEMBARAMBAKKAM']
axes = df_rainfall[col_plt].plot(marker='.', alpha=0.5, linestyle='None', figsize=(11, 9), subplots=True)
for ax in axes:
    ax.set_ylabel('Daily Rainfall')
code
17115578/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_levels = pd.read_csv('../input/chennai_reservoir_levels.csv')
df_levels['Index'] = pd.to_datetime(df_levels['Date'])
df_levels.index = df_levels['Index']
del df_levels['Date']
del df_levels['Index']
df_levels.shape
df_levels.index.nunique()  # the 'Index' column was deleted above; use the DatetimeIndex
plt.plot(df_levels['POONDI'])
code
17115578/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_levels = pd.read_csv('../input/chennai_reservoir_levels.csv')
df_levels['Index'] = pd.to_datetime(df_levels['Date'])
df_levels.index = df_levels['Index']
del df_levels['Date']
del df_levels['Index']
df_levels.shape
df_levels.index.nunique()  # the 'Index' column was deleted above; use the DatetimeIndex
df_levels['POONDI'].plot()
plt.xlabel('Poondi')
plt.tight_layout()
code
17115578/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df_levels = pd.read_csv('../input/chennai_reservoir_levels.csv')
df_levels.describe()
code
73078726/cell_13
[ "image_output_1.png" ]
from kaggle_datasets import KaggleDatasets
from tensorflow.keras import layers
import os  # needed for os.sep below
import tensorflow as tf

try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
except:
    strategy = tf.distribute.get_strategy()

mri_types = ['FLAIR', 'T1w', 'T1wCE', 'T2w']
IMAGE_SIZE = 128
IMAGE_DEPTH = 32
BATCH_SIZE = 32
CHANNELS = len(mri_types)
AUTO = tf.data.AUTOTUNE

def deserialize_example(serialized_string):
    image_feature_description = {'image': tf.io.FixedLenFeature([], tf.string), 'MGMT_value': tf.io.FixedLenFeature([], tf.float32)}
    parsed_record = tf.io.parse_single_example(serialized_string, image_feature_description)
    image = tf.io.decode_raw(parsed_record['image'], tf.float64)
    image = tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE, IMAGE_DEPTH, CHANNELS])
    label = parsed_record['MGMT_value']
    return (image, label)

GCS_PATH = KaggleDatasets().get_gcs_path('rsna-brain-tumor-classification-tfrecords')
tf_train_path = GCS_PATH + '/tfrecords/train'
tf_valid_path = GCS_PATH + '/tfrecords/valid'
train_set = tf.data.TFRecordDataset(str(tf_train_path + os.sep + 'brain_train.tfrec'), compression_type='GZIP').map(deserialize_example).batch(BATCH_SIZE)
valid_set = tf.data.TFRecordDataset(str(tf_valid_path + os.sep + 'brain_val.tfrec'), compression_type='GZIP').map(deserialize_example).batch(BATCH_SIZE)

def get_model(width=128, height=128, depth=32):
    inputs = tf.keras.Input((width, height, depth, 4))
    x = layers.Conv3D(filters=32, kernel_size=2, activation='relu', padding='same')(inputs)
    x = layers.MaxPool3D(2)(x)
    x = layers.Conv3D(filters=64, kernel_size=2, activation='relu', padding='same')(x)
    x = layers.MaxPool3D(2)(x)
    x = layers.Conv3D(filters=128, kernel_size=2, activation='relu', padding='same')(x)
    x = layers.MaxPool3D(2)(x)
    x = layers.Conv3D(filters=256, kernel_size=2, activation='relu', padding='same')(x)
    x = layers.MaxPool3D(2)(x)
    x = layers.Conv3D(filters=512, kernel_size=2, activation='relu', padding='same')(x)
    x = layers.MaxPool3D(2)(x)
    x = layers.Flatten()(x)
    x = layers.Dense(units=128, activation='relu')(x)
    x = layers.Dense(units=128, activation='relu')(x)
    outputs = layers.Dense(units=1, activation='sigmoid')(x)
    model = tf.keras.Model(inputs, outputs)
    return model

USE_TPU = True
if USE_TPU:
    with strategy.scope():
        model = get_model(width=IMAGE_SIZE, height=IMAGE_SIZE, depth=IMAGE_DEPTH)
        model.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
else:
    model = get_model(width=IMAGE_SIZE, height=IMAGE_SIZE, depth=IMAGE_DEPTH)
    model.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
model.summary()
code
73078726/cell_2
[ "image_output_1.png" ]
import tensorflow as tf

try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    print('Device:', tpu.master())
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
except:
    strategy = tf.distribute.get_strategy()
print('Number of replicas:', strategy.num_replicas_in_sync)
code
73078726/cell_11
[ "text_plain_output_1.png" ]
from kaggle_datasets import KaggleDatasets
import matplotlib.pyplot as plt
import numpy as np
import os  # needed for os.sep below
import tensorflow as tf

try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
except:
    strategy = tf.distribute.get_strategy()

mri_types = ['FLAIR', 'T1w', 'T1wCE', 'T2w']
IMAGE_SIZE = 128
IMAGE_DEPTH = 32
BATCH_SIZE = 32
CHANNELS = len(mri_types)
AUTO = tf.data.AUTOTUNE

def deserialize_example(serialized_string):
    image_feature_description = {'image': tf.io.FixedLenFeature([], tf.string), 'MGMT_value': tf.io.FixedLenFeature([], tf.float32)}
    parsed_record = tf.io.parse_single_example(serialized_string, image_feature_description)
    image = tf.io.decode_raw(parsed_record['image'], tf.float64)
    image = tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE, IMAGE_DEPTH, CHANNELS])
    label = parsed_record['MGMT_value']
    return (image, label)

GCS_PATH = KaggleDatasets().get_gcs_path('rsna-brain-tumor-classification-tfrecords')
tf_train_path = GCS_PATH + '/tfrecords/train'
tf_valid_path = GCS_PATH + '/tfrecords/valid'
train_set = tf.data.TFRecordDataset(str(tf_train_path + os.sep + 'brain_train.tfrec'), compression_type='GZIP').map(deserialize_example).batch(BATCH_SIZE)
valid_set = tf.data.TFRecordDataset(str(tf_valid_path + os.sep + 'brain_val.tfrec'), compression_type='GZIP').map(deserialize_example).batch(BATCH_SIZE)

d = train_set.take(1)
for i, j in d:
    image = i
    label = j
img_id = np.random.randint(0, BATCH_SIZE)
channel = np.random.randint(0, CHANNELS)
plt.figure(figsize=(20, 10), facecolor=(0, 0, 0))
cols = IMAGE_DEPTH // 4
rows = 4
plt.axis('off')
for layer_idx in range(IMAGE_DEPTH):
    ax = plt.subplot(rows, cols, layer_idx + 1)
    ax.imshow(np.squeeze(image[img_id, :, :, layer_idx, channel]), cmap='gray')
    ax.axis('off')
    ax.set_title(str(layer_idx + 1), color='r', y=-0.01)
plt.suptitle(f'Batch Image NO.: {img_id}, MRI Type: {mri_types[channel]}, Shape: {image[img_id].shape}', color='w')
plt.subplots_adjust(wspace=0, hspace=0)
plt.show()
code
73078726/cell_15
[ "text_plain_output_1.png" ]
from kaggle_datasets import KaggleDatasets
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import numpy as np
import os  # needed for os.sep below
import tensorflow as tf

try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    strategy = tf.distribute.experimental.TPUStrategy(tpu)
except:
    strategy = tf.distribute.get_strategy()

mri_types = ['FLAIR', 'T1w', 'T1wCE', 'T2w']
IMAGE_SIZE = 128
IMAGE_DEPTH = 32
BATCH_SIZE = 32
CHANNELS = len(mri_types)
AUTO = tf.data.AUTOTUNE

def deserialize_example(serialized_string):
    image_feature_description = {'image': tf.io.FixedLenFeature([], tf.string), 'MGMT_value': tf.io.FixedLenFeature([], tf.float32)}
    parsed_record = tf.io.parse_single_example(serialized_string, image_feature_description)
    image = tf.io.decode_raw(parsed_record['image'], tf.float64)
    image = tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE, IMAGE_DEPTH, CHANNELS])
    label = parsed_record['MGMT_value']
    return (image, label)

GCS_PATH = KaggleDatasets().get_gcs_path('rsna-brain-tumor-classification-tfrecords')
tf_train_path = GCS_PATH + '/tfrecords/train'
tf_valid_path = GCS_PATH + '/tfrecords/valid'
train_set = tf.data.TFRecordDataset(str(tf_train_path + os.sep + 'brain_train.tfrec'), compression_type='GZIP').map(deserialize_example).batch(BATCH_SIZE)
valid_set = tf.data.TFRecordDataset(str(tf_valid_path + os.sep + 'brain_val.tfrec'), compression_type='GZIP').map(deserialize_example).batch(BATCH_SIZE)

d = train_set.take(1)
for i, j in d:
    image = i
    label = j
img_id = np.random.randint(0, BATCH_SIZE)
channel = np.random.randint(0, CHANNELS)
plt.figure(figsize=(20, 10), facecolor=(0, 0, 0))
cols = IMAGE_DEPTH // 4
rows = 4
plt.axis('off')
for layer_idx in range(IMAGE_DEPTH):
    ax = plt.subplot(rows, cols, layer_idx + 1)
    ax.imshow(np.squeeze(image[img_id, :, :, layer_idx, channel]), cmap='gray')
    ax.axis('off')
    ax.set_title(str(layer_idx + 1), color='r', y=-0.01)
plt.suptitle(f'Batch Image NO.: {img_id}, MRI Type: {mri_types[channel]}, Shape: {image[img_id].shape}', color='w')
plt.subplots_adjust(wspace=0, hspace=0)
plt.show()

def get_model(width=128, height=128, depth=32):
    inputs = tf.keras.Input((width, height, depth, 4))
    x = layers.Conv3D(filters=32, kernel_size=2, activation='relu', padding='same')(inputs)
    x = layers.MaxPool3D(2)(x)
    x = layers.Conv3D(filters=64, kernel_size=2, activation='relu', padding='same')(x)
    x = layers.MaxPool3D(2)(x)
    x = layers.Conv3D(filters=128, kernel_size=2, activation='relu', padding='same')(x)
    x = layers.MaxPool3D(2)(x)
    x = layers.Conv3D(filters=256, kernel_size=2, activation='relu', padding='same')(x)
    x = layers.MaxPool3D(2)(x)
    x = layers.Conv3D(filters=512, kernel_size=2, activation='relu', padding='same')(x)
    x = layers.MaxPool3D(2)(x)
    x = layers.Flatten()(x)
    x = layers.Dense(units=128, activation='relu')(x)
    x = layers.Dense(units=128, activation='relu')(x)
    outputs = layers.Dense(units=1, activation='sigmoid')(x)
    model = tf.keras.Model(inputs, outputs)
    return model

USE_TPU = True
if USE_TPU:
    with strategy.scope():
        model = get_model(width=IMAGE_SIZE, height=IMAGE_SIZE, depth=IMAGE_DEPTH)
        model.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
else:
    model = get_model(width=IMAGE_SIZE, height=IMAGE_SIZE, depth=IMAGE_DEPTH)
    model.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
model.summary()
early_stopping_cb = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=5)
history = model.fit(train_set, validation_data=valid_set, epochs=20, callbacks=[early_stopping_cb])
code
73078726/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt

plt.figure(figsize=(16, 7))
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
ax1 = plt.subplot(1, 2, 1)
ax1.plot(epochs, acc, 'r')
ax1.plot(epochs, val_acc, 'b')
ax1.set_xticks([i for i in epochs])
ax1.set_title('Training and validation Accuracy')
ax1.legend(['Training', 'Validation'])
ax1.set_xlabel('epochs')
ax1.set_ylabel('Accuracy')
ax2 = plt.subplot(1, 2, 2)
ax2.plot(epochs, loss, 'r')
ax2.plot(epochs, val_loss, 'b')
ax2.set_xticks([i for i in epochs])
ax2.legend(['Training', 'Validation'])
ax2.set_xlabel('Epochs')
ax2.set_ylabel('Loss')
ax2.set_title('Training and validation loss')
plt.show()
code
17141482/cell_9
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pylab
import seaborn as sns

dataset = pd.read_csv('../input/census-data-set/Census Income Dataset.csv', na_values=[' ?'])
test_dataset = pd.read_csv('../input/census-test-dataset/Census Income Testset.csv', na_values=[' ?'])

def Income_bracket_binarization(feat_val):
    if feat_val == '<=50K':
        return 0
    else:
        return 1

dataset['Income_bracket'] = dataset['Income_bracket'].apply(Income_bracket_binarization)
test_dataset['Income_bracket'] = test_dataset['Income_bracket'].apply(Income_bracket_binarization)

def Plot():
    # Find indices where Income is >50K and <=50K
    fig = plt.figure(figsize=(15, 15))
    fig.subplots_adjust(hspace=0.7, wspace=0.7)
    pylab.suptitle('Analyzing the dataset', fontsize='xx-large')
    plt.subplot(3, 2, 1)
    ax = sns.countplot(x='Age', hue='Income_bracket', data=dataset)
    plt.subplot(3, 2, 2)
    ax = sns.countplot(x='workclass', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha='right')
    plt.subplot(3, 2, 3)
    ax = sns.countplot(x='Education', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha='right')
    plt.subplot(3, 2, 4)
    ax = sns.countplot(x='Occupation', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha='right')
    plt.subplot(3, 2, 5)
    ax = sns.countplot(x='Gender', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha='right')
    plt.subplot(3, 2, 6)
    ax = sns.countplot(x='hours_per_week', hue='Income_bracket', data=dataset)
    return None

dataset.hist(column=['Age', 'Education', 'hours_per_week'], figsize=(6, 5))
pylab.suptitle('Analyzing distribution for the dataset', fontsize='xx-large')
Plot()
X = dataset.drop('Income_bracket', axis=1)
y = dataset['Income_bracket']
code
17141482/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/census-data-set/Census Income Dataset.csv', na_values=[' ?'])
test_dataset = pd.read_csv('../input/census-test-dataset/Census Income Testset.csv', na_values=[' ?'])

def Income_bracket_binarization(feat_val):
    if feat_val == '<=50K':
        return 0
    else:
        return 1
dataset['Income_bracket'] = dataset['Income_bracket'].apply(Income_bracket_binarization)
test_dataset['Income_bracket'] = test_dataset['Income_bracket'].apply(Income_bracket_binarization)
dataset.head()
code
17141482/cell_2
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/census-data-set/Census Income Dataset.csv', na_values=[' ?'])
dataset.head()
code
17141482/cell_1
[ "text_plain_output_1.png" ]
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.python.framework import ops
import sklearn
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import pylab
from sklearn.preprocessing import LabelEncoder
from sklearn.base import BaseEstimator, TransformerMixin
import seaborn as sns
import math
import os
print(os.listdir('../input'))
code
17141482/cell_7
[ "image_output_2.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/census-data-set/Census Income Dataset.csv', na_values=[' ?'])
test_dataset = pd.read_csv('../input/census-test-dataset/Census Income Testset.csv', na_values=[' ?'])

def Income_bracket_binarization(feat_val):
    if feat_val == '<=50K':
        return 0
    else:
        return 1
dataset['Income_bracket'] = dataset['Income_bracket'].apply(Income_bracket_binarization)
test_dataset['Income_bracket'] = test_dataset['Income_bracket'].apply(Income_bracket_binarization)
obj = CategoricalImputer(columns=['workclass', 'Occupation', 'Native_Country'])
train_result = obj.fit(dataset[['workclass', 'Occupation', 'Native_Country']])
dataset[['workclass', 'Occupation', 'Native_Country']] = train_result.transform(dataset[['workclass', 'Occupation', 'Native_Country']])
test_obj = CategoricalImputer(columns=['workclass', 'Occupation', 'Native_Country'])
test_result = test_obj.fit(test_dataset[['workclass', 'Occupation', 'Native_Country']])
test_dataset[['workclass', 'Occupation', 'Native_Country']] = test_result.transform(test_dataset[['workclass', 'Occupation', 'Native_Country']])
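# --- Added example (editorial, not from the source notebook): `CategoricalImputer`
# used above is the custom class defined in cell_12 of this notebook, not an import.
# A minimal equivalent sketch using scikit-learn's built-in most-frequent imputer
# (assumes the same `dataset` as above):
from sklearn.impute import SimpleImputer
cat_cols = ['workclass', 'Occupation', 'Native_Country']
imputer = SimpleImputer(strategy='most_frequent')
dataset[cat_cols] = imputer.fit_transform(dataset[cat_cols])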
code
17141482/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/census-data-set/Census Income Dataset.csv', na_values=[' ?'])
test_dataset = pd.read_csv('../input/census-test-dataset/Census Income Testset.csv', na_values=[' ?'])
test_dataset.head()
code
17141482/cell_12
[ "text_html_output_1.png" ]
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pylab
import seaborn as sns
dataset = pd.read_csv('../input/census-data-set/Census Income Dataset.csv', na_values=[' ?'])
test_dataset = pd.read_csv('../input/census-test-dataset/Census Income Testset.csv', na_values=[' ?'])

def Income_bracket_binarization(feat_val):
    if feat_val == '<=50K':
        return 0
    else:
        return 1
dataset['Income_bracket'] = dataset['Income_bracket'].apply(Income_bracket_binarization)
test_dataset['Income_bracket'] = test_dataset['Income_bracket'].apply(Income_bracket_binarization)

class CategoricalImputer:

    def __init__(self, columns=None, strategy='most_frequent'):
        self.columns = columns
        self.strategy = strategy

    def fit(self, X, y=None):
        if self.columns is None:
            self.columns = X.columns
        if self.strategy == 'most_frequent':  # fixed: original used `is 'most_frequent'`, which tests identity, not equality
            self.fill = {column: X[column].value_counts().index[0] for column in self.columns}
        else:
            self.fill = {column: '0' for column in self.columns}
        return self

    def transform(self, X):
        for column in self.columns:
            X[column] = X[column].fillna(self.fill[column])
        return X

def Plot():
    # Find indices where Income is >50K and <=50K
    fig = plt.figure(figsize=(15, 15))
    fig.subplots_adjust(hspace=0.7, wspace=0.7)
    pylab.suptitle("Analyzing the dataset", fontsize="xx-large")
    plt.subplot(3, 2, 1)
    ax = sns.countplot(x='Age', hue='Income_bracket', data=dataset)
    plt.subplot(3, 2, 2)
    ax = sns.countplot(x='workclass', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
    plt.subplot(3, 2, 3)
    ax = sns.countplot(x='Education', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
    plt.subplot(3, 2, 4)
    ax = sns.countplot(x='Occupation', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
    plt.subplot(3, 2, 5)
    ax = sns.countplot(x='Gender', hue='Income_bracket', data=dataset)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=40, ha="right")
    plt.subplot(3, 2, 6)
    ax = sns.countplot(x='hours_per_week', hue='Income_bracket', data=dataset)
    return None
Plot()
X = dataset.drop('Income_bracket', axis=1)
y = dataset['Income_bracket']

class Categorical_Encoder(BaseEstimator, TransformerMixin):

    def __init__(self, columns=None):
        self.columns = columns
        self.encoders = None

    def fit(self, data, target=None):
        """ Expects a data frame with named columns to encode. """
        if self.columns is None:
            self.columns = data.columns
        self.encoders = {column: LabelEncoder().fit(data[column]) for column in self.columns}
        return self

    def transform(self, data):
        """ Uses the encoders to transform a data frame. """
        output = data.copy()
        for column, encoder in self.encoders.items():
            output[column] = encoder.transform(data[column])
        return output
categorical_features = {column: list(dataset[column].unique()) for column in dataset.columns if dataset[column].dtype == 'object'}
encoder = Categorical_Encoder(categorical_features.keys())
dataset = encoder.fit_transform(dataset)
dataset.head()
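# --- Added example (editorial, not from the source notebook): a minimal sketch of
# reversing the encoding, assuming the fitted `encoder` above. Each per-column
# LabelEncoder it stores exposes inverse_transform:
decoded_workclass = encoder.encoders['workclass'].inverse_transform(dataset['workclass'])
print(decoded_workclass[:5])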
code
17141482/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
dataset = pd.read_csv('../input/census-data-set/Census Income Dataset.csv', na_values=[' ?'])
test_dataset = pd.read_csv('../input/census-test-dataset/Census Income Testset.csv', na_values=[' ?'])
test_dataset.head()
code
34140277/cell_21
[ "text_html_output_1.png" ]
""" cols = ['pCut::Motor_Torque', 'pCut::CTRL_Position_controller::Lag_error', 'pCut::CTRL_Position_controller::Actual_position', 'pCut::CTRL_Position_controller::Actual_speed', 'pSvolFilm::CTRL_Position_controller::Actual_position', 'pSvolFilm::CTRL_Position_controller::Actual_speed', 'pSvolFilm::CTRL_Position_controller::Lag_error', 'pSpintor::VAX_speed', 'Mode'] pca = PCA(n_components=2) data_pca = pca.fit_transform(data[cols].values) #data['pca-one'] = data_pca[:,0] #data['pca-two'] = data_pca[:,1] print('Explained variation per principal component: {}'.format(pca.explained_variance_ratio_)) """
code
34140277/cell_13
[ "text_plain_output_1.png" ]
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import MinMaxScaler
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/combineddataset/Combined.csv')
for i in ['Mode', 'Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month']:
    df[i] = pd.to_numeric(df[i], downcast='integer')
data = df.copy()
data = data[:10000]
data = data.drop(['Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month'], axis=1)
scaler = MinMaxScaler()
num2 = scaler.fit_transform(data.drop(['timestamp'], axis=1))
num2 = pd.DataFrame(num2, columns=data.drop(['timestamp'], axis=1).columns)
outlier_detection = DBSCAN(eps=0.2, metric='euclidean', min_samples=5, n_jobs=-1)
clusters = outlier_detection.fit_predict(num2)
clusters.shape
data['anomaly'] = pd.Series(clusters)
data.head()
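# --- Added example (editorial, not from the source notebook): eps=0.2 above is a
# hand-picked radius. A minimal sketch of the usual k-distance heuristic for choosing
# eps, assuming the scaled frame `num2` from above; the "elbow" of the sorted curve
# suggests a reasonable value:
import matplotlib.pyplot as plt
import numpy as np
from sklearn.neighbors import NearestNeighbors
nn = NearestNeighbors(n_neighbors=5).fit(num2)
distances, _ = nn.kneighbors(num2)
plt.plot(np.sort(distances[:, -1]))
plt.ylabel('distance to 5th nearest neighbor')
plt.show()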
code
34140277/cell_25
[ "text_plain_output_1.png" ]
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/combineddataset/Combined.csv')
for i in ['Mode', 'Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month']:
    df[i] = pd.to_numeric(df[i], downcast='integer')
data = df.copy()
data = data[:10000]
data = data.drop(['Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month'], axis=1)
scaler = MinMaxScaler()
num2 = scaler.fit_transform(data.drop(['timestamp'], axis=1))
num2 = pd.DataFrame(num2, columns=data.drop(['timestamp'], axis=1).columns)
outlier_detection = DBSCAN(eps=0.2, metric='euclidean', min_samples=5, n_jobs=-1)
clusters = outlier_detection.fit_predict(num2)
clusters.shape
data['anomaly'] = pd.Series(clusters)
data.columns
anomaly_ind = data[data['anomaly'] == -1].index
normal_ind = data[data['anomaly'] != -1].index
data.columns
features = ['pCut::Motor_Torque', 'pCut::CTRL_Position_controller::Lag_error', 'pCut::CTRL_Position_controller::Actual_position', 'pCut::CTRL_Position_controller::Actual_speed', 'pSvolFilm::CTRL_Position_controller::Actual_position', 'pSvolFilm::CTRL_Position_controller::Actual_speed', 'pSvolFilm::CTRL_Position_controller::Lag_error', 'pSpintor::VAX_speed', 'Mode']
for feature in features:
    plt.figure(figsize=(15, 7))
    plt.plot(data[feature], color='blue', label='normal')
    plt.scatter(x=data.iloc[anomaly_ind].index, y=data.iloc[anomaly_ind][feature], color='red', label='anomalous')
    plt.title(feature)
    plt.legend()
code
34140277/cell_4
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_9.png" ]
""" if not os.path.exists('/kaggle/working/compiled_df'): os.makedirs('/kaggle/working/compiled_df') #Saves dataframe to a csv file, removes a index df.to_csv('/kaggle/working/compiled_df/Combined.csv', index=False) """
code
34140277/cell_34
[ "text_plain_output_1.png" ]
from sklearn.cluster import DBSCAN
from sklearn.ensemble import IsolationForest
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/combineddataset/Combined.csv')
for i in ['Mode', 'Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month']:
    df[i] = pd.to_numeric(df[i], downcast='integer')
data = df.copy()
data = data[:10000]
data = data.drop(['Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month'], axis=1)
scaler = MinMaxScaler()
num2 = scaler.fit_transform(data.drop(['timestamp'], axis=1))
num2 = pd.DataFrame(num2, columns=data.drop(['timestamp'], axis=1).columns)
outlier_detection = DBSCAN(eps=0.2, metric='euclidean', min_samples=5, n_jobs=-1)
clusters = outlier_detection.fit_predict(num2)
clusters.shape
data['anomaly'] = pd.Series(clusters)
data.columns
anomaly_ind = data[data['anomaly'] == -1].index
normal_ind = data[data['anomaly'] != -1].index
# Note: data_pca is produced by the PCA snippet that is commented out in cell_21,
# so the two lines below fail unless that snippet is run first.
anomaly_pca = pd.DataFrame(data_pca[anomaly_ind])
normal_pca = pd.DataFrame(data_pca[normal_ind])
anomaly_pca
data.columns
features = ['pCut::Motor_Torque', 'pCut::CTRL_Position_controller::Lag_error', 'pCut::CTRL_Position_controller::Actual_position', 'pCut::CTRL_Position_controller::Actual_speed', 'pSvolFilm::CTRL_Position_controller::Actual_position', 'pSvolFilm::CTRL_Position_controller::Actual_speed', 'pSvolFilm::CTRL_Position_controller::Lag_error', 'pSpintor::VAX_speed', 'Mode']
data = df.copy()
data = data[:10000]
data = data.drop(['timestamp', 'Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month'], axis=1)
rs = np.random.RandomState(0)
clf = IsolationForest(max_samples=100, random_state=rs, contamination=0.1)
clf.fit(data)
if_scores = clf.decision_function(data)
if_anomalies = clf.predict(data)
if_anomalies = pd.Series(if_anomalies).replace([-1, 1], [1, 0])
anomaly_ind = if_anomalies[if_anomalies == 1].index
features = ['pCut::Motor_Torque', 'pCut::CTRL_Position_controller::Lag_error', 'pCut::CTRL_Position_controller::Actual_position', 'pCut::CTRL_Position_controller::Actual_speed', 'pSvolFilm::CTRL_Position_controller::Actual_position', 'pSvolFilm::CTRL_Position_controller::Actual_speed', 'pSvolFilm::CTRL_Position_controller::Lag_error', 'pSpintor::VAX_speed', 'Mode']
for feature in features:
    plt.figure(figsize=(15, 7))
    plt.scatter(data.index, data[feature], c='green', label='normal')
    plt.scatter(anomaly_ind, data.iloc[anomaly_ind][feature], c='red', label='anomaly')
    plt.ylabel(feature)
    plt.title(feature)
    plt.legend()
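# --- Added example (editorial, not from the source notebook): a minimal sketch of
# thresholding the raw scores directly instead of relying on contamination=0.1
# (assumes `if_scores` from above); decision_function scores are lower for more
# anomalous rows, so flag the bottom 1%:
threshold = np.quantile(if_scores, 0.01)
custom_flags = if_scores < threshold
print('rows flagged:', custom_flags.sum())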
code
34140277/cell_23
[ "text_plain_output_1.png" ]
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import MinMaxScaler
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/combineddataset/Combined.csv')
for i in ['Mode', 'Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month']:
    df[i] = pd.to_numeric(df[i], downcast='integer')
data = df.copy()
data = data[:10000]
data = data.drop(['Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month'], axis=1)
scaler = MinMaxScaler()
num2 = scaler.fit_transform(data.drop(['timestamp'], axis=1))
num2 = pd.DataFrame(num2, columns=data.drop(['timestamp'], axis=1).columns)
outlier_detection = DBSCAN(eps=0.2, metric='euclidean', min_samples=5, n_jobs=-1)
clusters = outlier_detection.fit_predict(num2)
clusters.shape
data['anomaly'] = pd.Series(clusters)
data.columns
anomaly_ind = data[data['anomaly'] == -1].index
normal_ind = data[data['anomaly'] != -1].index
anomaly_pca = pd.DataFrame(data_pca[anomaly_ind])
normal_pca = pd.DataFrame(data_pca[normal_ind])
anomaly_pca
code
34140277/cell_20
[ "text_plain_output_1.png" ]
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import MinMaxScaler
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/combineddataset/Combined.csv')
for i in ['Mode', 'Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month']:
    df[i] = pd.to_numeric(df[i], downcast='integer')
data = df.copy()
data = data[:10000]
data = data.drop(['Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month'], axis=1)
scaler = MinMaxScaler()
num2 = scaler.fit_transform(data.drop(['timestamp'], axis=1))
num2 = pd.DataFrame(num2, columns=data.drop(['timestamp'], axis=1).columns)
outlier_detection = DBSCAN(eps=0.2, metric='euclidean', min_samples=5, n_jobs=-1)
clusters = outlier_detection.fit_predict(num2)
clusters.shape
data['anomaly'] = pd.Series(clusters)
data.columns
code
34140277/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/combineddataset/Combined.csv')
for i in ['Mode', 'Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month']:
    df[i] = pd.to_numeric(df[i], downcast='integer')
df.head(10)
code
34140277/cell_2
[ "text_html_output_1.png" ]
import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34140277/cell_1
[ "text_plain_output_1.png" ]
""" import os import glob import pandas as pd #os.chdir("/mydir") files = [i for i in glob.glob('/kaggle/input/one-year-industrial-component-degradation/*.{}'.format('csv'))] files extension = 'csv' all_filenames = [i for i in glob.glob('/kaggle/input/one-year-industrial-component-degradation/*[mode1].{}'.format(extension))] + [i for i in glob.glob('/kaggle/input/one-year-industrial-component-degradation/oneyeardata/*[mode1].{}'.format(extension))] #print(all_filenames) #combine all files in the list df = pd.concat([pd.read_csv(f) for f in all_filenames ]) #export to csv df.to_csv( "combined_csv.csv", index=False, encoding='utf-8-sig') """
code
34140277/cell_15
[ "text_plain_output_1.png" ]
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import MinMaxScaler
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/combineddataset/Combined.csv')
for i in ['Mode', 'Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month']:
    df[i] = pd.to_numeric(df[i], downcast='integer')
data = df.copy()
data = data[:10000]
data = data.drop(['Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month'], axis=1)
scaler = MinMaxScaler()
num2 = scaler.fit_transform(data.drop(['timestamp'], axis=1))
num2 = pd.DataFrame(num2, columns=data.drop(['timestamp'], axis=1).columns)
outlier_detection = DBSCAN(eps=0.2, metric='euclidean', min_samples=5, n_jobs=-1)
clusters = outlier_detection.fit_predict(num2)
clusters.shape
data['anomaly'] = pd.Series(clusters)
X_anomaly = data[data['anomaly'] == -1]
X_normal = data[data['anomaly'] != -1]
print(X_anomaly.shape, X_normal.shape)
code
34140277/cell_3
[ "text_plain_output_1.png" ]
""" filenames = os.listdir('/kaggle/input/one-year-industrial-component-degradation/') filenames = [i.strip(".csv") for i in filenames] filenames.sort() filenames.remove('oneyeardata') parsed_filenames = [] for name in filenames: temp = name.split("T") month, date = temp[0].split("-") rhs = temp[1].split("_") hours, minutes, seconds = rhs[0][:2], rhs[0][2:4], rhs[0][4:] sample_no = rhs[1] mode = rhs[2][-1] # Now we have Month, Date, Hours, Minutes, Seconds, Sample Number, Mode parsed_filenames.append([month, date, hours, minutes, seconds, sample_no, mode]) parsed_filenames = pd.DataFrame(parsed_filenames, columns=["Month", "Date", "Hours", "Minutes", "Seconds", "Sample Number", "Mode"]) for i in parsed_filenames.columns: parsed_filenames[i] = pd.to_numeric(parsed_filenames[i], errors='coerce') path = '/kaggle/input/one-year-industrial-component-degradation/' df = pd.DataFrame() #f = pd.read_csv(path+filenames[0]+".csv") #f = f.join(parsed_filenames[0:1], how='left') #f = f.fillna(method='ffill') #f for ind, file in enumerate(filenames): file_content = pd.read_csv(path+file+".csv") file_content = file_content.join(parsed_filenames[ind:ind+1], how='left') file_content.fillna(method='ffill', inplace=True) if df.empty: df = file_content df.fillna(method='ffill', inplace=True) else: df = df.append(file_content, ignore_index=True) df.fillna(method='ffill', inplace=True) for i in ['Mode', 'Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month']: df[i] = pd.to_numeric(df[i], downcast='integer') df.info() """
code
34140277/cell_31
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.cluster import DBSCAN
from sklearn.ensemble import IsolationForest
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/combineddataset/Combined.csv')
for i in ['Mode', 'Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month']:
    df[i] = pd.to_numeric(df[i], downcast='integer')
data = df.copy()
data = data[:10000]
data = data.drop(['Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month'], axis=1)
scaler = MinMaxScaler()
num2 = scaler.fit_transform(data.drop(['timestamp'], axis=1))
num2 = pd.DataFrame(num2, columns=data.drop(['timestamp'], axis=1).columns)
outlier_detection = DBSCAN(eps=0.2, metric='euclidean', min_samples=5, n_jobs=-1)
clusters = outlier_detection.fit_predict(num2)
clusters.shape
data['anomaly'] = pd.Series(clusters)
data.columns
anomaly_ind = data[data['anomaly'] == -1].index
normal_ind = data[data['anomaly'] != -1].index
anomaly_pca = pd.DataFrame(data_pca[anomaly_ind])
normal_pca = pd.DataFrame(data_pca[normal_ind])
anomaly_pca
data.columns
features = ['pCut::Motor_Torque', 'pCut::CTRL_Position_controller::Lag_error', 'pCut::CTRL_Position_controller::Actual_position', 'pCut::CTRL_Position_controller::Actual_speed', 'pSvolFilm::CTRL_Position_controller::Actual_position', 'pSvolFilm::CTRL_Position_controller::Actual_speed', 'pSvolFilm::CTRL_Position_controller::Lag_error', 'pSpintor::VAX_speed', 'Mode']
data = df.copy()
data = data[:10000]
data = data.drop(['timestamp', 'Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month'], axis=1)
rs = np.random.RandomState(0)
clf = IsolationForest(max_samples=100, random_state=rs, contamination=0.1)
clf.fit(data)
if_scores = clf.decision_function(data)
if_anomalies = clf.predict(data)
if_anomalies = pd.Series(if_anomalies).replace([-1, 1], [1, 0])
plt.figure(figsize=(12, 8))
plt.hist(if_scores)
plt.title('Histogram of Avg Anomaly Scores: Lower => More Anomalous')
code
34140277/cell_24
[ "text_plain_output_1.png" ]
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import MinMaxScaler
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/combineddataset/Combined.csv')
for i in ['Mode', 'Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month']:
    df[i] = pd.to_numeric(df[i], downcast='integer')
data = df.copy()
data = data[:10000]
data = data.drop(['Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month'], axis=1)
scaler = MinMaxScaler()
num2 = scaler.fit_transform(data.drop(['timestamp'], axis=1))
num2 = pd.DataFrame(num2, columns=data.drop(['timestamp'], axis=1).columns)
outlier_detection = DBSCAN(eps=0.2, metric='euclidean', min_samples=5, n_jobs=-1)
clusters = outlier_detection.fit_predict(num2)
clusters.shape
data['anomaly'] = pd.Series(clusters)
data.columns
data.columns
code
34140277/cell_14
[ "text_plain_output_1.png" ]
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import MinMaxScaler
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/combineddataset/Combined.csv')
for i in ['Mode', 'Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month']:
    df[i] = pd.to_numeric(df[i], downcast='integer')
data = df.copy()
data = data[:10000]
data = data.drop(['Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month'], axis=1)
scaler = MinMaxScaler()
num2 = scaler.fit_transform(data.drop(['timestamp'], axis=1))
num2 = pd.DataFrame(num2, columns=data.drop(['timestamp'], axis=1).columns)
outlier_detection = DBSCAN(eps=0.2, metric='euclidean', min_samples=5, n_jobs=-1)
clusters = outlier_detection.fit_predict(num2)
clusters.shape
data['anomaly'] = pd.Series(clusters)
data['anomaly'].unique()
code
34140277/cell_12
[ "text_plain_output_1.png" ]
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import MinMaxScaler
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/combineddataset/Combined.csv')
for i in ['Mode', 'Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month']:
    df[i] = pd.to_numeric(df[i], downcast='integer')
data = df.copy()
data = data[:10000]
data = data.drop(['Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month'], axis=1)
scaler = MinMaxScaler()
num2 = scaler.fit_transform(data.drop(['timestamp'], axis=1))
num2 = pd.DataFrame(num2, columns=data.drop(['timestamp'], axis=1).columns)
outlier_detection = DBSCAN(eps=0.2, metric='euclidean', min_samples=5, n_jobs=-1)
clusters = outlier_detection.fit_predict(num2)
clusters.shape
code
34140277/cell_5
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_9.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/combineddataset/Combined.csv')
for i in ['Mode', 'Sample Number', 'Seconds', 'Minutes', 'Hours', 'Date', 'Month']:
    df[i] = pd.to_numeric(df[i], downcast='integer')
df.info()
code
89129165/cell_34
[ "text_plain_output_1.png" ]
from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import warnings
pd.set_option('display.max_colwidth', None)
warnings.filterwarnings('ignore')
custom_colors = ['#74a09e', '#86c1b2', '#98e2c6', '#f3c969', '#f2a553', '#d96548', '#c14953']
sns.set_style('whitegrid')
sns.despine(left=True, bottom=True)
books = pd.read_csv('/kaggle/input/goodreadsbooks/books.csv', error_bad_lines=False)
books[:10]
books.set_index('bookID', inplace=True)
books.index.rename('BookID')
books = books.drop(columns=['isbn', 'isbn13'])
books.columns
books.columns = ['Title', 'Authors', 'Avg_Rating', 'Lang_Code', '#Pages', '#Ratings', '#Text_Reviews', 'Publication_Date', 'Publisher']
books.shape
all_books = ' '.join(token for token in books['Title'])
stopwords = set(STOPWORDS)
font_path = '../input/newghanesfont/NewGhanesFont.otf'
wordcloud = WordCloud(stopwords=stopwords, font_path=font_path, max_words=500, max_font_size=350, random_state=42, width=2500, height=1000, colormap='twilight_shifted_r')
wordcloud.generate(all_books)
plt.axis('off')
books.Lang_Code.unique()
books.Lang_Code.nunique()
sns.set_style('whitegrid')
plt.figure(figsize=(12, 4))
sns.distplot(books.Avg_Rating, bins=30, norm_hist=False, color='Purple')
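# --- Added example (editorial, not from the source notebook): the cloud is generated
# above but never rendered in this cell (cell_23 of the same notebook does render it).
# A minimal sketch of displaying and saving it; to_file is part of the wordcloud API,
# and the output filename is an arbitrary choice:
plt.figure(figsize=(16, 8))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.show()
wordcloud.to_file('title_wordcloud.png')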
code
89129165/cell_23
[ "text_plain_output_1.png" ]
from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import warnings
pd.set_option('display.max_colwidth', None)
warnings.filterwarnings('ignore')
custom_colors = ['#74a09e', '#86c1b2', '#98e2c6', '#f3c969', '#f2a553', '#d96548', '#c14953']
sns.set_style('whitegrid')
sns.despine(left=True, bottom=True)
books = pd.read_csv('/kaggle/input/goodreadsbooks/books.csv', error_bad_lines=False)
books[:10]
books.set_index('bookID', inplace=True)
books.index.rename('BookID')
books = books.drop(columns=['isbn', 'isbn13'])
books.columns
books.columns = ['Title', 'Authors', 'Avg_Rating', 'Lang_Code', '#Pages', '#Ratings', '#Text_Reviews', 'Publication_Date', 'Publisher']
books.shape
all_books = ' '.join(token for token in books['Title'])
stopwords = set(STOPWORDS)
font_path = '../input/newghanesfont/NewGhanesFont.otf'
wordcloud = WordCloud(stopwords=stopwords, font_path=font_path, max_words=500, max_font_size=350, random_state=42, width=2500, height=1000, colormap='twilight_shifted_r')
wordcloud.generate(all_books)
plt.figure(figsize=(16, 8))
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis('off')
plt.show()
code
89129165/cell_30
[ "image_output_1.png" ]
from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import warnings
pd.set_option('display.max_colwidth', None)
warnings.filterwarnings('ignore')
custom_colors = ['#74a09e', '#86c1b2', '#98e2c6', '#f3c969', '#f2a553', '#d96548', '#c14953']
sns.set_style('whitegrid')
sns.despine(left=True, bottom=True)
books = pd.read_csv('/kaggle/input/goodreadsbooks/books.csv', error_bad_lines=False)
books[:10]
books.set_index('bookID', inplace=True)
books.index.rename('BookID')
books = books.drop(columns=['isbn', 'isbn13'])
books.columns
books.columns = ['Title', 'Authors', 'Avg_Rating', 'Lang_Code', '#Pages', '#Ratings', '#Text_Reviews', 'Publication_Date', 'Publisher']
books.shape
all_books = ' '.join(token for token in books['Title'])
stopwords = set(STOPWORDS)
font_path = '../input/newghanesfont/NewGhanesFont.otf'
wordcloud = WordCloud(stopwords=stopwords, font_path=font_path, max_words=500, max_font_size=350, random_state=42, width=2500, height=1000, colormap='twilight_shifted_r')
wordcloud.generate(all_books)
plt.axis('off')
books.Lang_Code.unique()
books.Lang_Code.nunique()
plt.hist(books.Lang_Code)
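# --- Added example (editorial, not from the source notebook): plt.hist on a
# string-typed column gives unordered, cramped bins. A minimal alternative sketch,
# assuming the `books` frame from above, ordered by frequency:
books.Lang_Code.value_counts().head(10).plot(kind='bar', figsize=(12, 4))
plt.ylabel('number of books')
plt.show()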
code
89129165/cell_20
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import warnings
from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS
pd.set_option('display.max_colwidth', None)
warnings.filterwarnings('ignore')
custom_colors = ['#74a09e', '#86c1b2', '#98e2c6', '#f3c969', '#f2a553', '#d96548', '#c14953']
sns.set_style('whitegrid')
sns.despine(left=True, bottom=True)
books = pd.read_csv('/kaggle/input/goodreadsbooks/books.csv', error_bad_lines=False)
books[:10]
books.set_index('bookID', inplace=True)
books.index.rename('BookID')
books = books.drop(columns=['isbn', 'isbn13'])
books.columns
books.columns = ['Title', 'Authors', 'Avg_Rating', 'Lang_Code', '#Pages', '#Ratings', '#Text_Reviews', 'Publication_Date', 'Publisher']
books.shape
books[['Title', 'Lang_Code']][books['Title'].str.find('Harry Potter') != -1]
code
89129165/cell_6
[ "image_output_1.png" ]
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
89129165/cell_29
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import warnings
from wordcloud import WordCloud, ImageColorGenerator, STOPWORDS
pd.set_option('display.max_colwidth', None)
warnings.filterwarnings('ignore')
custom_colors = ['#74a09e', '#86c1b2', '#98e2c6', '#f3c969', '#f2a553', '#d96548', '#c14953']
sns.set_style('whitegrid')
sns.despine(left=True, bottom=True)
books = pd.read_csv('/kaggle/input/goodreadsbooks/books.csv', error_bad_lines=False)
books[:10]
books.set_index('bookID', inplace=True)
books.index.rename('BookID')
books = books.drop(columns=['isbn', 'isbn13'])
books.columns
books.columns = ['Title', 'Authors', 'Avg_Rating', 'Lang_Code', '#Pages', '#Ratings', '#Text_Reviews', 'Publication_Date', 'Publisher']
books.shape
books.Lang_Code.unique()
books.Lang_Code.nunique()
code