path (string, length 13-17) | screenshot_names (sequence, length 1-873) | code (string, length 0-40.4k) | cell_type (string, 1 class)
---|---|---|---|
2035143/cell_7 | [
"text_plain_output_1.png"
] | from keras.layers import Dense
from keras.models import Sequential
from keras.utils import to_categorical
from sklearn import preprocessing, neighbors
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed; use model_selection
from sklearn import tree
from sklearn.model_selection import cross_val_score
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
from sklearn import preprocessing, neighbors
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
from sklearn import tree
import graphviz
from sklearn.model_selection import cross_val_score
df = pd.read_csv('../input/glass.csv')
X = np.array(df.drop(['Type'], axis=1).astype(float))
X = preprocessing.scale(X)
y = np.array(df['Type'])
y = to_categorical(y)
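# to_categorical one-hot encodes the integer labels; glass 'Type' runs 1-7, so this yields 8 columns (index 0 unused)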
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf = neighbors.KNeighborsClassifier()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
model = Sequential()
model.add(Dense(50, activation='relu', input_shape=(X.shape[1],)))
model.add(Dense(25, activation='relu'))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
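# a softmax output paired with categorical_crossentropy expects one-hot targets, which to_categorical provides above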
model.fit(X, y, epochs=50, batch_size=5, validation_split=0.2, verbose=True)
scores = model.evaluate(X, y)
trs = tree.DecisionTreeClassifier(max_depth=3)
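# cross_val_score returns one accuracy score per fold (5-fold CV on the one-hot targets)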
cross_val_score(trs, X, y, cv=5) | code |
2035143/cell_3 | [
"text_plain_output_1.png"
] | from keras.utils import to_categorical
from sklearn import preprocessing, neighbors
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed; use model_selection
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
from sklearn import preprocessing, neighbors
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
from sklearn import tree
import graphviz
from sklearn.model_selection import cross_val_score
df = pd.read_csv('../input/glass.csv')
X = np.array(df.drop(['Type'], axis=1).astype(float))
X = preprocessing.scale(X)
y = np.array(df['Type'])
y = to_categorical(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf = neighbors.KNeighborsClassifier()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
print('accuracy', accuracy) | code |
2035143/cell_5 | [
"text_plain_output_1.png"
] | from keras.layers import Dense
from keras.models import Sequential
from keras.utils import to_categorical
from sklearn import preprocessing, neighbors
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed; use model_selection
import numpy as np
import pandas as pd
import numpy as np
import pandas as pd
from sklearn import preprocessing, neighbors
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
from sklearn import tree
import graphviz
from sklearn.model_selection import cross_val_score
df = pd.read_csv('../input/glass.csv')
X = np.array(df.drop(['Type'], axis=1).astype(float))
X = preprocessing.scale(X)
y = np.array(df['Type'])
y = to_categorical(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
clf = neighbors.KNeighborsClassifier()
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
model = Sequential()
model.add(Dense(50, activation='relu', input_shape=(X.shape[1],)))
model.add(Dense(25, activation='relu'))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X, y, epochs=50, batch_size=5, validation_split=0.2, verbose=True)
scores = model.evaluate(X, y)
print('\n%s: %.2f%%' % (model.metrics_names[1], scores[1] * 100)) | code |
128048859/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
original = pd.read_csv('/kaggle/input/originall/dataa.csv')
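# build a per-column summary: describe() statistics plus uniqueness, null counts, and dtypes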
desc = train.describe().T
desc['nunique'] = train.nunique()
desc['%unique'] = desc['nunique'] / len(train) * 100
desc['null'] = train.isna().sum()
desc['type'] = train.dtypes
desc.head(60)
desc = test.describe().T
desc['nunique'] = test.nunique()
desc['%unique'] = desc['nunique'] / len(test) * 100
desc['null'] = test.isna().sum()
desc['type'] = test.dtypes
desc.head(60) | code |
128048859/cell_9 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
original = pd.read_csv('/kaggle/input/originall/dataa.csv')
test.head() | code |
128048859/cell_20 | [
"text_html_output_1.png"
] | import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
original = pd.read_csv('/kaggle/input/originall/dataa.csv')
original = original.rename(columns={'Row': 'id'})
desc = train.describe().T
desc['nunique'] = train.nunique()
desc['%unique'] = desc['nunique'] / len(train) * 100
desc['null'] = train.isna().sum()
desc['type'] = train.dtypes
desc.head(60)
desc = test.describe().T
desc['nunique'] = test.nunique()
desc['%unique'] = desc['nunique'] / len(test) * 100
desc['null'] = test.isna().sum()
desc['type'] = test.dtypes
desc.head(60)
import math
train_data=train.drop(columns=['id','yield'],axis=1)
test_data=test.drop(columns='id',axis=1)
original_data=original.drop(columns=['yield','id'],axis=1)
numerical_columns = train_data.select_dtypes(include=['float64', 'int64']).columns.tolist()
# calculate number of rows and columns needed for subplots
num_plots = len(numerical_columns)
num_rows = math.ceil(num_plots/3)
num_cols = min(num_plots, 3)
# create subplots
fig, axes = plt.subplots(nrows=num_rows, ncols=num_cols, figsize=(12, 4*num_rows))
# loop over columns and plot distribution
plot_idx = 0
for i in range(num_rows):
for j in range(num_cols):
if plot_idx >= num_plots:
break
if (train[numerical_columns[plot_idx]].count() > 0) and (test[numerical_columns[plot_idx]].count() > 0):
sns.kdeplot(train_data[numerical_columns[plot_idx]], ax=axes[i][j], color='red', label='train')
sns.kdeplot(test_data[numerical_columns[plot_idx]], ax=axes[i][j], color='green', label='test')
sns.kdeplot(original_data[numerical_columns[plot_idx]], ax=axes[i][j], color='yellow', label='original')
axes[i][j].set_xlabel(numerical_columns[plot_idx])
axes[i][j].legend()
plot_idx += 1
else:
# empty plot, no data to plot
axes[i][j].axis('off')
plt.tight_layout()
plt.show()
def heatmap(dataset, title):
corr = dataset.corr()
fig, axes = plt.subplots(figsize=(20, 10))
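    # mask the upper triangle so each pairwise correlation appears only once (the matrix is symmetric)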
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
sns.heatmap(corr, linewidths=0.5, mask=mask, cmap='plasma', annot=True)
plt.title(title, fontsize=30)
plt.show()
heatmap(train_data, 'Train Dataset Correlation')
heatmap(original_data, 'original data correlation')
heatmap(test_data, 'test data correlation') | code |
128048859/cell_26 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import KFold, StratifiedKFold, train_test_split, GridSearchCV
from xgboost import XGBRegressor
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
original = pd.read_csv('/kaggle/input/originall/dataa.csv')
original = original.rename(columns={'Row': 'id'})
desc = train.describe().T
desc['nunique'] = train.nunique()
desc['%unique'] = desc['nunique'] / len(train) * 100
desc['null'] = train.isna().sum()
desc['type'] = train.dtypes
desc.head(60)
desc = test.describe().T
desc['nunique'] = test.nunique()
desc['%unique'] = desc['nunique'] / len(test) * 100
desc['null'] = test.isna().sum()
desc['type'] = test.dtypes
desc.head(60)
final_train = pd.concat([train, original])
import math
train_data=train.drop(columns=['id','yield'],axis=1)
test_data=test.drop(columns='id',axis=1)
original_data=original.drop(columns=['yield','id'],axis=1)
numerical_columns = train_data.select_dtypes(include=['float64', 'int64']).columns.tolist()
# calculate number of rows and columns needed for subplots
num_plots = len(numerical_columns)
num_rows = math.ceil(num_plots/3)
num_cols = min(num_plots, 3)
# create subplots
fig, axes = plt.subplots(nrows=num_rows, ncols=num_cols, figsize=(12, 4*num_rows))
# loop over columns and plot distribution
plot_idx = 0
for i in range(num_rows):
for j in range(num_cols):
if plot_idx >= num_plots:
break
if (train[numerical_columns[plot_idx]].count() > 0) and (test[numerical_columns[plot_idx]].count() > 0):
sns.kdeplot(train_data[numerical_columns[plot_idx]], ax=axes[i][j], color='red', label='train')
sns.kdeplot(test_data[numerical_columns[plot_idx]], ax=axes[i][j], color='green', label='test')
sns.kdeplot(original_data[numerical_columns[plot_idx]], ax=axes[i][j], color='yellow', label='original')
axes[i][j].set_xlabel(numerical_columns[plot_idx])
axes[i][j].legend()
plot_idx += 1
else:
# empty plot, no data to plot
axes[i][j].axis('off')
plt.tight_layout()
plt.show()
def heatmap(dataset,title):
corr = dataset.corr()
fig, axes = plt.subplots(figsize=(20, 10))
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
sns.heatmap(corr, linewidths=.5, mask=mask, cmap='plasma', annot=True,)
plt.title(title, fontsize=30)
plt.show()
# plot_correlation_heatmap(original, 'Original Dataset Correlation')
heatmap(train_data, 'Train Dataset Correlation')
heatmap(original_data,'original data correlation')
heatmap(test_data,'test data correlation')
train.drop('id', axis=1, inplace=True)
X = train.drop('yield', axis=1)
Y = train['yield']
test.set_index('id', inplace=True)
from sklearn.metrics import mean_absolute_error
cv_scores = list()
importance_xgb = list()
preds = list()
for i in range(3):
skf = KFold(n_splits=3, random_state=1004, shuffle=True)
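    # note: recreating KFold with a fixed random_state inside the repeat loop produces identical splits on every repetition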
for train_ix, test_ix in skf.split(X, Y):
X_train, X_test = (X.iloc[train_ix], X.iloc[test_ix])
Y_train, Y_test = (Y.iloc[train_ix], Y.iloc[test_ix])
XGB_md = XGBRegressor(tree_method='gpu_hist', objective='reg:squarederror', colsample_bytree=0.8, gamma=0.8, learning_rate=0.01, max_depth=5, min_child_weight=10, n_estimators=1000, subsample=0.8).fit(X_train, Y_train)
importance_xgb.append(XGB_md.feature_importances_)
XGB_pred_1 = XGB_md.predict(X_test)
cv_scores.append(mean_absolute_error(Y_test, XGB_pred_1))
scores = np.mean(cv_scores)
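# the test-set predictions below come from the final fold's model only; averaging predictions across folds is a common alternative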
preds = XGB_md.predict(test)
preds_df = pd.DataFrame(preds, index=test.index, columns=['yield'])
preds_df['id'] = test.index
preds_df = preds_df[['id', 'yield']]
preds_df.to_csv('submission.csv', index=False)
preds_df | code |
128048859/cell_2 | [
"text_html_output_1.png"
] | import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
from sklearn.metrics import mean_squared_error
from sklearn import metrics
import warnings
warnings.filterwarnings('ignore')
import xgboost as xgb
from xgboost.callback import EarlyStopping
from sklearn import model_selection
from sklearn import metrics
from sklearn.model_selection import KFold, StratifiedKFold, train_test_split, GridSearchCV | code |
128048859/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
original = pd.read_csv('/kaggle/input/originall/dataa.csv')
train.info() | code |
128048859/cell_7 | [
"image_output_3.png",
"image_output_2.png",
"image_output_1.png"
] | import pandas as pd
train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
original = pd.read_csv('/kaggle/input/originall/dataa.csv')
original.head() | code |
128048859/cell_18 | [
"text_plain_output_1.png"
] | import math
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
original = pd.read_csv('/kaggle/input/originall/dataa.csv')
original = original.rename(columns={'Row': 'id'})
desc = train.describe().T
desc['nunique'] = train.nunique()
desc['%unique'] = desc['nunique'] / len(train) * 100
desc['null'] = train.isna().sum()
desc['type'] = train.dtypes
desc.head(60)
desc = test.describe().T
desc['nunique'] = test.nunique()
desc['%unique'] = desc['nunique'] / len(test) * 100
desc['null'] = test.isna().sum()
desc['type'] = test.dtypes
desc.head(60)
import math
train_data = train.drop(columns=['id', 'yield'], axis=1)
test_data = test.drop(columns='id', axis=1)
original_data = original.drop(columns=['yield', 'id'], axis=1)
numerical_columns = train_data.select_dtypes(include=['float64', 'int64']).columns.tolist()
num_plots = len(numerical_columns)
num_rows = math.ceil(num_plots / 3)
num_cols = min(num_plots, 3)
fig, axes = plt.subplots(nrows=num_rows, ncols=num_cols, figsize=(12, 4 * num_rows))
plot_idx = 0
for i in range(num_rows):
for j in range(num_cols):
if plot_idx >= num_plots:
break
if train[numerical_columns[plot_idx]].count() > 0 and test[numerical_columns[plot_idx]].count() > 0:
sns.kdeplot(train_data[numerical_columns[plot_idx]], ax=axes[i][j], color='red', label='train')
sns.kdeplot(test_data[numerical_columns[plot_idx]], ax=axes[i][j], color='green', label='test')
sns.kdeplot(original_data[numerical_columns[plot_idx]], ax=axes[i][j], color='yellow', label='original')
axes[i][j].set_xlabel(numerical_columns[plot_idx])
axes[i][j].legend()
plot_idx += 1
else:
axes[i][j].axis('off')
plt.tight_layout()
plt.show() | code |
128048859/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
original = pd.read_csv('/kaggle/input/originall/dataa.csv')
original = original.rename(columns={'Row': 'id'})
original.head() | code |
128048859/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
original = pd.read_csv('/kaggle/input/originall/dataa.csv')
desc = train.describe().T
desc['nunique'] = train.nunique()
desc['%unique'] = desc['nunique'] / len(train) * 100
desc['null'] = train.isna().sum()
desc['type'] = train.dtypes
desc.head(60)
desc = test.describe().T
desc['nunique'] = test.nunique()
desc['%unique'] = desc['nunique'] / len(test) * 100
desc['null'] = test.isna().sum()
desc['type'] = test.dtypes
desc.head(60)
sns.displot(train, x='yield', color='green') | code |
128048859/cell_24 | [
"text_plain_output_1.png"
] | from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import KFold, StratifiedKFold, train_test_split, GridSearchCV
from xgboost import XGBRegressor
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
original = pd.read_csv('/kaggle/input/originall/dataa.csv')
original = original.rename(columns={'Row': 'id'})
desc = train.describe().T
desc['nunique'] = train.nunique()
desc['%unique'] = desc['nunique'] / len(train) * 100
desc['null'] = train.isna().sum()
desc['type'] = train.dtypes
desc.head(60)
desc = test.describe().T
desc['nunique'] = test.nunique()
desc['%unique'] = desc['nunique'] / len(test) * 100
desc['null'] = test.isna().sum()
desc['type'] = test.dtypes
desc.head(60)
import math
train_data=train.drop(columns=['id','yield'],axis=1)
test_data=test.drop(columns='id',axis=1)
original_data=original.drop(columns=['yield','id'],axis=1)
numerical_columns = train_data.select_dtypes(include=['float64', 'int64']).columns.tolist()
# calculate number of rows and columns needed for subplots
num_plots = len(numerical_columns)
num_rows = math.ceil(num_plots/3)
num_cols = min(num_plots, 3)
# create subplots
fig, axes = plt.subplots(nrows=num_rows, ncols=num_cols, figsize=(12, 4*num_rows))
# loop over columns and plot distribution
plot_idx = 0
for i in range(num_rows):
for j in range(num_cols):
if plot_idx >= num_plots:
break
if (train[numerical_columns[plot_idx]].count() > 0) and (test[numerical_columns[plot_idx]].count() > 0):
sns.kdeplot(train_data[numerical_columns[plot_idx]], ax=axes[i][j], color='red', label='train')
sns.kdeplot(test_data[numerical_columns[plot_idx]], ax=axes[i][j], color='green', label='test')
sns.kdeplot(original_data[numerical_columns[plot_idx]], ax=axes[i][j], color='yellow', label='original')
axes[i][j].set_xlabel(numerical_columns[plot_idx])
axes[i][j].legend()
plot_idx += 1
else:
# empty plot, no data to plot
axes[i][j].axis('off')
plt.tight_layout()
plt.show()
def heatmap(dataset,title):
corr = dataset.corr()
fig, axes = plt.subplots(figsize=(20, 10))
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True
sns.heatmap(corr, linewidths=.5, mask=mask, cmap='plasma', annot=True,)
plt.title(title, fontsize=30)
plt.show()
# plot_correlation_heatmap(original, 'Original Dataset Correlation')
heatmap(train_data, 'Train Dataset Correlation')
heatmap(original_data,'original data correlation')
heatmap(test_data,'test data correlation')
train.drop('id', axis=1, inplace=True)
X = train.drop('yield', axis=1)
Y = train['yield']
from sklearn.metrics import mean_absolute_error
cv_scores = list()
importance_xgb = list()
preds = list()
for i in range(3):
print(f'{i} fold cv begin')
skf = KFold(n_splits=3, random_state=1004, shuffle=True)
for train_ix, test_ix in skf.split(X, Y):
X_train, X_test = (X.iloc[train_ix], X.iloc[test_ix])
Y_train, Y_test = (Y.iloc[train_ix], Y.iloc[test_ix])
XGB_md = XGBRegressor(tree_method='gpu_hist', objective='reg:squarederror', colsample_bytree=0.8, gamma=0.8, learning_rate=0.01, max_depth=5, min_child_weight=10, n_estimators=1000, subsample=0.8).fit(X_train, Y_train)
importance_xgb.append(XGB_md.feature_importances_)
XGB_pred_1 = XGB_md.predict(X_test)
cv_scores.append(mean_absolute_error(Y_test, XGB_pred_1))
print(f'{i} fold cv done')
scores = np.mean(cv_scores)
print('The average MAE over 3 folds (repeated 3 times) is:', scores) | code |
128048859/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
original = pd.read_csv('/kaggle/input/originall/dataa.csv')
desc = train.describe().T
desc['nunique'] = train.nunique()
desc['%unique'] = desc['nunique'] / len(train) * 100
desc['null'] = train.isna().sum()
desc['type'] = train.dtypes
desc.head(60)
desc = test.describe().T
desc['nunique'] = test.nunique()
desc['%unique'] = desc['nunique'] / len(test) * 100
desc['null'] = test.isna().sum()
desc['type'] = test.dtypes
desc.head(60)
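# the duplicate check compares feature columns only: list(train)[0:-1] drops the last column (the target)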
print(f'There are {train.duplicated(subset=list(train)[0:-1]).value_counts()[0]} non-duplicate rows out of {len(train)} rows in the train dataset')
print(f'There are {test.duplicated().value_counts()[0]} non-duplicate rows out of {len(test)} rows in the test dataset') | code |
128048859/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
original = pd.read_csv('/kaggle/input/originall/dataa.csv')
train['yield'].value_counts() | code |
128048859/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
original = pd.read_csv('/kaggle/input/originall/dataa.csv')
desc = train.describe().T
desc['nunique'] = train.nunique()
desc['%unique'] = desc['nunique'] / len(train) * 100
desc['null'] = train.isna().sum()
desc['type'] = train.dtypes
desc.head(60) | code |
128048859/cell_5 | [
"image_output_1.png"
] | import pandas as pd
train = pd.read_csv('/kaggle/input/playground-series-s3e14/train.csv')
test = pd.read_csv('/kaggle/input/playground-series-s3e14/test.csv')
original = pd.read_csv('/kaggle/input/originall/dataa.csv')
train.head() | code |
105186901/cell_4 | [
"text_plain_output_1.png"
] | import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
base_image_dir = os.path.join('..', 'input', 'diabetic-retinopathy-detection')
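# pandas can read the zipped CSV directly, inferring compression from the .zip extension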
retina_df = pd.read_csv(os.path.join(base_image_dir, 'trainLabels.csv.zip'))
retina_df
train = retina_df['image']
train | code |
105186901/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
105186901/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True)
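# flow_from_directory expects an extracted directory with one subfolder per class; pointing it at a split zip archive (.zip.001) fails, as this cell's stderr output shows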
x_train = train_datagen.flow_from_directory('../input/diabetic-retinopathy-detection/train.zip.001', batch_size=64) | code |
105186901/cell_3 | [
"text_html_output_1.png"
] | import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
base_image_dir = os.path.join('..', 'input', 'diabetic-retinopathy-detection')
retina_df = pd.read_csv(os.path.join(base_image_dir, 'trainLabels.csv.zip'))
retina_df | code |
105186901/cell_5 | [
"text_plain_output_1.png"
] | import os
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os
base_image_dir = os.path.join('..', 'input', 'diabetic-retinopathy-detection')
retina_df = pd.read_csv(os.path.join(base_image_dir, 'trainLabels.csv.zip'))
retina_df
test = retina_df['level']
test | code |
104131103/cell_6 | [
"image_output_11.png",
"image_output_24.png",
"image_output_25.png",
"image_output_17.png",
"image_output_30.png",
"image_output_14.png",
"image_output_28.png",
"image_output_23.png",
"image_output_13.png",
"image_output_5.png",
"image_output_18.png",
"image_output_21.png",
"image_output_7.png",
"image_output_20.png",
"image_output_4.png",
"image_output_8.png",
"image_output_16.png",
"image_output_27.png",
"image_output_6.png",
"image_output_12.png",
"image_output_22.png",
"image_output_3.png",
"image_output_29.png",
"image_output_2.png",
"image_output_1.png",
"image_output_10.png",
"image_output_15.png",
"image_output_9.png",
"image_output_19.png",
"image_output_26.png"
] | from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
df = pd.read_csv('../input/california-housing-value/housing.csv')
X = df.drop(columns=['median_house_value'])
y = df['median_house_value']
def preprocess_features(X):
total_bedrooms_mean = round(X['total_bedrooms'].mean(), 3)
X = X.fillna(total_bedrooms_mean)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
le.fit(X['ocean_proximity'])
transformed = le.transform(X['ocean_proximity'])
X['ocean_proximity'] = transformed
from sklearn.preprocessing import StandardScaler
scale = StandardScaler()
X = scale.fit_transform(X)
return X
def run_reg(regressor, x_train, x_test, y_train, y_test):
regressor.fit(x_train, y_train)
prediction = regressor.predict(x_test)
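    # clamp predictions to the target's observed range (California housing values are capped at 500k, with a floor near 15k)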
prediction = np.clip(prediction, 15000, 500000)
from sklearn.metrics import mean_absolute_error
mae = mean_absolute_error(y_test, prediction)
import matplotlib.pyplot as plt
return mae
from sklearn.svm import SVR
# the train/test split was defined in an earlier cell of the original notebook; reconstruct it here so the loop below runs (random_state is an arbitrary choice)
X = preprocess_features(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
mae_array = np.zeros((5, 6))
cost = [1000, 10000, 100000, 1000000, 10000000]
epsilon = [2000, 5000, 10000, 20000, 50000, 100000]
row = 0
column = 0
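# grid-search MAE over C (regularization strength) and epsilon (width of the insensitive tube)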
for i in cost:
column = 0
for j in epsilon:
svr_reg = SVR(kernel='rbf', C=i, epsilon=j)
mae = run_reg(svr_reg, X_train, X_test, y_train, y_test)
mae_array[row][column] = mae
column = column + 1
row = row + 1 | code |
104131103/cell_8 | [
"image_output_1.png"
] | from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
df = pd.read_csv('../input/california-housing-value/housing.csv')
X = df.drop(columns=['median_house_value'])
y = df['median_house_value']
def preprocess_features(X):
total_bedrooms_mean = round(X['total_bedrooms'].mean(), 3)
X = X.fillna(total_bedrooms_mean)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
le.fit(X['ocean_proximity'])
transformed = le.transform(X['ocean_proximity'])
X['ocean_proximity'] = transformed
from sklearn.preprocessing import StandardScaler
scale = StandardScaler()
X = scale.fit_transform(X)
return X
def run_reg(regressor, x_train, x_test, y_train, y_test):
regressor.fit(x_train, y_train)
prediction = regressor.predict(x_test)
prediction = np.clip(prediction, 15000, 500000)
from sklearn.metrics import mean_absolute_error
mae = mean_absolute_error(y_test, prediction)
import matplotlib.pyplot as plt
return mae
from sklearn.svm import SVR
# the train/test split was defined in an earlier cell of the original notebook; reconstruct it here so the loop below runs (random_state is an arbitrary choice)
X = preprocess_features(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
mae_array = np.zeros((5, 6))
cost = [1000, 10000, 100000, 1000000, 10000000]
epsilon = [2000, 5000, 10000, 20000, 50000, 100000]
row = 0
column = 0
for i in cost:
column = 0
for j in epsilon:
svr_reg = SVR(kernel='rbf', C=i, epsilon=j)
mae = run_reg(svr_reg, X_train, X_test, y_train, y_test)
mae_array[row][column] = mae
column = column + 1
row = row + 1
import matplotlib.pyplot as plt
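# filled contour of MAE over the (epsilon, cost) grid; log scales match the order-of-magnitude spacing of both axes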
plt.contourf([2000, 5000, 10000, 20000, 50000, 100000], [1000, 10000, 100000, 1000000, 10000000], mae_array, 100, cmap='pink')
plt.yscale('log')
plt.xscale('log')
plt.colorbar()
plt.xlabel('Epsilon')
plt.ylabel('Cost')
plt.show() | code |
128020186/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
1008497/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # plotting
data = pd.read_csv('../input/Iris.csv')
data.drop('Id', axis=1).corr()
feature_columns = ['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']
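# the four numeric measurements are the predictors; Species is the target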
X = data[feature_columns]
y = data['Species']
y.head() | code |
1008497/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # plotting
data = pd.read_csv('../input/Iris.csv')
data.drop('Id', axis=1).corr()
sns.violinplot(x='Species', y='PetalWidthCm', data=data) | code |
1008497/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # plotting
data = pd.read_csv('../input/Iris.csv')
sns.pairplot(data.drop('Id', axis=1), hue='Species', height=2)  # 'size' was renamed to 'height' in newer seaborn | code |
1008497/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # plotting
data = pd.read_csv('../input/Iris.csv')
data.drop('Id', axis=1).corr() | code |
1008497/cell_2 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from subprocess import check_output
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
# ^^^ DEFAULT SETUP ABOVE HERE. EVERYTHING BELOW MUST BE ADDED
import seaborn as sns # plotting
from sklearn import tree # classification tree, see http://scikit-learn.org/stable/modules/tree.html | code |
1008497/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # plotting
data = pd.read_csv('../input/Iris.csv')
data.drop('Id', axis=1).corr()
sns.violinplot(x='Species', y='PetalLengthCm', data=data) | code |
1008497/cell_16 | [
"text_html_output_1.png"
] | from IPython.display import Image
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pydotplus
import seaborn as sns # plotting
from sklearn import tree # classification tree (needed below for DecisionTreeClassifier and export_graphviz)
data = pd.read_csv('../input/Iris.csv')
data.drop('Id', axis=1).corr()
feature_columns = ['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']
X = data[feature_columns]
y = data['Species']
X.corr()
clf = tree.DecisionTreeClassifier()
clf = clf.fit(X, y)
with open('iris.dot', 'w') as f:
    tree.export_graphviz(clf, out_file=f)  # writes the dot file; export_graphviz returns None when out_file is given
import os
os.unlink('iris.dot')
import pydotplus
dot_data = tree.export_graphviz(clf, out_file=None)
graph = pydotplus.graph_from_dot_data(dot_data)
graph.write_pdf('iris.pdf')
from IPython.display import Image
dot_data = tree.export_graphviz(clf, out_file=None, feature_names=X.columns, class_names=['setosa', 'versicolor', 'virginica'], filled=True, rounded=True, special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data)
Image(graph.create_png()) | code |
1008497/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('../input/Iris.csv')
data.head() | code |
1008497/cell_14 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # plotting
data = pd.read_csv('../input/Iris.csv')
data.drop('Id', axis=1).corr()
feature_columns = ['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']
X = data[feature_columns]
y = data['Species']
X.corr() | code |
1008497/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # plotting
data = pd.read_csv('../input/Iris.csv')
data.drop('Id', axis=1).corr()
feature_columns = ['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']
X = data[feature_columns]
y = data['Species']
X.head() | code |
122256289/cell_21 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
# create blank figure
fig, ax = plt.subplots()
plt.show()
# resize figure
fig, ax = plt.subplots(figsize=(10,10))
plt.show()
# set axis with xlim and ylim, title, labels
fig,ax = plt.subplots()
ax.set(xlim=[0.5, 4.5], ylim=[-2, 8],
title='An Example Axes',
ylabel='Y-Axis', xlabel='X-Axis')
plt.show()
# create multiple subplots along rows
fig, ax = plt.subplots(nrows=2)
# create multiple subplots along columns
fig, ax = plt.subplots(ncols=2)
# create multiple subplots along rows and columns
fig, ax = plt.subplots(nrows=2,ncols=2)
plt.show()
# create multiple subplots without overlapping
fig, ax = plt.subplots(nrows=2,ncols=2)
plt.tight_layout() # avoid overlapping
plt.show()
# define the index of subplots
fig, axes = plt.subplots(nrows=2, ncols=2)
axes[0,0].set(title='Upper Left [0,0]')
axes[0,1].set(title='Upper Right [0,1]')
axes[1,0].set(title='Lower Left [1,0]')
axes[1,1].set(title='Lower Right [1,1]')
plt.tight_layout()
plt.show()
x = [10, 15, 20, 25, 30, 35, 40]
y = [20, 24, 28, 32, 36, 40, 44]
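# 2x2 grid: line and bar overlaid in the upper left, scatter upper right, vertical and horizontal bars below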
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(10, 10))
ax[0, 0].plot(x, y)
ax[0, 0].bar(x, y)
ax[0, 1].scatter(x, y)
ax[1, 0].bar(x, y)
ax[1, 1].barh(x, y)
plt.show() | code |
122256289/cell_25 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
# create blank figure
fig, ax = plt.subplots()
plt.show()
# resize figure
fig, ax = plt.subplots(figsize=(10,10))
plt.show()
# set axis with xlim and ylim, title, labels
fig,ax = plt.subplots()
ax.set(xlim=[0.5, 4.5], ylim=[-2, 8],
title='An Example Axes',
ylabel='Y-Axis', xlabel='X-Axis')
plt.show()
# create multiple subplots along rows
fig, ax = plt.subplots(nrows=2)
# create multiple subplots along columns
fig, ax = plt.subplots(ncols=2)
# create multiple subplots along rows and columns
fig, ax = plt.subplots(nrows=2,ncols=2)
plt.show()
# create multiple subplots without overlapping
fig, ax = plt.subplots(nrows=2,ncols=2)
plt.tight_layout() # avoid overlapping
plt.show()
# define the index of subplots
fig, axes = plt.subplots(nrows=2, ncols=2)
axes[0,0].set(title='Upper Left [0,0]')
axes[0,1].set(title='Upper Right [0,1]')
axes[1,0].set(title='Lower Left [1,0]')
axes[1,1].set(title='Lower Right [1,1]')
plt.tight_layout()
plt.show()
x = [10, 15, 20, 25, 30, 35, 40]
y = [20, 24, 28, 32, 36, 40, 44]
fig, ax = plt.subplots(nrows=2,ncols=2,figsize=(10,10))
ax[0,0].plot(x,y)
ax[0,0].bar(x,y) # can create multiple plots in same subplot
ax[0,1].scatter(x,y)
ax[1,0].bar(x,y)
ax[1,1].barh(x,y)
plt.show()
# line plot with artist element
fig, ax = plt.subplots()
ax.plot(x,y, color='red',linewidth=2,
marker='o', linestyle='--',
label = 'sales')
ax.set(xlim=[0, 50], ylim=[0, 60],
title='Line Chart',
ylabel='Y-Axis', xlabel='X-Axis')
ax.legend()
plt.show()
fig, ax = plt.subplots()
ax.bar(x, y, color='green', label='sales')
ax.set(xlim=[0, 50], ylim=[0, 60], title='Bar Chart', ylabel='Y-Axis', xlabel='X-Axis')
ax.legend()
plt.show() | code |
122256289/cell_23 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
# create blank figure
fig, ax = plt.subplots()
plt.show()
# resize figure
fig, ax = plt.subplots(figsize=(10,10))
plt.show()
# set axis with xlim and ylim, title, labels
fig,ax = plt.subplots()
ax.set(xlim=[0.5, 4.5], ylim=[-2, 8],
title='An Example Axes',
ylabel='Y-Axis', xlabel='X-Axis')
plt.show()
# create multiple subplots along rows
fig, ax = plt.subplots(nrows=2)
# create multiple subplots along columns
fig, ax = plt.subplots(ncols=2)
# create multiple subplots along rows and columns
fig, ax = plt.subplots(nrows=2,ncols=2)
plt.show()
# create multiple subplots without overlapping
fig, ax = plt.subplots(nrows=2,ncols=2)
plt.tight_layout() # avoid overlapping
plt.show()
# define the index of subplots
fig, axes = plt.subplots(nrows=2, ncols=2)
axes[0,0].set(title='Upper Left [0,0]')
axes[0,1].set(title='Upper Right [0,1]')
axes[1,0].set(title='Lower Left [1,0]')
axes[1,1].set(title='Lower Right [1,1]')
plt.tight_layout()
plt.show()
x = [10, 15, 20, 25, 30, 35, 40]
y = [20, 24, 28, 32, 36, 40, 44]
fig, ax = plt.subplots(nrows=2,ncols=2,figsize=(10,10))
ax[0,0].plot(x,y)
ax[0,0].bar(x,y) # can create multiple plots in same subplot
ax[0,1].scatter(x,y)
ax[1,0].bar(x,y)
ax[1,1].barh(x,y)
plt.show()
fig, ax = plt.subplots()
ax.plot(x, y, color='red', linewidth=2, marker='o', linestyle='--', label='sales')
ax.set(xlim=[0, 50], ylim=[0, 60], title='Line Chart', ylabel='Y-Axis', xlabel='X-Axis')
ax.legend()
plt.show() | code |
122256289/cell_6 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
fig, ax = plt.subplots()
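# subplots() returns a Figure and a single Axes; with nothing plotted, show() renders an empty frame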
plt.show() | code |
122256289/cell_18 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
# create blank figure
fig, ax = plt.subplots()
plt.show()
# resize figure
fig, ax = plt.subplots(figsize=(10,10))
plt.show()
# set axis with xlim and ylim, title, labels
fig,ax = plt.subplots()
ax.set(xlim=[0.5, 4.5], ylim=[-2, 8],
title='An Example Axes',
ylabel='Y-Axis', xlabel='X-Axis')
plt.show()
# create multiple subplots along rows
fig, ax = plt.subplots(nrows=2)
# create multiple subplots along columns
fig, ax = plt.subplots(ncols=2)
# create multiple subplots along rows and columns
fig, ax = plt.subplots(nrows=2,ncols=2)
plt.show()
# create multiple subplots without overlapping
fig, ax = plt.subplots(nrows=2,ncols=2)
plt.tight_layout() # avoid overlapping
plt.show()
fig, axes = plt.subplots(nrows=2, ncols=2)
axes[0, 0].set(title='Upper Left [0,0]')
axes[0, 1].set(title='Upper Right [0,1]')
axes[1, 0].set(title='Lower Left [1,0]')
axes[1, 1].set(title='Lower Right [1,1]')
plt.tight_layout()
plt.show() | code |
122256289/cell_8 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
# create blank figure
fig, ax = plt.subplots()
plt.show()
fig, ax = plt.subplots(figsize=(10, 10))
plt.show() | code |
122256289/cell_15 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
# create blank figure
fig, ax = plt.subplots()
plt.show()
# resize figure
fig, ax = plt.subplots(figsize=(10,10))
plt.show()
# set axis with xlim and ylim, title, labels
fig,ax = plt.subplots()
ax.set(xlim=[0.5, 4.5], ylim=[-2, 8],
title='An Example Axes',
ylabel='Y-Axis', xlabel='X-Axis')
plt.show()
# create multiple subplots along rows
fig, ax = plt.subplots(nrows=2)
fig, ax = plt.subplots(ncols=2) | code |
122256289/cell_16 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
# create blank figure
fig, ax = plt.subplots()
plt.show()
# resize figure
fig, ax = plt.subplots(figsize=(10,10))
plt.show()
# set axis with xlim and ylim, title, labels
fig,ax = plt.subplots()
ax.set(xlim=[0.5, 4.5], ylim=[-2, 8],
title='An Example Axes',
ylabel='Y-Axis', xlabel='X-Axis')
plt.show()
# create multiple subplots along rows
fig, ax = plt.subplots(nrows=2)
# create multiple subplots along columns
fig, ax = plt.subplots(ncols=2)
fig, ax = plt.subplots(nrows=2, ncols=2)
plt.show() | code |
122256289/cell_17 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
# create blank figure
fig, ax = plt.subplots()
plt.show()
# resize figure
fig, ax = plt.subplots(figsize=(10,10))
plt.show()
# set axis with xlim and ylim, title, labels
fig,ax = plt.subplots()
ax.set(xlim=[0.5, 4.5], ylim=[-2, 8],
title='An Example Axes',
ylabel='Y-Axis', xlabel='X-Axis')
plt.show()
# create multiple subplots along rows
fig, ax = plt.subplots(nrows=2)
# create multiple subplots along columns
fig, ax = plt.subplots(ncols=2)
# create multiple subplots along rows and columns
fig, ax = plt.subplots(nrows=2,ncols=2)
plt.show()
fig, ax = plt.subplots(nrows=2, ncols=2)
plt.tight_layout()
plt.show() | code |
122256289/cell_14 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
# create blank figure
fig, ax = plt.subplots()
plt.show()
# resize figure
fig, ax = plt.subplots(figsize=(10,10))
plt.show()
# set axis with xlim and ylim, title, labels
fig,ax = plt.subplots()
ax.set(xlim=[0.5, 4.5], ylim=[-2, 8],
title='An Example Axes',
ylabel='Y-Axis', xlabel='X-Axis')
plt.show()
fig, ax = plt.subplots(nrows=2) | code |
122256289/cell_10 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
# create blank figure
fig, ax = plt.subplots()
plt.show()
# resize figure
fig, ax = plt.subplots(figsize=(10,10))
plt.show()
fig, ax = plt.subplots()
ax.set(xlim=[0.5, 4.5], ylim=[-2, 8], title='An Example Axes', ylabel='Y-Axis', xlabel='X-Axis')
plt.show() | code |
122256289/cell_12 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
# create blank figure
fig, ax = plt.subplots()
plt.show()
# resize figure
fig, ax = plt.subplots(figsize=(10,10))
plt.show()
# set axis with xlim and ylim, title, labels
fig,ax = plt.subplots()
ax.set(xlim=[0.5, 4.5], ylim=[-2, 8],
title='An Example Axes',
ylabel='Y-Axis', xlabel='X-Axis')
plt.show()
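# note: in notebook backends plt.show() finalizes the figure, so savefig() called after it may write a blank image; call savefig() first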
plt.savefig('chart1.png')
plt.savefig('chart2.png', transparent=True) | code |
32067582/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
df.shape
df.isnull().sum()
df.target.value_counts()
print('Number of unique keywords : ', df.keyword.nunique()) | code |
32067582/cell_9 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
df.shape | code |
32067582/cell_20 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
df.shape
df.isnull().sum()
df.target.value_counts()
df.keyword.value_counts()
df.drop('location', axis=1, inplace=True)
df.dropna(inplace=True)
df.shape
df.isnull().sum() | code |
32067582/cell_55 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
import nltk
import pandas as pd
import re
df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
df.shape
df.isnull().sum()
df.target.value_counts()
df.keyword.value_counts()
df.drop('location', axis=1, inplace=True)
df.dropna(inplace=True)
df.shape
df.isnull().sum()
df.reset_index(drop=True, inplace=True)
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
def Lower(text):
return text.lower()
def Tokenisation(text):
return nltk.word_tokenize(text)
Stpwrd_List = stopwords.words('english')
def StopWordsAlphaText(tokenized_text):
filtred_text = []
for word in tokenized_text:
word = word.strip('!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~')
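        # keep only tokens that start with a letter, contain just alphanumerics, and are not stopwords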
val = re.search('^[a-zA-Z][a-zA-Z0-9]*$', word)
if word not in Stpwrd_List and val is not None:
filtred_text.append(word)
return filtred_text
tag_dict = {'J': wordnet.ADJ, 'N': wordnet.NOUN, 'V': wordnet.VERB, 'R': wordnet.ADV}
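# map the first letter of the Penn Treebank tag (from nltk.pos_tag) to a WordNet POS constant, defaulting to noun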
def get_wordnet_pos(word):
tag = nltk.pos_tag([word])[0][1][0].upper()
return tag_dict.get(tag, wordnet.NOUN)
lemmatizer = WordNetLemmatizer()
def Lemmetizer(tokens):
lemmetized_text = []
for word in tokens:
word = lemmatizer.lemmatize(word, get_wordnet_pos(word))
lemmetized_text.append(word)
return lemmetized_text
df.text = df.text.apply(Lower)
df.text = df.text.apply(Tokenisation)
df.text = df.text.apply(StopWordsAlphaText)
df.text = df.text.apply(Lemmetizer)
df.head() | code |
32067582/cell_29 | [
"text_plain_output_1.png"
] | import nltk
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet') | code |
32067582/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
df.shape
df.isnull().sum()
df.target.value_counts() | code |
32067582/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
df.shape
df.isnull().sum()
df.target.value_counts()
df.keyword.value_counts()
df.drop('location', axis=1, inplace=True)
df.dropna(inplace=True)
df.shape | code |
32067582/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
df.head() | code |
32067582/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
df.shape
df.isnull().sum()
df.target.value_counts()
df.keyword.value_counts()
print('Number of unique locations :', df.location.nunique()) | code |
32067582/cell_38 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from nltk.corpus import stopwords
import nltk
import re
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
def Lower(text):
return text.lower()
def Tokenisation(text):
return nltk.word_tokenize(text)
test = Tokenisation('Hello there. How! are you ? this super notebook is about nlp')
Stpwrd_List = stopwords.words('english')
def StopWordsAlphaText(tokenized_text):
filtred_text = []
for word in tokenized_text:
word = word.strip('!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~')
val = re.search('^[a-zA-Z][a-zA-Z0-9]*$', word)
if word not in Stpwrd_List and val is not None:
filtred_text.append(word)
return filtred_text
StopWordsAlphaText(test) | code |
32067582/cell_24 | [
"text_plain_output_1.png"
] | import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
df.shape
df.isnull().sum()
df.target.value_counts()
df.keyword.value_counts()
df.drop('location', axis=1, inplace=True)
df.dropna(inplace=True)
df.shape
df.isnull().sum()
df.reset_index(drop=True, inplace=True)
sns.countplot(data=df, x='target') | code |
32067582/cell_14 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
df.shape
df.isnull().sum()
df.target.value_counts()
df.keyword.value_counts() | code |
32067582/cell_53 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
import nltk
import pandas as pd
import re
df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
df.shape
df.isnull().sum()
df.target.value_counts()
df.keyword.value_counts()
df.drop('location', axis=1, inplace=True)
df.dropna(inplace=True)
df.shape
df.isnull().sum()
df.reset_index(drop=True, inplace=True)
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
def Lower(text):
return text.lower()
def Tokenisation(text):
return nltk.word_tokenize(text)
Stpwrd_List = stopwords.words('english')
def StopWordsAlphaText(tokenized_text):
filtred_text = []
for word in tokenized_text:
word = word.strip('!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~')
val = re.search('^[a-zA-Z][a-zA-Z0-9]*$', word)
if word not in Stpwrd_List and val is not None:
filtred_text.append(word)
return filtred_text
tag_dict = {'J': wordnet.ADJ, 'N': wordnet.NOUN, 'V': wordnet.VERB, 'R': wordnet.ADV}
def get_wordnet_pos(word):
tag = nltk.pos_tag([word])[0][1][0].upper()
return tag_dict.get(tag, wordnet.NOUN)
lemmatizer = WordNetLemmatizer()
def Lemmetizer(tokens):
lemmetized_text = []
for word in tokens:
word = lemmatizer.lemmatize(word, get_wordnet_pos(word))
lemmetized_text.append(word)
return lemmetized_text
df.text = df.text.apply(Lower)
df.text = df.text.apply(Tokenisation)
df.text = df.text.apply(StopWordsAlphaText)
df.text = df.text.apply(Lemmetizer)
df.head() | code |
32067582/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/nlp-getting-started/train.csv')
df.shape
df.isnull().sum() | code |
105207855/cell_34 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.Month = pd.to_datetime(df.Month)
df = df.set_index('Month')
rolling_mean = df.rolling(window=12).mean()
rolling_std = df.rolling(window=12).std()
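# 12-month rolling mean and std give a quick visual stationarity check across the annual cycle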
plt.xlabel('Dates')
plt.ylabel('Total Production')
train_data = df[:len(df) - 12]
test_data = df[len(df) - 12:]
test_data | code |
105207855/cell_30 | [
"text_plain_output_1.png"
] | from statsmodels.tools.eval_measures import rmse
from statsmodels.tsa.statespace.sarimax import SARIMAX
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.Month = pd.to_datetime(df.Month)
df = df.set_index('Month')
rolling_mean = df.rolling(window=12).mean()
rolling_std = df.rolling(window=12).std()
plt.xlabel('Dates')
plt.ylabel('Total Production')
train_data = df[:len(df) - 12]
test_data = df[len(df) - 12:]
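# SARIMAX order=(p, d, q) with seasonal_order=(P, D, Q, s); s=12 captures yearly seasonality in monthly data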
arima_model = SARIMAX(train_data['Monthly beer production'], order=(2, 1, 1), seasonal_order=(4, 0, 3, 12))
arima_result = arima_model.fit()
arima_result.summary()
arima_pred = arima_result.predict(start=len(train_data), end=len(df) - 1, typ='levels').rename('ARIMA Predictions')
arima_pred
arima_rmse_error = rmse(test_data['Monthly beer production'], arima_pred)
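# statsmodels' rmse() returns the root-mean-squared error; squaring it recovers the MSE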
arima_mse_error = arima_rmse_error ** 2
mean_value = df['Monthly beer production'].mean()
print(f'MSE Error: {arima_mse_error}\nRMSE Error: {arima_rmse_error}\nMean: {mean_value}') | code |
105207855/cell_33 | [
"text_plain_output_1.png"
] | from statsmodels.tsa.statespace.sarimax import SARIMAX
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.Month = pd.to_datetime(df.Month)
df = df.set_index('Month')
rolling_mean = df.rolling(window=12).mean()
rolling_std = df.rolling(window=12).std()
plt.xlabel('Dates')
plt.ylabel('Total Production')
train_data = df[:len(df) - 12]
test_data = df[len(df) - 12:]
arima_model = SARIMAX(train_data['Monthly beer production'], order=(2, 1, 1), seasonal_order=(4, 0, 3, 12))
arima_result = arima_model.fit()
arima_result.summary()
arima_pred = arima_result.predict(start=len(train_data), end=len(df) - 1, typ='levels').rename('ARIMA Predictions')
arima_pred
test_data = test_data.copy()  # copy first: assigning into a slice of df would raise SettingWithCopyWarning
test_data['ARIMA_Predictions'] = arima_pred | code |
105207855/cell_6 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
from pandas.plotting import autocorrelation_plot
from pandas import DataFrame
from pandas import concat
import numpy as np
from math import sqrt
from sklearn.metrics import mean_squared_error
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import acf
from statsmodels.tsa.stattools import pacf
from statsmodels.tsa.arima_model import ARIMA
from scipy.stats import boxcox
import seaborn as sns
sns.set_style('whitegrid')
import matplotlib.pyplot as plt
from matplotlib.pylab import rcParams
from matplotlib import colors
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
105207855/cell_26 | [
"image_output_1.png"
] | from statsmodels.tsa.statespace.sarimax import SARIMAX
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.Month = pd.to_datetime(df.Month)
df = df.set_index('Month')
rolling_mean = df.rolling(window=12).mean()
rolling_std = df.rolling(window=12).std()
plt.xlabel('Dates')
plt.ylabel('Total Production')
train_data = df[:len(df) - 12]
test_data = df[len(df) - 12:]
arima_model = SARIMAX(train_data['Monthly beer production'], order=(2, 1, 1), seasonal_order=(4, 0, 3, 12))
arima_result = arima_model.fit()
arima_result.summary()
arima_pred = arima_result.predict(start=len(train_data), end=len(df) - 1, typ='levels').rename('ARIMA Predictions')
arima_pred | code |
105207855/cell_18 | [
"image_output_1.png"
] | from matplotlib.pylab import rcParams
from statsmodels.tsa.seasonal import seasonal_decompose
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.Month = pd.to_datetime(df.Month)
df = df.set_index('Month')
rolling_mean = df.rolling(window=12).mean()
rolling_std = df.rolling(window=12).std()
plt.xlabel('Dates')
plt.ylabel('Total Production')
rcParams['figure.figsize'] = (12, 8)
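# additive decomposition assumes series = trend + seasonal + residual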
a = seasonal_decompose(df['Monthly beer production'], model='add')
plt.figure(figsize=(25, 5))
a = seasonal_decompose(df['Monthly beer production'], model='add')
plt.subplot(1, 3, 1)
plt.plot(a.seasonal)
plt.subplot(1, 3, 2)
plt.plot(a.trend)
plt.subplot(1, 3, 3)
plt.plot(a.resid)
plt.show() | code |
105207855/cell_32 | [
"image_output_1.png"
] | from matplotlib.pylab import rcParams
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.statespace.sarimax import SARIMAX
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.Month = pd.to_datetime(df.Month)
df = df.set_index('Month')
rolling_mean = df.rolling(window=12).mean()
rolling_std = df.rolling(window=12).std()
plt.xlabel('Dates')
plt.ylabel('Total Production')
rcParams['figure.figsize'] = (12, 8)
a = seasonal_decompose(df['Monthly beer production'], model='add')
a = seasonal_decompose(df['Monthly beer production'], model='add')
train_data = df[:len(df) - 12]
test_data = df[len(df) - 12:]
arima_model = SARIMAX(train_data['Monthly beer production'], order=(2, 1, 1), seasonal_order=(4, 0, 3, 12))
arima_result = arima_model.fit()
arima_result.summary()
arima_pred = arima_result.predict(start=len(train_data), end=len(df) - 1, typ='levels').rename('ARIMA Predictions')
arima_pred
plt.figure(figsize=(10, 6))
plt.plot(test_data, label='true values', color='blue')
plt.plot(arima_pred, label='forecasts', color='orange')
plt.title('ARIMA Model', size=14)
plt.legend(loc='upper left')
plt.show() | code |
105207855/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.head() | code |
105207855/cell_17 | [
"image_output_1.png"
] | from matplotlib.pylab import rcParams
from statsmodels.tsa.seasonal import seasonal_decompose
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.Month = pd.to_datetime(df.Month)
df = df.set_index('Month')
rolling_mean = df.rolling(window=12).mean()
rolling_std = df.rolling(window=12).std()
plt.xlabel('Dates')
plt.ylabel('Total Production')
rcParams['figure.figsize'] = (12, 8)
a = seasonal_decompose(df['Monthly beer production'], model='add')
a.plot() | code |
105207855/cell_24 | [
"image_output_1.png"
] | from statsmodels.tsa.statespace.sarimax import SARIMAX
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.Month = pd.to_datetime(df.Month)
df = df.set_index('Month')
rolling_mean = df.rolling(window=12).mean()
rolling_std = df.rolling(window=12).std()
plt.xlabel('Dates')
plt.ylabel('Total Production')
train_data = df[:len(df) - 12]
test_data = df[len(df) - 12:]
arima_model = SARIMAX(train_data['Monthly beer production'], order=(2, 1, 1), seasonal_order=(4, 0, 3, 12))
arima_result = arima_model.fit()
arima_result.summary() | code |
105207855/cell_14 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.Month = pd.to_datetime(df.Month)
df = df.set_index('Month')
rolling_mean = df.rolling(window=12).mean()
rolling_std = df.rolling(window=12).std()
plt.figure(figsize=(18, 9))
plt.plot(df.index, df['Monthly beer production'], linestyle='-')
plt.xlabel('Dates')
plt.ylabel('Total Production')
plt.show() | code |
105207855/cell_10 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.Month = pd.to_datetime(df.Month)
df = df.set_index('Month')
df.head() | code |
105207855/cell_27 | [
"text_plain_output_4.png",
"application_vnd.jupyter.stderr_output_3.png",
"application_vnd.jupyter.stderr_output_5.png",
"text_html_output_1.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from statsmodels.tsa.statespace.sarimax import SARIMAX
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.Month = pd.to_datetime(df.Month)
df = df.set_index('Month')
rolling_mean = df.rolling(window=12).mean()
rolling_std = df.rolling(window=12).std()
plt.xlabel('Dates')
plt.ylabel('Total Production')
train_data = df[:len(df) - 12]
test_data = df[len(df) - 12:]
arima_model = SARIMAX(train_data['Monthly beer production'], order=(2, 1, 1), seasonal_order=(4, 0, 3, 12))
arima_result = arima_model.fit()
arima_result.summary()
arima_pred = arima_result.predict(start=len(train_data), end=len(df) - 1, typ='levels').rename('ARIMA Predictions')
arima_pred
test_data['Monthly beer production'].plot(figsize=(16, 5), legend=True)
arima_pred.plot(legend=True) | code |
105207855/cell_12 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/time-series-datasets/monthly-beer-production-in-austr.csv')
df.Month = pd.to_datetime(df.Month)
df = df.set_index('Month')
rolling_mean = df.rolling(window=12).mean()
rolling_std = df.rolling(window=12).std()
plt.figure(figsize=(10, 6))
plt.plot(df, color='cornflowerblue', label='Original')
plt.plot(rolling_mean, color='firebrick', label='Rolling Mean')
plt.plot(rolling_std, color='limegreen', label='Rolling Std')
plt.xlabel('Date', size=12)
plt.ylabel('Monthly Beer Production', size=12)
plt.legend(loc='upper left')
plt.title('Rolling Statistics', size=14)
plt.show() | code |
1005562/cell_13 | [
"image_output_1.png"
] | import matplotlib
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
print('Skewness: %f' % train['SalePrice'].skew())
print('Kurtosis: %f' % train['SalePrice'].kurt()) | code |
1005562/cell_57 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from scipy.stats import skew
from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
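# log1p compresses the right-skewed target toward a normal shape; predictions are mapped back with expm1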
train['SalePrice'] = np.log1p(train['SalePrice'])
numeric_feats = all_data.dtypes[all_data.dtypes != 'object'].index
skewed_feats = train[numeric_feats].apply(lambda x: skew(x.dropna()))
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
all_data[skewed_feats] = np.log1p(all_data[skewed_feats])
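# One-hot encode the categorical columns so the linear models can consume them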
all_data = pd.get_dummies(all_data)
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
all_data = all_data.fillna(all_data.mean())
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice
from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
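# sklearn maximizes scores, so neg_mean_squared_error comes back negative; negate it before taking the root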
def rmse_cv(model):
rmse = np.sqrt(-cross_val_score(model, X_train, y, scoring='neg_mean_squared_error', cv=5))
return rmse
model_elasticnet = ElasticNet()
alphas_en = [0.001, 0.005, 0.1, 0.2, 0.3]
cv_rmse_en = [rmse_cv(ElasticNet(alpha=alpha)).mean() for alpha in alphas_en]
cv_en = pd.Series(cv_rmse_en, index=alphas_en)
model_elasticnet = ElasticNet(alpha=0.026).fit(X_train, y)
matplotlib.rcParams['figure.figsize'] = (6.0, 6.0)
preds_en = pd.DataFrame({'preds EN': model_elasticnet.predict(X_train), 'true': y})
preds_en['residuals'] = preds_en['true'] - preds_en['preds EN']
alphas = [0.05, 0.1, 0.3, 1, 3, 5, 10, 15, 30, 50, 75]
cv_ridge = [rmse_cv(Ridge(alpha=alpha)).mean() for alpha in alphas]
cv_ridge = pd.Series(cv_ridge, index=alphas)
corr = train.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr()
cor_dict = corr['SalePrice'].to_dict()
del cor_dict['SalePrice']
print('List the numerical features descendingly by their correlation with Sale Price:\n')
for ele in sorted(cor_dict.items(), key=lambda x: -abs(x[1])):
print('{0}: \t{1}'.format(*ele)) | code |
1005562/cell_56 | [
"text_plain_output_1.png"
] | from scipy.stats import skew
from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
train['SalePrice'] = np.log1p(train['SalePrice'])
numeric_feats = all_data.dtypes[all_data.dtypes != 'object'].index
skewed_feats = train[numeric_feats].apply(lambda x: skew(x.dropna()))
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
all_data[skewed_feats] = np.log1p(all_data[skewed_feats])
all_data = pd.get_dummies(all_data)
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
all_data = all_data.fillna(all_data.mean())
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice
from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
def rmse_cv(model):
rmse = np.sqrt(-cross_val_score(model, X_train, y, scoring='neg_mean_squared_error', cv=5))
return rmse
model_elasticnet = ElasticNet()
alphas_en = [0.001, 0.005, 0.1, 0.2, 0.3]
cv_rmse_en = [rmse_cv(ElasticNet(alpha=alpha)).mean() for alpha in alphas_en]
cv_en = pd.Series(cv_rmse_en, index=alphas_en)
model_elasticnet = ElasticNet(alpha=0.026).fit(X_train, y)
matplotlib.rcParams['figure.figsize'] = (6.0, 6.0)
preds_en = pd.DataFrame({'preds EN': model_elasticnet.predict(X_train), 'true': y})
preds_en['residuals'] = preds_en['true'] - preds_en['preds EN']
alphas = [0.05, 0.1, 0.3, 1, 3, 5, 10, 15, 30, 50, 75]
cv_ridge = [rmse_cv(Ridge(alpha=alpha)).mean() for alpha in alphas]
cv_ridge = pd.Series(cv_ridge, index=alphas)
corr = train.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr()
plt.figure(figsize=(12, 12))
sns.heatmap(corr, vmax=1, square=True) | code |
1005562/cell_33 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from scipy.stats import skew
from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
train['SalePrice'] = np.log1p(train['SalePrice'])
numeric_feats = all_data.dtypes[all_data.dtypes != 'object'].index
skewed_feats = train[numeric_feats].apply(lambda x: skew(x.dropna()))
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
all_data[skewed_feats] = np.log1p(all_data[skewed_feats])
all_data = pd.get_dummies(all_data)
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
all_data = all_data.fillna(all_data.mean())
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice
from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
def rmse_cv(model):
rmse = np.sqrt(-cross_val_score(model, X_train, y, scoring='neg_mean_squared_error', cv=5))
return rmse
model_elasticnet = ElasticNet()
alphas_en = [0.001, 0.005, 0.1, 0.2, 0.3]
cv_rmse_en = [rmse_cv(ElasticNet(alpha=alpha)).mean() for alpha in alphas_en]
cv_en = pd.Series(cv_rmse_en, index=alphas_en)
model_elasticnet = ElasticNet(alpha=0.026).fit(X_train, y)
matplotlib.rcParams['figure.figsize'] = (6.0, 6.0)
preds_en = pd.DataFrame({'preds EN': model_elasticnet.predict(X_train), 'true': y})
preds_en['residuals'] = preds_en['true'] - preds_en['preds EN']
alphas = [0.05, 0.1, 0.3, 1, 3, 5, 10, 15, 30, 50, 75]
cv_ridge = [rmse_cv(Ridge(alpha=alpha)).mean() for alpha in alphas]
cv_ridge = pd.Series(cv_ridge, index=alphas)
cv_ridge.max() | code |
1005562/cell_55 | [
"text_plain_output_1.png"
] | from scipy.stats import skew
import matplotlib
import numpy as np
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
train['SalePrice'] = np.log1p(train['SalePrice'])
numeric_feats = all_data.dtypes[all_data.dtypes != 'object'].index
skewed_feats = train[numeric_feats].apply(lambda x: skew(x.dropna()))
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
all_data[skewed_feats] = np.log1p(all_data[skewed_feats])
all_data = pd.get_dummies(all_data)
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
all_data = all_data.fillna(all_data.mean())
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice
sns.regplot(x='OverallQual', y='SalePrice', data=train, color='Orange') | code |
1005562/cell_26 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from scipy.stats import skew
from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
train['SalePrice'] = np.log1p(train['SalePrice'])
numeric_feats = all_data.dtypes[all_data.dtypes != 'object'].index
skewed_feats = train[numeric_feats].apply(lambda x: skew(x.dropna()))
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
all_data[skewed_feats] = np.log1p(all_data[skewed_feats])
all_data = pd.get_dummies(all_data)
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
all_data = all_data.fillna(all_data.mean())
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice
from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
def rmse_cv(model):
rmse = np.sqrt(-cross_val_score(model, X_train, y, scoring='neg_mean_squared_error', cv=5))
return rmse
model_elasticnet = ElasticNet()
alphas_en = [0.001, 0.005, 0.1, 0.2, 0.3]
cv_rmse_en = [rmse_cv(ElasticNet(alpha=alpha)).mean() for alpha in alphas_en]
cv_en = pd.Series(cv_rmse_en, index=alphas_en)
cv_en.plot(title='Validation - Elastic Net')
plt.xlabel('alphas')
plt.ylabel('rmse') | code |
1005562/cell_41 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from scipy.stats import skew
from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xgboost as xgb
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
train['SalePrice'] = np.log1p(train['SalePrice'])
numeric_feats = all_data.dtypes[all_data.dtypes != 'object'].index
skewed_feats = train[numeric_feats].apply(lambda x: skew(x.dropna()))
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
all_data[skewed_feats] = np.log1p(all_data[skewed_feats])
all_data = pd.get_dummies(all_data)
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
all_data = all_data.fillna(all_data.mean())
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice
from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
def rmse_cv(model):
rmse = np.sqrt(-cross_val_score(model, X_train, y, scoring='neg_mean_squared_error', cv=5))
return rmse
model_elasticnet = ElasticNet()
alphas_en = [0.001, 0.005, 0.1, 0.2, 0.3]
cv_rmse_en = [rmse_cv(ElasticNet(alpha=alpha)).mean() for alpha in alphas_en]
cv_en = pd.Series(cv_rmse_en, index=alphas_en)
model_elasticnet = ElasticNet(alpha=0.026).fit(X_train, y)
matplotlib.rcParams['figure.figsize'] = (6.0, 6.0)
preds_en = pd.DataFrame({'preds EN': model_elasticnet.predict(X_train), 'true': y})
preds_en['residuals'] = preds_en['true'] - preds_en['preds EN']
alphas = [0.05, 0.1, 0.3, 1, 3, 5, 10, 15, 30, 50, 75]
cv_ridge = [rmse_cv(Ridge(alpha=alpha)).mean() for alpha in alphas]
cv_ridge = pd.Series(cv_ridge, index=alphas)
dtrain = xgb.DMatrix(X_train, label=y)
dtest = xgb.DMatrix(X_test)
params = {'max_depth': 2, 'eta': 0.1}
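# Cross-validate the number of boosting rounds; training stops once the held-out RMSE fails to improve for 100 rounds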
model = xgb.cv(params, dtrain, num_boost_round=500, early_stopping_rounds=100)
model_xgb = xgb.XGBRegressor(n_estimators=360, max_depth=2, learning_rate=0.1)
model_xgb.fit(X_train, y)
xgb_preds = np.expm1(model_xgb.predict(X_test))
en_preds = np.expm1(model_elasticnet.predict(X_test))
predictions = pd.DataFrame({'en': en_preds, 'xgb': xgb_preds})
predictions.plot(x='en', y='xgb', kind='scatter') | code |
1005562/cell_54 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from scipy.stats import skew
import matplotlib
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
train['SalePrice'] = np.log1p(train['SalePrice'])
numeric_feats = all_data.dtypes[all_data.dtypes != 'object'].index
skewed_feats = train[numeric_feats].apply(lambda x: skew(x.dropna()))
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
all_data[skewed_feats] = np.log1p(all_data[skewed_feats])
all_data = pd.get_dummies(all_data)
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
all_data = all_data.fillna(all_data.mean())
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice
print(train.select_dtypes(include=['object']).columns.values) | code |
1005562/cell_11 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
train['SalePrice'].describe() | code |
1005562/cell_50 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from keras.layers import Dense
from keras.models import Sequential
from keras.regularizers import l1
from scipy.stats import skew
from sklearn.preprocessing import StandardScaler
import matplotlib
import numpy as np
import pandas as pd
import xgboost as xgb
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
train['SalePrice'] = np.log1p(train['SalePrice'])
numeric_feats = all_data.dtypes[all_data.dtypes != 'object'].index
skewed_feats = train[numeric_feats].apply(lambda x: skew(x.dropna()))
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
all_data[skewed_feats] = np.log1p(all_data[skewed_feats])
all_data = pd.get_dummies(all_data)
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
all_data = all_data.fillna(all_data.mean())
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice
dtrain = xgb.DMatrix(X_train, label=y)
dtest = xgb.DMatrix(X_test)
params = {'max_depth': 2, 'eta': 0.1}
model = xgb.cv(params, dtrain, num_boost_round=500, early_stopping_rounds=100)
X_train = StandardScaler().fit_transform(X_train)
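# A single Dense unit with an L1 penalty is effectively lasso regression; scaling puts the features on a comparable footing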
model = Sequential()
model.add(Dense(1, input_dim=X_train.shape[1], kernel_regularizer=l1(0.001)))
model.compile(loss='mse', optimizer='adam')
model.summary() | code |
1005562/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head() | code |
1005562/cell_18 | [
"text_plain_output_1.png"
] | from scipy.stats import skew
import matplotlib
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
train['SalePrice'] = np.log1p(train['SalePrice'])
numeric_feats = all_data.dtypes[all_data.dtypes != 'object'].index
skewed_feats = train[numeric_feats].apply(lambda x: skew(x.dropna()))
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
all_data[skewed_feats] = np.log1p(all_data[skewed_feats])
all_data = pd.get_dummies(all_data)
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing_data.head(20) | code |
1005562/cell_32 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from scipy.stats import skew
from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
train['SalePrice'] = np.log1p(train['SalePrice'])
numeric_feats = all_data.dtypes[all_data.dtypes != 'object'].index
skewed_feats = train[numeric_feats].apply(lambda x: skew(x.dropna()))
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
all_data[skewed_feats] = np.log1p(all_data[skewed_feats])
all_data = pd.get_dummies(all_data)
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
all_data = all_data.fillna(all_data.mean())
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice
from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
def rmse_cv(model):
rmse = np.sqrt(-cross_val_score(model, X_train, y, scoring='neg_mean_squared_error', cv=5))
return rmse
model_elasticnet = ElasticNet()
alphas_en = [0.001, 0.005, 0.1, 0.2, 0.3]
cv_rmse_en = [rmse_cv(ElasticNet(alpha=alpha)).mean() for alpha in alphas_en]
cv_en = pd.Series(cv_rmse_en, index=alphas_en)
model_elasticnet = ElasticNet(alpha=0.026).fit(X_train, y)
matplotlib.rcParams['figure.figsize'] = (6.0, 6.0)
preds_en = pd.DataFrame({'preds EN': model_elasticnet.predict(X_train), 'true': y})
preds_en['residuals'] = preds_en['true'] - preds_en['preds EN']
alphas = [0.05, 0.1, 0.3, 1, 3, 5, 10, 15, 30, 50, 75]
cv_ridge = [rmse_cv(Ridge(alpha=alpha)).mean() for alpha in alphas]
cv_ridge = pd.Series(cv_ridge, index=alphas)
cv_ridge.plot(title='Validation - Ridge model')
plt.xlabel('alpha')
plt.ylabel('rmse') | code |
1005562/cell_28 | [
"text_html_output_1.png"
] | from scipy.stats import skew
from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
train['SalePrice'] = np.log1p(train['SalePrice'])
numeric_feats = all_data.dtypes[all_data.dtypes != 'object'].index
skewed_feats = train[numeric_feats].apply(lambda x: skew(x.dropna()))
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
all_data[skewed_feats] = np.log1p(all_data[skewed_feats])
all_data = pd.get_dummies(all_data)
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
all_data = all_data.fillna(all_data.mean())
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice
from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, LassoCV, LassoLarsCV
from sklearn.model_selection import cross_val_score
def rmse_cv(model):
rmse = np.sqrt(-cross_val_score(model, X_train, y, scoring='neg_mean_squared_error', cv=5))
return rmse
model_elasticnet = ElasticNet()
alphas_en = [0.001, 0.005, 0.1, 0.2, 0.3]
cv_rmse_en = [rmse_cv(ElasticNet(alpha=alpha)).mean() for alpha in alphas_en]
cv_en = pd.Series(cv_rmse_en, index=alphas_en)
model_elasticnet = ElasticNet(alpha=0.026).fit(X_train, y)
matplotlib.rcParams['figure.figsize'] = (6.0, 6.0)
preds_en = pd.DataFrame({'preds EN': model_elasticnet.predict(X_train), 'true': y})
preds_en['residuals'] = preds_en['true'] - preds_en['preds EN']
preds_en.plot(x='preds EN', y='residuals', kind='scatter') | code |
1005562/cell_8 | [
"image_output_1.png"
] | import matplotlib
import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
prices.hist() | code |
1005562/cell_38 | [
"text_plain_output_1.png"
] | from scipy.stats import skew
import matplotlib
import numpy as np
import pandas as pd
import xgboost as xgb
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
train['SalePrice'] = np.log1p(train['SalePrice'])
numeric_feats = all_data.dtypes[all_data.dtypes != 'object'].index
skewed_feats = train[numeric_feats].apply(lambda x: skew(x.dropna()))
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
all_data[skewed_feats] = np.log1p(all_data[skewed_feats])
all_data = pd.get_dummies(all_data)
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
all_data = all_data.fillna(all_data.mean())
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice
dtrain = xgb.DMatrix(X_train, label=y)
dtest = xgb.DMatrix(X_test)
params = {'max_depth': 2, 'eta': 0.1}
model = xgb.cv(params, dtrain, num_boost_round=500, early_stopping_rounds=100)
model_xgb = xgb.XGBRegressor(n_estimators=360, max_depth=2, learning_rate=0.1)
model_xgb.fit(X_train, y) | code |
1005562/cell_46 | [
"text_plain_output_1.png"
] | from keras.layers import Dense
from keras.models import Sequential
from keras.regularizers import l1
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split | code |
1005562/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib
import numpy as np
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
sns.distplot(train['SalePrice'], kde=False, color='b', hist_kws={'alpha': 0.9}) | code |
1005562/cell_10 | [
"text_html_output_1.png"
] | import matplotlib
import numpy as np
import pandas as pd
import seaborn as sns
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
sns.distplot(train['SalePrice']) | code |
1005562/cell_37 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from scipy.stats import skew
import matplotlib
import numpy as np
import pandas as pd
import xgboost as xgb
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
matplotlib.rcParams['figure.figsize'] = (12.0, 6.0)
prices = pd.DataFrame({'price': train['SalePrice'], 'log(price + 1)': np.log1p(train['SalePrice'])})
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'], test.loc[:, 'MSSubClass':'SaleCondition']))
train['SalePrice'] = np.log1p(train['SalePrice'])
numeric_feats = all_data.dtypes[all_data.dtypes != 'object'].index
skewed_feats = train[numeric_feats].apply(lambda x: skew(x.dropna()))
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
all_data[skewed_feats] = np.log1p(all_data[skewed_feats])
all_data = pd.get_dummies(all_data)
total = train.isnull().sum().sort_values(ascending=False)
percent = (train.isnull().sum() / train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
all_data = all_data.fillna(all_data.mean())
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.SalePrice
dtrain = xgb.DMatrix(X_train, label=y)
dtest = xgb.DMatrix(X_test)
params = {'max_depth': 2, 'eta': 0.1}
model = xgb.cv(params, dtrain, num_boost_round=500, early_stopping_rounds=100)
model.loc[30:, ['test-rmse-mean', 'train-rmse-mean']].plot() | code |
129012051/cell_33 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np # linear algebra
x_train = x_train.T
x_test = x_test.T
y_train = y_train.T
y_test = y_test.T
def initialize_weights_and_bias(dimension):
w = np.full((dimension, 1), 0.01)
b = 0.0
return (w, b)
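# Sigmoid maps any real z into (0, 1), so the output can be read as a class probability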
def sigmoid(z):
y_head = 1 / (1 + np.exp(-z))
return y_head
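# Forward pass computes the cross-entropy cost; backward pass returns gradients w.r.t. weights and bias, averaged over the samples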
def forward_backward_propagation(w, b, x_train, y_train):
z = np.dot(w.T, x_train) + b
y_head = sigmoid(z)
loss = -y_train * np.log(y_head) - (1 - y_train) * np.log(1 - y_head)
cost = np.sum(loss) / x_train.shape[1]
derivative_weight = np.dot(x_train, (y_head - y_train).T) / x_train.shape[1]
derivative_bias = np.sum(y_head - y_train) / x_train.shape[1]
gradients = {'derivative_weight': derivative_weight, 'derivative_bias': derivative_bias}
return (cost, gradients)
def update(w, b, x_train, y_train, learning_rate, number_of_iterarion):
cost_list = []
cost_list2 = []
index = []
for i in range(number_of_iterarion):
cost, gradients = forward_backward_propagation(w, b, x_train, y_train)
cost_list.append(cost)
w = w - learning_rate * gradients['derivative_weight']
b = b - learning_rate * gradients['derivative_bias']
if i % 10 == 0:
cost_list2.append(cost)
index.append(i)
parameters = {'weight': w, 'bias': b}
plt.plot(index, cost_list2)
plt.xticks(index, rotation='vertical')
plt.xlabel('Number of Iterations')
plt.ylabel('Cost')
plt.show()
return (parameters, gradients, cost_list)
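# Samples with predicted probability above 0.5 are labeled 1, the rest 0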
def predict(w, b, x_test):
z = sigmoid(np.dot(w.T, x_test) + b)
Y_prediction = np.zeros((1, x_test.shape[1]))
for i in range(z.shape[1]):
if z[0, i] <= 0.5:
Y_prediction[0, i] = 0
else:
Y_prediction[0, i] = 1
return Y_prediction
def logistic_regression(x_train, y_train, x_test, y_test, learning_rate, num_iterations):
dimension = x_train.shape[0]
w, b = initialize_weights_and_bias(dimension)
parameters, gradients, cost_list = update(w, b, x_train, y_train, learning_rate, num_iterations)
y_prediction_test = predict(parameters['weight'], parameters['bias'], x_test)
print('test accuracy: {} %'.format(100 - np.mean(np.abs(y_prediction_test - y_test)) * 100))
logistic_regression(x_train, y_train, x_test, y_test, learning_rate=1, num_iterations=300) | code |
129012051/cell_2 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
from PIL import Image
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
129012051/cell_7 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_csv('/kaggle/input/datacsv/data.csv')
data.info() | code |
128047268/cell_9 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
GM_df = pd.read_excel('/kaggle/input/juan-anyang/GM.xlsx')
Mining_df = pd.read_excel('/kaggle/input/juan-anyang/Mining.xlsx')
GM_df.fillna(0, inplace=True)
Mining_df.fillna(0, inplace=True)
GM_df['Reading No'] = GM_df['Reading No'].astype(str)
Mining_df['Reading No'] = Mining_df['Reading No'].astype(str)
dummies_GM = pd.get_dummies(GM_df['Type'])
GM_df = pd.concat([GM_df, dummies_GM], axis=1)
GM_df.columns
dummies_Mining = pd.get_dummies(Mining_df['Type'])
Mining_df = pd.concat([Mining_df, dummies_Mining], axis=1)
Mining_df.columns
Mining_df[['A', 'B', 'C', 'D']] = Mining_df[['A', 'B', 'C', 'D']].astype(int)
corr_Mining = Mining_df[['Level', 'A', 'B', 'C', 'D', 'Sn', 'Pb', 'Cu', 'P', 'Cl', 'S']].corr()
f, ax = plt.subplots(figsize=(9, 9))
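# Mask the upper triangle so each correlation of the symmetric matrix is drawn only once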
mask = np.zeros_like(corr_Mining, dtype=bool)
mask[np.triu_indices_from(mask)] = True
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(corr_Mining, mask=mask, cmap=cmap, square=True, annot=True, linewidths=0.5, fmt='.1f', ax=ax) | code |