path (string, lengths 13–17) | screenshot_names (sequence, lengths 1–873) | code (string, lengths 0–40.4k) | cell_type (string, 1 class) |
---|---|---|---|
73072707/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
dtrain = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/train.csv')
dtest = pd.read_csv('/kaggle/input/house-prices-advanced-regression-techniques/test.csv')
dtrain
total = dtrain.isnull().sum().sort_values(ascending=False)
percent = (dtrain.isnull().sum() / dtrain.isnull().count()).sort_values(ascending=False)
missing_values = pd.concat([total, percent], axis=1, keys=['total', 'percent'])
dtrain = dtrain.drop(missing_values[missing_values['percent'] > 0.8].index, axis=1)
dtest = dtest.drop(missing_values[missing_values['percent'] > 0.8].index, axis=1)
dtrain.isnull().sum().sort_values(ascending=False).head(13)
dtrain['Electrical'] | code |
106192046/cell_42 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, log_loss, f1_score, classification_report, roc_curve, plot_roc_curve
from sklearn.metrics import confusion_matrix, precision_score, recall_score, precision_recall_curve, roc_auc_score
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
def getDatasetDetail(data):
return pd.DataFrame({'Datatype': data.dtypes.astype(str), 'Non_Null_Count': data.count(axis=0).astype(int), 'Null_Count': data.isnull().sum().astype(int), 'Null_Percentage': round(data.isnull().sum() / len(data) * 100, 2), 'Unique_Values_Count': data.nunique().astype(int)}).sort_values(by='Null_Percentage', ascending=False)
X_train.shape
scaler = StandardScaler()
X_train_sc = scaler.fit_transform(X_train)
X_train_scaled = pd.DataFrame(X_train_sc, columns=X_train.columns)
X_test_sc = scaler.transform(X_test)
X_test_scaled = pd.DataFrame(X_test_sc, columns=X_test.columns)
def evaluate_model(actual, pred):
acc_sc = round(accuracy_score(actual, pred) * 100, 2)
prec_sc = round(precision_score(actual, pred) * 100, 2)
rec_sc = round(recall_score(actual, pred) * 100, 2)
confusion_m = confusion_matrix(actual, pred)
TP = confusion_m[1, 1]
TN = confusion_m[0, 0]
FP = confusion_m[0, 1]
FN = confusion_m[1, 0]
Specificity = round(TN / float(TN + FP), 2)
roc_score = round(recall_score(actual, pred) * 100, 2)
f1_score = round(2 * (prec_sc * rec_sc / (prec_sc + rec_sc)), 2)
return {'TP': TP, 'TN': TN, 'FP': FP, 'FN': FN, 'Recall': rec_sc, 'Precision': prec_sc, 'Specificity': Specificity, 'ROC/AUC Score': roc_score, 'F1-Score': f1_score, 'Accuracy': acc_sc}
model_lg = LogisticRegression(random_state=42)
model_lg.fit(X_train_scaled, y_train)
Y_train_pred = model_lg.predict(X_train_scaled)
Y_test_pred = model_lg.predict(X_test_scaled)
train_eval = pd.DataFrame([evaluate_model(y_train, Y_train_pred)])
test_eval = pd.DataFrame([evaluate_model(y_test, Y_test_pred)])
df_eval = pd.concat([train_eval, test_eval])
df_eval['data'] = ['train_data', 'test_data']
df_eval.set_index('data', inplace=True)
folds = KFold(n_splits=10, shuffle=True, random_state=4)
params = {'C': [0.01, 0.1, 1, 10, 100, 1000]}
model_cv = GridSearchCV(estimator=LogisticRegression(), param_grid=params, scoring='recall', cv=folds, verbose=1, return_train_score=True)
model_cv.fit(X_train_scaled, y_train)
cv_results = pd.DataFrame(model_cv.cv_results_)
cv_results
best_score = model_cv.best_score_
best_C = model_cv.best_params_['C']
logistic_1 = LogisticRegression(class_weight='balanced', C=best_C)
log_1_model = logistic_1.fit(X_train_scaled, y_train)
Y_train_pred = log_1_model.predict(X_train_scaled)
Y_test_pred = log_1_model.predict(X_test_scaled)
train_eval = pd.DataFrame([evaluate_model(y_train, Y_train_pred)])
test_eval = pd.DataFrame([evaluate_model(y_test, Y_test_pred)])
df_eval = pd.concat([train_eval, test_eval])
df_eval['data'] = ['train_data', 'test_data']
df_eval.set_index('data', inplace=True)
df_eval | code |
106192046/cell_21 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder, OneHotEncoder
import math
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
train_data = train_data.loc[:, train_data.isnull().sum() / len(train_data) * 100 < 60]
train_data.drop(['PassengerId', 'Name'], axis=1, inplace=True)
def fill_Missing_Values(df):
miss_col = df.isnull().sum()
miss_col = miss_col[miss_col > 0]
for column in miss_col.index:
if df[column].dtype.name == 'object':
df[column].fillna(df[column].mode()[0], inplace=True)
elif df[column].dtype.name == 'float64' or df[column].dtype.name == 'int64' or df[column].dtype.name == 'int32':
df[column] = df[column].fillna(df[column].median())
return df
train_data = fill_Missing_Values(train_data)
train_data.drop(['Ticket'], axis=1, inplace=True)
train_data.dtypes
rows = int(math.ceil(len(['Survived', 'Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked']) / 4))
cols = 4
#Heatmap
f, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train_data.corr(),
xticklabels=train_data.corr().columns.values,
yticklabels=train_data.corr().columns.values,annot= True)
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
plt.show()
objList = train_data.select_dtypes(include='object').columns
le = LabelEncoder()
for feat in objList:
train_data[feat] = le.fit_transform(train_data[feat].astype(str))
print(train_data.info()) | code |
106192046/cell_9 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
train_data = train_data.loc[:, train_data.isnull().sum() / len(train_data) * 100 < 60]
for a in train_data.columns:
if len(train_data[a].unique()) == train_data.shape[0]:
print(a) | code |
106192046/cell_25 | [
"text_html_output_1.png"
] | X_train.shape | code |
106192046/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.describe() | code |
106192046/cell_30 | [
"image_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
def getDatasetDetail(data):
return pd.DataFrame({'Datatype': data.dtypes.astype(str), 'Non_Null_Count': data.count(axis=0).astype(int), 'Null_Count': data.isnull().sum().astype(int), 'Null_Percentage': round(data.isnull().sum() / len(data) * 100, 2), 'Unique_Values_Count': data.nunique().astype(int)}).sort_values(by='Null_Percentage', ascending=False)
X_train.shape
scaler = StandardScaler()
X_train_sc = scaler.fit_transform(X_train)
X_train_scaled = pd.DataFrame(X_train_sc, columns=X_train.columns)
X_test_sc = scaler.transform(X_test)
X_test_scaled = pd.DataFrame(X_test_sc, columns=X_test.columns)
X_test_scaled.head() | code |
106192046/cell_44 | [
"image_output_1.png"
] | from plot_metric.functions import BinaryClassification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, log_loss, f1_score, classification_report, roc_curve, plot_roc_curve
from sklearn.metrics import confusion_matrix, precision_score, recall_score, precision_recall_curve, roc_auc_score
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
import math
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
def getDatasetDetail(data):
return pd.DataFrame({'Datatype': data.dtypes.astype(str), 'Non_Null_Count': data.count(axis=0).astype(int), 'Null_Count': data.isnull().sum().astype(int), 'Null_Percentage': round(data.isnull().sum() / len(data) * 100, 2), 'Unique_Values_Count': data.nunique().astype(int)}).sort_values(by='Null_Percentage', ascending=False)
train_data = train_data.loc[:, train_data.isnull().sum() / len(train_data) * 100 < 60]
train_data.drop(['PassengerId', 'Name'], axis=1, inplace=True)
def fill_Missing_Values(df):
miss_col = df.isnull().sum()
miss_col = miss_col[miss_col > 0]
for column in miss_col.index:
if df[column].dtype.name == 'object':
df[column].fillna(df[column].mode()[0], inplace=True)
elif df[column].dtype.name == 'float64' or df[column].dtype.name == 'int64' or df[column].dtype.name == 'int32':
df[column] = df[column].fillna(df[column].median())
return df
train_data = fill_Missing_Values(train_data)
train_data.drop(['Ticket'], axis=1, inplace=True)
train_data.dtypes
rows = int(math.ceil(len(['Survived', 'Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked']) / 4))
cols = 4
#Heatmap
f, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train_data.corr(),
xticklabels=train_data.corr().columns.values,
yticklabels=train_data.corr().columns.values,annot= True)
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
plt.show()
X_train.shape
scaler = StandardScaler()
X_train_sc = scaler.fit_transform(X_train)
X_train_scaled = pd.DataFrame(X_train_sc, columns=X_train.columns)
X_test_sc = scaler.transform(X_test)
X_test_scaled = pd.DataFrame(X_test_sc, columns=X_test.columns)
def evaluate_model(actual, pred):
acc_sc = round(accuracy_score(actual, pred) * 100, 2)
prec_sc = round(precision_score(actual, pred) * 100, 2)
rec_sc = round(recall_score(actual, pred) * 100, 2)
confusion_m = confusion_matrix(actual, pred)
TP = confusion_m[1, 1]
TN = confusion_m[0, 0]
FP = confusion_m[0, 1]
FN = confusion_m[1, 0]
Specificity = round(TN / float(TN + FP), 2)
roc_score = round(recall_score(actual, pred) * 100, 2)
f1_score = round(2 * (prec_sc * rec_sc / (prec_sc + rec_sc)), 2)
return {'TP': TP, 'TN': TN, 'FP': FP, 'FN': FN, 'Recall': rec_sc, 'Precision': prec_sc, 'Specificity': Specificity, 'ROC/AUC Score': roc_score, 'F1-Score': f1_score, 'Accuracy': acc_sc}
model_lg = LogisticRegression(random_state=42)
model_lg.fit(X_train_scaled, y_train)
Y_train_pred = model_lg.predict(X_train_scaled)
Y_test_pred = model_lg.predict(X_test_scaled)
train_eval = pd.DataFrame([evaluate_model(y_train, Y_train_pred)])
test_eval = pd.DataFrame([evaluate_model(y_test, Y_test_pred)])
df_eval = pd.concat([train_eval, test_eval])
df_eval['data'] = ['train_data', 'test_data']
df_eval.set_index('data', inplace=True)
folds = KFold(n_splits=10, shuffle=True, random_state=4)
params = {'C': [0.01, 0.1, 1, 10, 100, 1000]}
model_cv = GridSearchCV(estimator=LogisticRegression(), param_grid=params, scoring='recall', cv=folds, verbose=1, return_train_score=True)
model_cv.fit(X_train_scaled, y_train)
cv_results = pd.DataFrame(model_cv.cv_results_)
cv_results
plt.xscale('log')
best_score = model_cv.best_score_
best_C = model_cv.best_params_['C']
logistic_1 = LogisticRegression(class_weight='balanced', C=best_C)
log_1_model = logistic_1.fit(X_train_scaled, y_train)
Y_train_pred = log_1_model.predict(X_train_scaled)
Y_test_pred = log_1_model.predict(X_test_scaled)
from plot_metric.functions import BinaryClassification
bc = BinaryClassification(y_test, Y_test_pred, labels=['Class 1', 'Class 2'])
plt.figure(figsize=(5, 5))
bc.plot_roc_curve()
plt.show() | code |
106192046/cell_20 | [
"text_plain_output_1.png"
] | import math
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
train_data = train_data.loc[:, train_data.isnull().sum() / len(train_data) * 100 < 60]
train_data.drop(['PassengerId', 'Name'], axis=1, inplace=True)
def fill_Missing_Values(df):
miss_col = df.isnull().sum()
miss_col = miss_col[miss_col > 0]
for column in miss_col.index:
if df[column].dtype.name == 'object':
df[column].fillna(df[column].mode()[0], inplace=True)
elif df[column].dtype.name == 'float64' or df[column].dtype.name == 'int64' or df[column].dtype.name == 'int32':
df[column] = df[column].fillna(df[column].median())
return df
train_data = fill_Missing_Values(train_data)
train_data.drop(['Ticket'], axis=1, inplace=True)
train_data.dtypes
rows = int(math.ceil(len(['Survived', 'Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked']) / 4))
cols = 4
#Heatmap
f, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train_data.corr(),
xticklabels=train_data.corr().columns.values,
yticklabels=train_data.corr().columns.values,annot= True)
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
plt.show()
objList = train_data.select_dtypes(include='object').columns
print(objList) | code |
106192046/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
def getDatasetDetail(data):
return pd.DataFrame({'Datatype': data.dtypes.astype(str), 'Non_Null_Count': data.count(axis=0).astype(int), 'Null_Count': data.isnull().sum().astype(int), 'Null_Percentage': round(data.isnull().sum() / len(data) * 100, 2), 'Unique_Values_Count': data.nunique().astype(int)}).sort_values(by='Null_Percentage', ascending=False)
getDatasetDetail(train_data) | code |
106192046/cell_39 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, log_loss, f1_score, classification_report, roc_curve, plot_roc_curve
from sklearn.metrics import confusion_matrix, precision_score, recall_score, precision_recall_curve, roc_auc_score
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
def getDatasetDetail(data):
return pd.DataFrame({'Datatype': data.dtypes.astype(str), 'Non_Null_Count': data.count(axis=0).astype(int), 'Null_Count': data.isnull().sum().astype(int), 'Null_Percentage': round(data.isnull().sum() / len(data) * 100, 2), 'Unique_Values_Count': data.nunique().astype(int)}).sort_values(by='Null_Percentage', ascending=False)
X_train.shape
scaler = StandardScaler()
X_train_sc = scaler.fit_transform(X_train)
X_train_scaled = pd.DataFrame(X_train_sc, columns=X_train.columns)
X_test_sc = scaler.transform(X_test)
X_test_scaled = pd.DataFrame(X_test_sc, columns=X_test.columns)
def evaluate_model(actual, pred):
acc_sc = round(accuracy_score(actual, pred) * 100, 2)
prec_sc = round(precision_score(actual, pred) * 100, 2)
rec_sc = round(recall_score(actual, pred) * 100, 2)
confusion_m = confusion_matrix(actual, pred)
TP = confusion_m[1, 1]
TN = confusion_m[0, 0]
FP = confusion_m[0, 1]
FN = confusion_m[1, 0]
Specificity = round(TN / float(TN + FP), 2)
roc_score = round(recall_score(actual, pred) * 100, 2)
f1_score = round(2 * (prec_sc * rec_sc / (prec_sc + rec_sc)), 2)
return {'TP': TP, 'TN': TN, 'FP': FP, 'FN': FN, 'Recall': rec_sc, 'Precision': prec_sc, 'Specificity': Specificity, 'ROC/AUC Score': roc_score, 'F1-Score': f1_score, 'Accuracy': acc_sc}
model_lg = LogisticRegression(random_state=42)
model_lg.fit(X_train_scaled, y_train)
Y_train_pred = model_lg.predict(X_train_scaled)
Y_test_pred = model_lg.predict(X_test_scaled)
train_eval = pd.DataFrame([evaluate_model(y_train, Y_train_pred)])
test_eval = pd.DataFrame([evaluate_model(y_test, Y_test_pred)])
df_eval = pd.concat([train_eval, test_eval])
df_eval['data'] = ['train_data', 'test_data']
df_eval.set_index('data', inplace=True)
folds = KFold(n_splits=10, shuffle=True, random_state=4)
params = {'C': [0.01, 0.1, 1, 10, 100, 1000]}
model_cv = GridSearchCV(estimator=LogisticRegression(), param_grid=params, scoring='recall', cv=folds, verbose=1, return_train_score=True)
model_cv.fit(X_train_scaled, y_train)
cv_results = pd.DataFrame(model_cv.cv_results_)
cv_results
best_score = model_cv.best_score_
best_C = model_cv.best_params_['C']
print(' The highest test sensitivity is {0} at C = {1}'.format(best_score, best_C)) | code |
106192046/cell_48 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, log_loss, f1_score, classification_report, roc_curve, plot_roc_curve
from sklearn.metrics import confusion_matrix, precision_score, recall_score, precision_recall_curve, roc_auc_score
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
def getDatasetDetail(data):
return pd.DataFrame({'Datatype': data.dtypes.astype(str), 'Non_Null_Count': data.count(axis=0).astype(int), 'Null_Count': data.isnull().sum().astype(int), 'Null_Percentage': round(data.isnull().sum() / len(data) * 100, 2), 'Unique_Values_Count': data.nunique().astype(int)}).sort_values(by='Null_Percentage', ascending=False)
X_train.shape
scaler = StandardScaler()
X_train_sc = scaler.fit_transform(X_train)
X_train_scaled = pd.DataFrame(X_train_sc, columns=X_train.columns)
X_test_sc = scaler.transform(X_test)
X_test_scaled = pd.DataFrame(X_test_sc, columns=X_test.columns)
def evaluate_model(actual, pred):
acc_sc = round(accuracy_score(actual, pred) * 100, 2)
prec_sc = round(precision_score(actual, pred) * 100, 2)
rec_sc = round(recall_score(actual, pred) * 100, 2)
confusion_m = confusion_matrix(actual, pred)
TP = confusion_m[1, 1]
TN = confusion_m[0, 0]
FP = confusion_m[0, 1]
FN = confusion_m[1, 0]
Specificity = round(TN / float(TN + FP), 2)
roc_score = round(recall_score(actual, pred) * 100, 2)
f1_score = round(2 * (prec_sc * rec_sc / (prec_sc + rec_sc)), 2)
return {'TP': TP, 'TN': TN, 'FP': FP, 'FN': FN, 'Recall': rec_sc, 'Precision': prec_sc, 'Specificity': Specificity, 'ROC/AUC Score': roc_score, 'F1-Score': f1_score, 'Accuracy': acc_sc}
model_lg = LogisticRegression(random_state=42)
model_lg.fit(X_train_scaled, y_train)
Y_train_pred = model_lg.predict(X_train_scaled)
Y_test_pred = model_lg.predict(X_test_scaled)
train_eval = pd.DataFrame([evaluate_model(y_train, Y_train_pred)])
test_eval = pd.DataFrame([evaluate_model(y_test, Y_test_pred)])
df_eval = pd.concat([train_eval, test_eval])
df_eval['data'] = ['train_data', 'test_data']
df_eval.set_index('data', inplace=True)
folds = KFold(n_splits=10, shuffle=True, random_state=4)
params = {'C': [0.01, 0.1, 1, 10, 100, 1000]}
model_cv = GridSearchCV(estimator=LogisticRegression(), param_grid=params, scoring='recall', cv=folds, verbose=1, return_train_score=True)
model_cv.fit(X_train_scaled, y_train)
cv_results = pd.DataFrame(model_cv.cv_results_)
cv_results
best_score = model_cv.best_score_
best_C = model_cv.best_params_['C']
logistic_1 = LogisticRegression(class_weight='balanced', C=best_C)
log_1_model = logistic_1.fit(X_train_scaled, y_train)
Y_train_pred = log_1_model.predict(X_train_scaled)
Y_test_pred = log_1_model.predict(X_test_scaled)
train_eval = pd.DataFrame([evaluate_model(y_train, Y_train_pred)])
test_eval = pd.DataFrame([evaluate_model(y_test, Y_test_pred)])
df_eval = pd.concat([train_eval, test_eval])
df_eval['data'] = ['train_data', 'test_data']
df_eval.set_index('data', inplace=True)
param_grid = {'max_depth': range(10, 20, 10), 'min_samples_leaf': range(50, 150, 50), 'min_samples_split': range(50, 150, 50)}
dtree = DecisionTreeClassifier(class_weight='balanced', random_state=42)
grid_search = GridSearchCV(estimator=dtree, param_grid=param_grid, scoring='recall', cv=5, verbose=1)
grid_search.fit(X_train_scaled, y_train)
cv_results = pd.DataFrame(grid_search.cv_results_)
cv_results
print('Best score:-', grid_search.best_score_)
print(grid_search.best_estimator_) | code |
106192046/cell_11 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
def getDatasetDetail(data):
return pd.DataFrame({'Datatype': data.dtypes.astype(str), 'Non_Null_Count': data.count(axis=0).astype(int), 'Null_Count': data.isnull().sum().astype(int), 'Null_Percentage': round(data.isnull().sum() / len(data) * 100, 2), 'Unique_Values_Count': data.nunique().astype(int)}).sort_values(by='Null_Percentage', ascending=False)
train_data = train_data.loc[:, train_data.isnull().sum() / len(train_data) * 100 < 60]
train_data.drop(['PassengerId', 'Name'], axis=1, inplace=True)
getDatasetDetail(train_data) | code |
106192046/cell_50 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, log_loss, f1_score, classification_report, roc_curve, plot_roc_curve
from sklearn.metrics import confusion_matrix, precision_score, recall_score, precision_recall_curve, roc_auc_score
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
def getDatasetDetail(data):
return pd.DataFrame({'Datatype': data.dtypes.astype(str), 'Non_Null_Count': data.count(axis=0).astype(int), 'Null_Count': data.isnull().sum().astype(int), 'Null_Percentage': round(data.isnull().sum() / len(data) * 100, 2), 'Unique_Values_Count': data.nunique().astype(int)}).sort_values(by='Null_Percentage', ascending=False)
X_train.shape
scaler = StandardScaler()
X_train_sc = scaler.fit_transform(X_train)
X_train_scaled = pd.DataFrame(X_train_sc, columns=X_train.columns)
X_test_sc = scaler.transform(X_test)
X_test_scaled = pd.DataFrame(X_test_sc, columns=X_test.columns)
def evaluate_model(actual, pred):
acc_sc = round(accuracy_score(actual, pred) * 100, 2)
prec_sc = round(precision_score(actual, pred) * 100, 2)
rec_sc = round(recall_score(actual, pred) * 100, 2)
confusion_m = confusion_matrix(actual, pred)
TP = confusion_m[1, 1]
TN = confusion_m[0, 0]
FP = confusion_m[0, 1]
FN = confusion_m[1, 0]
Specificity = round(TN / float(TN + FP), 2)
roc_score = round(recall_score(actual, pred) * 100, 2)
f1_score = round(2 * (prec_sc * rec_sc / (prec_sc + rec_sc)), 2)
return {'TP': TP, 'TN': TN, 'FP': FP, 'FN': FN, 'Recall': rec_sc, 'Precision': prec_sc, 'Specificity': Specificity, 'ROC/AUC Score': roc_score, 'F1-Score': f1_score, 'Accuracy': acc_sc}
model_lg = LogisticRegression(random_state=42)
model_lg.fit(X_train_scaled, y_train)
Y_train_pred = model_lg.predict(X_train_scaled)
Y_test_pred = model_lg.predict(X_test_scaled)
train_eval = pd.DataFrame([evaluate_model(y_train, Y_train_pred)])
test_eval = pd.DataFrame([evaluate_model(y_test, Y_test_pred)])
df_eval = pd.concat([train_eval, test_eval])
df_eval['data'] = ['train_data', 'test_data']
df_eval.set_index('data', inplace=True)
folds = KFold(n_splits=10, shuffle=True, random_state=4)
params = {'C': [0.01, 0.1, 1, 10, 100, 1000]}
model_cv = GridSearchCV(estimator=LogisticRegression(), param_grid=params, scoring='recall', cv=folds, verbose=1, return_train_score=True)
model_cv.fit(X_train_scaled, y_train)
cv_results = pd.DataFrame(model_cv.cv_results_)
cv_results
best_score = model_cv.best_score_
best_C = model_cv.best_params_['C']
logistic_1 = LogisticRegression(class_weight='balanced', C=best_C)
log_1_model = logistic_1.fit(X_train_scaled, y_train)
Y_train_pred = log_1_model.predict(X_train_scaled)
Y_test_pred = log_1_model.predict(X_test_scaled)
train_eval = pd.DataFrame([evaluate_model(y_train, Y_train_pred)])
test_eval = pd.DataFrame([evaluate_model(y_test, Y_test_pred)])
df_eval = pd.concat([train_eval, test_eval])
df_eval['data'] = ['train_data', 'test_data']
df_eval.set_index('data', inplace=True)
param_grid = {'max_depth': range(10, 20, 10), 'min_samples_leaf': range(50, 150, 50), 'min_samples_split': range(50, 150, 50)}
dtree = DecisionTreeClassifier(class_weight='balanced', random_state=42)
grid_search = GridSearchCV(estimator=dtree, param_grid=param_grid, scoring='recall', cv=5, verbose=1)
grid_search.fit(X_train_scaled, y_train)
cv_results = pd.DataFrame(grid_search.cv_results_)
cv_results
dt = DecisionTreeClassifier(class_weight='balanced', criterion='gini', max_depth=10, min_samples_leaf=50, min_samples_split=50, random_state=42)
dt_model = dt.fit(X_train_scaled, y_train)
Y_train_pred = dt_model.predict(X_train_scaled)
Y_test_pred = dt_model.predict(X_test_scaled)
train_eval = pd.DataFrame([evaluate_model(y_train, Y_train_pred)])
test_eval = pd.DataFrame([evaluate_model(y_test, Y_test_pred)])
df_eval = pd.concat([train_eval, test_eval])
df_eval['data'] = ['train_data', 'test_data']
df_eval.set_index('data', inplace=True)
df_eval | code |
106192046/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
106192046/cell_18 | [
"text_html_output_1.png"
] | import math
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
train_data = train_data.loc[:, train_data.isnull().sum() / len(train_data) * 100 < 60]
train_data.drop(['PassengerId', 'Name'], axis=1, inplace=True)
def fill_Missing_Values(df):
miss_col = df.isnull().sum()
miss_col = miss_col[miss_col > 0]
for column in miss_col.index:
if df[column].dtype.name == 'object':
df[column].fillna(df[column].mode()[0], inplace=True)
elif df[column].dtype.name == 'float64' or df[column].dtype.name == 'int64' or df[column].dtype.name == 'int32':
df[column] = df[column].fillna(df[column].median())
return df
train_data = fill_Missing_Values(train_data)
train_data.drop(['Ticket'], axis=1, inplace=True)
train_data.dtypes
rows = int(math.ceil(len(['Survived', 'Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked']) / 4))
cols = 4
f, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train_data.corr(), xticklabels=train_data.corr().columns.values, yticklabels=train_data.corr().columns.values, annot=True)
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
plt.show() | code |
106192046/cell_28 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import StandardScaler
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
def getDatasetDetail(data):
return pd.DataFrame({'Datatype': data.dtypes.astype(str), 'Non_Null_Count': data.count(axis=0).astype(int), 'Null_Count': data.isnull().sum().astype(int), 'Null_Percentage': round(data.isnull().sum() / len(data) * 100, 2), 'Unique_Values_Count': data.nunique().astype(int)}).sort_values(by='Null_Percentage', ascending=False)
X_train.shape
scaler = StandardScaler()
X_train_sc = scaler.fit_transform(X_train)
X_train_scaled = pd.DataFrame(X_train_sc, columns=X_train.columns)
X_train_scaled.head() | code |
106192046/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
def getDatasetDetail(data):
return pd.DataFrame({'Datatype': data.dtypes.astype(str), 'Non_Null_Count': data.count(axis=0).astype(int), 'Null_Count': data.isnull().sum().astype(int), 'Null_Percentage': round(data.isnull().sum() / len(data) * 100, 2), 'Unique_Values_Count': data.nunique().astype(int)}).sort_values(by='Null_Percentage', ascending=False)
train_data = train_data.loc[:, train_data.isnull().sum() / len(train_data) * 100 < 60]
getDatasetDetail(train_data) | code |
106192046/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
train_data = train_data.loc[:, train_data.isnull().sum() / len(train_data) * 100 < 60]
train_data.drop(['PassengerId', 'Name'], axis=1, inplace=True)
def fill_Missing_Values(df):
miss_col = df.isnull().sum()
miss_col = miss_col[miss_col > 0]
for column in miss_col.index:
if df[column].dtype.name == 'object':
df[column].fillna(df[column].mode()[0], inplace=True)
elif df[column].dtype.name == 'float64' or df[column].dtype.name == 'int64' or df[column].dtype.name == 'int32':
df[column] = df[column].fillna(df[column].median())
return df
train_data = fill_Missing_Values(train_data)
train_data.drop(['Ticket'], axis=1, inplace=True)
train_data.dtypes | code |
106192046/cell_38 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, log_loss, f1_score, classification_report, roc_curve, plot_roc_curve
from sklearn.metrics import confusion_matrix, precision_score, recall_score, precision_recall_curve, roc_auc_score
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
import math
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
def getDatasetDetail(data):
return pd.DataFrame({'Datatype': data.dtypes.astype(str), 'Non_Null_Count': data.count(axis=0).astype(int), 'Null_Count': data.isnull().sum().astype(int), 'Null_Percentage': round(data.isnull().sum() / len(data) * 100, 2), 'Unique_Values_Count': data.nunique().astype(int)}).sort_values(by='Null_Percentage', ascending=False)
train_data = train_data.loc[:, train_data.isnull().sum() / len(train_data) * 100 < 60]
train_data.drop(['PassengerId', 'Name'], axis=1, inplace=True)
def fill_Missing_Values(df):
miss_col = df.isnull().sum()
miss_col = miss_col[miss_col > 0]
for column in miss_col.index:
if df[column].dtype.name == 'object':
df[column].fillna(df[column].mode()[0], inplace=True)
elif df[column].dtype.name == 'float64' or df[column].dtype.name == 'int64' or df[column].dtype.name == 'int32':
df[column] = df[column].fillna(df[column].median())
return df
train_data = fill_Missing_Values(train_data)
train_data.drop(['Ticket'], axis=1, inplace=True)
train_data.dtypes
rows = int(math.ceil(len(['Survived', 'Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked']) / 4))
cols = 4
#Heatmap
f, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(train_data.corr(),
xticklabels=train_data.corr().columns.values,
yticklabels=train_data.corr().columns.values,annot= True)
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
plt.show()
X_train.shape
scaler = StandardScaler()
X_train_sc = scaler.fit_transform(X_train)
X_train_scaled = pd.DataFrame(X_train_sc, columns=X_train.columns)
X_test_sc = scaler.transform(X_test)
X_test_scaled = pd.DataFrame(X_test_sc, columns=X_test.columns)
def evaluate_model(actual, pred):
acc_sc = round(accuracy_score(actual, pred) * 100, 2)
prec_sc = round(precision_score(actual, pred) * 100, 2)
rec_sc = round(recall_score(actual, pred) * 100, 2)
confusion_m = confusion_matrix(actual, pred)
TP = confusion_m[1, 1]
TN = confusion_m[0, 0]
FP = confusion_m[0, 1]
FN = confusion_m[1, 0]
Specificity = round(TN / float(TN + FP), 2)
roc_score = round(recall_score(actual, pred) * 100, 2)
f1_score = round(2 * (prec_sc * rec_sc / (prec_sc + rec_sc)), 2)
return {'TP': TP, 'TN': TN, 'FP': FP, 'FN': FN, 'Recall': rec_sc, 'Precision': prec_sc, 'Specificity': Specificity, 'ROC/AUC Score': roc_score, 'F1-Score': f1_score, 'Accuracy': acc_sc}
model_lg = LogisticRegression(random_state=42)
model_lg.fit(X_train_scaled, y_train)
Y_train_pred = model_lg.predict(X_train_scaled)
Y_test_pred = model_lg.predict(X_test_scaled)
train_eval = pd.DataFrame([evaluate_model(y_train, Y_train_pred)])
test_eval = pd.DataFrame([evaluate_model(y_test, Y_test_pred)])
df_eval = pd.concat([train_eval, test_eval])
df_eval['data'] = ['train_data', 'test_data']
df_eval.set_index('data', inplace=True)
folds = KFold(n_splits=10, shuffle=True, random_state=4)
params = {'C': [0.01, 0.1, 1, 10, 100, 1000]}
model_cv = GridSearchCV(estimator=LogisticRegression(), param_grid=params, scoring='recall', cv=folds, verbose=1, return_train_score=True)
model_cv.fit(X_train_scaled, y_train)
cv_results = pd.DataFrame(model_cv.cv_results_)
cv_results
plt.figure(figsize=(8, 6))
plt.plot(cv_results['param_C'], cv_results['mean_test_score'])
plt.plot(cv_results['param_C'], cv_results['mean_train_score'])
plt.xlabel('C')
plt.ylabel('sensitivity')
plt.legend(['test result', 'train result'], loc='upper left')
plt.xscale('log') | code |
106192046/cell_47 | [
"text_html_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, log_loss, f1_score, classification_report, roc_curve, plot_roc_curve
from sklearn.metrics import confusion_matrix, precision_score, recall_score, precision_recall_curve, roc_auc_score
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
def getDatasetDetail(data):
return pd.DataFrame({'Datatype': data.dtypes.astype(str), 'Non_Null_Count': data.count(axis=0).astype(int), 'Null_Count': data.isnull().sum().astype(int), 'Null_Percentage': round(data.isnull().sum() / len(data) * 100, 2), 'Unique_Values_Count': data.nunique().astype(int)}).sort_values(by='Null_Percentage', ascending=False)
X_train.shape
scaler = StandardScaler()
X_train_sc = scaler.fit_transform(X_train)
X_train_scaled = pd.DataFrame(X_train_sc, columns=X_train.columns)
X_test_sc = scaler.transform(X_test)
X_test_scaled = pd.DataFrame(X_test_sc, columns=X_test.columns)
def evaluate_model(actual, pred):
acc_sc = round(accuracy_score(actual, pred) * 100, 2)
prec_sc = round(precision_score(actual, pred) * 100, 2)
rec_sc = round(recall_score(actual, pred) * 100, 2)
confusion_m = confusion_matrix(actual, pred)
TP = confusion_m[1, 1]
TN = confusion_m[0, 0]
FP = confusion_m[0, 1]
FN = confusion_m[1, 0]
Specificity = round(TN / float(TN + FP), 2)
roc_score = round(recall_score(actual, pred) * 100, 2)
f1_score = round(2 * (prec_sc * rec_sc / (prec_sc + rec_sc)), 2)
return {'TP': TP, 'TN': TN, 'FP': FP, 'FN': FN, 'Recall': rec_sc, 'Precision': prec_sc, 'Specificity': Specificity, 'ROC/AUC Score': roc_score, 'F1-Score': f1_score, 'Accuracy': acc_sc}
model_lg = LogisticRegression(random_state=42)
model_lg.fit(X_train_scaled, y_train)
Y_train_pred = model_lg.predict(X_train_scaled)
Y_test_pred = model_lg.predict(X_test_scaled)
train_eval = pd.DataFrame([evaluate_model(y_train, Y_train_pred)])
test_eval = pd.DataFrame([evaluate_model(y_test, Y_test_pred)])
df_eval = pd.concat([train_eval, test_eval])
df_eval['data'] = ['train_data', 'test_data']
df_eval.set_index('data', inplace=True)
folds = KFold(n_splits=10, shuffle=True, random_state=4)
params = {'C': [0.01, 0.1, 1, 10, 100, 1000]}
model_cv = GridSearchCV(estimator=LogisticRegression(), param_grid=params, scoring='recall', cv=folds, verbose=1, return_train_score=True)
model_cv.fit(X_train_scaled, y_train)
cv_results = pd.DataFrame(model_cv.cv_results_)
cv_results
best_score = model_cv.best_score_
best_C = model_cv.best_params_['C']
logistic_1 = LogisticRegression(class_weight='balanced', C=best_C)
log_1_model = logistic_1.fit(X_train_scaled, y_train)
Y_train_pred = log_1_model.predict(X_train_scaled)
Y_test_pred = log_1_model.predict(X_test_scaled)
train_eval = pd.DataFrame([evaluate_model(y_train, Y_train_pred)])
test_eval = pd.DataFrame([evaluate_model(y_test, Y_test_pred)])
df_eval = pd.concat([train_eval, test_eval])
df_eval['data'] = ['train_data', 'test_data']
df_eval.set_index('data', inplace=True)
param_grid = {'max_depth': range(10, 20, 10), 'min_samples_leaf': range(50, 150, 50), 'min_samples_split': range(50, 150, 50)}
dtree = DecisionTreeClassifier(class_weight='balanced', random_state=42)
grid_search = GridSearchCV(estimator=dtree, param_grid=param_grid, scoring='recall', cv=5, verbose=1)
grid_search.fit(X_train_scaled, y_train)
cv_results = pd.DataFrame(grid_search.cv_results_)
cv_results | code |
106192046/cell_3 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.head() | code |
106192046/cell_17 | [
"text_html_output_1.png"
] | import math
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
train_data = train_data.loc[:, train_data.isnull().sum() / len(train_data) * 100 < 60]
train_data.drop(['PassengerId', 'Name'], axis=1, inplace=True)
def fill_Missing_Values(df):
miss_col = df.isnull().sum()
miss_col = miss_col[miss_col > 0]
for column in miss_col.index:
if df[column].dtype.name == 'object':
df[column].fillna(df[column].mode()[0], inplace=True)
elif df[column].dtype.name == 'float64' or df[column].dtype.name == 'int64' or df[column].dtype.name == 'int32':
df[column] = df[column].fillna(df[column].median())
return df
train_data = fill_Missing_Values(train_data)
train_data.drop(['Ticket'], axis=1, inplace=True)
train_data.dtypes
plt.figure(figsize=(20, 10))
rows = int(math.ceil(len(['Survived', 'Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked']) / 4))
cols = 4
for i, n in enumerate(['Survived', 'Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked']):
plt.subplot(rows, cols, i + 1)
sns.countplot(x=n, data=train_data)
plt.show() | code |
106192046/cell_35 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, log_loss, f1_score, classification_report, roc_curve, plot_roc_curve
from sklearn.metrics import confusion_matrix, precision_score, recall_score, precision_recall_curve, roc_auc_score
from sklearn.preprocessing import StandardScaler
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
def getDatasetDetail(data):
return pd.DataFrame({'Datatype': data.dtypes.astype(str), 'Non_Null_Count': data.count(axis=0).astype(int), 'Null_Count': data.isnull().sum().astype(int), 'Null_Percentage': round(data.isnull().sum() / len(data) * 100, 2), 'Unique_Values_Count': data.nunique().astype(int)}).sort_values(by='Null_Percentage', ascending=False)
X_train.shape
scaler = StandardScaler()
X_train_sc = scaler.fit_transform(X_train)
X_train_scaled = pd.DataFrame(X_train_sc, columns=X_train.columns)
X_test_sc = scaler.transform(X_test)
X_test_scaled = pd.DataFrame(X_test_sc, columns=X_test.columns)
def evaluate_model(actual, pred):
acc_sc = round(accuracy_score(actual, pred) * 100, 2)
prec_sc = round(precision_score(actual, pred) * 100, 2)
rec_sc = round(recall_score(actual, pred) * 100, 2)
confusion_m = confusion_matrix(actual, pred)
TP = confusion_m[1, 1]
TN = confusion_m[0, 0]
FP = confusion_m[0, 1]
FN = confusion_m[1, 0]
Specificity = round(TN / float(TN + FP), 2)
roc_score = round(recall_score(actual, pred) * 100, 2)
f1_score = round(2 * (prec_sc * rec_sc / (prec_sc + rec_sc)), 2)
return {'TP': TP, 'TN': TN, 'FP': FP, 'FN': FN, 'Recall': rec_sc, 'Precision': prec_sc, 'Specificity': Specificity, 'ROC/AUC Score': roc_score, 'F1-Score': f1_score, 'Accuracy': acc_sc}
model_lg = LogisticRegression(random_state=42)
model_lg.fit(X_train_scaled, y_train)
Y_train_pred = model_lg.predict(X_train_scaled)
Y_test_pred = model_lg.predict(X_test_scaled)
train_eval = pd.DataFrame([evaluate_model(y_train, Y_train_pred)])
test_eval = pd.DataFrame([evaluate_model(y_test, Y_test_pred)])
df_eval = pd.concat([train_eval, test_eval])
df_eval['data'] = ['train_data', 'test_data']
df_eval.set_index('data', inplace=True)
df_eval | code |
106192046/cell_43 | [
"text_html_output_1.png"
] | !pip install plot_metric | code |
106192046/cell_46 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
def getDatasetDetail(data):
return pd.DataFrame({'Datatype': data.dtypes.astype(str), 'Non_Null_Count': data.count(axis=0).astype(int), 'Null_Count': data.isnull().sum().astype(int), 'Null_Percentage': round(data.isnull().sum() / len(data) * 100, 2), 'Unique_Values_Count': data.nunique().astype(int)}).sort_values(by='Null_Percentage', ascending=False)
X_train.shape
scaler = StandardScaler()
X_train_sc = scaler.fit_transform(X_train)
X_train_scaled = pd.DataFrame(X_train_sc, columns=X_train.columns)
param_grid = {'max_depth': range(10, 20, 10), 'min_samples_leaf': range(50, 150, 50), 'min_samples_split': range(50, 150, 50)}
dtree = DecisionTreeClassifier(class_weight='balanced', random_state=42)
grid_search = GridSearchCV(estimator=dtree, param_grid=param_grid, scoring='recall', cv=5, verbose=1)
grid_search.fit(X_train_scaled, y_train) | code |
106192046/cell_14 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
def getDatasetDetail(data):
return pd.DataFrame({'Datatype': data.dtypes.astype(str), 'Non_Null_Count': data.count(axis=0).astype(int), 'Null_Count': data.isnull().sum().astype(int), 'Null_Percentage': round(data.isnull().sum() / len(data) * 100, 2), 'Unique_Values_Count': data.nunique().astype(int)}).sort_values(by='Null_Percentage', ascending=False)
train_data = train_data.loc[:, train_data.isnull().sum() / len(train_data) * 100 < 60]
train_data.drop(['PassengerId', 'Name'], axis=1, inplace=True)
def fill_Missing_Values(df):
miss_col = df.isnull().sum()
miss_col = miss_col[miss_col > 0]
for column in miss_col.index:
if df[column].dtype.name == 'object':
df[column].fillna(df[column].mode()[0], inplace=True)
elif df[column].dtype.name == 'float64' or df[column].dtype.name == 'int64' or df[column].dtype.name == 'int32':
df[column] = df[column].fillna(df[column].median())
return df
train_data = fill_Missing_Values(train_data)
getDatasetDetail(train_data) | code |
106192046/cell_37 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, log_loss, f1_score, classification_report, roc_curve, plot_roc_curve
from sklearn.metrics import confusion_matrix, precision_score, recall_score, precision_recall_curve, roc_auc_score
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
def getDatasetDetail(data):
return pd.DataFrame({'Datatype': data.dtypes.astype(str), 'Non_Null_Count': data.count(axis=0).astype(int), 'Null_Count': data.isnull().sum().astype(int), 'Null_Percentage': round(data.isnull().sum() / len(data) * 100, 2), 'Unique_Values_Count': data.nunique().astype(int)}).sort_values(by='Null_Percentage', ascending=False)
X_train.shape
scaler = StandardScaler()
X_train_sc = scaler.fit_transform(X_train)
X_train_scaled = pd.DataFrame(X_train_sc, columns=X_train.columns)
X_test_sc = scaler.transform(X_test)
X_test_scaled = pd.DataFrame(X_test_sc, columns=X_test.columns)
def evaluate_model(actual, pred):
acc_sc = round(accuracy_score(actual, pred) * 100, 2)
prec_sc = round(precision_score(actual, pred) * 100, 2)
rec_sc = round(recall_score(actual, pred) * 100, 2)
confusion_m = confusion_matrix(actual, pred)
TP = confusion_m[1, 1]
TN = confusion_m[0, 0]
FP = confusion_m[0, 1]
FN = confusion_m[1, 0]
Specificity = round(TN / float(TN + FP), 2)
roc_score = round(recall_score(actual, pred) * 100, 2)
f1_score = round(2 * (prec_sc * rec_sc / (prec_sc + rec_sc)), 2)
return {'TP': TP, 'TN': TN, 'FP': FP, 'FN': FN, 'Recall': rec_sc, 'Precision': prec_sc, 'Specificity': Specificity, 'ROC/AUC Score': roc_score, 'F1-Score': f1_score, 'Accuracy': acc_sc}
model_lg = LogisticRegression(random_state=42)
model_lg.fit(X_train_scaled, y_train)
Y_train_pred = model_lg.predict(X_train_scaled)
Y_test_pred = model_lg.predict(X_test_scaled)
train_eval = pd.DataFrame([evaluate_model(y_train, Y_train_pred)])
test_eval = pd.DataFrame([evaluate_model(y_test, Y_test_pred)])
df_eval = pd.concat([train_eval, test_eval])
df_eval['data'] = ['train_data', 'test_data']
df_eval.set_index('data', inplace=True)
folds = KFold(n_splits=10, shuffle=True, random_state=4)
params = {'C': [0.01, 0.1, 1, 10, 100, 1000]}
model_cv = GridSearchCV(estimator=LogisticRegression(), param_grid=params, scoring='recall', cv=folds, verbose=1, return_train_score=True)
model_cv.fit(X_train_scaled, y_train)
cv_results = pd.DataFrame(model_cv.cv_results_)
cv_results | code |
106192046/cell_5 | [
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape | code |
106192046/cell_36 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_data = pd.read_csv('../input/titanic/train.csv')
train_data.shape
def getDatasetDetail(data):
return pd.DataFrame({'Datatype': data.dtypes.astype(str), 'Non_Null_Count': data.count(axis=0).astype(int), 'Null_Count': data.isnull().sum().astype(int), 'Null_Percentage': round(data.isnull().sum() / len(data) * 100, 2), 'Unique_Values_Count': data.nunique().astype(int)}).sort_values(by='Null_Percentage', ascending=False)
X_train.shape
scaler = StandardScaler()
X_train_sc = scaler.fit_transform(X_train)
X_train_scaled = pd.DataFrame(X_train_sc, columns=X_train.columns)
folds = KFold(n_splits=10, shuffle=True, random_state=4)
params = {'C': [0.01, 0.1, 1, 10, 100, 1000]}
model_cv = GridSearchCV(estimator=LogisticRegression(), param_grid=params, scoring='recall', cv=folds, verbose=1, return_train_score=True)
model_cv.fit(X_train_scaled, y_train) | code |
16115529/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import fastai
from fastai.train import Learner
from fastai.train import DataBunch
from fastai.callbacks import GeneralScheduler, TrainingPhase
from fastai.basic_data import DatasetType
import fastprogress
from fastprogress import force_console_behavior
import numpy as np
from pprint import pprint
import pandas as pd
import os
import time
import gc
import random
from tqdm._tqdm_notebook import tqdm_notebook as tqdm
from keras.preprocessing import text, sequence
import torch
from torch import nn
from torch.utils import data
from torch.nn import functional as F
import torch.utils.data
from tqdm import tqdm
import warnings
from nltk.tokenize.treebank import TreebankWordTokenizer
from scipy.stats import rankdata
from gensim.models import KeyedVectors
from sklearn.metrics import roc_auc_score
import copy | code |
16115529/cell_10 | [
"text_plain_output_4.png",
"application_vnd.jupyter.stderr_output_3.png",
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from fastai.callbacks import GeneralScheduler, TrainingPhase
from fastprogress import force_console_behavior
from gensim.models import KeyedVectors
from keras.preprocessing import text, sequence
from scipy.stats import rankdata
from torch import nn
from torch.utils import data
from tqdm import tqdm
import copy
import fastai
import fastprogress
import gc
import numpy as np
import numpy as np
import os
import pandas as pd
import pandas as pd
import random
import torch
import warnings
def convert_lines(example, max_seq_length,tokenizer):
max_seq_length -=2
all_tokens = []
longer = 0
for text in tqdm(example):
tokens_a = tokenizer.tokenize(text)
if len(tokens_a)>max_seq_length:
tokens_a = tokens_a[:max_seq_length]
longer += 1
one_token = tokenizer.convert_tokens_to_ids(["[CLS]"]+tokens_a+["[SEP]"])+[0] * (max_seq_length - len(tokens_a))
all_tokens.append(one_token)
return np.array(all_tokens)
def is_interactive():
return 'SHLVL' not in os.environ
def seed_everything(seed=123):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def get_coefs(word, *arr):
return word, np.asarray(arr, dtype='float32')
def load_embeddings(path):
#with open(path,'rb') as f:
emb_arr = KeyedVectors.load(path)
return emb_arr
def build_matrix(word_index, path, dim=300):
embedding_index = load_embeddings(path)
embedding_matrix = np.zeros((max_features + 1, dim))
unknown_words = []
for word, i in word_index.items():
if i <= max_features:
try:
embedding_matrix[i] = embedding_index[word]
except KeyError:
try:
embedding_matrix[i] = embedding_index[word.lower()]
except KeyError:
try:
embedding_matrix[i] = embedding_index[word.title()]
except KeyError:
unknown_words.append(word)
return embedding_matrix, unknown_words
def sigmoid(x):
return 1 / (1 + np.exp(-x))
class SpatialDropout(nn.Dropout2d):
def forward(self, x):
x = x.unsqueeze(2) # (N, T, 1, K)
x = x.permute(0, 3, 2, 1) # (N, K, 1, T)
x = super(SpatialDropout, self).forward(x) # (N, K, 1, T), some features are masked
x = x.permute(0, 3, 2, 1) # (N, T, 1, K)
x = x.squeeze(2) # (N, T, K)
return x
def train_model(learn,output_dim,lr=0.001,
batch_size=512, n_epochs=5):
n = len(learn.data.train_dl)
phases = [(TrainingPhase(n).schedule_hp('lr', lr * (0.6**(i)))) for i in range(n_epochs)]
sched = GeneralScheduler(learn, phases)
learn.callbacks.append(sched)
models_array = []
for epoch in range(n_epochs):
learn.fit(1)
learn.save('model_{}'.format(epoch))
models_array.append(copy.deepcopy(learn.model))
return models_array
def handle_punctuation(x):
x = x.translate(remove_dict)
x = x.translate(isolate_dict)
return x
def handle_contractions(x):
x = tokenizer.tokenize(x)
return x
def fix_quote(x):
x = [x_[1:] if x_.startswith("'") else x_ for x_ in x]
x = ' '.join(x)
return x
def preprocess(x):
x = handle_punctuation(x)
x = handle_contractions(x)
x = fix_quote(x)
return x
class SequenceBucketCollator():
def __init__(self, choose_length, sequence_index, length_index, label_index=None):
self.choose_length = choose_length
self.sequence_index = sequence_index
self.length_index = length_index
self.label_index = label_index
def __call__(self, batch):
batch = [torch.stack(x) for x in list(zip(*batch))]
sequences = batch[self.sequence_index]
lengths = batch[self.length_index]
length = self.choose_length(lengths)
mask = torch.arange(start=maxlen, end=0, step=-1) < length
padded_sequences = sequences[:, mask]
batch[self.sequence_index] = padded_sequences
if self.label_index is not None:
return [x for i, x in enumerate(batch) if i != self.label_index], batch[self.label_index]
return batch
class SoftmaxPooling(nn.Module):
def __init__(self, dim=1):
super(self.__class__, self).__init__()
self.dim = dim
def forward(self, x):
return (x * x.softmax(dim=self.dim)).sum(dim=self.dim)
class NeuralNet(nn.Module):
def __init__(self, embedding_matrix, num_aux_targets):
super(NeuralNet, self).__init__()
embed_size = embedding_matrix.shape[1]
self.embedding = nn.Embedding(max_features, embed_size)
self.embedding.weight = nn.Parameter(torch.tensor(embedding_matrix, dtype=torch.float32))
self.embedding.weight.requires_grad = False
self.embedding_dropout = SpatialDropout(0.3)
self.lstm1 = nn.LSTM(embed_size, LSTM_UNITS, bidirectional=True, batch_first=True)
self.lstm2 = nn.LSTM(LSTM_UNITS * 2, LSTM_UNITS, bidirectional=True, batch_first=True)
self.linear_out = nn.Sequential(
nn.Dropout(0.5),
nn.BatchNorm1d(DENSE_HIDDEN_UNITS),
nn.Linear(DENSE_HIDDEN_UNITS, 1)
)
self.linear_aux_out = nn.Sequential(
nn.Dropout(0.5),
nn.BatchNorm1d(DENSE_HIDDEN_UNITS),
nn.Linear(DENSE_HIDDEN_UNITS, num_aux_targets)
)
self.softmaxpool = SoftmaxPooling()
def forward(self, x, lengths=None):
h_embedding = self.embedding(x.long())
h_embedding = self.embedding_dropout(h_embedding)
h_lstm1, _ = self.lstm1(h_embedding)
h_lstm2, _ = self.lstm2(h_lstm1)
# global average pooling
avg_pool = torch.mean(h_lstm2, 1)
# global max pooling
max_pool, _ = torch.max(h_lstm2, 1)
# softmax pooling
soft_pool = self.softmaxpool(h_lstm2)
h_conc = torch.cat((max_pool, avg_pool, soft_pool), 1)
hidden = h_conc
result = self.linear_out(hidden)
aux_result = self.linear_aux_out(hidden)
out = torch.cat([result, aux_result], 1)
return out
def custom_loss(data, targets):
bce_loss_1 = nn.BCEWithLogitsLoss(weight=targets[:,1:2])(data[:,:1],targets[:,:1])
bce_loss_2 = nn.BCEWithLogitsLoss()(data[:,1:],targets[:,2:])
return (bce_loss_1 * loss_weight) + bce_loss_2
def reduce_mem_usage(df):
start_mem = df.memory_usage().sum() / 1024**2
print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
for col in df.columns:
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype('category')
end_mem = df.memory_usage().sum() / 1024**2
print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df
def ensemble_predictions(predictions, weights, type_="linear"):
assert np.isclose(np.sum(weights), 1.0)
if type_ == "linear":
res = np.average(predictions, weights=weights, axis=0)
elif type_ == "harmonic":
res = np.average([1 / p for p in predictions], weights=weights, axis=0)
return 1 / res
elif type_ == "geometric":
numerator = np.average(
[np.log(p) for p in predictions], weights=weights, axis=0
)
res = np.exp(numerator / sum(weights))
return res
elif type_ == "rank":
res = np.average([rankdata(p) for p in predictions], weights=weights, axis=0)
return res / (len(res) + 1)
return res
warnings.filterwarnings(action='once')
device = torch.device('cuda')
SEED = 1234
BATCH_SIZE = 512
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
tqdm.pandas()
CRAWL_EMBEDDING_PATH = '../input/gensim-embeddings-dataset/crawl-300d-2M.gensim'
PARAGRAM_EMBEDDING_PATH = '../input/gensim-embeddings-dataset/paragram_300_sl999.gensim'
NUM_MODELS = 1
LSTM_UNITS = 256
DENSE_HIDDEN_UNITS = 1536
if not is_interactive():
def nop(it, *a, **k):
return it
tqdm = nop
fastprogress.fastprogress.NO_BAR = True
master_bar, progress_bar = force_console_behavior()
fastai.basic_train.master_bar, fastai.basic_train.progress_bar = (master_bar, progress_bar)
seed_everything()
x_train = pd.read_csv('../input/jigsawbiaspreprocessed/x_train.csv', header=None)[0].astype('str')
y_aux_train = np.load('../input/jigsawbiaspreprocessed/y_aux_train.npy')
y_train = np.load('../input/jigsawbiaspreprocessed/y_train.npy')
loss_weight = 3.209226860170181
max_features = 400000
train = pd.read_csv('../input/jigsaw-unintended-bias-in-toxicity-classification/train.csv')
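# Carve out a validation set with equal numbers of identity-annotated and non-annotated rows
# (48,660 of each); all remaining rows stay in the training split.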
annot_idx = train[train['identity_annotator_count'] > 0].sample(n=48660, random_state=13).index
not_annot_idx = train[train['identity_annotator_count'] == 0].sample(n=48660, random_state=13).index
x_val_idx = list(set(annot_idx).union(set(not_annot_idx)))
x_train_idx = list(set(x_train.index) - set(x_val_idx))
X_train = x_train.loc[x_train_idx]
Y_train = y_train[x_train_idx]
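# Soften the training labels: map targets from [0, 1] into [0.05, 0.95] for the main and auxiliary heads.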
Y_train[:, 0] = Y_train[:, 0] * 0.9 + 0.05
Y_aux_train = y_aux_train[x_train_idx] * 0.9 + 0.05
X_val = x_train.loc[x_val_idx]
Y_val = y_train[x_val_idx]
Y_aux_val = y_aux_train[x_val_idx]
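# Keras tokenizer with character filtering disabled and case preserved; vocabulary capped at max_features.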
tokenizer = text.Tokenizer(num_words=max_features, filters='', lower=False)
tokenizer.fit_on_texts(list(X_train))
crawl_matrix, unknown_words_crawl = build_matrix(tokenizer.word_index, CRAWL_EMBEDDING_PATH)
print('n unknown words (crawl): ', len(unknown_words_crawl))
paragram_matrix, unknown_words_paragram = build_matrix(tokenizer.word_index, PARAGRAM_EMBEDDING_PATH)
print('n unknown words (paragram): ', len(unknown_words_paragram))
max_features = max_features or len(tokenizer.word_index) + 1
max_features
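# Concatenate the crawl and paragram vectors so each token gets one 600-dimensional embedding (300 + 300).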
embedding_matrix = np.concatenate([crawl_matrix, paragram_matrix], axis=-1)
print(embedding_matrix.shape)
del crawl_matrix
del paragram_matrix
gc.collect()
y_train_torch = torch.tensor(np.hstack([Y_train, Y_aux_train]), dtype=torch.float32)
X_train = tokenizer.texts_to_sequences(X_train) | code |
332359/cell_4 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from subprocess import check_output
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train['Age'] = train['Age'].fillna(train['Age'].median())
train.loc[train['Sex'] == 'male', 'Sex'] = 0
train.loc[train['Sex'] == 'female', 'Sex'] = 1
train['Embarked'] = train['Embarked'].fillna('S')
train.loc[train['Embarked'] == 'S', 'Embarked'] = 0
train.loc[train['Embarked'] == 'C', 'Embarked'] = 1
train.loc[train['Embarked'] == 'Q', 'Embarked'] = 2
print(train['Embarked'].unique()) | code |
332359/cell_2 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from subprocess import check_output
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.describe() | code |
332359/cell_1 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train.head() | code |
332359/cell_3 | [
"text_html_output_1.png"
] | from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from subprocess import check_output
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train['Age'] = train['Age'].fillna(train['Age'].median())
train.describe() | code |
332359/cell_5 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LinearRegression
from subprocess import check_output
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from subprocess import check_output
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train['Age'] = train['Age'].fillna(train['Age'].median())
train.loc[train['Sex'] == 'male', 'Sex'] = 0
train.loc[train['Sex'] == 'female', 'Sex'] = 1
train['Embarked'] = train['Embarked'].fillna('S')
train.loc[train['Embarked'] == 'S', 'Embarked'] = 0
train.loc[train['Embarked'] == 'C', 'Embarked'] = 1
train.loc[train['Embarked'] == 'Q', 'Embarked'] = 2
train_predictors = train[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']].values
test_predictors = train[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']].values
target = train['Survived']
alg = LinearRegression()
alg.fit(train_predictors, target)
test_predictions = alg.predict(test_predictors)
print(test_predictions) | code |
74063893/cell_42 | [
"text_html_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
import numpy as np
import pandas as pd
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.fillna('', inplace=True)
test_dataset.fillna('', inplace=True)
dataset = pd.DataFrame()
test_dataset_cleaned = pd.DataFrame()
dataset['all_combined'] = train_dataset['keyword'] + ' ' + train_dataset['location'] + ' ' + train_dataset['text']
test_dataset_cleaned['all_combined'] = test_dataset['keyword'] + ' ' + test_dataset['location'] + ' ' + test_dataset['text']
test_dataset_cleaned.shape
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(dataset['all_cleaned'])
X = X.toarray()
preparing_test_df = vectorizer.transform(test_dataset_cleaned['all_cleaned'])
preparing_test_df = preparing_test_df.toarray()
from sklearn.linear_model import LogisticRegression
X_train = np.array(X)
print(X_train.shape)
y_train = dataset['target']
print(y_train.shape)
X_test = np.array(preparing_test_df)
print(X_test.shape)
clf = LogisticRegression(solver='liblinear')
clf.fit(X_train, y_train) | code |
74063893/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.fillna('', inplace=True)
test_dataset.fillna('', inplace=True)
train_dataset['target'].value_counts() | code |
74063893/cell_25 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
print(stopwords.words('english')) | code |
74063893/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.fillna('', inplace=True)
test_dataset.fillna('', inplace=True)
dataset = pd.DataFrame()
test_dataset_cleaned = pd.DataFrame()
dataset['all_combined'] = train_dataset['keyword'] + ' ' + train_dataset['location'] + ' ' + train_dataset['text']
test_dataset_cleaned['all_combined'] = test_dataset['keyword'] + ' ' + test_dataset['location'] + ' ' + test_dataset['text']
test_dataset_cleaned.shape
test_dataset_cleaned.tail(100) | code |
74063893/cell_33 | [
"text_html_output_1.png"
] | import pandas as pd
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.fillna('', inplace=True)
test_dataset.fillna('', inplace=True)
dataset = pd.DataFrame()
test_dataset_cleaned = pd.DataFrame()
dataset['all_combined'] = train_dataset['keyword'] + ' ' + train_dataset['location'] + ' ' + train_dataset['text']
test_dataset_cleaned['all_combined'] = test_dataset['keyword'] + ' ' + test_dataset['location'] + ' ' + test_dataset['text']
test_dataset_cleaned.shape
dataset.head(520) | code |
74063893/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape | code |
74063893/cell_40 | [
"text_html_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.fillna('', inplace=True)
test_dataset.fillna('', inplace=True)
dataset = pd.DataFrame()
test_dataset_cleaned = pd.DataFrame()
dataset['all_combined'] = train_dataset['keyword'] + ' ' + train_dataset['location'] + ' ' + train_dataset['text']
test_dataset_cleaned['all_combined'] = test_dataset['keyword'] + ' ' + test_dataset['location'] + ' ' + test_dataset['text']
test_dataset_cleaned.shape
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(dataset['all_cleaned'])
X = X.toarray()
print(X.shape)
preparing_test_df = vectorizer.transform(test_dataset_cleaned['all_cleaned'])
preparing_test_df = preparing_test_df.toarray()
print(preparing_test_df.shape) | code |
74063893/cell_29 | [
"text_plain_output_1.png"
] | import pandas as pd
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.fillna('', inplace=True)
test_dataset.fillna('', inplace=True)
dataset = pd.DataFrame()
test_dataset_cleaned = pd.DataFrame()
dataset['all_combined'] = train_dataset['keyword'] + ' ' + train_dataset['location'] + ' ' + train_dataset['text']
test_dataset_cleaned['all_combined'] = test_dataset['keyword'] + ' ' + test_dataset['location'] + ' ' + test_dataset['text']
test_dataset_cleaned.shape
test_dataset_cleaned.head(520) | code |
74063893/cell_39 | [
"text_plain_output_1.png"
] | import pandas as pd
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.fillna('', inplace=True)
test_dataset.fillna('', inplace=True)
dataset = pd.DataFrame()
test_dataset_cleaned = pd.DataFrame()
dataset['all_combined'] = train_dataset['keyword'] + ' ' + train_dataset['location'] + ' ' + train_dataset['text']
test_dataset_cleaned['all_combined'] = test_dataset['keyword'] + ' ' + test_dataset['location'] + ' ' + test_dataset['text']
test_dataset_cleaned.shape
dataset.head(520) | code |
74063893/cell_48 | [
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
import numpy as np
import pandas as pd
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.fillna('', inplace=True)
test_dataset.fillna('', inplace=True)
dataset = pd.DataFrame()
test_dataset_cleaned = pd.DataFrame()
dataset['all_combined'] = train_dataset['keyword'] + ' ' + train_dataset['location'] + ' ' + train_dataset['text']
test_dataset_cleaned['all_combined'] = test_dataset['keyword'] + ' ' + test_dataset['location'] + ' ' + test_dataset['text']
test_dataset_cleaned.shape
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(dataset['all_cleaned'])
X = X.toarray()
preparing_test_df = vectorizer.transform(test_dataset_cleaned['all_cleaned'])
preparing_test_df = preparing_test_df.toarray()
from sklearn.linear_model import LogisticRegression
X_train = np.array(X)
y_train = dataset['target']
X_test = np.array(preparing_test_df)
clf = LogisticRegression(solver='liblinear')
clf.fit(X_train, y_train)
prediction = clf.predict(X_test)
submission = pd.DataFrame({'id': test_dataset['id'], 'target': prediction})
submission.head() | code |
74063893/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.fillna('', inplace=True)
test_dataset.fillna('', inplace=True)
test_dataset.head() | code |
74063893/cell_45 | [
"text_html_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
import numpy as np
import pandas as pd
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.fillna('', inplace=True)
test_dataset.fillna('', inplace=True)
dataset = pd.DataFrame()
test_dataset_cleaned = pd.DataFrame()
dataset['all_combined'] = train_dataset['keyword'] + ' ' + train_dataset['location'] + ' ' + train_dataset['text']
test_dataset_cleaned['all_combined'] = test_dataset['keyword'] + ' ' + test_dataset['location'] + ' ' + test_dataset['text']
test_dataset_cleaned.shape
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(dataset['all_cleaned'])
X = X.toarray()
preparing_test_df = vectorizer.transform(test_dataset_cleaned['all_cleaned'])
preparing_test_df = preparing_test_df.toarray()
from sklearn.linear_model import LogisticRegression
X_train = np.array(X)
y_train = dataset['target']
X_test = np.array(preparing_test_df)
clf = LogisticRegression(solver='liblinear')
clf.fit(X_train, y_train)
prediction = clf.predict(X_test)
prediction | code |
74063893/cell_49 | [
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
import numpy as np
import pandas as pd
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.fillna('', inplace=True)
test_dataset.fillna('', inplace=True)
dataset = pd.DataFrame()
test_dataset_cleaned = pd.DataFrame()
dataset['all_combined'] = train_dataset['keyword'] + ' ' + train_dataset['location'] + ' ' + train_dataset['text']
test_dataset_cleaned['all_combined'] = test_dataset['keyword'] + ' ' + test_dataset['location'] + ' ' + test_dataset['text']
test_dataset_cleaned.shape
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(dataset['all_cleaned'])
X = X.toarray()
preparing_test_df = vectorizer.transform(test_dataset_cleaned['all_cleaned'])
preparing_test_df = preparing_test_df.toarray()
from sklearn.linear_model import LogisticRegression
X_train = np.array(X)
y_train = dataset['target']
X_test = np.array(preparing_test_df)
clf = LogisticRegression(solver='liblinear')
clf.fit(X_train, y_train)
prediction = clf.predict(X_test)
submission = pd.DataFrame({'id': test_dataset['id'], 'target': prediction})
submission.shape | code |
74063893/cell_18 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.fillna('', inplace=True)
test_dataset.fillna('', inplace=True)
dataset = pd.DataFrame()
test_dataset_cleaned = pd.DataFrame()
dataset['all_combined'] = train_dataset['keyword'] + ' ' + train_dataset['location'] + ' ' + train_dataset['text']
test_dataset_cleaned['all_combined'] = test_dataset['keyword'] + ' ' + test_dataset['location'] + ' ' + test_dataset['text']
print(dataset.shape)
test_dataset_cleaned.shape | code |
74063893/cell_32 | [
"text_plain_output_1.png"
] | from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import pandas as pd
import re
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.fillna('', inplace=True)
test_dataset.fillna('', inplace=True)
dataset = pd.DataFrame()
test_dataset_cleaned = pd.DataFrame()
dataset['all_combined'] = train_dataset['keyword'] + ' ' + train_dataset['location'] + ' ' + train_dataset['text']
test_dataset_cleaned['all_combined'] = test_dataset['keyword'] + ' ' + test_dataset['location'] + ' ' + test_dataset['text']
test_dataset_cleaned.shape
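# Basic text cleanup: lower-case, strip URLs, hashtags/mentions and non-alphabetic characters,
# and collapse repeated whitespace.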
def clean(data):
data = data.lower()
data = re.sub('https?://\\S+|www\\.\\S+', ' ', data)
data = re.sub('\\W', ' ', data)
data = re.sub('\n', ' ', data)
data = re.sub(' +', ' ', data)
data = re.sub('^ ', ' ', data)
data = re.sub(' $', ' ', data)
data = re.sub('#', ' ', data)
data = re.sub('@', ' ', data)
data = re.sub('[^a-zA-Z]', ' ', data)
return data
stop = set(stopwords.words('english'))
def remove_stopwords(data):
words = [word for word in data if word not in stop]
words = ''.join(words).split()
words = [words.lower() for words in data.split()]
return words
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
def lemmatization(data):
lemmas = []
for word in data.split():
lemmas.append(lemmatizer.lemmatize(word))
return ' '.join(lemmas)
dataset['all_cleaned'].apply(lemmatization)
test_dataset_cleaned['all_cleaned'].apply(lemmatization) | code |
74063893/cell_28 | [
"text_html_output_1.png"
] | import pandas as pd
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.fillna('', inplace=True)
test_dataset.fillna('', inplace=True)
dataset = pd.DataFrame()
test_dataset_cleaned = pd.DataFrame()
dataset['all_combined'] = train_dataset['keyword'] + ' ' + train_dataset['location'] + ' ' + train_dataset['text']
test_dataset_cleaned['all_combined'] = test_dataset['keyword'] + ' ' + test_dataset['location'] + ' ' + test_dataset['text']
test_dataset_cleaned.shape
dataset.head(520) | code |
74063893/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.info() | code |
74063893/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.fillna('', inplace=True)
test_dataset.fillna('', inplace=True)
train_dataset['location'].value_counts() | code |
74063893/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.fillna('', inplace=True)
test_dataset.fillna('', inplace=True)
train_dataset['keyword'].value_counts() | code |
74063893/cell_14 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.fillna('', inplace=True)
test_dataset.fillna('', inplace=True)
plt.figure(figsize=(15, 7))
sns.countplot(train_dataset['target']) | code |
74063893/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.fillna('', inplace=True)
test_dataset.fillna('', inplace=True)
dataset = pd.DataFrame()
test_dataset_cleaned = pd.DataFrame()
dataset['all_combined'] = train_dataset['keyword'] + ' ' + train_dataset['location'] + ' ' + train_dataset['text']
test_dataset_cleaned['all_combined'] = test_dataset['keyword'] + ' ' + test_dataset['location'] + ' ' + test_dataset['text']
test_dataset_cleaned.shape
dataset.head(100) | code |
74063893/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.fillna('', inplace=True)
test_dataset.fillna('', inplace=True)
train_dataset.head() | code |
74063893/cell_27 | [
"text_html_output_1.png"
] | from nltk.corpus import stopwords
import pandas as pd
import re
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.fillna('', inplace=True)
test_dataset.fillna('', inplace=True)
dataset = pd.DataFrame()
test_dataset_cleaned = pd.DataFrame()
dataset['all_combined'] = train_dataset['keyword'] + ' ' + train_dataset['location'] + ' ' + train_dataset['text']
test_dataset_cleaned['all_combined'] = test_dataset['keyword'] + ' ' + test_dataset['location'] + ' ' + test_dataset['text']
test_dataset_cleaned.shape
def clean(data):
data = data.lower()
data = re.sub('https?://\\S+|www\\.\\S+', ' ', data)
data = re.sub('\\W', ' ', data)
data = re.sub('\n', ' ', data)
data = re.sub(' +', ' ', data)
data = re.sub('^ ', ' ', data)
data = re.sub(' $', ' ', data)
data = re.sub('#', ' ', data)
data = re.sub('@', ' ', data)
data = re.sub('[^a-zA-Z]', ' ', data)
return data
stop = set(stopwords.words('english'))
def remove_stopwords(data):
words = [word for word in data if word not in stop]
words = ''.join(words).split()
words = [words.lower() for words in data.split()]
return words
dataset['all_cleaned'].apply(remove_stopwords)
test_dataset_cleaned['all_cleaned'].apply(remove_stopwords) | code |
74063893/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.head() | code |
74063893/cell_36 | [
"text_html_output_1.png"
] | import pandas as pd
train_dataset = pd.read_csv('../input/nlp-getting-started/train.csv')
test_dataset = pd.read_csv('../input/nlp-getting-started/test.csv')
train_dataset.shape
train_dataset.fillna('', inplace=True)
test_dataset.fillna('', inplace=True)
dataset = pd.DataFrame()
test_dataset_cleaned = pd.DataFrame()
dataset['all_combined'] = train_dataset['keyword'] + ' ' + train_dataset['location'] + ' ' + train_dataset['text']
test_dataset_cleaned['all_combined'] = test_dataset['keyword'] + ' ' + test_dataset['location'] + ' ' + test_dataset['text']
test_dataset_cleaned.shape
dataset.head(520) | code |
74057429/cell_2 | [
"text_plain_output_1.png"
] | !pip install --upgrade tensorflow | code |
74057429/cell_5 | [
"application_vnd.jupyter.stderr_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | import numpy as np # linear algebra
import tensorflow as tf
import tensorflow_datasets as tfds
def load_data():
mnist_train = tfds.load('mnist', split='train', shuffle_files=True)
x_train = np.zeros((60000, 28, 28, 1))
y_train = np.zeros((60000, 1))
i = 0
for ex in mnist_train:
x_train[i] = ex['image']
y_train[i] = ex['label']
i = i + 1
mnist_train_c = tfds.load('mnist_corrupted/splatter', split='train', shuffle_files=True)
x_test = np.zeros((60000, 28, 28, 1))
y_test = np.zeros((60000, 1))
i = 0
for ex in mnist_train_c:
x_test[i] = ex['image']
y_test[i] = ex['label']
i = i + 1
x_train = 1 - x_train / 255.0
x_train = x_train.astype(np.float32)
y_train_oh = tf.keras.utils.to_categorical(y_train)
x_test = 1 - x_test / 255.0
x_test = x_test.astype(np.float32)
y_test_oh = tf.keras.utils.to_categorical(y_test)
return ((x_train, y_train, y_train_oh), (x_test, y_test, y_test_oh))
(x_train, y_train, y_train_oh), (x_test_c, y_test_c, y_test_oh_c) = load_data() | code |
104118935/cell_9 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/list-of-indian-satellites/List_of_Indian_satellites.csv', encoding='cp1252')
df
fig, ax = plt.subplots(figsize=(15,3))
ax=sns.countplot(x='launch site',data=df)
plt.xticks(rotation=90)
fig, ax = plt.subplots(figsize=(15,3))
ax=sns.countplot(x='launch site',data=df)
plt.xticks(rotation=90)
fig, ax = plt.subplots(figsize=(15, 3))
ax = sns.countplot(x='launch status', data=df) | code |
104118935/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/list-of-indian-satellites/List_of_Indian_satellites.csv', encoding='cp1252')
df
df['launch site'].groupby(df['launch site']).count().sort_values(ascending=False) | code |
104118935/cell_6 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/list-of-indian-satellites/List_of_Indian_satellites.csv', encoding='cp1252')
df
def hlaunch_site(value):
a = str(value).split(' ')
if 'Satish' in a:
return 'Satish Dhawan Space Centre, Sriharikota, Andhra Pradesh'
else:
return value
df['launch site'] = df['launch site'].apply(hlaunch_site)
df['launch site'].groupby(df['launch site']).count().sort_values(ascending=False) | code |
104118935/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/list-of-indian-satellites/List_of_Indian_satellites.csv', encoding='cp1252')
df | code |
104118935/cell_11 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/list-of-indian-satellites/List_of_Indian_satellites.csv', encoding='cp1252')
df
(df['launch site'] == 'Satish Dhawan Space Centre, Sriharikota, Andhra Pradesh').groupby(df['launch status']).count() | code |
104118935/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
104118935/cell_7 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/list-of-indian-satellites/List_of_Indian_satellites.csv', encoding='cp1252')
df
fig, ax = plt.subplots(figsize=(15,3))
ax=sns.countplot(x='launch site',data=df)
plt.xticks(rotation=90)
fig, ax = plt.subplots(figsize=(15, 3))
ax = sns.countplot(x='launch site', data=df)
plt.xticks(rotation=90) | code |
104118935/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/list-of-indian-satellites/List_of_Indian_satellites.csv', encoding='cp1252')
df
df['launch status'].groupby(df['launch status']).count() | code |
104118935/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import missingno as msno
import pandas as pd
df = pd.read_csv('../input/list-of-indian-satellites/List_of_Indian_satellites.csv', encoding='cp1252')
df
msno.bar(df, figsize=(6, 3), color='magenta') | code |
104118935/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/list-of-indian-satellites/List_of_Indian_satellites.csv', encoding='cp1252')
df
df[df['launch status'] == 0] | code |
104118935/cell_5 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/list-of-indian-satellites/List_of_Indian_satellites.csv', encoding='cp1252')
df
fig, ax = plt.subplots(figsize=(15, 3))
ax = sns.countplot(x='launch site', data=df)
plt.xticks(rotation=90) | code |
324293/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import colorsys
import matplotlib.pyplot as plt
labels = df.Gender.value_counts().index
N = len(df.EmploymentField.value_counts().index)
HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)]
RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples))
patches, texts = plt.pie(df.Gender.value_counts(), colors=RGB_tuples, startangle=90)
plt.axes().set_aspect('equal', 'datalim')
plt.legend(patches, labels, bbox_to_anchor=(1.05,1))
plt.title("Gender")
plt.show()
N = len(df.JobRoleInterest.value_counts().index)
HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)]
RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples))
labels = df.JobRoleInterest.value_counts().index
colors = ['OliveDrab', 'Orange', 'OrangeRed', 'DarkCyan', 'Salmon', 'Sienna', 'Maroon', 'LightSlateGrey', 'DimGray']
patches, texts = plt.pie(df.JobRoleInterest.value_counts(), colors=RGB_tuples, startangle=90)
plt.axes().set_aspect('equal', 'datalim')
plt.legend(patches, labels, bbox_to_anchor=(1.25, 1))
plt.title('Job Role Interest')
plt.show() | code |
324293/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import colorsys
import matplotlib.pyplot as plt
labels = df.Gender.value_counts().index
N = len(df.EmploymentField.value_counts().index)
HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)]
RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples))
patches, texts = plt.pie(df.Gender.value_counts(), colors=RGB_tuples, startangle=90)
plt.axes().set_aspect('equal', 'datalim')
plt.legend(patches, labels, bbox_to_anchor=(1.05, 1))
plt.title('Gender')
plt.show() | code |
324293/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import colorsys
plt.style.use('seaborn-talk')
df = pd.read_csv('../input/2016-FCC-New-Coders-Survey-Data.csv', sep=',') | code |
324293/cell_18 | [
"image_output_1.png"
] | import colorsys
import matplotlib.pyplot as plt
import pandas as pd
labels = df.Gender.value_counts().index
N = len(df.EmploymentField.value_counts().index)
HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)]
RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples))
patches, texts = plt.pie(df.Gender.value_counts(), colors=RGB_tuples, startangle=90)
plt.axes().set_aspect('equal', 'datalim')
plt.legend(patches, labels, bbox_to_anchor=(1.05,1))
plt.title("Gender")
plt.show()
N = len(df.JobRoleInterest.value_counts().index)
HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)]
RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples))
labels = df.JobRoleInterest.value_counts().index
colors = ['OliveDrab', 'Orange', 'OrangeRed', 'DarkCyan', 'Salmon', 'Sienna', 'Maroon', 'LightSlateGrey', 'DimGray']
patches, texts = plt.pie(df.JobRoleInterest.value_counts(), colors=RGB_tuples, startangle=90)
plt.axes().set_aspect('equal', 'datalim')
plt.legend(patches, labels, bbox_to_anchor=(1.25, 1))
plt.title("Job Role Interest")
plt.show()
N = len(df.EmploymentField.value_counts().index)
HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)]
RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples))
labels = df.EmploymentField.value_counts().index
patches, texts = plt.pie(df.EmploymentField.value_counts(), colors=RGB_tuples, startangle=90)
plt.axes().set_aspect('equal', 'datalim')
plt.legend(patches, labels, bbox_to_anchor=(1.3, 1))
plt.title("Employment Field")
plt.show()
df_ageranges = df.copy()
bins=[0, 20, 30, 40, 50, 60, 100]
df_ageranges['AgeRanges'] = pd.cut(df_ageranges['Age'], bins, labels=["< 20", "20-30", "30-40", "40-50", "50-60", "< 60"])
df2 = pd.crosstab(df_ageranges.AgeRanges,df_ageranges.JobPref).apply(lambda r: r/r.sum(), axis=1)
N = len(df_ageranges.AgeRanges.value_counts().index)
HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)]
RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples))
ax1 = df2.plot(kind="bar", stacked=True, color= RGB_tuples, title="Job preference per Age")
lines, labels = ax1.get_legend_handles_labels()
ax1.legend(lines,labels, bbox_to_anchor=(1.51, 1))
df4 = pd.crosstab(df_ageranges.EmploymentField, df_ageranges.IsUnderEmployed).apply(lambda r: r / r.sum(), axis=1)
df4 = df4.sort_values(by=1.0)
N = len(df_ageranges.EmploymentField.value_counts().index)
HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)]
RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples))
ax1 = df4.plot(kind='bar', stacked=True, color=RGB_tuples, title='Under-employed per Employment Field')
lines, labels = ax1.get_legend_handles_labels()
ax1.legend(lines, ['No', 'Yes'], bbox_to_anchor=(1.51, 1)) | code |
324293/cell_15 | [
"image_output_1.png"
] | import colorsys
import matplotlib.pyplot as plt
import pandas as pd
labels = df.Gender.value_counts().index
N = len(df.EmploymentField.value_counts().index)
HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)]
RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples))
patches, texts = plt.pie(df.Gender.value_counts(), colors=RGB_tuples, startangle=90)
plt.axes().set_aspect('equal', 'datalim')
plt.legend(patches, labels, bbox_to_anchor=(1.05,1))
plt.title("Gender")
plt.show()
N = len(df.JobRoleInterest.value_counts().index)
HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)]
RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples))
labels = df.JobRoleInterest.value_counts().index
colors = ['OliveDrab', 'Orange', 'OrangeRed', 'DarkCyan', 'Salmon', 'Sienna', 'Maroon', 'LightSlateGrey', 'DimGray']
patches, texts = plt.pie(df.JobRoleInterest.value_counts(), colors=RGB_tuples, startangle=90)
plt.axes().set_aspect('equal', 'datalim')
plt.legend(patches, labels, bbox_to_anchor=(1.25, 1))
plt.title("Job Role Interest")
plt.show()
N = len(df.EmploymentField.value_counts().index)
HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)]
RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples))
labels = df.EmploymentField.value_counts().index
patches, texts = plt.pie(df.EmploymentField.value_counts(), colors=RGB_tuples, startangle=90)
plt.axes().set_aspect('equal', 'datalim')
plt.legend(patches, labels, bbox_to_anchor=(1.3, 1))
plt.title("Employment Field")
plt.show()
df_ageranges = df.copy()
bins = [0, 20, 30, 40, 50, 60, 100]
df_ageranges['AgeRanges'] = pd.cut(df_ageranges['Age'], bins, labels=['< 20', '20-30', '30-40', '40-50', '50-60', '< 60'])
df2 = pd.crosstab(df_ageranges.AgeRanges, df_ageranges.JobPref).apply(lambda r: r / r.sum(), axis=1)
N = len(df_ageranges.AgeRanges.value_counts().index)
HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)]
RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples))
ax1 = df2.plot(kind='bar', stacked=True, color=RGB_tuples, title='Job preference per Age')
lines, labels = ax1.get_legend_handles_labels()
ax1.legend(lines, labels, bbox_to_anchor=(1.51, 1)) | code |
324293/cell_3 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
df.Age.hist(bins=100)
plt.xlabel('Age')
plt.title('Distribution of Age')
plt.show() | code |
324293/cell_12 | [
"image_output_1.png"
] | import colorsys
import matplotlib.pyplot as plt
labels = df.Gender.value_counts().index
N = len(df.EmploymentField.value_counts().index)
HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)]
RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples))
patches, texts = plt.pie(df.Gender.value_counts(), colors=RGB_tuples, startangle=90)
plt.axes().set_aspect('equal', 'datalim')
plt.legend(patches, labels, bbox_to_anchor=(1.05,1))
plt.title("Gender")
plt.show()
N = len(df.JobRoleInterest.value_counts().index)
HSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)]
RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples))
labels = df.JobRoleInterest.value_counts().index
colors = ['OliveDrab', 'Orange', 'OrangeRed', 'DarkCyan', 'Salmon', 'Sienna', 'Maroon', 'LightSlateGrey', 'DimGray']
patches, texts = plt.pie(df.JobRoleInterest.value_counts(), colors=RGB_tuples, startangle=90)
plt.axes().set_aspect('equal', 'datalim')
plt.legend(patches, labels, bbox_to_anchor=(1.25, 1))
plt.title("Job Role Interest")
plt.show()
N = len(df.EmploymentField.value_counts().index)
HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)]
RGB_tuples = list(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples))
labels = df.EmploymentField.value_counts().index
patches, texts = plt.pie(df.EmploymentField.value_counts(), colors=RGB_tuples, startangle=90)
plt.axes().set_aspect('equal', 'datalim')
plt.legend(patches, labels, bbox_to_anchor=(1.3, 1))
plt.title('Employment Field')
plt.show() | code |
73079996/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from tensorflow import keras
import pandas as pd
import tensorflow as tf
sample_submission = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/sample_submission.csv')
test = sample_submission
test['BraTS21ID5'] = [format(x, '05d') for x in test.BraTS21ID]
test_dataset = Dataset(test, is_train=False)
model = keras.Model()
model = tf.keras.models.load_model('../input/brain-tumor-3d-weights-l/Brain_3d_cls_FLAIR.h5')
preds = model.predict(test_dataset)
preds = preds.reshape(-1)
submission = pd.DataFrame({'BraTS21ID': sample_submission['BraTS21ID'], 'MGMT_value': preds})
submission | code |
73079996/cell_9 | [
"image_output_1.png"
] | from pydicom.pixel_data_handlers.util import apply_voi_lut
import cv2
import glob
import matplotlib.pyplot as plt
import numpy as np
import pydicom
import re
data_directory = '../input/rsna-miccai-brain-tumor-radiogenomic-classification'
pytorch3dpath = '../input/efficientnetpyttorch3d/EfficientNet-PyTorch-3D'
mri_types = ['FLAIR', 'T1w', 'T1wCE', 'T2w']
IMAGE_SIZE = 256
NUM_IMAGES = 64
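# Load a single DICOM slice, apply VOI LUT windowing when requested, optionally rotate,
# and resize it to img_size x img_size.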
def load_dicom_image(path, img_size=IMAGE_SIZE, voi_lut=True, rotate=0):
dicom = pydicom.read_file(path)
data = dicom.pixel_array
if voi_lut:
data = apply_voi_lut(dicom.pixel_array, dicom)
else:
data = dicom.pixel_array
if rotate > 0:
rot_choices = [0, cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE, cv2.ROTATE_180]
data = cv2.rotate(data, rot_choices[rotate])
data = cv2.resize(data, (img_size, img_size))
return data
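# Build a (1, img_size, img_size, num_imgs) volume from the middle num_imgs slices of one scan,
# zero-padding short series and min-max normalising the intensities.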
def load_dicom_images_3d(scan_id, num_imgs=NUM_IMAGES, img_size=IMAGE_SIZE, mri_type='FLAIR', split='test', rotate=0):
files = sorted(glob.glob(f'{data_directory}/{split}/{scan_id}/{mri_type}/*.dcm'), key=lambda var: [int(x) if x.isdigit() else x for x in re.findall('[^0-9]|[0-9]+', var)])
middle = len(files) // 2
num_imgs2 = num_imgs // 2
p1 = max(0, middle - num_imgs2)
p2 = min(len(files), middle + num_imgs2)
img3d = np.stack([load_dicom_image(f, rotate=rotate) for f in files[p1:p2]]).T
if img3d.shape[-1] < num_imgs:
n_zero = np.zeros((img_size, img_size, num_imgs - img3d.shape[-1]))
img3d = np.concatenate((img3d, n_zero), axis=-1)
if np.min(img3d) < np.max(img3d):
img3d = img3d - np.min(img3d)
img3d = img3d / np.max(img3d)
return np.expand_dims(img3d, 0)
a = load_dicom_images_3d('00001')
print(a.shape)
print(np.min(a), np.max(a), np.mean(a), np.median(a))
image = a[0]
print('Dimension of the CT scan is:', image.shape)
plt.imshow(np.squeeze(image[:, :, 30]), cmap='gray') | code |
73079996/cell_23 | [
"text_plain_output_1.png"
] | from pydicom.pixel_data_handlers.util import apply_voi_lut
from tensorflow import keras
import cv2
import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pydicom
import re
import tensorflow as tf
data_directory = '../input/rsna-miccai-brain-tumor-radiogenomic-classification'
pytorch3dpath = '../input/efficientnetpyttorch3d/EfficientNet-PyTorch-3D'
mri_types = ['FLAIR', 'T1w', 'T1wCE', 'T2w']
IMAGE_SIZE = 256
NUM_IMAGES = 64
sample_submission = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/sample_submission.csv')
test = sample_submission
test['BraTS21ID5'] = [format(x, '05d') for x in test.BraTS21ID]
def load_dicom_image(path, img_size=IMAGE_SIZE, voi_lut=True, rotate=0):
dicom = pydicom.read_file(path)
data = dicom.pixel_array
if voi_lut:
data = apply_voi_lut(dicom.pixel_array, dicom)
else:
data = dicom.pixel_array
if rotate > 0:
rot_choices = [0, cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE, cv2.ROTATE_180]
data = cv2.rotate(data, rot_choices[rotate])
data = cv2.resize(data, (img_size, img_size))
return data
def load_dicom_images_3d(scan_id, num_imgs=NUM_IMAGES, img_size=IMAGE_SIZE, mri_type='FLAIR', split='test', rotate=0):
files = sorted(glob.glob(f'{data_directory}/{split}/{scan_id}/{mri_type}/*.dcm'), key=lambda var: [int(x) if x.isdigit() else x for x in re.findall('[^0-9]|[0-9]+', var)])
middle = len(files) // 2
num_imgs2 = num_imgs // 2
p1 = max(0, middle - num_imgs2)
p2 = min(len(files), middle + num_imgs2)
img3d = np.stack([load_dicom_image(f, rotate=rotate) for f in files[p1:p2]]).T
if img3d.shape[-1] < num_imgs:
n_zero = np.zeros((img_size, img_size, num_imgs - img3d.shape[-1]))
img3d = np.concatenate((img3d, n_zero), axis=-1)
if np.min(img3d) < np.max(img3d):
img3d = img3d - np.min(img3d)
img3d = img3d / np.max(img3d)
return np.expand_dims(img3d, 0)
a = load_dicom_images_3d('00001')
image = a[0]
def plot_slices(num_rows, num_columns, width, height, data):
"""Plot a montage of 20 CT slices"""
data = np.rot90(np.array(data))
data = np.transpose(data)
data = np.reshape(data, (num_rows, num_columns, width, height))
rows_data, columns_data = data.shape[0], data.shape[1]
heights = [slc[0].shape[0] for slc in data]
widths = [slc.shape[1] for slc in data[0]]
fig_width = 12.0
fig_height = fig_width * sum(heights) / sum(widths)
f, axarr = plt.subplots(
rows_data,
columns_data,
figsize=(fig_width, fig_height),
gridspec_kw={"height_ratios": heights},
)
for i in range(rows_data):
for j in range(columns_data):
axarr[i, j].imshow(data[i][j], cmap="gray")
axarr[i, j].axis("off")
plt.subplots_adjust(wspace=0, hspace=0, left=0, right=1, bottom=0, top=1)
plt.show()
# Visualize montage of slices.
# 5 rows and 10 columns for 100 slices of the CT scan.
plot_slices(3, 10, 256, 256, image[:, :, :30])
test_dataset = Dataset(test, is_train=False)
for i in range(1):
image = test_dataset[i]
model = keras.Model()
model = tf.keras.models.load_model('../input/brain-tumor-3d-weights-l/Brain_3d_cls_FLAIR.h5')
preds = model.predict(test_dataset)
preds = preds.reshape(-1)
submission = pd.DataFrame({'BraTS21ID': sample_submission['BraTS21ID'], 'MGMT_value': preds})
submission.to_csv('submission.csv', index=False)
plt.figure(figsize=(5, 5))
plt.hist(submission['MGMT_value']) | code |
73079996/cell_19 | [
"image_output_1.png"
] | from tensorflow import keras
import pandas as pd
import tensorflow as tf
sample_submission = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/sample_submission.csv')
test = sample_submission
test['BraTS21ID5'] = [format(x, '05d') for x in test.BraTS21ID]
test_dataset = Dataset(test, is_train=False)
model = keras.Model()
model = tf.keras.models.load_model('../input/brain-tumor-3d-weights-l/Brain_3d_cls_FLAIR.h5')
preds = model.predict(test_dataset)
preds = preds.reshape(-1)
preds | code |
73079996/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
sample_submission = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/sample_submission.csv')
test = sample_submission
test['BraTS21ID5'] = [format(x, '05d') for x in test.BraTS21ID]
test.head(3) | code |
73079996/cell_14 | [
"text_plain_output_2.png",
"text_plain_output_1.png",
"image_output_1.png"
] | from pydicom.pixel_data_handlers.util import apply_voi_lut
import cv2
import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pydicom
import re
data_directory = '../input/rsna-miccai-brain-tumor-radiogenomic-classification'
pytorch3dpath = '../input/efficientnetpyttorch3d/EfficientNet-PyTorch-3D'
mri_types = ['FLAIR', 'T1w', 'T1wCE', 'T2w']
IMAGE_SIZE = 256
NUM_IMAGES = 64
sample_submission = pd.read_csv('../input/rsna-miccai-brain-tumor-radiogenomic-classification/sample_submission.csv')
test = sample_submission
test['BraTS21ID5'] = [format(x, '05d') for x in test.BraTS21ID]
def load_dicom_image(path, img_size=IMAGE_SIZE, voi_lut=True, rotate=0):
dicom = pydicom.read_file(path)
data = dicom.pixel_array
if voi_lut:
data = apply_voi_lut(dicom.pixel_array, dicom)
else:
data = dicom.pixel_array
if rotate > 0:
rot_choices = [0, cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE, cv2.ROTATE_180]
data = cv2.rotate(data, rot_choices[rotate])
data = cv2.resize(data, (img_size, img_size))
return data
def load_dicom_images_3d(scan_id, num_imgs=NUM_IMAGES, img_size=IMAGE_SIZE, mri_type='FLAIR', split='test', rotate=0):
files = sorted(glob.glob(f'{data_directory}/{split}/{scan_id}/{mri_type}/*.dcm'), key=lambda var: [int(x) if x.isdigit() else x for x in re.findall('[^0-9]|[0-9]+', var)])
middle = len(files) // 2
num_imgs2 = num_imgs // 2
p1 = max(0, middle - num_imgs2)
p2 = min(len(files), middle + num_imgs2)
img3d = np.stack([load_dicom_image(f, rotate=rotate) for f in files[p1:p2]]).T
if img3d.shape[-1] < num_imgs:
n_zero = np.zeros((img_size, img_size, num_imgs - img3d.shape[-1]))
img3d = np.concatenate((img3d, n_zero), axis=-1)
if np.min(img3d) < np.max(img3d):
img3d = img3d - np.min(img3d)
img3d = img3d / np.max(img3d)
return np.expand_dims(img3d, 0)
a = load_dicom_images_3d('00001')
image = a[0]
def plot_slices(num_rows, num_columns, width, height, data):
"""Plot a montage of 20 CT slices"""
data = np.rot90(np.array(data))
data = np.transpose(data)
data = np.reshape(data, (num_rows, num_columns, width, height))
rows_data, columns_data = data.shape[0], data.shape[1]
heights = [slc[0].shape[0] for slc in data]
widths = [slc.shape[1] for slc in data[0]]
fig_width = 12.0
fig_height = fig_width * sum(heights) / sum(widths)
f, axarr = plt.subplots(
rows_data,
columns_data,
figsize=(fig_width, fig_height),
gridspec_kw={"height_ratios": heights},
)
for i in range(rows_data):
for j in range(columns_data):
axarr[i, j].imshow(data[i][j], cmap="gray")
axarr[i, j].axis("off")
plt.subplots_adjust(wspace=0, hspace=0, left=0, right=1, bottom=0, top=1)
plt.show()
# Visualize montage of slices.
# 5 rows and 10 columns for 100 slices of the CT scan.
plot_slices(3, 10, 256, 256, image[:, :, :30])
test_dataset = Dataset(test, is_train=False)
for i in range(1):
image = test_dataset[i]
print('Dimension of the CT scan is:', image.shape)
plt.imshow(image[0, :, :, 30], cmap='gray')
plt.show() | code |
73079996/cell_10 | [
"text_html_output_1.png"
] | from pydicom.pixel_data_handlers.util import apply_voi_lut
import cv2
import glob
import matplotlib.pyplot as plt
import numpy as np
import pydicom
import re
data_directory = '../input/rsna-miccai-brain-tumor-radiogenomic-classification'
pytorch3dpath = '../input/efficientnetpyttorch3d/EfficientNet-PyTorch-3D'
mri_types = ['FLAIR', 'T1w', 'T1wCE', 'T2w']
IMAGE_SIZE = 256
NUM_IMAGES = 64
def load_dicom_image(path, img_size=IMAGE_SIZE, voi_lut=True, rotate=0):
dicom = pydicom.read_file(path)
data = dicom.pixel_array
if voi_lut:
data = apply_voi_lut(dicom.pixel_array, dicom)
else:
data = dicom.pixel_array
if rotate > 0:
rot_choices = [0, cv2.ROTATE_90_CLOCKWISE, cv2.ROTATE_90_COUNTERCLOCKWISE, cv2.ROTATE_180]
data = cv2.rotate(data, rot_choices[rotate])
data = cv2.resize(data, (img_size, img_size))
return data
def load_dicom_images_3d(scan_id, num_imgs=NUM_IMAGES, img_size=IMAGE_SIZE, mri_type='FLAIR', split='test', rotate=0):
files = sorted(glob.glob(f'{data_directory}/{split}/{scan_id}/{mri_type}/*.dcm'), key=lambda var: [int(x) if x.isdigit() else x for x in re.findall('[^0-9]|[0-9]+', var)])
middle = len(files) // 2
num_imgs2 = num_imgs // 2
p1 = max(0, middle - num_imgs2)
p2 = min(len(files), middle + num_imgs2)
img3d = np.stack([load_dicom_image(f, rotate=rotate) for f in files[p1:p2]]).T
if img3d.shape[-1] < num_imgs:
n_zero = np.zeros((img_size, img_size, num_imgs - img3d.shape[-1]))
img3d = np.concatenate((img3d, n_zero), axis=-1)
if np.min(img3d) < np.max(img3d):
img3d = img3d - np.min(img3d)
img3d = img3d / np.max(img3d)
return np.expand_dims(img3d, 0)
a = load_dicom_images_3d('00001')
image = a[0]
def plot_slices(num_rows, num_columns, width, height, data):
"""Plot a montage of 20 CT slices"""
data = np.rot90(np.array(data))
data = np.transpose(data)
data = np.reshape(data, (num_rows, num_columns, width, height))
rows_data, columns_data = (data.shape[0], data.shape[1])
heights = [slc[0].shape[0] for slc in data]
widths = [slc.shape[1] for slc in data[0]]
fig_width = 12.0
fig_height = fig_width * sum(heights) / sum(widths)
f, axarr = plt.subplots(rows_data, columns_data, figsize=(fig_width, fig_height), gridspec_kw={'height_ratios': heights})
for i in range(rows_data):
for j in range(columns_data):
axarr[i, j].imshow(data[i][j], cmap='gray')
axarr[i, j].axis('off')
plt.subplots_adjust(wspace=0, hspace=0, left=0, right=1, bottom=0, top=1)
plt.show()
plot_slices(3, 10, 256, 256, image[:, :, :30]) | code |
17139154/cell_21 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/BR_eleitorado_2016_municipio.csv', delimiter=';')
df.shape
corr = df.corr()
plt.figure(num=None, dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum = 1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title('Correlation Matrix')
plt.show()
df = df.drop(columns=['cod_municipio_tse'])
x = sns.PairGrid(df)
x.map(plt.scatter)
uf = pd.DataFrame(df['uf'].value_counts())
eleitores = df[['uf', 'total_eleitores']].sort_values(by='uf')
plt.figure(figsize=(15, 5))
plt.title('Média de eleitores por Município em cada UF')
sns.barplot(x=eleitores.uf, y=eleitores.total_eleitores) | code |
17139154/cell_9 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/BR_eleitorado_2016_municipio.csv', delimiter=';')
df.shape
corr = df.corr()
plt.figure(num=None, dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum = 1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title('Correlation Matrix')
plt.show()
df = df.drop(columns=['cod_municipio_tse'])
df.head() | code |
17139154/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/BR_eleitorado_2016_municipio.csv', delimiter=';')
df.head(10) | code |
17139154/cell_30 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/BR_eleitorado_2016_municipio.csv', delimiter=';')
df.shape
corr = df.corr()
plt.figure(num=None, dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum = 1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title('Correlation Matrix')
plt.show()
df = df.drop(columns=['cod_municipio_tse'])
x = sns.PairGrid(df)
x.map(plt.scatter)
uf = pd.DataFrame(df['uf'].value_counts())
eleitores = df[['uf', 'total_eleitores']].sort_values(by='uf')
eleitores_grpd_by_uf = eleitores.groupby(['uf']).sum()
norte = ['AM', 'RR', 'AP', 'PA', 'TO', 'RO', 'AC']
centroeste = ['MT', 'MS', 'GO']
sudeste = ['SP', 'ES', 'MG', 'RJ']
sul = ['PR', 'RS', 'SC']
nordeste = ['MA', 'PI', 'CE', 'RN', 'PE', 'PB', 'SE', 'AL', 'BA']
df_region = eleitores
df_region['regiao'] = ''
for i, r in df_region.iterrows():
if r['uf'] in norte:
df_region.at[i, 'regiao'] = 'Norte'
elif r['uf'] in centroeste:
df_region.at[i, 'regiao'] = 'Centro-Oeste'
elif r['uf'] in sudeste:
df_region.at[i, 'regiao'] = 'Sudeste'
elif r['uf'] in sul:
df_region.at[i, 'regiao'] = 'Sul'
else:
df_region.at[i, 'regiao'] = 'Nordeste'
df_ufs = pd.DataFrame(norte + centroeste + sudeste + sul + nordeste)
reg = pd.DataFrame(df_region['regiao'].value_counts())
elec = pd.DataFrame(df_region.drop(columns=['uf']).groupby(['regiao']).sum())
plt.figure(figsize=(25, 8))
sns.stripplot(x='total_eleitores', y='regiao', hue='uf', data=df_region, palette='muted', size=5, jitter=0.3) | code |
17139154/cell_33 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/BR_eleitorado_2016_municipio.csv', delimiter=';')
df.shape
corr = df.corr()
plt.figure(num=None, dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum = 1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title('Correlation Matrix')
plt.show()
df = df.drop(columns=['cod_municipio_tse'])
x = sns.PairGrid(df)
x.map(plt.scatter)
uf = pd.DataFrame(df['uf'].value_counts())
eleitores = df[['uf', 'total_eleitores']].sort_values(by='uf')
eleitores_grpd_by_uf = eleitores.groupby(['uf']).sum()
norte = ['AM', 'RR', 'AP', 'PA', 'TO', 'RO', 'AC']
centroeste = ['MT', 'MS', 'GO']
sudeste = ['SP', 'ES', 'MG', 'RJ']
sul = ['PR', 'RS', 'SC']
nordeste = ['MA', 'PI', 'CE', 'RN', 'PE', 'PB', 'SE', 'AL', 'BA']
df_region = eleitores
df_region['regiao'] = ''
for i, r in df_region.iterrows():
if r['uf'] in norte:
df_region.at[i, 'regiao'] = 'Norte'
elif r['uf'] in centroeste:
df_region.at[i, 'regiao'] = 'Centro-Oeste'
elif r['uf'] in sudeste:
df_region.at[i, 'regiao'] = 'Sudeste'
elif r['uf'] in sul:
df_region.at[i, 'regiao'] = 'Sul'
else:
df_region.at[i, 'regiao'] = 'Nordeste'
df_ufs = pd.DataFrame(norte + centroeste + sudeste + sul + nordeste)
reg = pd.DataFrame(df_region['regiao'].value_counts())
elec = pd.DataFrame(df_region.drop(columns=['uf']).groupby(['regiao']).sum())
plt.figure(figsize=(25, 8))
sns.stripplot(x='total_eleitores', y='regiao', hue='uf', data=df_region[df_region['total_eleitores'] < 100000], palette='bright', size=4, jitter=0.3) | code |
17139154/cell_26 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/BR_eleitorado_2016_municipio.csv', delimiter=';')
df.shape
corr = df.corr()
plt.figure(num=None, dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum = 1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title('Correlation Matrix')
plt.show()
df = df.drop(columns=['cod_municipio_tse'])
x = sns.PairGrid(df)
x.map(plt.scatter)
uf = pd.DataFrame(df['uf'].value_counts())
eleitores = df[['uf', 'total_eleitores']].sort_values(by='uf')
eleitores_grpd_by_uf = eleitores.groupby(['uf']).sum()
norte = ['AM', 'RR', 'AP', 'PA', 'TO', 'RO', 'AC']
centroeste = ['MT', 'MS', 'GO']
sudeste = ['SP', 'ES', 'MG', 'RJ']
sul = ['PR', 'RS', 'SC']
nordeste = ['MA', 'PI', 'CE', 'RN', 'PE', 'PB', 'SE', 'AL', 'BA']
df_region = eleitores
df_region['regiao'] = ''
for i, r in df_region.iterrows():
if r['uf'] in norte:
df_region.at[i, 'regiao'] = 'Norte'
elif r['uf'] in centroeste:
df_region.at[i, 'regiao'] = 'Centro-Oeste'
elif r['uf'] in sudeste:
df_region.at[i, 'regiao'] = 'Sudeste'
elif r['uf'] in sul:
df_region.at[i, 'regiao'] = 'Sul'
else:
df_region.at[i, 'regiao'] = 'Nordeste'
df_region.head() | code |
17139154/cell_2 | [
"image_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
print(os.listdir('../input')) | code |
17139154/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/BR_eleitorado_2016_municipio.csv', delimiter=';')
df.shape
corr = df.corr()
plt.figure(num=None, dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum = 1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title('Correlation Matrix')
plt.show()
df = df.drop(columns=['cod_municipio_tse'])
x = sns.PairGrid(df)
x.map(plt.scatter) | code |
17139154/cell_7 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/BR_eleitorado_2016_municipio.csv', delimiter=';')
df.shape
corr = df.corr()
plt.figure(num=None, dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum=1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title('Correlation Matrix')
plt.show() | code |
17139154/cell_28 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/BR_eleitorado_2016_municipio.csv', delimiter=';')
df.shape
corr = df.corr()
plt.figure(num=None, dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum = 1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title('Correlation Matrix')
plt.show()
df = df.drop(columns=['cod_municipio_tse'])
x = sns.PairGrid(df)
x.map(plt.scatter)
uf = pd.DataFrame(df['uf'].value_counts())
eleitores = df[['uf', 'total_eleitores']].sort_values(by='uf')
eleitores_grpd_by_uf = eleitores.groupby(['uf']).sum()
norte = ['AM', 'RR', 'AP', 'PA', 'TO', 'RO', 'AC']
centroeste = ['MT', 'MS', 'GO']
sudeste = ['SP', 'ES', 'MG', 'RJ']
sul = ['PR', 'RS', 'SC']
nordeste = ['MA', 'PI', 'CE', 'RN', 'PE', 'PB', 'SE', 'AL', 'BA']
df_region = eleitores
df_region['regiao'] = ''
for i, r in df_region.iterrows():
if r['uf'] in norte:
df_region.at[i, 'regiao'] = 'Norte'
elif r['uf'] in centroeste:
df_region.at[i, 'regiao'] = 'Centro-Oeste'
elif r['uf'] in sudeste:
df_region.at[i, 'regiao'] = 'Sudeste'
elif r['uf'] in sul:
df_region.at[i, 'regiao'] = 'Sul'
else:
df_region.at[i, 'regiao'] = 'Nordeste'
df_ufs = pd.DataFrame(norte + centroeste + sudeste + sul + nordeste)
reg = pd.DataFrame(df_region['regiao'].value_counts())
reg.plot(kind='pie', title='Quantidade de Municípios por Região', subplots=True, figsize=(10, 10))
elec = pd.DataFrame(df_region.drop(columns=['uf']).groupby(['regiao']).sum())
elec.plot(kind='pie', title='Quantidade de Eleitores por Região', subplots=True, figsize=(10, 10)) | code |
17139154/cell_17 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt # plotting
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
df = pd.read_csv('../input/BR_eleitorado_2016_municipio.csv', delimiter=';')
df.shape
corr = df.corr()
plt.figure(num=None, dpi=80, facecolor='w', edgecolor='k')
corrMat = plt.matshow(corr, fignum = 1)
plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.gca().xaxis.tick_bottom()
plt.colorbar(corrMat)
plt.title('Correlation Matrix')
plt.show()
df = df.drop(columns=['cod_municipio_tse'])
x = sns.PairGrid(df)
x.map(plt.scatter)
uf = pd.DataFrame(df['uf'].value_counts())
plt.figure(figsize=(15, 5))
sns.barplot(x=uf.index, y=uf.uf, palette='rocket') | code |