path | screenshot_names | code | cell_type |
---|---|---|---|
17141744/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')
train['Sentiment'] = train['Sentiment'].apply(str)
train['Sentiment'].unique() | code |
17141744/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from fastai.text import *  # missing import: TextList, CategoryList, etc. come from the fastai v1 text API
train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')
train['Sentiment'] = train['Sentiment'].apply(str)
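# fastai v1 workflow: build a language-model DataBunch from the phrases (random 20% validation split),
# reuse its vocab for the test TextList, then build the classification DataBunch below.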
data = TextList.from_df(train, cols='Phrase').split_by_rand_pct(0.2).label_for_lm().databunch(bs=48)
test_datalist = TextList.from_df(test, cols='Phrase', vocab=data.vocab)
data_clas = TextList.from_df(train, cols='Phrase', vocab=data.vocab).split_by_rand_pct(0.2).label_from_df(cols='Sentiment', classes=['1', '2', '3', '4', '0'], label_cls=CategoryList).add_test(test_datalist).databunch(bs=32)
data_clas.show_batch() | code |
17141744/cell_17 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from fastai.text import *  # missing import: TextList, text_classifier_learner, AWD_LSTM come from the fastai v1 text API
train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')
train['Sentiment'] = train['Sentiment'].apply(str)
data = TextList.from_df(train, cols='Phrase').split_by_rand_pct(0.2).label_for_lm().databunch(bs=48)
test_datalist = TextList.from_df(test, cols='Phrase', vocab=data.vocab)
data_clas = TextList.from_df(train, cols='Phrase', vocab=data.vocab).split_by_rand_pct(0.2).label_from_df(cols='Sentiment', classes=['1', '2', '3', '4', '0'], label_cls=CategoryList).add_test(test_datalist).databunch(bs=32)
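# Classifier on the same data: AWD_LSTM backbone; 'fine_tuned_enc' is the encoder presumably saved by an
# earlier language-model fine-tuning cell. It is loaded, frozen, and the LR finder is run.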
learn_classifier = text_classifier_learner(data_clas, AWD_LSTM, drop_mult=0.5)
learn_classifier.load_encoder('fine_tuned_enc')
learn_classifier.freeze()
learn_classifier.lr_find() | code |
17141744/cell_22 | [
"text_html_output_1.png"
] | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from fastai.text import *  # missing import: TextList, text_classifier_learner, AWD_LSTM, DatasetType come from the fastai v1 text API
train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')
train['Sentiment'] = train['Sentiment'].apply(str)
test_id = test['PhraseId']
data = TextList.from_df(train, cols='Phrase').split_by_rand_pct(0.2).label_for_lm().databunch(bs=48)
test_datalist = TextList.from_df(test, cols='Phrase', vocab=data.vocab)
data_clas = TextList.from_df(train, cols='Phrase', vocab=data.vocab).split_by_rand_pct(0.2).label_from_df(cols='Sentiment', classes=['1', '2', '3', '4', '0'], label_cls=CategoryList).add_test(test_datalist).databunch(bs=32)
learn_classifier = text_classifier_learner(data_clas, AWD_LSTM, drop_mult=0.5)
learn_classifier.load_encoder('fine_tuned_enc')
learn_classifier.freeze()
learn_classifier.lr_find()
learn_classifier.fit_one_cycle(10, 0.01)
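# Ordered predictions on the test set; argmax over the class probabilities gives the label index written to submission.csv.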
preds, target = learn_classifier.get_preds(DatasetType.Test, ordered=True)
labels = np.argmax(preds, axis=1)
submission = pd.DataFrame({'PhraseId': test_id, 'Sentiment': labels})
submission.to_csv('submission.csv', index=False)
submission.head() | code |
17141744/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from fastai.text import *  # missing import: TextList, language_model_learner, AWD_LSTM come from the fastai v1 text API
train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')
train['Sentiment'] = train['Sentiment'].apply(str)
data = TextList.from_df(train, cols='Phrase').split_by_rand_pct(0.2).label_for_lm().databunch(bs=48)
learn = language_model_learner(data, AWD_LSTM, drop_mult=0.3)
learn.lr_find()
learn.recorder.plot() | code |
17141744/cell_5 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/train.tsv', sep='\t')
test = pd.read_csv('../input/test.tsv', sep='\t')
train['Sentiment'] = train['Sentiment'].apply(str)
test.head() | code |
121150745/cell_42 | [
"text_plain_output_1.png"
] | from category_encoders import TargetEncoder
from lightgbm import LGBMClassifier
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.metrics import f1_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
X_num = ['time_in_hospital', 'num_lab_procedures', 'num_procedures', 'num_medications', 'number_outpatient', 'number_emergency', 'number_inpatient', 'number_diagnoses']
X_cat = ['race', 'gender', 'age', 'admission_type_id', 'discharge_disposition_id', 'admission_source_id', 'max_glu_serum', 'A1Cresult', 'metformin', 'repaglinide', 'nateglinide', 'glimepiride', 'glipizide', 'glyburide', 'pioglitazone', 'rosiglitazone', 'insulin', 'change', 'diabetesMed', 'payer_code', 'medical_specialty']
X_diag = ['diag_1', 'diag_2', 'diag_3']
X_id = ['encounter_id', 'patient_nbr']
cat_pipeline = Pipeline([('imputer', SimpleImputer(strategy='most_frequent')), ('encoder', TargetEncoder())])
num_pipeline = Pipeline([('median imputer', SimpleImputer(strategy='median')), ('scaler', MinMaxScaler())])
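# Note: MapDiagnosis and patient_nbr_transformer are custom transformers defined in other cells of this
# notebook (patient_nbr_transformer appears in cell_47 below; MapDiagnosis is not shown here).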
diag_pipeline = Pipeline([('diag_pipeline', MapDiagnosis()), ('imputer', SimpleImputer(strategy='most_frequent')), ('encoder', TargetEncoder())])
id_pipeline = Pipeline([('encoder', patient_nbr_transformer()), ('scaler', MinMaxScaler())])
full_pipeline = ColumnTransformer(transformers=[('num', num_pipeline, X_num), ('cat', cat_pipeline, X_cat), ('id', id_pipeline, X_id), ('diag', diag_pipeline, X_diag)], remainder='drop')
le = LabelEncoder()
y_train_prepared = le.fit_transform(y_train)
y_test_prepared = le.transform(y_test)
X_train_prepared = full_pipeline.fit_transform(X_train, y_train_prepared)
X_test_prepared = full_pipeline.transform(X_test)
final_model = LGBMClassifier(random_state=42, max_depth=1)
final_model.fit(X_train_prepared, y_train_prepared)
preds_class1 = final_model.predict(X_train_prepared)
preds_class2 = final_model.predict(X_test_prepared)
print('test f1 score:', f1_score(y_test_prepared, preds_class2, average='micro')) | code |
121150745/cell_4 | [
"text_plain_output_1.png"
] | import os
import pandas as pd
dataset_path = '/kaggle/input/diabetes-readmission-prediction-i43/'
df = pd.read_csv(os.path.join(dataset_path, 'train.csv'), na_values='?')
print('The shape of the dataset is {}.\n\n'.format(df.shape))
df.head() | code |
121150745/cell_30 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y_train_prepared = le.fit_transform(y_train)
y_test_prepared = le.transform(y_test)
print(y_test_prepared.shape) | code |
121150745/cell_6 | [
"text_plain_output_1.png"
] | import os
import pandas as pd
dataset_path = '/kaggle/input/diabetes-readmission-prediction-i43/'
df = pd.read_csv(os.path.join(dataset_path, 'train.csv'), na_values='?')
df.describe() | code |
121150745/cell_29 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y_train_prepared = le.fit_transform(y_train)
print(y_train_prepared.shape) | code |
121150745/cell_39 | [
"text_plain_output_1.png"
] | from category_encoders import TargetEncoder
from lightgbm import LGBMClassifier
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.metrics import f1_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
X_num = ['time_in_hospital', 'num_lab_procedures', 'num_procedures', 'num_medications', 'number_outpatient', 'number_emergency', 'number_inpatient', 'number_diagnoses']
X_cat = ['race', 'gender', 'age', 'admission_type_id', 'discharge_disposition_id', 'admission_source_id', 'max_glu_serum', 'A1Cresult', 'metformin', 'repaglinide', 'nateglinide', 'glimepiride', 'glipizide', 'glyburide', 'pioglitazone', 'rosiglitazone', 'insulin', 'change', 'diabetesMed', 'payer_code', 'medical_specialty']
X_diag = ['diag_1', 'diag_2', 'diag_3']
X_id = ['encounter_id', 'patient_nbr']
cat_pipeline = Pipeline([('imputer', SimpleImputer(strategy='most_frequent')), ('encoder', TargetEncoder())])
num_pipeline = Pipeline([('median imputer', SimpleImputer(strategy='median')), ('scaler', MinMaxScaler())])
diag_pipeline = Pipeline([('diag_pipeline', MapDiagnosis()), ('imputer', SimpleImputer(strategy='most_frequent')), ('encoder', TargetEncoder())])
id_pipeline = Pipeline([('encoder', patient_nbr_transformer()), ('scaler', MinMaxScaler())])
full_pipeline = ColumnTransformer(transformers=[('num', num_pipeline, X_num), ('cat', cat_pipeline, X_cat), ('id', id_pipeline, X_id), ('diag', diag_pipeline, X_diag)], remainder='drop')
le = LabelEncoder()
y_train_prepared = le.fit_transform(y_train)
X_train_prepared = full_pipeline.fit_transform(X_train, y_train_prepared)
final_model = LGBMClassifier(random_state=42, max_depth=1)
final_model.fit(X_train_prepared, y_train_prepared)
preds_class1 = final_model.predict(X_train_prepared)
print('train f1 score:', f1_score(y_train_prepared, preds_class1, average='micro')) | code |
121150745/cell_2 | [
"text_html_output_1.png"
] | import pandas as pd
pd.set_option('display.max_columns', None)
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import os
import re
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import f1_score
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import StackingClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from category_encoders import TargetEncoder
from category_encoders import CatBoostEncoder
from category_encoders import CountEncoder
from xgboost import XGBClassifier
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier | code |
121150745/cell_19 | [
"text_plain_output_1.png"
] | import os
import pandas as pd
dataset_path = '/kaggle/input/diabetes-readmission-prediction-i43/'
df = pd.read_csv(os.path.join(dataset_path, 'train.csv'), na_values='?')
train_data = pd.concat([X_train, y_train], axis=1)
train_data = train_data.drop(['encounter_id', 'patient_nbr'], axis=1)
num_cols = ['time_in_hospital', 'num_lab_procedures', 'num_procedures', 'num_medications', 'number_outpatient', 'number_emergency', 'number_inpatient', 'number_diagnoses']
train_data[num_cols].hist(figsize=(20, 20), bins=50) | code |
121150745/cell_18 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import os
import pandas as pd
dataset_path = '/kaggle/input/diabetes-readmission-prediction-i43/'
df = pd.read_csv(os.path.join(dataset_path, 'train.csv'), na_values='?')
train_data = pd.concat([X_train, y_train], axis=1)
train_data = train_data.drop(['encounter_id', 'patient_nbr'], axis=1)
num_cols = ['time_in_hospital', 'num_lab_procedures', 'num_procedures', 'num_medications', 'number_outpatient', 'number_emergency', 'number_inpatient', 'number_diagnoses']
train_data[num_cols].describe().T | code |
121150745/cell_32 | [
"text_plain_output_1.png"
] | from category_encoders import TargetEncoder
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
X_num = ['time_in_hospital', 'num_lab_procedures', 'num_procedures', 'num_medications', 'number_outpatient', 'number_emergency', 'number_inpatient', 'number_diagnoses']
X_cat = ['race', 'gender', 'age', 'admission_type_id', 'discharge_disposition_id', 'admission_source_id', 'max_glu_serum', 'A1Cresult', 'metformin', 'repaglinide', 'nateglinide', 'glimepiride', 'glipizide', 'glyburide', 'pioglitazone', 'rosiglitazone', 'insulin', 'change', 'diabetesMed', 'payer_code', 'medical_specialty']
X_diag = ['diag_1', 'diag_2', 'diag_3']
X_id = ['encounter_id', 'patient_nbr']
cat_pipeline = Pipeline([('imputer', SimpleImputer(strategy='most_frequent')), ('encoder', TargetEncoder())])
num_pipeline = Pipeline([('median imputer', SimpleImputer(strategy='median')), ('scaler', MinMaxScaler())])
diag_pipeline = Pipeline([('diag_pipeline', MapDiagnosis()), ('imputer', SimpleImputer(strategy='most_frequent')), ('encoder', TargetEncoder())])
id_pipeline = Pipeline([('encoder', patient_nbr_transformer()), ('scaler', MinMaxScaler())])
full_pipeline = ColumnTransformer(transformers=[('num', num_pipeline, X_num), ('cat', cat_pipeline, X_cat), ('id', id_pipeline, X_id), ('diag', diag_pipeline, X_diag)], remainder='drop')
le = LabelEncoder()
y_train_prepared = le.fit_transform(y_train)
X_train_prepared = full_pipeline.fit_transform(X_train, y_train_prepared)
X_test_prepared = full_pipeline.transform(X_test)
print(X_test_prepared.shape) | code |
121150745/cell_47 | [
"text_plain_output_1.png"
] | from collections import Counter
import os
import pandas as pd
dataset_path = '/kaggle/input/diabetes-readmission-prediction-i43/'
df = pd.read_csv(os.path.join(dataset_path, 'train.csv'), na_values='?')
X = df.drop(['readmitted'], axis=1)
y = df['readmitted'].copy()
train_data = pd.concat([X_train, y_train], axis=1)
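# Custom transformer: keeps a history of (encounter_id, patient_nbr) pairs and derives each patient's total
# visit count ('Count') and the chronological visit number of every encounter ('visit_number').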
class patient_nbr_transformer:
def transform(self, X, y=None):
new_data = X[['encounter_id', 'patient_nbr']].copy(deep=True)
self.history = pd.concat([self.history, new_data]).drop_duplicates('encounter_id').reset_index(drop=True)
countDat = self.history['patient_nbr'].value_counts()
X_copy = X.copy(deep=True)
X_copy['Count'] = X_copy['patient_nbr'].apply(lambda x: countDat[x])
history_copy = self.history.copy(deep=True)
patient_nbr_array = history_copy['patient_nbr'].unique()
history_copy['visit_number'] = history_copy['patient_nbr']
for i in patient_nbr_array:
index_list = history_copy[history_copy['patient_nbr'] == i].sort_values(['patient_nbr', 'encounter_id'], axis=0, ascending=True, inplace=False).index.tolist()
for j in range(1, len(index_list) + 1):
                history_copy.loc[index_list[j - 1], 'visit_number'] = j  # .loc avoids chained-assignment issues
X_copy['visit_number'] = X_copy['encounter_id']
X_copy['visit_number'] = X_copy['visit_number'].apply(lambda x: history_copy.loc[history_copy['encounter_id'] == x, 'visit_number'].iloc[0])
return X_copy
def fit(self, X, y=None):
self.history = X[['encounter_id', 'patient_nbr']].copy(deep=True)
return self
test_df = pd.read_csv(os.path.join(dataset_path, 'test.csv'), na_values='?')
from collections import Counter
Counter(test_df['readmitted']) | code |
121150745/cell_31 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from category_encoders import TargetEncoder
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
X_num = ['time_in_hospital', 'num_lab_procedures', 'num_procedures', 'num_medications', 'number_outpatient', 'number_emergency', 'number_inpatient', 'number_diagnoses']
X_cat = ['race', 'gender', 'age', 'admission_type_id', 'discharge_disposition_id', 'admission_source_id', 'max_glu_serum', 'A1Cresult', 'metformin', 'repaglinide', 'nateglinide', 'glimepiride', 'glipizide', 'glyburide', 'pioglitazone', 'rosiglitazone', 'insulin', 'change', 'diabetesMed', 'payer_code', 'medical_specialty']
X_diag = ['diag_1', 'diag_2', 'diag_3']
X_id = ['encounter_id', 'patient_nbr']
cat_pipeline = Pipeline([('imputer', SimpleImputer(strategy='most_frequent')), ('encoder', TargetEncoder())])
num_pipeline = Pipeline([('median imputer', SimpleImputer(strategy='median')), ('scaler', MinMaxScaler())])
diag_pipeline = Pipeline([('diag_pipeline', MapDiagnosis()), ('imputer', SimpleImputer(strategy='most_frequent')), ('encoder', TargetEncoder())])
id_pipeline = Pipeline([('encoder', patient_nbr_transformer()), ('scaler', MinMaxScaler())])
full_pipeline = ColumnTransformer(transformers=[('num', num_pipeline, X_num), ('cat', cat_pipeline, X_cat), ('id', id_pipeline, X_id), ('diag', diag_pipeline, X_diag)], remainder='drop')
le = LabelEncoder()
y_train_prepared = le.fit_transform(y_train)
X_train_prepared = full_pipeline.fit_transform(X_train, y_train_prepared)
print(X_train_prepared.shape) | code |
121150745/cell_5 | [
"text_plain_output_1.png"
] | import os
import pandas as pd
dataset_path = '/kaggle/input/diabetes-readmission-prediction-i43/'
df = pd.read_csv(os.path.join(dataset_path, 'train.csv'), na_values='?')
df.info() | code |
121150745/cell_36 | [
"text_plain_output_1.png"
] | from category_encoders import TargetEncoder
from lightgbm import LGBMClassifier
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MinMaxScaler
X_num = ['time_in_hospital', 'num_lab_procedures', 'num_procedures', 'num_medications', 'number_outpatient', 'number_emergency', 'number_inpatient', 'number_diagnoses']
X_cat = ['race', 'gender', 'age', 'admission_type_id', 'discharge_disposition_id', 'admission_source_id', 'max_glu_serum', 'A1Cresult', 'metformin', 'repaglinide', 'nateglinide', 'glimepiride', 'glipizide', 'glyburide', 'pioglitazone', 'rosiglitazone', 'insulin', 'change', 'diabetesMed', 'payer_code', 'medical_specialty']
X_diag = ['diag_1', 'diag_2', 'diag_3']
X_id = ['encounter_id', 'patient_nbr']
cat_pipeline = Pipeline([('imputer', SimpleImputer(strategy='most_frequent')), ('encoder', TargetEncoder())])
num_pipeline = Pipeline([('median imputer', SimpleImputer(strategy='median')), ('scaler', MinMaxScaler())])
diag_pipeline = Pipeline([('diag_pipeline', MapDiagnosis()), ('imputer', SimpleImputer(strategy='most_frequent')), ('encoder', TargetEncoder())])
id_pipeline = Pipeline([('encoder', patient_nbr_transformer()), ('scaler', MinMaxScaler())])
full_pipeline = ColumnTransformer(transformers=[('num', num_pipeline, X_num), ('cat', cat_pipeline, X_cat), ('id', id_pipeline, X_id), ('diag', diag_pipeline, X_diag)], remainder='drop')
le = LabelEncoder()
y_train_prepared = le.fit_transform(y_train)
X_train_prepared = full_pipeline.fit_transform(X_train, y_train_prepared)
final_model = LGBMClassifier(random_state=42, max_depth=1)
final_model.fit(X_train_prepared, y_train_prepared) | code |
89123748/cell_30 | [
"text_plain_output_1.png"
] | !pip uninstall tensorflow -y | code |
89123748/cell_45 | [
"text_plain_output_1.png"
] | """gc.collect()
dataloader = Dataloader(train = train_idx, val = val_idx, batchsize=BATCHSIZE, buffersize=BUFFERSIZE)
train_loader_tf, val_loader_tf = dataloader.return_loaders()""" | code |
89123748/cell_32 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import warnings
import tensorflow as tf
import warnings
warnings.filterwarnings('ignore') | code |
89123748/cell_28 | [
"text_plain_output_1.png"
] | !nvidia-smi | code |
89123748/cell_35 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from tqdm import tqdm
import gc
import glob
import numpy as np
import os
import pandas as pd
import tensorflow as tf
import pandas as pd
import numpy as np
from tqdm import tqdm
import os
import glob
tqdm.pandas()
import matplotlib.pyplot as plt
import gc
train_path = '../input/ubiquant-market-prediction/train.csv'
test_path = '../input/ubiquant-market-prediction/example_test.csv'
# Lets first try to reduce the size of the dataframe by bringing it to right dtype and saving those chunks.
def reduce_memory_usage(df, chunk):
start_mem = df.memory_usage().sum() / 1024 ** 2
print("Initial Memory chunk: {:.3f}".format(start_mem))
for col in df.columns:
type_ = df[col].dtype
if str(type_) != "object":
if str(type_)[:3] == "int":
min_ = df[col].min()
max_ = df[col].max()
if min_ > np.iinfo(np.int8).min and max_ < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif min_ > np.iinfo(np.int16).min and max_ < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif min_ > np.iinfo(np.int32).min and max_ < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
else:
df[col] = df[col].astype(np.int64)
else:
if min_ > np.finfo(np.float16).min and max_ < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif min_ > np.finfo(np.float32).min and max_ < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype("category")
end_mem = df.memory_usage().sum() / 1024 ** 2
print("Final Memory chunk: {:.3f}".format(end_mem))
print("Reduced by: {:.2f}".format((start_mem - end_mem) / start_mem))
df.to_pickle(f"chunk_{chunk}.pkl")
print(f"chunk_{chunk}.pkl","saved!")
gc.collect()
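# Stream the large training CSV in 1,000,000-row chunks; each chunk is downcast and saved as chunk_<id>.pkl.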
chunksize = 10 ** 6
for chunk_id, chunk in enumerate(pd.read_csv(train_path, chunksize=chunksize)):
reduce_memory_usage(chunk, chunk_id)
appended_list = []
path = glob.glob(os.path.join(os.curdir, 'chunk_*.pkl'), recursive=True)
for item in tqdm(path):
df = pd.read_pickle(item)
appended_list.append(df)
final_frame = pd.concat(appended_list, axis=0, ignore_index=True)
int_col = [col for col in final_frame.columns if 'int' in str(final_frame[col].dtype)]
int_col
final_frame.drop(['row_id'], axis=1, inplace=True)
target = final_frame['target'].values
final_frame.drop(['target'], axis=1, inplace=True)
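# Keep only features whose absolute Pearson correlation with the target is at least 0.6.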
important_features = []
for col in tqdm(final_frame.columns):
pearson_relation = np.corrcoef(target, final_frame[col])[0, 1]
if np.abs(pearson_relation) >= 0.6:
important_features.append(col)
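# Shuffle all row indices and split them into train/validation index tensors (80/20 below).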
def split_set_index(data, size):
train_size = int(len(data) * size)
index = tf.random.shuffle(tf.range(len(data)))
return (index[:train_size], index[train_size:])
train_idx, val_idx = split_set_index(final_frame.values, size=0.8) | code |
89123748/cell_31 | [
"text_plain_output_1.png"
] | !pip install tensorflow-gpu==2.4.0 | code |
89123748/cell_14 | [
"text_plain_output_1.png"
] | from tqdm import tqdm
import gc
import glob
import numpy as np
import os
import pandas as pd
import pandas as pd
import numpy as np
from tqdm import tqdm
import os
import glob
tqdm.pandas()
import matplotlib.pyplot as plt
import gc
train_path = '../input/ubiquant-market-prediction/train.csv'
test_path = '../input/ubiquant-market-prediction/example_test.csv'
# Lets first try to reduce the size of the dataframe by bringing it to right dtype and saving those chunks.
def reduce_memory_usage(df, chunk):
start_mem = df.memory_usage().sum() / 1024 ** 2
print("Initial Memory chunk: {:.3f}".format(start_mem))
for col in df.columns:
type_ = df[col].dtype
if str(type_) != "object":
if str(type_)[:3] == "int":
min_ = df[col].min()
max_ = df[col].max()
if min_ > np.iinfo(np.int8).min and max_ < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif min_ > np.iinfo(np.int16).min and max_ < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif min_ > np.iinfo(np.int32).min and max_ < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
else:
df[col] = df[col].astype(np.int64)
else:
if min_ > np.finfo(np.float16).min and max_ < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif min_ > np.finfo(np.float32).min and max_ < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype("category")
end_mem = df.memory_usage().sum() / 1024 ** 2
print("Final Memory chunk: {:.3f}".format(end_mem))
print("Reduced by: {:.2f}".format((start_mem - end_mem) / start_mem))
df.to_pickle(f"chunk_{chunk}.pkl")
print(f"chunk_{chunk}.pkl","saved!")
gc.collect()
chunksize = 10 ** 6
for chunk_id, chunk in enumerate(pd.read_csv(train_path, chunksize=chunksize)):
reduce_memory_usage(chunk, chunk_id)
appended_list = []
path = glob.glob(os.path.join(os.curdir, 'chunk_*.pkl'), recursive=True)
for item in tqdm(path):
df = pd.read_pickle(item)
appended_list.append(df)
final_frame = pd.concat(appended_list, axis=0, ignore_index=True)
final_frame.info() | code |
89123748/cell_10 | [
"text_plain_output_1.png"
] | import gc
import numpy as np
import pandas as pd
train_path = '../input/ubiquant-market-prediction/train.csv'
test_path = '../input/ubiquant-market-prediction/example_test.csv'
# Lets first try to reduce the size of the dataframe by bringing it to right dtype and saving those chunks.
def reduce_memory_usage(df, chunk):
start_mem = df.memory_usage().sum() / 1024 ** 2
print("Initial Memory chunk: {:.3f}".format(start_mem))
for col in df.columns:
type_ = df[col].dtype
if str(type_) != "object":
if str(type_)[:3] == "int":
min_ = df[col].min()
max_ = df[col].max()
if min_ > np.iinfo(np.int8).min and max_ < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif min_ > np.iinfo(np.int16).min and max_ < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif min_ > np.iinfo(np.int32).min and max_ < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
else:
df[col] = df[col].astype(np.int64)
else:
if min_ > np.finfo(np.float16).min and max_ < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif min_ > np.finfo(np.float32).min and max_ < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype("category")
end_mem = df.memory_usage().sum() / 1024 ** 2
print("Final Memory chunk: {:.3f}".format(end_mem))
print("Reduced by: {:.2f}".format((start_mem - end_mem) / start_mem))
df.to_pickle(f"chunk_{chunk}.pkl")
print(f"chunk_{chunk}.pkl","saved!")
gc.collect()
chunksize = 10 ** 6
for chunk_id, chunk in enumerate(pd.read_csv(train_path, chunksize=chunksize)):
reduce_memory_usage(chunk, chunk_id) | code |
89123748/cell_12 | [
"application_vnd.jupyter.stderr_output_1.png"
] | from tqdm import tqdm
import gc
import glob
import numpy as np
import os
import pandas as pd
import pandas as pd
import numpy as np
from tqdm import tqdm
import os
import glob
tqdm.pandas()
import matplotlib.pyplot as plt
import gc
train_path = '../input/ubiquant-market-prediction/train.csv'
test_path = '../input/ubiquant-market-prediction/example_test.csv'
# Lets first try to reduce the size of the dataframe by bringing it to right dtype and saving those chunks.
def reduce_memory_usage(df, chunk):
start_mem = df.memory_usage().sum() / 1024 ** 2
print("Initial Memory chunk: {:.3f}".format(start_mem))
for col in df.columns:
type_ = df[col].dtype
if str(type_) != "object":
if str(type_)[:3] == "int":
min_ = df[col].min()
max_ = df[col].max()
if min_ > np.iinfo(np.int8).min and max_ < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif min_ > np.iinfo(np.int16).min and max_ < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif min_ > np.iinfo(np.int32).min and max_ < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
else:
df[col] = df[col].astype(np.int64)
else:
if min_ > np.finfo(np.float16).min and max_ < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif min_ > np.finfo(np.float32).min and max_ < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype("category")
end_mem = df.memory_usage().sum() / 1024 ** 2
print("Final Memory chunk: {:.3f}".format(end_mem))
print("Reduced by: {:.2f}".format((start_mem - end_mem) / start_mem))
df.to_pickle(f"chunk_{chunk}.pkl")
print(f"chunk_{chunk}.pkl","saved!")
gc.collect()
chunksize = 10 ** 6
for chunk_id, chunk in enumerate(pd.read_csv(train_path, chunksize=chunksize)):
reduce_memory_usage(chunk, chunk_id)
appended_list = []
path = glob.glob(os.path.join(os.curdir, 'chunk_*.pkl'), recursive=True)
for item in tqdm(path):
df = pd.read_pickle(item)
appended_list.append(df)
final_frame = pd.concat(appended_list, axis=0, ignore_index=True) | code |
2016761/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from pandas import DataFrame
from pandas import Series
import matplotlib.pyplot as plt
data = pd.read_csv('../input/Top_hashtag.csv')
data.shape | code |
2016761/cell_1 | [
"text_plain_output_1.png"
] | from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8')) | code |
2016761/cell_5 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from pandas import DataFrame
from pandas import Series
import matplotlib.pyplot as plt
data = pd.read_csv('../input/Top_hashtag.csv')
data.shape
x1 = data['Hashtag']
y1 = data['Posts']
l = data['Likes']
c = data['Comments']
xv = np.array(x1)
yv = np.array(y1)
plt.plot(xv, yv)
plt.show() | code |
130014911/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import seaborn as sns
df = pd.read_csv('/kaggle/input/thailand-fatal-road-accident/thailand_fatal_raod_accident_2011_2022.csv')
df['official_death_date'] = pd.to_datetime(df['official_death_date'])
thai_accident_df = df.dropna(subset='accident_date').copy()
thai_accident_df['accident_date'] = pd.to_datetime(thai_accident_df['accident_date'])
thai_accident_df.dtypes
thai_accident_df.describe().T
import matplotlib.pyplot as plt
import seaborn as sns
thai_accident_df = df.dropna(subset='accident_date').copy()
thai_accident_df['accident_date'] = pd.to_datetime(thai_accident_df['accident_date'])
thai_accident_df['year'], thai_accident_df['month'], thai_accident_df['day'] = (thai_accident_df['accident_date'].dt.year, thai_accident_df['accident_date'].dt.month, thai_accident_df['accident_date'].dt.day)
def thai_accident_from_to(from_date=thai_accident_df['accident_date'].min(), to_date=thai_accident_df['accident_date'].max()):
df = thai_accident_df[(thai_accident_df['accident_date'] >= from_date) & (thai_accident_df['accident_date'] < to_date)]
return df
counts = thai_accident_df.groupby(['year', 'month']).size().reset_index(name='count')
this_data = thai_accident_from_to("2019-01-01", "2021-05-31")
fig, ax = plt.subplots(2,2, figsize=(14,10))
fig.suptitle("Road Accident [2011-2022]")
# 00
sns.histplot(ax=ax[0,0], x=this_data["year"], discrete=True)
ax[0,0].set_title("Accident by year", y=1.05)
ax[0,0].bar_label(ax[0,0].containers[1])
# 01
this_data["province_en"].value_counts()[:10].sort_values().plot(kind="barh", ax=ax[0,1])
ax[0,1].set_title("Top 10 accident by province")
ax[0,1].bar_label(ax[0,1].containers[0], label_type="center", color="white")
# 10
this_data["vehicle_type"].value_counts().sort_values().plot(kind="barh", ax=ax[1,0])
ax[1,0].set_title("Top accident by vehicle type")
ax[1,0].bar_label(ax[1,0].containers[0])
# 11
ax[1,1] = sns.histplot(data=this_data, x="age",hue="gender", element="poly",discrete=True)
ax[1,1].set_title("Accident by age and gender")
plt.show()
gender_df = this_data['gender'].value_counts().reset_index()
gender_df
heat_group = this_data.groupby(['month', 'day']).size().reset_index(name='count')
heat = heat_group.pivot_table(index='month', columns='day', values='count', fill_value=0)
fig, ax = plt.subplots(figsize=(15, 8))
sns.heatmap(heat, annot=True, ax=ax, fmt='.3g')
ax.set_title('Accident by day and month', y=1.03)
plt.show() | code |
130014911/cell_9 | [
"image_output_2.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import seaborn as sns
df = pd.read_csv('/kaggle/input/thailand-fatal-road-accident/thailand_fatal_raod_accident_2011_2022.csv')
df['official_death_date'] = pd.to_datetime(df['official_death_date'])
thai_accident_df = df.dropna(subset='accident_date').copy()
thai_accident_df['accident_date'] = pd.to_datetime(thai_accident_df['accident_date'])
thai_accident_df.dtypes
thai_accident_df.describe().T
import matplotlib.pyplot as plt
import seaborn as sns
thai_accident_df = df.dropna(subset='accident_date').copy()
thai_accident_df['accident_date'] = pd.to_datetime(thai_accident_df['accident_date'])
thai_accident_df['year'], thai_accident_df['month'], thai_accident_df['day'] = (thai_accident_df['accident_date'].dt.year, thai_accident_df['accident_date'].dt.month, thai_accident_df['accident_date'].dt.day)
def thai_accident_from_to(from_date=thai_accident_df['accident_date'].min(), to_date=thai_accident_df['accident_date'].max()):
df = thai_accident_df[(thai_accident_df['accident_date'] >= from_date) & (thai_accident_df['accident_date'] < to_date)]
return df
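# Fatal-accident count for every (year, month) pair, plotted below as one line per year.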
counts = thai_accident_df.groupby(['year', 'month']).size().reset_index(name='count')
print(counts.tail())
plt.figure(figsize=(14, 7))
sns.lineplot(data=counts, x='month', y='count', hue='year')
plt.show() | code |
130014911/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/thailand-fatal-road-accident/thailand_fatal_raod_accident_2011_2022.csv')
df['official_death_date'] = pd.to_datetime(df['official_death_date'])
thai_accident_df = df.dropna(subset='accident_date').copy()
print(thai_accident_df.shape) | code |
130014911/cell_6 | [
"text_html_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/thailand-fatal-road-accident/thailand_fatal_raod_accident_2011_2022.csv')
df['official_death_date'] = pd.to_datetime(df['official_death_date'])
thai_accident_df = df.dropna(subset='accident_date').copy()
thai_accident_df['accident_date'] = pd.to_datetime(thai_accident_df['accident_date'])
thai_accident_df.dtypes
print(thai_accident_df.isnull().sum())
thai_accident_df.describe().T | code |
130014911/cell_2 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/thailand-fatal-road-accident/thailand_fatal_raod_accident_2011_2022.csv')
df.tail() | code |
130014911/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import seaborn as sns
df = pd.read_csv('/kaggle/input/thailand-fatal-road-accident/thailand_fatal_raod_accident_2011_2022.csv')
df['official_death_date'] = pd.to_datetime(df['official_death_date'])
thai_accident_df = df.dropna(subset='accident_date').copy()
thai_accident_df['accident_date'] = pd.to_datetime(thai_accident_df['accident_date'])
thai_accident_df.dtypes
thai_accident_df.describe().T
import matplotlib.pyplot as plt
import seaborn as sns
thai_accident_df = df.dropna(subset='accident_date').copy()
thai_accident_df['accident_date'] = pd.to_datetime(thai_accident_df['accident_date'])
thai_accident_df['year'], thai_accident_df['month'], thai_accident_df['day'] = (thai_accident_df['accident_date'].dt.year, thai_accident_df['accident_date'].dt.month, thai_accident_df['accident_date'].dt.day)
def thai_accident_from_to(from_date=thai_accident_df['accident_date'].min(), to_date=thai_accident_df['accident_date'].max()):
df = thai_accident_df[(thai_accident_df['accident_date'] >= from_date) & (thai_accident_df['accident_date'] < to_date)]
return df
counts = thai_accident_df.groupby(['year', 'month']).size().reset_index(name='count')
this_data = thai_accident_from_to('2019-01-01', '2021-05-31')
fig, ax = plt.subplots(2, 2, figsize=(14, 10))
fig.suptitle('Road Accident [2011-2022]')
sns.histplot(ax=ax[0, 0], x=this_data['year'], discrete=True)
ax[0, 0].set_title('Accident by year', y=1.05)
ax[0, 0].bar_label(ax[0, 0].containers[1])
this_data['province_en'].value_counts()[:10].sort_values().plot(kind='barh', ax=ax[0, 1])
ax[0, 1].set_title('Top 10 accident by province')
ax[0, 1].bar_label(ax[0, 1].containers[0], label_type='center', color='white')
this_data['vehicle_type'].value_counts().sort_values().plot(kind='barh', ax=ax[1, 0])
ax[1, 0].set_title('Top accident by vehicle type')
ax[1, 0].bar_label(ax[1, 0].containers[0])
ax[1, 1] = sns.histplot(data=this_data, x='age', hue='gender', element='poly', discrete=True)
ax[1, 1].set_title('Accident by age and gender')
plt.show() | code |
130014911/cell_1 | [
"text_plain_output_2.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import geopandas as gpd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
130014911/cell_8 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/thailand-fatal-road-accident/thailand_fatal_raod_accident_2011_2022.csv')
df['official_death_date'] = pd.to_datetime(df['official_death_date'])
thai_accident_df = df.dropna(subset='accident_date').copy()
thai_accident_df['accident_date'] = pd.to_datetime(thai_accident_df['accident_date'])
thai_accident_df.dtypes
thai_accident_df.describe().T
import matplotlib.pyplot as plt
import seaborn as sns
thai_accident_df = df.dropna(subset='accident_date').copy()
thai_accident_df['accident_date'] = pd.to_datetime(thai_accident_df['accident_date'])
thai_accident_df['year'], thai_accident_df['month'], thai_accident_df['day'] = (thai_accident_df['accident_date'].dt.year, thai_accident_df['accident_date'].dt.month, thai_accident_df['accident_date'].dt.day)
def thai_accident_from_to(from_date=thai_accident_df['accident_date'].min(), to_date=thai_accident_df['accident_date'].max()):
df = thai_accident_df[(thai_accident_df['accident_date'] >= from_date) & (thai_accident_df['accident_date'] < to_date)]
return df
print('Ready for Data visualization') | code |
130014911/cell_3 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/thailand-fatal-road-accident/thailand_fatal_raod_accident_2011_2022.csv')
print(f'Check datatypes\n{df.dtypes}')
print('\nShape check')
print(df.shape)
print()
print(df.isnull().sum())
df['official_death_date'] = pd.to_datetime(df['official_death_date']) | code |
130014911/cell_10 | [
"text_html_output_1.png",
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import seaborn as sns
df = pd.read_csv('/kaggle/input/thailand-fatal-road-accident/thailand_fatal_raod_accident_2011_2022.csv')
df['official_death_date'] = pd.to_datetime(df['official_death_date'])
thai_accident_df = df.dropna(subset='accident_date').copy()
thai_accident_df['accident_date'] = pd.to_datetime(thai_accident_df['accident_date'])
thai_accident_df.dtypes
thai_accident_df.describe().T
import matplotlib.pyplot as plt
import seaborn as sns
thai_accident_df = df.dropna(subset='accident_date').copy()
thai_accident_df['accident_date'] = pd.to_datetime(thai_accident_df['accident_date'])
thai_accident_df['year'], thai_accident_df['month'], thai_accident_df['day'] = (thai_accident_df['accident_date'].dt.year, thai_accident_df['accident_date'].dt.month, thai_accident_df['accident_date'].dt.day)
def thai_accident_from_to(from_date=thai_accident_df['accident_date'].min(), to_date=thai_accident_df['accident_date'].max()):
df = thai_accident_df[(thai_accident_df['accident_date'] >= from_date) & (thai_accident_df['accident_date'] < to_date)]
return df
counts = thai_accident_df.groupby(['year', 'month']).size().reset_index(name='count')
print('Simple data below\n')
print(f"All accident data from {thai_accident_df['accident_date'].min().date()} to {thai_accident_df['accident_date'].max().date()} \n{thai_accident_df.shape[0]} cases\n")
print('# By Gender')
gender_count = thai_accident_df['gender'].value_counts().reset_index()
gender_count.columns = ['gender', 'g_count']
gender_count['%'] = gender_count['g_count'] / gender_count['g_count'].sum() * 100
print(gender_count)
print('\n# By Vehicle type')
print(thai_accident_df['vehicle_type'].value_counts())
print('\n# By province')
print(thai_accident_df['province_en'].value_counts())
print(thai_accident_df['province_en'].value_counts().describe()) | code |
130014911/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import seaborn as sns
df = pd.read_csv('/kaggle/input/thailand-fatal-road-accident/thailand_fatal_raod_accident_2011_2022.csv')
df['official_death_date'] = pd.to_datetime(df['official_death_date'])
thai_accident_df = df.dropna(subset='accident_date').copy()
thai_accident_df['accident_date'] = pd.to_datetime(thai_accident_df['accident_date'])
thai_accident_df.dtypes
thai_accident_df.describe().T
import matplotlib.pyplot as plt
import seaborn as sns
thai_accident_df = df.dropna(subset='accident_date').copy()
thai_accident_df['accident_date'] = pd.to_datetime(thai_accident_df['accident_date'])
thai_accident_df['year'], thai_accident_df['month'], thai_accident_df['day'] = (thai_accident_df['accident_date'].dt.year, thai_accident_df['accident_date'].dt.month, thai_accident_df['accident_date'].dt.day)
def thai_accident_from_to(from_date=thai_accident_df['accident_date'].min(), to_date=thai_accident_df['accident_date'].max()):
df = thai_accident_df[(thai_accident_df['accident_date'] >= from_date) & (thai_accident_df['accident_date'] < to_date)]
return df
counts = thai_accident_df.groupby(['year', 'month']).size().reset_index(name='count')
this_data = thai_accident_from_to("2019-01-01", "2021-05-31")
fig, ax = plt.subplots(2,2, figsize=(14,10))
fig.suptitle("Road Accident [2011-2022]")
# 00
sns.histplot(ax=ax[0,0], x=this_data["year"], discrete=True)
ax[0,0].set_title("Accident by year", y=1.05)
ax[0,0].bar_label(ax[0,0].containers[1])
# 01
this_data["province_en"].value_counts()[:10].sort_values().plot(kind="barh", ax=ax[0,1])
ax[0,1].set_title("Top 10 accident by province")
ax[0,1].bar_label(ax[0,1].containers[0], label_type="center", color="white")
# 10
this_data["vehicle_type"].value_counts().sort_values().plot(kind="barh", ax=ax[1,0])
ax[1,0].set_title("Top accident by vehicle type")
ax[1,0].bar_label(ax[1,0].containers[0])
# 11
ax[1,1] = sns.histplot(data=this_data, x="age",hue="gender", element="poly",discrete=True)
ax[1,1].set_title("Accident by age and gender")
plt.show()
gender_df = this_data['gender'].value_counts().reset_index()
plt.pie(gender_df['gender'], labels=gender_df['index'], autopct='%1.1f%%', explode=(0.1, 0, 0))
plt.show()
gender_df | code |
130014911/cell_5 | [
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('/kaggle/input/thailand-fatal-road-accident/thailand_fatal_raod_accident_2011_2022.csv')
df['official_death_date'] = pd.to_datetime(df['official_death_date'])
thai_accident_df = df.dropna(subset='accident_date').copy()
thai_accident_df['accident_date'] = pd.to_datetime(thai_accident_df['accident_date'])
thai_accident_df.dtypes | code |
90105207/cell_13 | [
"text_plain_output_1.png"
] | list3 = [1, 3, 45, 67, 89, 0, 'five', 'six']
print(list3)
list3.pop(4)
print(list3, 'the element at index no 4 is removed')
list3.pop(5)
print(list3, 'the element at index 5 is removed') | code |
90105207/cell_9 | [
"text_plain_output_1.png"
] | lst = [4, 6, 4, 78, 32, 0, 1]
print('unsorted lst', lst)
lst.sort()
print('sorted lst', lst) | code |
90105207/cell_4 | [
"text_plain_output_1.png"
] | MyList = ['This is my list of fruits', 'Strawberry', 'Mango', 'Grapes', 'Malta']
print(len(MyList)) | code |
90105207/cell_6 | [
"text_plain_output_1.png"
] | Listtypes = ['Mudassir ID=', 27129, 'CGPA=', 3.14, 'Promoted=', True, 'Failed in any subject?=', False]
print(Listtypes) | code |
90105207/cell_2 | [
"text_plain_output_1.png"
] | MyList = ['This is my list of fruits', 'Strawberry', 'Mango', 'Grapes', 'Malta']
print(MyList) | code |
90105207/cell_11 | [
"text_plain_output_1.png"
] | lst2 = [2, 6, 90, 30, 5]
print(lst2, 'Not appended')
lst2.append(5)
print(lst2, 'Appended')
lst2.append('Digits')
print(lst2, 'Appended')
lst3 = [2, 6, 90, 30, 5, 'Mudassir', 'Khan']
lst3.append('Digits')
print(lst3, 'Appended') | code |
90105207/cell_7 | [
"text_plain_output_1.png"
] | Listtypes = ['Mudassir ID=', 27129, 'CGPA=', 3.14, 'Promoted=', True, 'Failed in any subject?=', False]
print(Listtypes[-4:9])
print(Listtypes[7]) | code |
90105207/cell_14 | [
"text_plain_output_1.png"
] | list4 = [10, 20, 30, 40, 50, 'Alpha', 'Beta', 'Gama']
print(list4)
list4.remove('Gama')
print(list4) | code |
90105207/cell_10 | [
"text_plain_output_1.png"
] | lst = [4, 6, 4, 78, 32, 0, 1]
lst.sort()
print('Unreversed', lst)
lst.reverse()
print('Reversed list', lst) | code |
90105207/cell_12 | [
"text_plain_output_1.png"
] | listt = [90, 3, 45, 67, 86, 89, 90, 100]
print('uninserted', listt)
listt.insert(1, 5)
print('inserted=', listt, 'element 5 inserted at index 1')
listt2 = [1, 2, 3, 4, 5, 6, 7, 'Sunday', 'Monday', 8, 9, 10]
print(listt2, 'Without insertion')
listt2.insert(7, 'Saturday')
print(listt2, "With insertion of 'Saturday at the place of '7'") | code |
128046373/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | library(tidyverse)
library(here)
library(skimr)
library(janitor)
library(lubridate)
library(ggrepel)
library(ggplot2) | code |
88104085/cell_4 | [
"text_plain_output_1.png"
] | from keras.preprocessing.image import ImageDataGenerator
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_img_csv = '../input/my-pre-data/MURA-v1.1/abdekho_train.csv'
train_images_paths = pd.read_csv(os.path.join(train_img_csv), dtype=str, header=None)
train_images_paths.columns = ['image_path']
valid_img_csv = '../input/my-pre-data/MURA-v1.1/abdekho_valid.csv'
valid_images_paths = pd.read_csv(os.path.join(valid_img_csv), dtype=str, header=None)
valid_images_paths.columns = ['image_path']
train_images_paths['label'] = train_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
train_images_paths['category'] = train_images_paths['image_path'].apply(lambda x: x.split('/')[2])
valid_images_paths['label'] = valid_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
valid_images_paths['category'] = valid_images_paths['image_path'].apply(lambda x: x.split('/')[2])
train_images_paths_XR_FINGER = train_images_paths[train_images_paths['category'] == 'XR_FINGER']
valid_images_paths_XR_FINGER = valid_images_paths[valid_images_paths['category'] == 'XR_FINGER']
images_path_dir = '../input/my-pre-data'
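# Training generator: per-sample normalization plus random rotation/shift/zoom/flips; the validation generator only rescales pixels and applies rotation.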
datagen = ImageDataGenerator(samplewise_center=True, samplewise_std_normalization=True, rotation_range=30, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.1, horizontal_flip=True, vertical_flip=True)
valid_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=30)
targetsize = (224, 224)
classmode = 'binary'
batchsize = 32
train_generator = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_FINGER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, shuffle=True)
valid_generator = valid_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_FINGER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, shuffle=True) | code |
88104085/cell_6 | [
"text_plain_output_1.png"
] | from keras.models import Sequential,Model,load_model,Input
from keras.preprocessing.image import ImageDataGenerator
import math
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import tensorflow as tf
import tensorflow_addons as tfa
import tensorflow_addons as tfa
train_img_csv = '../input/my-pre-data/MURA-v1.1/abdekho_train.csv'
train_images_paths = pd.read_csv(os.path.join(train_img_csv), dtype=str, header=None)
train_images_paths.columns = ['image_path']
valid_img_csv = '../input/my-pre-data/MURA-v1.1/abdekho_valid.csv'
valid_images_paths = pd.read_csv(os.path.join(valid_img_csv), dtype=str, header=None)
valid_images_paths.columns = ['image_path']
train_images_paths['label'] = train_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
train_images_paths['category'] = train_images_paths['image_path'].apply(lambda x: x.split('/')[2])
valid_images_paths['label'] = valid_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
valid_images_paths['category'] = valid_images_paths['image_path'].apply(lambda x: x.split('/')[2])
train_images_paths_XR_FINGER = train_images_paths[train_images_paths['category'] == 'XR_FINGER']
valid_images_paths_XR_FINGER = valid_images_paths[valid_images_paths['category'] == 'XR_FINGER']
images_path_dir = '../input/my-pre-data'
datagen = ImageDataGenerator(samplewise_center=True, samplewise_std_normalization=True, rotation_range=30, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.1, horizontal_flip=True, vertical_flip=True)
valid_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=30)
targetsize = (224, 224)
classmode = 'binary'
batchsize = 32
train_generator = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_FINGER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, shuffle=True)
valid_generator = valid_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_FINGER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, shuffle=True)
input_image = Input(shape=(224, 224, 3), name='original_img')
dense_model_1 = tf.keras.applications.DenseNet169(include_top=False, weights='imagenet')
x = dense_model_1(input_image, training=True)
x = tf.keras.layers.Conv2D(20, (1, 1), activation='relu')(x)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dense(81, activation='relu')(x)
x = tf.keras.layers.Dense(81, activation='relu')(x)
x = tf.keras.layers.Dropout(0.3)(x)
x = tf.keras.layers.Dense(42, activation='relu')(x)
x = tf.keras.layers.Dropout(0.3)(x)
preds_dense_net = tf.keras.layers.Dense(1, activation='sigmoid')(x)
dense_model_2 = tf.keras.applications.ResNet50(weights='imagenet', include_top=False)
y = dense_model_2(input_image, training=True)
y = tf.keras.layers.Conv2D(20, (1, 1), activation='relu')(y)
y = tf.keras.layers.GlobalAveragePooling2D()(y)
y = tf.keras.layers.Dense(81, activation='relu')(y)
y = tf.keras.layers.Dense(81, activation='relu')(y)
y = tf.keras.layers.Dropout(0.3)(y)
y = tf.keras.layers.Dense(42, activation='relu')(y)
y = tf.keras.layers.Dropout(0.3)(y)
preds_resnet_net = tf.keras.layers.Dense(1, activation='sigmoid')(y)
dense_model_3 = tf.keras.applications.MobileNet(include_top=False, weights='imagenet')
z = dense_model_3(input_image, training=True)
z = tf.keras.layers.Conv2D(20, (1, 1), activation='relu')(z)
z = tf.keras.layers.GlobalAveragePooling2D()(z)
z = tf.keras.layers.Dense(81, activation='relu')(z)
z = tf.keras.layers.Dense(81, activation='relu')(z)
z = tf.keras.layers.Dropout(0.3)(z)
z = tf.keras.layers.Dense(42, activation='relu')(z)
z = tf.keras.layers.Dropout(0.3)(z)
preds_mobi_net = tf.keras.layers.Dense(1, activation='sigmoid')(z)
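# Simple ensemble: average the sigmoid outputs of the DenseNet169, ResNet50 and MobileNet heads.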
mean_nn_only = tf.reduce_mean(tf.stack([preds_mobi_net, preds_resnet_net, preds_dense_net], axis=0), axis=0)
ensemble_model = tf.keras.models.Model(input_image, mean_nn_only)
ensemble_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), loss='binary_crossentropy', metrics=[tfa.metrics.CohenKappa(num_classes=2), 'accuracy'])
STEP_SIZE_TRAIN = math.ceil(train_generator.n / train_generator.batch_size)
STEP_SIZE_VALID = math.ceil(valid_generator.n / valid_generator.batch_size)
ensemble_model.fit_generator(generator=train_generator, steps_per_epoch=STEP_SIZE_TRAIN, validation_data=valid_generator, validation_steps=STEP_SIZE_VALID, epochs=15, verbose=1)
_, cohen, acc = ensemble_model.evaluate(train_generator, verbose=1)
print(' accuracy overall: %.3f' % acc, end=' ')
print('kappa overall: %.3f' % cohen) | code |
88104085/cell_1 | [
"text_plain_output_1.png"
] | import pandas as pd
import os
from glob import glob
import tensorflow as tf
import keras_tuner as kt
from tensorflow import keras
print('TensorFlow version is ', tf.__version__)
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import tensorflow_addons as tfa
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from skimage.io import imread
import os
from glob import glob
from sklearn.metrics import classification_report
import numpy as np
from sklearn.utils import shuffle
from keras import regularizers
from keras.models import Sequential, Model, load_model, Input
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D, GlobalAveragePooling2D, MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator
import keras.layers as Layers
from keras.callbacks import EarlyStopping, ModelCheckpoint
import keras.optimizers as Optimizer
print(tf.__version__)
from keras import applications
from tensorflow import keras
import math
import tensorflow_addons as tfa
import tensorflow as tf
from sklearn.metrics import confusion_matrix, cohen_kappa_score
from imblearn.metrics import sensitivity_specificity_support
from sklearn.metrics import classification_report, roc_auc_score
from imblearn.metrics import geometric_mean_score
import seaborn as sn
import pandas as pd
from sklearn.datasets import make_blobs
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from keras.models import load_model
from numpy import dstack | code |
88104085/cell_8 | [
"text_html_output_1.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from keras.models import Sequential,Model,load_model,Input
from keras.preprocessing.image import ImageDataGenerator
import math
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import tensorflow as tf
import tensorflow_addons as tfa
import tensorflow_addons as tfa
train_img_csv = '../input/my-pre-data/MURA-v1.1/abdekho_train.csv'
train_images_paths = pd.read_csv(os.path.join(train_img_csv), dtype=str, header=None)
train_images_paths.columns = ['image_path']
valid_img_csv = '../input/my-pre-data/MURA-v1.1/abdekho_valid.csv'
valid_images_paths = pd.read_csv(os.path.join(valid_img_csv), dtype=str, header=None)
valid_images_paths.columns = ['image_path']
train_images_paths['label'] = train_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
train_images_paths['category'] = train_images_paths['image_path'].apply(lambda x: x.split('/')[2])
valid_images_paths['label'] = valid_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
valid_images_paths['category'] = valid_images_paths['image_path'].apply(lambda x: x.split('/')[2])
train_images_paths_XR_FINGER = train_images_paths[train_images_paths['category'] == 'XR_FINGER']
valid_images_paths_XR_FINGER = valid_images_paths[valid_images_paths['category'] == 'XR_FINGER']
images_path_dir = '../input/my-pre-data'
datagen = ImageDataGenerator(samplewise_center=True, samplewise_std_normalization=True, rotation_range=30, width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.1, horizontal_flip=True, vertical_flip=True)
valid_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=30)
targetsize = (224, 224)
classmode = 'binary'
batchsize = 32
train_generator = datagen.flow_from_dataframe(dataframe=train_images_paths_XR_FINGER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, shuffle=True)
valid_generator = valid_datagen.flow_from_dataframe(dataframe=valid_images_paths_XR_FINGER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, shuffle=True)
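# Ensemble sketch: three ImageNet-pretrained backbones (DenseNet169, ResNet50, MobileNet) receive the same 224x224 input; each branch ends in its own sigmoid head and the three predictions are averaged below.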
input_image = Input(shape=(224, 224, 3), name='original_img')
dense_model_1 = tf.keras.applications.DenseNet169(include_top=False, weights='imagenet')
x = dense_model_1(input_image, training=True)
x = tf.keras.layers.Conv2D(20, (1, 1), activation='relu')(x)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dense(81, activation='relu')(x)
x = tf.keras.layers.Dense(81, activation='relu')(x)
x = tf.keras.layers.Dropout(0.3)(x)
x = tf.keras.layers.Dense(42, activation='relu')(x)
x = tf.keras.layers.Dropout(0.3)(x)
preds_dense_net = tf.keras.layers.Dense(1, activation='sigmoid')(x)
dense_model_2 = tf.keras.applications.ResNet50(weights='imagenet', include_top=False)
y = dense_model_2(input_image, training=True)
y = tf.keras.layers.Conv2D(20, (1, 1), activation='relu')(y)
y = tf.keras.layers.GlobalAveragePooling2D()(y)
y = tf.keras.layers.Dense(81, activation='relu')(y)
y = tf.keras.layers.Dense(81, activation='relu')(y)
y = tf.keras.layers.Dropout(0.3)(y)
y = tf.keras.layers.Dense(42, activation='relu')(y)
y = tf.keras.layers.Dropout(0.3)(y)
preds_resnet_net = tf.keras.layers.Dense(1, activation='sigmoid')(y)
dense_model_3 = tf.keras.applications.MobileNet(include_top=False, weights='imagenet')
z = dense_model_3(input_image, training=True)
z = tf.keras.layers.Conv2D(20, (1, 1), activation='relu')(z)
z = tf.keras.layers.GlobalAveragePooling2D()(z)
z = tf.keras.layers.Dense(81, activation='relu')(z)
z = tf.keras.layers.Dense(81, activation='relu')(z)
z = tf.keras.layers.Dropout(0.3)(z)
z = tf.keras.layers.Dense(42, activation='relu')(z)
z = tf.keras.layers.Dropout(0.3)(z)
preds_mobi_net = tf.keras.layers.Dense(1, activation='sigmoid')(z)
mean_nn_only = tf.reduce_mean(tf.stack([preds_mobi_net, preds_resnet_net, preds_dense_net], axis=0), axis=0)
ensemble_model = tf.keras.models.Model(input_image, mean_nn_only)
ensemble_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), loss='binary_crossentropy', metrics=[tfa.metrics.CohenKappa(num_classes=2), 'accuracy'])
STEP_SIZE_TRAIN = math.ceil(train_generator.n / train_generator.batch_size)
STEP_SIZE_VALID = math.ceil(valid_generator.n / valid_generator.batch_size)
ensemble_model.fit_generator(generator=train_generator, steps_per_epoch=STEP_SIZE_TRAIN, validation_data=valid_generator, validation_steps=STEP_SIZE_VALID, epochs=15, verbose=1)
_, cohen, acc = ensemble_model.evaluate(train_generator, verbose=1)
train_img_csv = '../input/my-pre-data/MURA-v1.1/abdekho_test.csv'
test_images_paths = pd.read_csv(os.path.join(train_img_csv), dtype=str, header=None)
test_images_paths.columns = ['image_path']
test_images_paths['label'] = test_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
test_images_paths['category'] = test_images_paths['image_path'].apply(lambda x: x.split('/')[2])
datagen = ImageDataGenerator(rescale=1.0 / 255)
test_images_paths_XR_FINGER = test_images_paths[test_images_paths['category'] == 'XR_FINGER']
test_generator_XR_FINGER = datagen.flow_from_dataframe(dataframe=test_images_paths_XR_FINGER, directory=images_path_dir, x_col='image_path', y_col='label', target_size=targetsize, class_mode=classmode, batch_size=batchsize, shuffle=True)
_, cohen, acc = ensemble_model.evaluate(test_generator_XR_FINGER, verbose=1)
print(' accuracy XR_FINGER: %.3f' % acc, end=' ')
print('kappa XR_FINGER: %.3f' % cohen) | code |
88104085/cell_3 | [
"text_plain_output_1.png"
] | import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train_img_csv = '../input/my-pre-data/MURA-v1.1/abdekho_train.csv'
train_images_paths = pd.read_csv(os.path.join(train_img_csv), dtype=str, header=None)
train_images_paths.columns = ['image_path']
valid_img_csv = '../input/my-pre-data/MURA-v1.1/abdekho_valid.csv'
valid_images_paths = pd.read_csv(os.path.join(valid_img_csv), dtype=str, header=None)
valid_images_paths.columns = ['image_path']
train_images_paths['label'] = train_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
train_images_paths['category'] = train_images_paths['image_path'].apply(lambda x: x.split('/')[2])
valid_images_paths['label'] = valid_images_paths['image_path'].map(lambda x: '1' if 'positive' in x else '0')
valid_images_paths['category'] = valid_images_paths['image_path'].apply(lambda x: x.split('/')[2])
train_images_paths_XR_FINGER = train_images_paths[train_images_paths['category'] == 'XR_FINGER']
print('\n\npositive cases:', len(train_images_paths_XR_FINGER[train_images_paths_XR_FINGER['label'] == '1']))
print('\n\nnegative cases:', len(train_images_paths_XR_FINGER[train_images_paths_XR_FINGER['label'] == '0']))
valid_images_paths_XR_FINGER = valid_images_paths[valid_images_paths['category'] == 'XR_FINGER']
print('\n\npositive cases:', len(valid_images_paths_XR_FINGER[valid_images_paths_XR_FINGER['label'] == '1']))
print('\n\nnegative cases:', len(valid_images_paths_XR_FINGER[valid_images_paths_XR_FINGER['label'] == '0'])) | code |
34147702/cell_13 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, Callback, ReduceLROnPlateau
from tensorflow.keras.layers import BatchNormalization,Activation,Dropout,Dense
from tensorflow.keras.layers import Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.models import Sequential
import cv2
import numpy as np
import numpy as np # linear algebra
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random
import re
import tensorflow as tf
import numpy as np
import pandas as pd
import os
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
def load_images(path):
images = []
bedroom = []
bathroom = []
frontal = []
kitchen = []
pattern = re.compile('([0-9]{1,3})_(bathroom|bedroom|frontal|kitchen).jpg$')
files = os.listdir(path=path)
files.sort()
for filename in files:
if pattern.match(filename) is None:
continue
img = cv2.imread(path + filename)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE))
if 'bedroom' in filename:
bedroom.append(img)
if 'bathroom' in filename:
bathroom.append(img)
if 'front' in filename:
frontal.append(img)
if 'kitchen' in filename:
kitchen.append(img)
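# Stitch each house's four room photos (bedroom, bathroom, frontal, kitchen) into one 2x2 tiled image.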
for i in range(len(bedroom)):
tiles = [[bedroom[i], bathroom[i]], [frontal[i], kitchen[i]]]
image_concat = cv2.vconcat([cv2.hconcat(v_list) for v_list in tiles])
images.append(image_concat)
return np.array(images) / 255.0
def create_cnn():
model = Sequential()
inputShape = (IMAGE_SIZE * 2, IMAGE_SIZE * 2, 3)
'\n    Exercise: try changing the kernel_size\n    '
kernel_size = (5, 5)
model.add(Conv2D(filters=32, kernel_size=kernel_size, strides=(1, 1), padding='valid', activation='relu', kernel_initializer='he_normal', input_shape=inputShape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.1))
model.add(Conv2D(filters=64, kernel_size=kernel_size, strides=(1, 1), padding='valid', activation='relu', kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.1))
model.add(Conv2D(filters=128, kernel_size=(2, 2), strides=(1, 1), padding='valid', activation='relu', kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.1))
'\n    Exercise: add one more Conv2D->MaxPooling2D->BatchNormalization->Dropout block\n    '
model.add(Flatten())
model.add(Dense(units=256, activation='relu', kernel_initializer='he_normal'))
model.add(Dense(units=32, activation='relu', kernel_initializer='he_normal'))
model.add(Dense(units=1, activation='linear'))
model.compile(loss='mape', optimizer='adam', metrics=['mape'])
return model
def kfold(train_images_x, train_y, valid_images_x, valid_y):
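# Manual cross-validation loop; with k = 1 below it degenerates into a single train/validation run, checkpointing the best model by validation loss.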
k = 1
train_x = train_images_x
valid_x = valid_images_x
num_val_samples = len(train_x) // k
num_epochs = 32
all_scores = []
all_mape_histories = []
filepath = 'cnn_best_model.hdf5'
es = EarlyStopping(patience=5, mode='min', verbose=1)
checkpoint = ModelCheckpoint(monitor='val_loss', filepath=filepath, save_best_only=True, mode='auto')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', patience=2, verbose=1, mode='min')
for i in range(k):
val_data = train_x[i * num_val_samples:(i + 1) * num_val_samples]
val_targets = train_y[i * num_val_samples:(i + 1) * num_val_samples]
partial_train_data = np.concatenate([train_x[:i * num_val_samples], train_x[(i + 1) * num_val_samples:]], axis=0)
partial_train_targets = np.concatenate([train_y[:i * num_val_samples], train_y[(i + 1) * num_val_samples:]], axis=0)
model = create_cnn()
history = model.fit(partial_train_data, partial_train_targets, validation_data=(valid_x, valid_y), epochs=num_epochs, batch_size=1, verbose=0, callbacks=[es, checkpoint, reduce_lr_loss])
val_mse, val_mape = model.evaluate(valid_x, valid_y, verbose=0)
all_scores.append(val_mape)
mape_history = history.history['val_mape']
all_mape_histories.append(mape_history)
return (all_scores, all_mape_histories)
def leave_one_out(train_image_x, train_y, valid_images_x, valid_y):
es = EarlyStopping(patience=5, mode='min', verbose=1)
checkpoint = ModelCheckpoint(monitor='val_loss', filepath=PATH_TO_HDF5, save_best_only=True, mode='auto')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', patience=2, verbose=1, mode='min')
inputShape = (IMAGE_SIZE * 2, IMAGE_SIZE * 2, 3)
model = create_cnn()
model.fit(train_images_x, train_y, validation_data=(valid_images_x, valid_y), epochs=50, batch_size=16, callbacks=[es, checkpoint, reduce_lr_loss])
return model
train = pd.read_csv(PATH_TO_TRAIN)
train_images = load_images(PATH_TO_TRAIN_IMAGE)
test_images = load_images(PATH_TO_TEST_IMAGE)
train_x, valid_x, train_images_x, valid_images_x = train_test_split(train, train_images, test_size=PER_TEST)
train_y = train_x['price'].values
valid_y = valid_x['price'].values
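# Standardize the tabular features with training-set mean/std (the same statistics are applied to the validation split).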
mean = train_x.mean(axis=0)
train_x -= mean
std = train_x.std(axis=0)
train_x /= std
valid_x -= mean
valid_x /= std
model = leave_one_out(train_images_x, train_y, valid_images_x, valid_y) | code |
34147702/cell_11 | [
"text_plain_output_1.png"
] | import cv2
import numpy as np
import numpy as np # linear algebra
import os
import os
import random
import re
import tensorflow as tf
import numpy as np
import pandas as pd
import os
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
def load_images(path):
images = []
bedroom = []
bathroom = []
frontal = []
kitchen = []
pattern = re.compile('([0-9]{1,3})_(bathroom|bedroom|frontal|kitchen).jpg$')
files = os.listdir(path=path)
files.sort()
for filename in files:
if pattern.match(filename) is None:
continue
img = cv2.imread(path + filename)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE))
if 'bedroom' in filename:
bedroom.append(img)
if 'bathroom' in filename:
bathroom.append(img)
if 'front' in filename:
frontal.append(img)
if 'kitchen' in filename:
kitchen.append(img)
for i in range(len(bedroom)):
tiles = [[bedroom[i], bathroom[i]], [frontal[i], kitchen[i]]]
image_concat = cv2.vconcat([cv2.hconcat(v_list) for v_list in tiles])
images.append(image_concat)
return np.array(images) / 255.0
train_images = load_images(PATH_TO_TRAIN_IMAGE)
test_images = load_images(PATH_TO_TEST_IMAGE)
display(train_images.shape) | code |
34147702/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
34147702/cell_15 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, Callback, ReduceLROnPlateau
from tensorflow.keras.layers import BatchNormalization,Activation,Dropout,Dense
from tensorflow.keras.layers import Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.models import Sequential
import cv2
import numpy as np
import numpy as np # linear algebra
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random
import re
import tensorflow as tf
import numpy as np
import pandas as pd
import os
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
def load_images(path):
images = []
bedroom = []
bathroom = []
frontal = []
kitchen = []
pattern = re.compile('([0-9]{1,3})_(bathroom|bedroom|frontal|kitchen).jpg$')
files = os.listdir(path=path)
files.sort()
for filename in files:
if pattern.match(filename) is None:
continue
img = cv2.imread(path + filename)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE))
if 'bedroom' in filename:
bedroom.append(img)
if 'bathroom' in filename:
bathroom.append(img)
if 'front' in filename:
frontal.append(img)
if 'kitchen' in filename:
kitchen.append(img)
for i in range(len(bedroom)):
tiles = [[bedroom[i], bathroom[i]], [frontal[i], kitchen[i]]]
image_concat = cv2.vconcat([cv2.hconcat(v_list) for v_list in tiles])
images.append(image_concat)
return np.array(images) / 255.0
def create_cnn():
model = Sequential()
inputShape = (IMAGE_SIZE * 2, IMAGE_SIZE * 2, 3)
'\n    Exercise: try changing the kernel_size\n    '
kernel_size = (5, 5)
model.add(Conv2D(filters=32, kernel_size=kernel_size, strides=(1, 1), padding='valid', activation='relu', kernel_initializer='he_normal', input_shape=inputShape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.1))
model.add(Conv2D(filters=64, kernel_size=kernel_size, strides=(1, 1), padding='valid', activation='relu', kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.1))
model.add(Conv2D(filters=128, kernel_size=(2, 2), strides=(1, 1), padding='valid', activation='relu', kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.1))
'\n    Exercise: add one more Conv2D->MaxPooling2D->BatchNormalization->Dropout block\n    '
model.add(Flatten())
model.add(Dense(units=256, activation='relu', kernel_initializer='he_normal'))
model.add(Dense(units=32, activation='relu', kernel_initializer='he_normal'))
model.add(Dense(units=1, activation='linear'))
model.compile(loss='mape', optimizer='adam', metrics=['mape'])
return model
def kfold(train_images_x, train_y, valid_images_x, valid_y):
k = 1
train_x = train_images_x
valid_x = valid_images_x
num_val_samples = len(train_x) // k
num_epochs = 32
all_scores = []
all_mape_histories = []
filepath = 'cnn_best_model.hdf5'
es = EarlyStopping(patience=5, mode='min', verbose=1)
checkpoint = ModelCheckpoint(monitor='val_loss', filepath=filepath, save_best_only=True, mode='auto')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', patience=2, verbose=1, mode='min')
for i in range(k):
val_data = train_x[i * num_val_samples:(i + 1) * num_val_samples]
val_targets = train_y[i * num_val_samples:(i + 1) * num_val_samples]
partial_train_data = np.concatenate([train_x[:i * num_val_samples], train_x[(i + 1) * num_val_samples:]], axis=0)
partial_train_targets = np.concatenate([train_y[:i * num_val_samples], train_y[(i + 1) * num_val_samples:]], axis=0)
model = create_cnn()
history = model.fit(partial_train_data, partial_train_targets, validation_data=(valid_x, valid_y), epochs=num_epochs, batch_size=1, verbose=0, callbacks=[es, checkpoint, reduce_lr_loss])
val_mse, val_mape = model.evaluate(valid_x, valid_y, verbose=0)
all_scores.append(val_mape)
mape_history = history.history['val_mape']
all_mape_histories.append(mape_history)
return (all_scores, all_mape_histories)
def leave_one_out(train_image_x, train_y, valid_images_x, valid_y):
es = EarlyStopping(patience=5, mode='min', verbose=1)
checkpoint = ModelCheckpoint(monitor='val_loss', filepath=PATH_TO_HDF5, save_best_only=True, mode='auto')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', patience=2, verbose=1, mode='min')
inputShape = (IMAGE_SIZE * 2, IMAGE_SIZE * 2, 3)
model = create_cnn()
model.fit(train_images_x, train_y, validation_data=(valid_images_x, valid_y), epochs=50, batch_size=16, callbacks=[es, checkpoint, reduce_lr_loss])
return model
train = pd.read_csv(PATH_TO_TRAIN)
train_images = load_images(PATH_TO_TRAIN_IMAGE)
test_images = load_images(PATH_TO_TEST_IMAGE)
train_x, valid_x, train_images_x, valid_images_x = train_test_split(train, train_images, test_size=PER_TEST)
train_y = train_x['price'].values
valid_y = valid_x['price'].values
mean = train_x.mean(axis=0)
train_x -= mean
std = train_x.std(axis=0)
train_x /= std
valid_x -= mean
valid_x /= std
model = leave_one_out(train_images_x, train_y, valid_images_x, valid_y)
y_pred = model.predict(test_images, batch_size=32)
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = (np.array(y_true), np.array(y_pred))
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
model.load_weights(PATH_TO_HDF5)
valid_pred = model.predict(valid_images_x, batch_size=32).reshape((-1, 1))
mape_score = mean_absolute_percentage_error(valid_y, valid_pred)
print(mape_score) | code |
34147702/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, Callback, ReduceLROnPlateau
from tensorflow.keras.layers import BatchNormalization,Activation,Dropout,Dense
from tensorflow.keras.layers import Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.models import Sequential
import cv2
import numpy as np
import numpy as np # linear algebra
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random
import re
import tensorflow as tf
import numpy as np
import pandas as pd
import os
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
def load_images(path):
images = []
bedroom = []
bathroom = []
frontal = []
kitchen = []
pattern = re.compile('([0-9]{1,3})_(bathroom|bedroom|frontal|kitchen).jpg$')
files = os.listdir(path=path)
files.sort()
for filename in files:
if pattern.match(filename) is None:
continue
img = cv2.imread(path + filename)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE))
if 'bedroom' in filename:
bedroom.append(img)
if 'bathroom' in filename:
bathroom.append(img)
if 'front' in filename:
frontal.append(img)
if 'kitchen' in filename:
kitchen.append(img)
for i in range(len(bedroom)):
tiles = [[bedroom[i], bathroom[i]], [frontal[i], kitchen[i]]]
image_concat = cv2.vconcat([cv2.hconcat(v_list) for v_list in tiles])
images.append(image_concat)
return np.array(images) / 255.0
def create_cnn():
model = Sequential()
inputShape = (IMAGE_SIZE * 2, IMAGE_SIZE * 2, 3)
'\n    Exercise: try changing the kernel_size\n    '
kernel_size = (5, 5)
model.add(Conv2D(filters=32, kernel_size=kernel_size, strides=(1, 1), padding='valid', activation='relu', kernel_initializer='he_normal', input_shape=inputShape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.1))
model.add(Conv2D(filters=64, kernel_size=kernel_size, strides=(1, 1), padding='valid', activation='relu', kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.1))
model.add(Conv2D(filters=128, kernel_size=(2, 2), strides=(1, 1), padding='valid', activation='relu', kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.1))
'\n    Exercise: add one more Conv2D->MaxPooling2D->BatchNormalization->Dropout block\n    '
model.add(Flatten())
model.add(Dense(units=256, activation='relu', kernel_initializer='he_normal'))
model.add(Dense(units=32, activation='relu', kernel_initializer='he_normal'))
model.add(Dense(units=1, activation='linear'))
model.compile(loss='mape', optimizer='adam', metrics=['mape'])
return model
def kfold(train_images_x, train_y, valid_images_x, valid_y):
k = 1
train_x = train_images_x
valid_x = valid_images_x
num_val_samples = len(train_x) // k
num_epochs = 32
all_scores = []
all_mape_histories = []
filepath = 'cnn_best_model.hdf5'
es = EarlyStopping(patience=5, mode='min', verbose=1)
checkpoint = ModelCheckpoint(monitor='val_loss', filepath=filepath, save_best_only=True, mode='auto')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', patience=2, verbose=1, mode='min')
for i in range(k):
val_data = train_x[i * num_val_samples:(i + 1) * num_val_samples]
val_targets = train_y[i * num_val_samples:(i + 1) * num_val_samples]
partial_train_data = np.concatenate([train_x[:i * num_val_samples], train_x[(i + 1) * num_val_samples:]], axis=0)
partial_train_targets = np.concatenate([train_y[:i * num_val_samples], train_y[(i + 1) * num_val_samples:]], axis=0)
model = create_cnn()
history = model.fit(partial_train_data, partial_train_targets, validation_data=(valid_x, valid_y), epochs=num_epochs, batch_size=1, verbose=0, callbacks=[es, checkpoint, reduce_lr_loss])
val_mse, val_mape = model.evaluate(valid_x, valid_y, verbose=0)
all_scores.append(val_mape)
mape_history = history.history['val_mape']
all_mape_histories.append(mape_history)
return (all_scores, all_mape_histories)
def leave_one_out(train_image_x, train_y, valid_images_x, valid_y):
es = EarlyStopping(patience=5, mode='min', verbose=1)
checkpoint = ModelCheckpoint(monitor='val_loss', filepath=PATH_TO_HDF5, save_best_only=True, mode='auto')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', patience=2, verbose=1, mode='min')
inputShape = (IMAGE_SIZE * 2, IMAGE_SIZE * 2, 3)
model = create_cnn()
model.fit(train_images_x, train_y, validation_data=(valid_images_x, valid_y), epochs=50, batch_size=16, callbacks=[es, checkpoint, reduce_lr_loss])
return model
train = pd.read_csv(PATH_TO_TRAIN)
train_images = load_images(PATH_TO_TRAIN_IMAGE)
test_images = load_images(PATH_TO_TEST_IMAGE)
train_x, valid_x, train_images_x, valid_images_x = train_test_split(train, train_images, test_size=PER_TEST)
train_y = train_x['price'].values
valid_y = valid_x['price'].values
mean = train_x.mean(axis=0)
train_x -= mean
std = train_x.std(axis=0)
train_x /= std
valid_x -= mean
valid_x /= std
model = leave_one_out(train_images_x, train_y, valid_images_x, valid_y)
y_pred = model.predict(test_images, batch_size=32)
print(y_pred) | code |
34147702/cell_10 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv(PATH_TO_TRAIN)
display(train.shape)
display(train.head()) | code |
34147702/cell_12 | [
"text_plain_output_4.png",
"text_plain_output_3.png",
"text_plain_output_2.png",
"text_plain_output_1.png"
] | from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, Callback, ReduceLROnPlateau
from tensorflow.keras.layers import BatchNormalization,Activation,Dropout,Dense
from tensorflow.keras.layers import Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.models import Sequential
import cv2
import numpy as np
import numpy as np # linear algebra
import os
import os
import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import random
import re
import tensorflow as tf
import numpy as np
import pandas as pd
import os
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
def load_images(path):
images = []
bedroom = []
bathroom = []
frontal = []
kitchen = []
pattern = re.compile('([0-9]{1,3})_(bathroom|bedroom|frontal|kitchen).jpg$')
files = os.listdir(path=path)
files.sort()
for filename in files:
if pattern.match(filename) is None:
continue
img = cv2.imread(path + filename)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, (IMAGE_SIZE, IMAGE_SIZE))
if 'bedroom' in filename:
bedroom.append(img)
if 'bathroom' in filename:
bathroom.append(img)
if 'front' in filename:
frontal.append(img)
if 'kitchen' in filename:
kitchen.append(img)
for i in range(len(bedroom)):
tiles = [[bedroom[i], bathroom[i]], [frontal[i], kitchen[i]]]
image_concat = cv2.vconcat([cv2.hconcat(v_list) for v_list in tiles])
images.append(image_concat)
return np.array(images) / 255.0
def create_cnn():
model = Sequential()
inputShape = (IMAGE_SIZE * 2, IMAGE_SIZE * 2, 3)
'\n    Exercise: try changing the kernel_size\n    '
kernel_size = (5, 5)
model.add(Conv2D(filters=32, kernel_size=kernel_size, strides=(1, 1), padding='valid', activation='relu', kernel_initializer='he_normal', input_shape=inputShape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.1))
model.add(Conv2D(filters=64, kernel_size=kernel_size, strides=(1, 1), padding='valid', activation='relu', kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.1))
model.add(Conv2D(filters=128, kernel_size=(2, 2), strides=(1, 1), padding='valid', activation='relu', kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.1))
'\n    Exercise: add one more Conv2D->MaxPooling2D->BatchNormalization->Dropout block\n    '
model.add(Flatten())
model.add(Dense(units=256, activation='relu', kernel_initializer='he_normal'))
model.add(Dense(units=32, activation='relu', kernel_initializer='he_normal'))
model.add(Dense(units=1, activation='linear'))
model.compile(loss='mape', optimizer='adam', metrics=['mape'])
return model
def kfold(train_images_x, train_y, valid_images_x, valid_y):
k = 1
train_x = train_images_x
valid_x = valid_images_x
num_val_samples = len(train_x) // k
num_epochs = 32
all_scores = []
all_mape_histories = []
filepath = 'cnn_best_model.hdf5'
es = EarlyStopping(patience=5, mode='min', verbose=1)
checkpoint = ModelCheckpoint(monitor='val_loss', filepath=filepath, save_best_only=True, mode='auto')
reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', patience=2, verbose=1, mode='min')
for i in range(k):
val_data = train_x[i * num_val_samples:(i + 1) * num_val_samples]
val_targets = train_y[i * num_val_samples:(i + 1) * num_val_samples]
partial_train_data = np.concatenate([train_x[:i * num_val_samples], train_x[(i + 1) * num_val_samples:]], axis=0)
partial_train_targets = np.concatenate([train_y[:i * num_val_samples], train_y[(i + 1) * num_val_samples:]], axis=0)
model = create_cnn()
history = model.fit(partial_train_data, partial_train_targets, validation_data=(valid_x, valid_y), epochs=num_epochs, batch_size=1, verbose=0, callbacks=[es, checkpoint, reduce_lr_loss])
val_mse, val_mape = model.evaluate(valid_x, valid_y, verbose=0)
all_scores.append(val_mape)
mape_history = history.history['val_mape']
all_mape_histories.append(mape_history)
return (all_scores, all_mape_histories)
train = pd.read_csv(PATH_TO_TRAIN)
train_images = load_images(PATH_TO_TRAIN_IMAGE)
test_images = load_images(PATH_TO_TEST_IMAGE)
train_x, valid_x, train_images_x, valid_images_x = train_test_split(train, train_images, test_size=PER_TEST)
train_y = train_x['price'].values
valid_y = valid_x['price'].values
display(train_images_x.shape)
display(valid_images_x.shape)
display(train_y.shape)
display(valid_y.shape)
mean = train_x.mean(axis=0)
train_x -= mean
std = train_x.std(axis=0)
train_x /= std
valid_x -= mean
valid_x /= std | code |
128045913/cell_9 | [
"text_plain_output_1.png"
] | from PIL import Image, ImageDraw
from xml.dom import minidom
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import xml.etree.ElementTree as ET
import numpy as np
import pandas as pd
import xml.etree.ElementTree as ET
from xml.dom import minidom
import os
paths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
paths.append(os.path.join(dirname, filename))
xmls = []
jpgs = []
for path in paths:
if path.split('.')[-1] == 'xml':
xmls.append(path)
if path.split('.')[-1] == 'jpg':
jpgs.append(path)
tree = ET.parse(xmls[4])
rough_string = ET.tostring(tree.getroot(), 'utf')
reparsed = minidom.parseString(rough_string)
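# Walk every XML report: collect per-case metadata into a flat dict and keep each mark's SVG string keyed by '<case number>_<image index>'.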
data = {'Number': [], 'Age': [], 'Sex': [], 'Composition': [], 'Echogenicity': [], 'Margins': [], 'Calcifications': [], 'Tirads': [], 'Reportbacaf': [], 'Reporteco': []}
svg_strings = {}
for xml in xmls:
tree = ET.parse(xml)
root = tree.getroot()
case_number = int(root.find('number').text)
data['Number'].append(case_number)
if root.find('age').text:
data['Age'].append(int(root.find('age').text))
else:
data['Age'].append(root.find('age').text)
data['Sex'].append(root.find('sex').text)
data['Composition'].append(root.find('composition').text)
data['Echogenicity'].append(root.find('echogenicity').text)
data['Margins'].append(root.find('margins').text)
data['Calcifications'].append(root.find('calcifications').text)
data['Tirads'].append(root.find('tirads').text)
data['Reportbacaf'].append(root.find('reportbacaf').text)
data['Reporteco'].append(root.find('reporteco').text)
for mark in root.findall('mark'):
image_idx = mark.find('image').text
svg_strings[f'{case_number}_{image_idx}'] = mark.find('svg').text
df = pd.DataFrame(data)
df.sort_values(by='Number', inplace=True)
df.set_index('Number', inplace=True)
Image.open(jpgs[0])
unique_dims = []
for jpg in jpgs:
dims = Image.open(jpg).size
if dims not in unique_dims:
unique_dims.append(dims)
image_size = unique_dims[0]
list(svg_strings.items())[0] | code |
128045913/cell_4 | [
"text_plain_output_1.png"
] | from xml.dom import minidom
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import xml.etree.ElementTree as ET
import numpy as np
import pandas as pd
import xml.etree.ElementTree as ET
from xml.dom import minidom
import os
paths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
paths.append(os.path.join(dirname, filename))
xmls = []
jpgs = []
for path in paths:
if path.split('.')[-1] == 'xml':
xmls.append(path)
if path.split('.')[-1] == 'jpg':
jpgs.append(path)
tree = ET.parse(xmls[4])
rough_string = ET.tostring(tree.getroot(), 'utf')
reparsed = minidom.parseString(rough_string)
data = {'Number': [], 'Age': [], 'Sex': [], 'Composition': [], 'Echogenicity': [], 'Margins': [], 'Calcifications': [], 'Tirads': [], 'Reportbacaf': [], 'Reporteco': []}
svg_strings = {}
for xml in xmls:
tree = ET.parse(xml)
root = tree.getroot()
case_number = int(root.find('number').text)
data['Number'].append(case_number)
if root.find('age').text:
data['Age'].append(int(root.find('age').text))
else:
data['Age'].append(root.find('age').text)
data['Sex'].append(root.find('sex').text)
data['Composition'].append(root.find('composition').text)
data['Echogenicity'].append(root.find('echogenicity').text)
data['Margins'].append(root.find('margins').text)
data['Calcifications'].append(root.find('calcifications').text)
data['Tirads'].append(root.find('tirads').text)
data['Reportbacaf'].append(root.find('reportbacaf').text)
data['Reporteco'].append(root.find('reporteco').text)
for mark in root.findall('mark'):
image_idx = mark.find('image').text
svg_strings[f'{case_number}_{image_idx}'] = mark.find('svg').text
df = pd.DataFrame(data)
df.sort_values(by='Number', inplace=True)
df.set_index('Number', inplace=True)
df.count() | code |
128045913/cell_2 | [
"text_plain_output_1.png"
] | from xml.dom import minidom
import os
import xml.etree.ElementTree as ET
import numpy as np
import pandas as pd
import xml.etree.ElementTree as ET
from xml.dom import minidom
import os
paths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
paths.append(os.path.join(dirname, filename))
xmls = []
jpgs = []
for path in paths:
if path.split('.')[-1] == 'xml':
xmls.append(path)
if path.split('.')[-1] == 'jpg':
jpgs.append(path)
tree = ET.parse(xmls[4])
rough_string = ET.tostring(tree.getroot(), 'utf')
reparsed = minidom.parseString(rough_string)
print(reparsed.toprettyxml(indent=' ')) | code |
128045913/cell_11 | [
"text_plain_output_1.png"
] | from PIL import Image, ImageDraw
from xml.dom import minidom
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import xml.etree.ElementTree as ET
import numpy as np
import pandas as pd
import xml.etree.ElementTree as ET
from xml.dom import minidom
import os
paths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
paths.append(os.path.join(dirname, filename))
xmls = []
jpgs = []
for path in paths:
if path.split('.')[-1] == 'xml':
xmls.append(path)
if path.split('.')[-1] == 'jpg':
jpgs.append(path)
tree = ET.parse(xmls[4])
rough_string = ET.tostring(tree.getroot(), 'utf')
reparsed = minidom.parseString(rough_string)
data = {'Number': [], 'Age': [], 'Sex': [], 'Composition': [], 'Echogenicity': [], 'Margins': [], 'Calcifications': [], 'Tirads': [], 'Reportbacaf': [], 'Reporteco': []}
svg_strings = {}
for xml in xmls:
tree = ET.parse(xml)
root = tree.getroot()
case_number = int(root.find('number').text)
data['Number'].append(case_number)
if root.find('age').text:
data['Age'].append(int(root.find('age').text))
else:
data['Age'].append(root.find('age').text)
data['Sex'].append(root.find('sex').text)
data['Composition'].append(root.find('composition').text)
data['Echogenicity'].append(root.find('echogenicity').text)
data['Margins'].append(root.find('margins').text)
data['Calcifications'].append(root.find('calcifications').text)
data['Tirads'].append(root.find('tirads').text)
data['Reportbacaf'].append(root.find('reportbacaf').text)
data['Reporteco'].append(root.find('reporteco').text)
for mark in root.findall('mark'):
image_idx = mark.find('image').text
svg_strings[f'{case_number}_{image_idx}'] = mark.find('svg').text
df = pd.DataFrame(data)
df.sort_values(by='Number', inplace=True)
df.set_index('Number', inplace=True)
Image.open(jpgs[0])
unique_dims = []
for jpg in jpgs:
dims = Image.open(jpg).size
if dims not in unique_dims:
unique_dims.append(dims)
image_size = unique_dims[0]
list(svg_strings.items())[0]
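# Rasterize the annotations: each SVG-like string is parsed with eval and its polygons are drawn as filled regions on a blank mask of the common image size; strings that fail to parse are recorded in corrupted_xmls.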
count = 0
corrupted_xmls = []
for svg_name, svg_str in svg_strings.items():
count += 1
img = Image.new('1', image_size)
draw = ImageDraw.Draw(img)
if svg_str is not None:
try:
svg_content = eval(svg_str)
except SyntaxError:
corrupted_xmls.append(svg_name)
for area in svg_content:
points = [(point['x'], point['y']) for point in area['points']]
draw.polygon(points, fill='white')
img.save(f'/kaggle/working/mask_{svg_name}.jpg')
corrupted_xmls | code |
128045913/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import xml.etree.ElementTree as ET
from xml.dom import minidom
import os
paths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
paths.append(os.path.join(dirname, filename))
xmls = []
jpgs = []
for path in paths:
if path.split('.')[-1] == 'xml':
xmls.append(path)
if path.split('.')[-1] == 'jpg':
jpgs.append(path)
print(f'{len(xmls)} xmls + {len(jpgs)} jpgs = {len(paths)} paths') | code |
128045913/cell_7 | [
"text_plain_output_1.png"
] | from PIL import Image, ImageDraw
import os
import numpy as np
import pandas as pd
import xml.etree.ElementTree as ET
from xml.dom import minidom
import os
paths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
paths.append(os.path.join(dirname, filename))
xmls = []
jpgs = []
for path in paths:
if path.split('.')[-1] == 'xml':
xmls.append(path)
if path.split('.')[-1] == 'jpg':
jpgs.append(path)
Image.open(jpgs[0]) | code |
128045913/cell_8 | [
"text_plain_output_1.png"
] | from PIL import Image, ImageDraw
import os
import numpy as np
import pandas as pd
import xml.etree.ElementTree as ET
from xml.dom import minidom
import os
paths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
paths.append(os.path.join(dirname, filename))
xmls = []
jpgs = []
for path in paths:
if path.split('.')[-1] == 'xml':
xmls.append(path)
if path.split('.')[-1] == 'jpg':
jpgs.append(path)
Image.open(jpgs[0])
unique_dims = []
for jpg in jpgs:
dims = Image.open(jpg).size
if dims not in unique_dims:
unique_dims.append(dims)
print(unique_dims) | code |
128045913/cell_3 | [
"image_output_1.png"
] | from xml.dom import minidom
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import xml.etree.ElementTree as ET
import numpy as np
import pandas as pd
import xml.etree.ElementTree as ET
from xml.dom import minidom
import os
paths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
paths.append(os.path.join(dirname, filename))
xmls = []
jpgs = []
for path in paths:
if path.split('.')[-1] == 'xml':
xmls.append(path)
if path.split('.')[-1] == 'jpg':
jpgs.append(path)
tree = ET.parse(xmls[4])
rough_string = ET.tostring(tree.getroot(), 'utf')
reparsed = minidom.parseString(rough_string)
data = {'Number': [], 'Age': [], 'Sex': [], 'Composition': [], 'Echogenicity': [], 'Margins': [], 'Calcifications': [], 'Tirads': [], 'Reportbacaf': [], 'Reporteco': []}
svg_strings = {}
for xml in xmls:
tree = ET.parse(xml)
root = tree.getroot()
case_number = int(root.find('number').text)
data['Number'].append(case_number)
if root.find('age').text:
data['Age'].append(int(root.find('age').text))
else:
data['Age'].append(root.find('age').text)
data['Sex'].append(root.find('sex').text)
data['Composition'].append(root.find('composition').text)
data['Echogenicity'].append(root.find('echogenicity').text)
data['Margins'].append(root.find('margins').text)
data['Calcifications'].append(root.find('calcifications').text)
data['Tirads'].append(root.find('tirads').text)
data['Reportbacaf'].append(root.find('reportbacaf').text)
data['Reporteco'].append(root.find('reporteco').text)
for mark in root.findall('mark'):
image_idx = mark.find('image').text
svg_strings[f'{case_number}_{image_idx}'] = mark.find('svg').text
df = pd.DataFrame(data)
df.sort_values(by='Number', inplace=True)
df.set_index('Number', inplace=True)
df.head() | code |
128045913/cell_12 | [
"text_html_output_1.png"
] | (197, 205) | code |
105211362/cell_21 | [
"text_html_output_1.png"
] | import pandas as pd
filepath = '../input/mysteryanimemanga/myanimelist-anime-mystery-detective-cleaned.csv'
df = pd.read_csv(filepath)
df.columns
df['Rating'].sort_values(ascending=False) | code |
105211362/cell_13 | [
"text_plain_output_1.png"
] | import pandas as pd
filepath = '../input/mysteryanimemanga/myanimelist-anime-mystery-detective-cleaned.csv'
df = pd.read_csv(filepath)
df.columns
df['Type'].unique() | code |
105211362/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
filepath = '../input/mysteryanimemanga/myanimelist-anime-mystery-detective-cleaned.csv'
df = pd.read_csv(filepath)
df.columns
genres = ['Gourmet', 'Sports', 'Adventure', 'Avant Garde', 'Supernatural', 'Suspense', 'Slice of Life', 'Sci-Fi', 'Horror', 'Comedy', 'Drama', 'Fantasy', 'Action']
df_genres = df[genres].sum()
df_genres.sort_values(ascending=False, inplace=True)
print(df_genres) | code |
105211362/cell_25 | [
"image_output_1.png"
] | import pandas as pd
filepath = '../input/mysteryanimemanga/myanimelist-anime-mystery-detective-cleaned.csv'
df = pd.read_csv(filepath)
df.columns
df_rated8_movie = df[(df['Rating'] > 8) & (df['Type'] == 'Movie')]
df_rated8_movie[['Title', 'Rating']].sort_values(by='Rating', ascending=False) | code |
105211362/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd
filepath = '../input/mysteryanimemanga/myanimelist-anime-mystery-detective-cleaned.csv'
df = pd.read_csv(filepath)
df.info() | code |
105211362/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd
filepath = '../input/mysteryanimemanga/myanimelist-anime-mystery-detective-cleaned.csv'
df = pd.read_csv(filepath)
df.columns
df_rated8_tv = df[(df['Rating'] > 8) & (df['Type'] == 'TV')]
df_rated8_tv[['Title', 'Rating']].sort_values(by='Rating', ascending=False) | code |
105211362/cell_29 | [
"text_html_output_1.png"
] | import pandas as pd
filepath = '../input/mysteryanimemanga/myanimelist-anime-mystery-detective-cleaned.csv'
df = pd.read_csv(filepath)
df.columns
df_rated8_special = df[(df['Rating'] > 8) & (df['Type'] == 'Special')]
df_rated8_special[['Title', 'Rating']].sort_values(by='Rating', ascending=False) | code |
105211362/cell_11 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
filepath = '../input/mysteryanimemanga/myanimelist-anime-mystery-detective-cleaned.csv'
df = pd.read_csv(filepath)
df.columns
genres = ['Gourmet', 'Sports', 'Adventure', 'Avant Garde', 'Supernatural', 'Suspense', 'Slice of Life', 'Sci-Fi', 'Horror', 'Comedy', 'Drama', 'Fantasy', 'Action']
df_genres = df[genres].sum()
df_genres.sort_values(ascending=False, inplace=True)
df_genres = df_genres[df_genres > 100]
fig = plt.figure(figsize=(10, 6))
ax = sns.barplot(x=df_genres.values, y=df_genres.index)
ax.set(ylabel='theme')
plt.show() | code |
105211362/cell_19 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
filepath = '../input/mysteryanimemanga/myanimelist-anime-mystery-detective-cleaned.csv'
df = pd.read_csv(filepath)
df.columns
genres = ['Gourmet', 'Sports', 'Adventure', 'Avant Garde', 'Supernatural', 'Suspense', 'Slice of Life', 'Sci-Fi', 'Horror', 'Comedy', 'Drama', 'Fantasy', 'Action']
df_genres = df[genres].sum()
df_genres.sort_values(ascending=False, inplace=True)
df_genres = df_genres[df_genres > 100]
fig = plt.figure(figsize=(10,6))
ax = sns.barplot(x=df_genres.values, y=df_genres.index)
ax.set(ylabel='theme')
plt.show()
df_types = df['Type'].value_counts()
df_types
df_types = df_types[df_types > 100]
fig = plt.figure(figsize=(10, 4))
sns.barplot(x=df_types.values, y=df_types.index)
plt.show() | code |
105211362/cell_45 | [
"image_output_1.png"
] | import pandas as pd
filepath = '../input/mysteryanimemanga/myanimelist-anime-mystery-detective-cleaned.csv'
df = pd.read_csv(filepath)
df.columns
df_fans = df[['Title', 'Studio', 'Members']]
df_fans['Members'].unique() | code |
105211362/cell_18 | [
"image_output_1.png"
] | import pandas as pd
filepath = '../input/mysteryanimemanga/myanimelist-anime-mystery-detective-cleaned.csv'
df = pd.read_csv(filepath)
df.columns
df_types = df['Type'].value_counts()
df_types | code |
105211362/cell_32 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
filepath = '../input/mysteryanimemanga/myanimelist-anime-mystery-detective-cleaned.csv'
df = pd.read_csv(filepath)
df.columns
genres = ['Gourmet', 'Sports', 'Adventure', 'Avant Garde', 'Supernatural', 'Suspense', 'Slice of Life', 'Sci-Fi', 'Horror', 'Comedy', 'Drama', 'Fantasy', 'Action']
df_genres = df[genres].sum()
df_genres.sort_values(ascending=False, inplace=True)
df_genres = df_genres[df_genres > 100]
fig = plt.figure(figsize=(10,6))
ax = sns.barplot(x=df_genres.values, y=df_genres.index)
ax.set(ylabel='theme')
plt.show()
df_types = df['Type'].value_counts()
df_types
df_types = df_types[df_types > 100]
fig = plt.figure(figsize=(10,4))
sns.barplot(x=df_types.values, y=df_types.index)
plt.show()
df_rated8_tv = df[(df['Rating'] > 8) & (df['Type'] == 'TV')]
df_rated8_tv[['Title', 'Rating']].sort_values(by='Rating', ascending=False)
df_rated8_movie = df[(df['Rating'] > 8) & (df['Type'] == 'Movie')]
df_rated8_movie[['Title', 'Rating']].sort_values(by='Rating', ascending=False)
df_rated8_ova = df[(df['Rating'] > 8) & (df['Type'] == 'OVA')]
df_rated8_ova[['Title', 'Rating']].sort_values(by='Rating', ascending=False)
df_rated8_special = df[(df['Rating'] > 8) & (df['Type'] == 'Special')]
df_rated8_special[['Title', 'Rating']].sort_values(by='Rating', ascending=False)
df_rated8_tv_genres = df_rated8_tv[genres].sum().sort_values(ascending=False)
df_rated8_movie_genres = df_rated8_movie[genres].sum().sort_values(ascending=False)
df_rated8_ova_genres = df_rated8_ova[genres].sum().sort_values(ascending=False)
df_rated8_special_genres = df_rated8_special[genres].sum().sort_values(ascending=False)
df_rated8_genre_count = pd.DataFrame({'TV': df_rated8_tv_genres, 'Movie': df_rated8_movie_genres, 'OVA': df_rated8_ova_genres, 'Special': df_rated8_special_genres})
df_rated8_genre_count
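# Heatmap: how many highly rated (>8) titles of each format (TV, Movie, OVA, Special) carry each theme tag.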
fig = plt.figure(figsize=(9, 7))
sns.heatmap(df_rated8_genre_count, vmax=14, cmap='Blues', annot=True)
plt.show() | code |
105211362/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd
filepath = '../input/mysteryanimemanga/myanimelist-anime-mystery-detective-cleaned.csv'
df = pd.read_csv(filepath)
df.columns
df[df['Type'] == '-'] | code |
105211362/cell_38 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
filepath = '../input/mysteryanimemanga/myanimelist-anime-mystery-detective-cleaned.csv'
df = pd.read_csv(filepath)
df.columns
genres = ['Gourmet', 'Sports', 'Adventure', 'Avant Garde', 'Supernatural', 'Suspense', 'Slice of Life', 'Sci-Fi', 'Horror', 'Comedy', 'Drama', 'Fantasy', 'Action']
df_genres = df[genres].sum()
df_genres.sort_values(ascending=False, inplace=True)
df_genres = df_genres[df_genres > 100]
fig = plt.figure(figsize=(10,6))
ax = sns.barplot(x=df_genres.values, y=df_genres.index)
ax.set(ylabel='theme')
plt.show()
df_types = df['Type'].value_counts()
df_types
df_types = df_types[df_types > 100]
fig = plt.figure(figsize=(10,4))
sns.barplot(x=df_types.values, y=df_types.index)
plt.show()
df_rated8_tv = df[(df['Rating'] > 8) & (df['Type'] == 'TV')]
df_rated8_tv[['Title', 'Rating']].sort_values(by='Rating', ascending=False)
df_rated8_movie = df[(df['Rating'] > 8) & (df['Type'] == 'Movie')]
df_rated8_movie[['Title', 'Rating']].sort_values(by='Rating', ascending=False)
df_rated8_ova = df[(df['Rating'] > 8) & (df['Type'] == 'OVA')]
df_rated8_ova[['Title', 'Rating']].sort_values(by='Rating', ascending=False)
df_rated8_special = df[(df['Rating'] > 8) & (df['Type'] == 'Special')]
df_rated8_special[['Title', 'Rating']].sort_values(by='Rating', ascending=False)
df_rated8_tv_genres = df_rated8_tv[genres].sum().sort_values(ascending=False)
df_rated8_movie_genres = df_rated8_movie[genres].sum().sort_values(ascending=False)
df_rated8_ova_genres = df_rated8_ova[genres].sum().sort_values(ascending=False)
df_rated8_special_genres = df_rated8_special[genres].sum().sort_values(ascending=False)
df_rated8_genre_count = pd.DataFrame({'TV': df_rated8_tv_genres, 'Movie': df_rated8_movie_genres, 'OVA': df_rated8_ova_genres, 'Special': df_rated8_special_genres})
df_rated8_genre_count
fig = plt.figure(figsize=(9,7))
sns.heatmap(df_rated8_genre_count, vmax=14, cmap="Blues", annot=True)
plt.show()
df_members = df[['Title', 'Rating', 'Studio', 'Members'] + genres]
df_members = df_members[df_members['Members'] >= 100000]
df_members.sort_values(by='Members', ascending=False).head(10)
df_members_rated8 = df_members[df_members['Rating'] > 8]
df_members_rated8.sort_values(by='Members', ascending=False).head(10)
df_members_rated8_genres = df_members_rated8[genres].sum().sort_values(ascending=False)
df_members_rated8_genres = df_members_rated8_genres[df_members_rated8_genres > 5]
fig = plt.figure(figsize=(10, 6))
ax = sns.barplot(x=df_members_rated8_genres.values, y=df_members_rated8_genres.index)
ax.set(ylabel='theme')
plt.show() | code |
105211362/cell_43 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
filepath = '../input/mysteryanimemanga/myanimelist-anime-mystery-detective-cleaned.csv'
df = pd.read_csv(filepath)
df.columns
genres = ['Gourmet', 'Sports', 'Adventure', 'Avant Garde', 'Supernatural', 'Suspense', 'Slice of Life', 'Sci-Fi', 'Horror', 'Comedy', 'Drama', 'Fantasy', 'Action']
df_genres = df[genres].sum()
df_genres.sort_values(ascending=False, inplace=True)
df_genres = df_genres[df_genres > 100]
fig = plt.figure(figsize=(10,6))
ax = sns.barplot(x=df_genres.values, y=df_genres.index)
ax.set(ylabel='theme')
plt.show()
df_types = df['Type'].value_counts()
df_types
df_types = df_types[df_types > 100]
fig = plt.figure(figsize=(10,4))
sns.barplot(x=df_types.values, y=df_types.index)
plt.show()
df_rated8_tv = df[(df['Rating'] > 8) & (df['Type'] == 'TV')]
df_rated8_tv[['Title', 'Rating']].sort_values(by='Rating', ascending=False)
df_rated8_movie = df[(df['Rating'] > 8) & (df['Type'] == 'Movie')]
df_rated8_movie[['Title', 'Rating']].sort_values(by='Rating', ascending=False)
df_rated8_ova = df[(df['Rating'] > 8) & (df['Type'] == 'OVA')]
df_rated8_ova[['Title', 'Rating']].sort_values(by='Rating', ascending=False)
df_rated8_special = df[(df['Rating'] > 8) & (df['Type'] == 'Special')]
df_rated8_special[['Title', 'Rating']].sort_values(by='Rating', ascending=False)
df_rated8_tv_genres = df_rated8_tv[genres].sum().sort_values(ascending=False)
df_rated8_movie_genres = df_rated8_movie[genres].sum().sort_values(ascending=False)
df_rated8_ova_genres = df_rated8_ova[genres].sum().sort_values(ascending=False)
df_rated8_special_genres = df_rated8_special[genres].sum().sort_values(ascending=False)
df_rated8_genre_count = pd.DataFrame({'TV': df_rated8_tv_genres, 'Movie': df_rated8_movie_genres, 'OVA': df_rated8_ova_genres, 'Special': df_rated8_special_genres})
df_rated8_genre_count
fig = plt.figure(figsize=(9,7))
sns.heatmap(df_rated8_genre_count, vmax=14, cmap="Blues", annot=True)
plt.show()
df_members = df[['Title', 'Rating', 'Studio', 'Members'] + genres]
df_members = df_members[df_members['Members'] >= 100000]
df_members.sort_values(by='Members', ascending=False).head(10)
df_members_rated8 = df_members[df_members['Rating'] > 8]
df_members_rated8.sort_values(by='Members', ascending=False).head(10)
df_members_rated8_genres = df_members_rated8[genres].sum().sort_values(ascending=False)
df_members_rated8_genres = df_members_rated8_genres[df_members_rated8_genres>5]
fig = plt.figure(figsize=(10,6))
ax = sns.barplot(x=df_members_rated8_genres.values, y=df_members_rated8_genres.index)
ax.set(ylabel='theme')
plt.show()
df_studio = df['Studio'].value_counts()
df_studio = df_studio[df_studio > 10]
df_studio.drop(['Detective', 'Adult Cast'], inplace=True)
fig = plt.figure(figsize=(10, 6))
sns.barplot(x=df_studio.values, y=df_studio.index)
plt.show() | code |
105211362/cell_31 | [
"text_html_output_1.png"
] | import pandas as pd
filepath = '../input/mysteryanimemanga/myanimelist-anime-mystery-detective-cleaned.csv'
df = pd.read_csv(filepath)
df.columns
genres = ['Gourmet', 'Sports', 'Adventure', 'Avant Garde', 'Supernatural', 'Suspense', 'Slice of Life', 'Sci-Fi', 'Horror', 'Comedy', 'Drama', 'Fantasy', 'Action']
df_genres = df[genres].sum()
df_rated8_tv = df[(df['Rating'] > 8) & (df['Type'] == 'TV')]
df_rated8_tv[['Title', 'Rating']].sort_values(by='Rating', ascending=False)
df_rated8_movie = df[(df['Rating'] > 8) & (df['Type'] == 'Movie')]
df_rated8_movie[['Title', 'Rating']].sort_values(by='Rating', ascending=False)
df_rated8_ova = df[(df['Rating'] > 8) & (df['Type'] == 'OVA')]
df_rated8_ova[['Title', 'Rating']].sort_values(by='Rating', ascending=False)
df_rated8_special = df[(df['Rating'] > 8) & (df['Type'] == 'Special')]
df_rated8_special[['Title', 'Rating']].sort_values(by='Rating', ascending=False)
df_rated8_tv_genres = df_rated8_tv[genres].sum().sort_values(ascending=False)
df_rated8_movie_genres = df_rated8_movie[genres].sum().sort_values(ascending=False)
df_rated8_ova_genres = df_rated8_ova[genres].sum().sort_values(ascending=False)
df_rated8_special_genres = df_rated8_special[genres].sum().sort_values(ascending=False)
df_rated8_genre_count = pd.DataFrame({'TV': df_rated8_tv_genres, 'Movie': df_rated8_movie_genres, 'OVA': df_rated8_ova_genres, 'Special': df_rated8_special_genres})
df_rated8_genre_count | code |
105211362/cell_27 | [
"text_plain_output_1.png"
] | import pandas as pd
filepath = '../input/mysteryanimemanga/myanimelist-anime-mystery-detective-cleaned.csv'
df = pd.read_csv(filepath)
df.columns
df_rated8_ova = df[(df['Rating'] > 8) & (df['Type'] == 'OVA')]
df_rated8_ova[['Title', 'Rating']].sort_values(by='Rating', ascending=False) | code |
105211362/cell_37 | [
"text_html_output_1.png"
] | import pandas as pd
filepath = '../input/mysteryanimemanga/myanimelist-anime-mystery-detective-cleaned.csv'
df = pd.read_csv(filepath)
df.columns
genres = ['Gourmet', 'Sports', 'Adventure', 'Avant Garde', 'Supernatural', 'Suspense', 'Slice of Life', 'Sci-Fi', 'Horror', 'Comedy', 'Drama', 'Fantasy', 'Action']
df_genres = df[genres].sum()
df_members = df[['Title', 'Rating', 'Studio', 'Members'] + genres]
df_members = df_members[df_members['Members'] >= 100000]
df_members.sort_values(by='Members', ascending=False).head(10)
df_members_rated8 = df_members[df_members['Rating'] > 8]
df_members_rated8.sort_values(by='Members', ascending=False).head(10) | code |
105211362/cell_5 | [
"image_output_1.png"
] | import pandas as pd
filepath = '../input/mysteryanimemanga/myanimelist-anime-mystery-detective-cleaned.csv'
df = pd.read_csv(filepath)
df.columns | code |
105211362/cell_36 | [
"text_html_output_1.png"
] | import pandas as pd
filepath = '../input/mysteryanimemanga/myanimelist-anime-mystery-detective-cleaned.csv'
df = pd.read_csv(filepath)
df.columns
genres = ['Gourmet', 'Sports', 'Adventure', 'Avant Garde', 'Supernatural', 'Suspense', 'Slice of Life', 'Sci-Fi', 'Horror', 'Comedy', 'Drama', 'Fantasy', 'Action']
df_genres = df[genres].sum()
df_members = df[['Title', 'Rating', 'Studio', 'Members'] + genres]
df_members = df_members[df_members['Members'] >= 100000]
df_members.sort_values(by='Members', ascending=False).head(10) | code |
17118075/cell_21 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/heart.csv')
df.shape
df.target.value_counts()
dataset = pd.get_dummies(df, columns=['sex', 'cp', 'fbs', 'restecg', 'exang', 'slope', 'ca', 'thal'])
X = dataset.drop('target', axis=1)
y = df['target']
cross_val_score(KNeighborsClassifier(n_neighbors=15), X, y) | code |
17118075/cell_9 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/heart.csv')
df.shape
df.target.value_counts() | code |
17118075/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/heart.csv')
df.head() | code |
17118075/cell_34 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(X_train, y_train)
pred = lr.predict(X_test)
pred
lr.score(X_test, y_test) | code |