path: stringlengths 13 to 17
screenshot_names: sequencelengths 1 to 873
code: stringlengths 0 to 40.4k
cell_type: stringclasses (1 value)
17123567/cell_10
[ "text_plain_output_1.png" ]
from google.cloud import bigquery

client = bigquery.Client()
dataset_ref = client.dataset('hacker_news', project='bigquery-public-data')
dataset = client.get_dataset(dataset_ref)
tables = list(client.list_tables(dataset))
table_ref = dataset_ref.table('full')
table = client.get_table(table_ref)
table.schema
client.list_rows(table, max_results=5).to_dataframe()
code
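A natural follow-up to listing rows is running SQL against the same table. The sketch below is illustrative, not part of the original cell: the query text and the 1 GB scan cap are assumptions, though QueryJobConfig and client.query are standard google-cloud-bigquery API.

# Hedged sketch: a capped query against the same public table.
# The query string and byte limit are illustrative assumptions.
from google.cloud import bigquery

client = bigquery.Client()
query = """
    SELECT type, COUNT(1) AS n
    FROM `bigquery-public-data.hacker_news.full`
    GROUP BY type
"""
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=10**9)  # ~1 GB scan cap
type_counts = client.query(query, job_config=safe_config).to_dataframe()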
130010025/cell_11
[ "text_plain_output_5.png", "text_plain_output_4.png", "text_plain_output_6.png", "text_plain_output_3.png", "text_plain_output_2.png", "text_plain_output_1.png" ]
from lightgbm import LGBMClassifier
from sklearn import metrics
from sklearn import model_selection
from sklearn import preprocessing
import numpy as np
import os
import pandas as pd

pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)

class CFG:
    HOME_DIR = '/kaggle/input/icr-identify-age-related-conditions'
    SPLITS = 5
    SEED = 2023
    boosting_type = 'gbdt'
    ITERATION = 1000
    BOOSTING_TYPE = 'dart'
    lgb_params = {'objective': 'binary', 'metric': None, 'boosting': BOOSTING_TYPE, 'learning_rate': 0.005, 'num_leaves': 5, 'feature_fraction': 0.5, 'bagging_fraction': 0.8, 'lambda_l1': 2, 'lambda_l2': 4, 'n_jobs': -1, 'seed': SEED}
    xgb_params = {'objective': 'binary:logistic', 'eval_metric': 'logloss', 'learning_rate': 0.005, 'max_depth': 4, 'colsample_bytree': 0.5, 'subsample': 0.8, 'eta': 0.03, 'gamma': 1.5, 'random_state': SEED}
    cat_params = {'learning_rate': 0.005, 'iterations': ITERATION, 'depth': 4, 'colsample_bylevel': 0.5, 'subsample': 0.8, 'l2_leaf_reg': 3, 'random_seed': SEED, 'auto_class_weights': 'Balanced'}

cfg = CFG()
train_data = pd.read_csv(os.path.join(cfg.HOME_DIR, 'train.csv'))
test_data = pd.read_csv(os.path.join(cfg.HOME_DIR, 'test.csv'))
greeks_data = pd.read_csv(os.path.join(cfg.HOME_DIR, 'greeks.csv'))
sample_data = pd.read_csv(os.path.join(cfg.HOME_DIR, 'sample_submission.csv'))

def rename_column(df):
    # Strip the trailing spaces present in a few raw column names.
    df = df.rename(columns={'BD ': 'BD', 'CD ': 'CD', 'CW ': 'CW', 'FD ': 'FD'})
    return df

def scale_features(train_df, test_df):
    scaler = preprocessing.StandardScaler()
    train_df[FEATURES] = scaler.fit_transform(train_df[FEATURES])
    test_df[FEATURES] = scaler.transform(test_df[FEATURES])
    return (train_df, test_df)

ej_mapper = {'A': 1, 'B': 0}
train_data['EJ'] = train_data.EJ.map(ej_mapper)
test_data['EJ'] = test_data.EJ.map(ej_mapper)
train_data = rename_column(train_data)
test_data = rename_column(test_data)

IDENTIFIER = 'Id'
FEATURES = ['AB', 'AF', 'AH', 'AM', 'AR', 'AX', 'AY', 'AZ', 'BC', 'BD', 'BN', 'BP', 'BQ', 'BR', 'BZ', 'CB', 'CC', 'CD', 'CF', 'CH', 'CL', 'CR', 'CS', 'CU', 'CW', 'DA', 'DE', 'DF', 'DH', 'DI', 'DL', 'DN', 'DU', 'DV', 'DY', 'EB', 'EE', 'EG', 'EH', 'EJ', 'EL', 'EP', 'EU', 'FC', 'FD', 'FE', 'FI', 'FL', 'FR', 'FS', 'GB', 'GE', 'GF', 'GH', 'GI', 'GL']
TARGET = 'Class'

skf = model_selection.StratifiedKFold(n_splits=cfg.SPLITS, shuffle=True, random_state=cfg.SEED)
train_data = train_data.sample(frac=1)
train_data['kfold'] = -99
train_data = train_data.reset_index(drop=True)
for fold, (tidx, vidx) in enumerate(skf.split(train_data[FEATURES], train_data[TARGET])):
    train_data.loc[vidx, 'kfold'] = fold

def train_classifier(train_data, test_data, model_, fold):
    df_train = train_data.query('kfold != @fold').reset_index(drop=True)
    df_valid = train_data.query('kfold == @fold').reset_index(drop=True)
    t_id = df_train[IDENTIFIER]
    X_train = df_train[FEATURES]
    y_train = df_train[TARGET]
    v_id = df_valid[IDENTIFIER]
    X_valid = df_valid[FEATURES]
    y_valid = df_valid[TARGET]
    v_test = test_data[IDENTIFIER]
    X_test = test_data[FEATURES]
    model_.fit(X_train, y_train)
    v_pred = model_.predict_proba(X_valid)[:, 1]
    test_pred = model_.predict_proba(X_test)[:, 1]
    score = metrics.log_loss(y_valid, v_pred)
    return (v_id, y_valid, v_pred, np.ones_like(v_pred, dtype='int8') * fold, test_pred)

model_0 = LGBMClassifier(**cfg.lgb_params)
oof_id = []
oof_tar = []
oof_pred = []
oof_fold = []
oof_sub = []
for fold in range(0, 5):
    v_id, y_valid, v_pred, f, test_pred = train_classifier(train_data, test_data, model_0, fold)
    oof_id.append(v_id)
    oof_tar.append(y_valid)
    oof_pred.append(v_pred)
    oof_fold.append(f)
    oof_sub.append(test_pred)

oof = np.concatenate(oof_pred)
true = np.concatenate(oof_tar)
names = np.concatenate(oof_id).reshape(-1)
folds = np.concatenate(oof_fold)
log_loss = metrics.log_loss(true, oof)
print('Overall OOF Log Loss = %.3f' % log_loss)  # no test-time augmentation is applied in this cell
df_oof = pd.DataFrame(dict(id=names, Class=true, pred=oof, fold=folds))
df_oof.to_csv('oof_lgb.csv', index=False)
print(df_oof.head())
test_ids = test_data[IDENTIFIER].values.reshape(-1)
test_score = np.mean(np.column_stack(oof_sub), axis=1)
sub_oof = pd.DataFrame(dict(id=test_ids, Class=test_score))
sub_oof.to_csv('sub_lgb.csv', index=False)
print(sub_oof.head())
code
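The ICR competition scored submissions with a class-balanced log loss rather than the plain log loss printed above. A minimal numpy sketch of that metric; the function name and the clipping epsilon are assumptions, not part of the original notebook.

# Hedged sketch of a balanced log loss: each class contributes equally,
# regardless of class frequency. Name and epsilon are illustrative.
import numpy as np

def balanced_log_loss(y_true, p_pred, eps=1e-15):
    y_true = np.asarray(y_true)
    p = np.clip(np.asarray(p_pred), eps, 1 - eps)
    loss_0 = -np.mean(np.log(1 - p[y_true == 0]))  # average over class-0 rows
    loss_1 = -np.mean(np.log(p[y_true == 1]))      # average over class-1 rows
    return (loss_0 + loss_1) / 2

# e.g. balanced_log_loss(true, oof)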
34120476/cell_21
[ "text_plain_output_1.png" ]
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D, Conv2D, MaxPooling2D, BatchNormalization
from keras.models import Sequential, Model
from keras.preprocessing import image
import cv2
import matplotlib.pyplot as plt
import os
import numpy as np
import pandas as pd

imagePaths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        if filename[-3:] == 'png':
            imagePaths.append(os.path.join(dirname, filename))
image = cv2.imread(imagePaths[0])

X_train.shape
X_cv.shape
X_test.shape

img_width = img_height = 224
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
    X_train = X_train.reshape(X_train.shape[0], 3, img_width, img_height)
    X_cv = X_cv.reshape(X_cv.shape[0], 3, img_width, img_height)
    X_test = X_test.reshape(X_test.shape[0], 3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
    X_train = X_train.reshape(X_train.shape[0], img_width, img_height, 3)
    X_cv = X_cv.reshape(X_cv.shape[0], img_width, img_height, 3)
    X_test = X_test.reshape(X_test.shape[0], img_width, img_height, 3)

def plt_dynamic(x, vy, ty, ax, colors=['b']):
    pass

epoch = 25
batch = 32
num_classes = 3
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape, kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_initializer='he_normal'))
model.add(Dropout(0.25))
model.add(Dense(256, activation='relu', kernel_initializer='he_normal'))
model.add(Dropout(0.4))
model.add(Dense(128, activation='relu', kernel_initializer='he_normal'))
model.add(Dropout(0.3))
model.add(BatchNormalization())
model.add(Dense(64, activation='relu', kernel_initializer='he_normal'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax', kernel_initializer='glorot_normal'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
his = model.fit(X_train, y_train, batch_size=batch, epochs=epoch, verbose=1, validation_data=(X_cv, y_cv))

fig, ax = plt.subplots(1, 1)
ax.set_xlabel('Epochs')
ax.set_ylabel('Binary Cross Entropy')
x = list(range(1, epoch + 1))
vy = his.history['val_loss']
ty = his.history['loss']
plt_dynamic(x, vy, ty, ax)
code
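plt_dynamic is left as a pass stub in these cells, so the final loss plot renders empty axes. A plausible body inferred from the call signature plt_dynamic(x, vy, ty, ax); the labels and colors are assumptions.

# Hedged sketch: plot validation and training loss on the supplied axes.
# Inferred from the call plt_dynamic(x, vy, ty, ax); labels are assumed.
def plt_dynamic(x, vy, ty, ax, colors=['b']):
    ax.plot(x, vy, 'b', label='Validation Loss')
    ax.plot(x, ty, 'r', label='Train Loss')
    ax.legend()
    ax.grid(True)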
34120476/cell_13
[ "text_plain_output_1.png", "image_output_1.png" ]
X_cv.shape
code
34120476/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
from keras.preprocessing import image
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
import cv2
import matplotlib.pyplot as plt
import os
import numpy as np
import pandas as pd

imagePaths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        if filename[-3:] == 'png':
            imagePaths.append(os.path.join(dirname, filename))
image = cv2.imread(imagePaths[0])
image.shape

Data = []
Target = []
resize = 224
for imagePath in tqdm(imagePaths):
    label = imagePath.split(os.path.sep)[-2]
    image = cv2.imread(imagePath)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (resize, resize)) / 255
    Data.append(image)
    Target.append(label)

encoder = LabelEncoder()
encoder.fit(Target)
encoded_Target = encoder.transform(Target)
encoded_Target = np_utils.to_categorical(encoded_Target)
encoded_Target[0]
code
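Later cells reference X_train, X_cv and X_test, but the splitting cell is not captured in this dump. A plausible reconstruction using the train_test_split already imported in cell_2; the 80/10/10 ratios and the seed are assumptions.

# Hedged sketch of the missing split cells: two successive splits give
# train / cross-validation / test sets. The 80/10/10 ratios are assumed.
from sklearn.model_selection import train_test_split
import numpy as np

X = np.array(Data)
y = encoded_Target
X_train, X_rest, y_train, y_rest = train_test_split(X, y, test_size=0.2, random_state=42)
X_cv, X_test, y_cv, y_test = train_test_split(X_rest, y_rest, test_size=0.5, random_state=42)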
34120476/cell_25
[ "text_plain_output_1.png" ]
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D, Conv2D, MaxPooling2D, BatchNormalization
from keras.models import Sequential, Model
from keras.preprocessing import image
from keras.utils import np_utils
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
import cv2
import matplotlib.pyplot as plt
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np

imagePaths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        if filename[-3:] == 'png':
            imagePaths.append(os.path.join(dirname, filename))
image = cv2.imread(imagePaths[0])
image.shape

Data = []
Target = []
resize = 224
for imagePath in tqdm(imagePaths):
    label = imagePath.split(os.path.sep)[-2]
    image = cv2.imread(imagePath)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (resize, resize)) / 255
    Data.append(image)
    Target.append(label)

df = pd.DataFrame(Target, columns=['Labels'])
encoder = LabelEncoder()
encoder.fit(Target)
encoded_Target = encoder.transform(Target)
encoded_Target = np_utils.to_categorical(encoded_Target)
encoder.classes_

X_train.shape
X_cv.shape
X_test.shape

img_width = img_height = 224
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
    X_train = X_train.reshape(X_train.shape[0], 3, img_width, img_height)
    X_cv = X_cv.reshape(X_cv.shape[0], 3, img_width, img_height)
    X_test = X_test.reshape(X_test.shape[0], 3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
    X_train = X_train.reshape(X_train.shape[0], img_width, img_height, 3)
    X_cv = X_cv.reshape(X_cv.shape[0], img_width, img_height, 3)
    X_test = X_test.reshape(X_test.shape[0], img_width, img_height, 3)

def plt_dynamic(x, vy, ty, ax, colors=['b']):
    pass

epoch = 25
batch = 32
num_classes = 3
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape, kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_initializer='he_normal'))
model.add(Dropout(0.25))
model.add(Dense(256, activation='relu', kernel_initializer='he_normal'))
model.add(Dropout(0.4))
model.add(Dense(128, activation='relu', kernel_initializer='he_normal'))
model.add(Dropout(0.3))
model.add(BatchNormalization())
model.add(Dense(64, activation='relu', kernel_initializer='he_normal'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax', kernel_initializer='glorot_normal'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
his = model.fit(X_train, y_train, batch_size=batch, epochs=epoch, verbose=1, validation_data=(X_cv, y_cv))

# Plotting Train and Validation Loss
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('Epochs')
ax.set_ylabel('Binary Cross Entropy')
x = list(range(1, epoch + 1))
vy = his.history['val_loss']
ty = his.history['loss']
plt_dynamic(x, vy, ty, ax)

score = model.evaluate(X_test, y_test, verbose=0)
y_pred = model.predict(X_test).round()
encoder.classes_
x = confusion_matrix(y_test.argmax(axis=1), y_pred.argmax(axis=1))
Cm_df = pd.DataFrame(x, index=encoder.classes_, columns=encoder.classes_)
sns.set(font_scale=1.5, color_codes=True, palette='deep')
sns.heatmap(Cm_df, annot=True, annot_kws={'size': 16}, fmt='d', cmap='YlGnBu')
plt.ylabel('True Label')
plt.xlabel('Predicted Label')
plt.title('Confusion Matrix')
code
34120476/cell_4
[ "text_plain_output_1.png" ]
from keras.preprocessing import image
import cv2
import matplotlib.pyplot as plt
import os
import numpy as np
import pandas as pd

imagePaths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        if filename[-3:] == 'png':
            imagePaths.append(os.path.join(dirname, filename))
image = cv2.imread(imagePaths[0])
image.shape
code
34120476/cell_20
[ "text_plain_output_1.png" ]
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D, Conv2D, MaxPooling2D, BatchNormalization
from keras.models import Sequential, Model

X_train.shape
X_cv.shape
X_test.shape

img_width = img_height = 224
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
    X_train = X_train.reshape(X_train.shape[0], 3, img_width, img_height)
    X_cv = X_cv.reshape(X_cv.shape[0], 3, img_width, img_height)
    X_test = X_test.reshape(X_test.shape[0], 3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
    X_train = X_train.reshape(X_train.shape[0], img_width, img_height, 3)
    X_cv = X_cv.reshape(X_cv.shape[0], img_width, img_height, 3)
    X_test = X_test.reshape(X_test.shape[0], img_width, img_height, 3)

epoch = 25
batch = 32
num_classes = 3
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape, kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_initializer='he_normal'))
model.add(Dropout(0.25))
model.add(Dense(256, activation='relu', kernel_initializer='he_normal'))
model.add(Dropout(0.4))
model.add(Dense(128, activation='relu', kernel_initializer='he_normal'))
model.add(Dropout(0.3))
model.add(BatchNormalization())
model.add(Dense(64, activation='relu', kernel_initializer='he_normal'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax', kernel_initializer='glorot_normal'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
his = model.fit(X_train, y_train, batch_size=batch, epochs=epoch, verbose=1, validation_data=(X_cv, y_cv))
code
34120476/cell_6
[ "text_plain_output_1.png" ]
from keras.preprocessing import image
from tqdm import tqdm
import cv2
import matplotlib.pyplot as plt
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import numpy as np

imagePaths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        if filename[-3:] == 'png':
            imagePaths.append(os.path.join(dirname, filename))
image = cv2.imread(imagePaths[0])
image.shape

Data = []
Target = []
resize = 224
for imagePath in tqdm(imagePaths):
    label = imagePath.split(os.path.sep)[-2]
    image = cv2.imread(imagePath)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (resize, resize)) / 255
    Data.append(image)
    Target.append(label)

df = pd.DataFrame(Target, columns=['Labels'])
sns.countplot(df['Labels'])
code
34120476/cell_2
[ "text_plain_output_1.png" ]
import cv2
import os
import keras
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from sklearn.metrics import confusion_matrix
from keras.preprocessing import image
from keras import models
from keras import layers
from keras import optimizers
from keras import applications
from keras.optimizers import Adam
from keras.models import Sequential, Model
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D, Conv2D, MaxPooling2D, BatchNormalization
from keras import backend as k
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard, EarlyStopping
from tqdm import tqdm
from keras.optimizers import SGD
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
from keras import backend as K
code
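EarlyStopping and ModelCheckpoint are imported here but never used in the captured cells. A hedged sketch of how they would typically be wired into the fit calls below; the monitor, patience and filename are assumptions.

# Hedged sketch: callbacks imported above, attached to a hypothetical fit call.
# monitor/patience/filepath values are illustrative assumptions.
from keras.callbacks import EarlyStopping, ModelCheckpoint

callbacks = [
    EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True),
    ModelCheckpoint('best_model.h5', monitor='val_loss', save_best_only=True),
]
# model.fit(X_train, y_train, epochs=25, validation_data=(X_cv, y_cv), callbacks=callbacks)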
34120476/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D, Conv2D, MaxPooling2D, BatchNormalization
from keras.models import Sequential, Model

X_train.shape
X_cv.shape
X_test.shape

img_width = img_height = 224
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
    X_train = X_train.reshape(X_train.shape[0], 3, img_width, img_height)
    X_cv = X_cv.reshape(X_cv.shape[0], 3, img_width, img_height)
    X_test = X_test.reshape(X_test.shape[0], 3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
    X_train = X_train.reshape(X_train.shape[0], img_width, img_height, 3)
    X_cv = X_cv.reshape(X_cv.shape[0], img_width, img_height, 3)
    X_test = X_test.reshape(X_test.shape[0], img_width, img_height, 3)

epoch = 25
batch = 32
num_classes = 3
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape, kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_initializer='he_normal'))
model.add(Dropout(0.25))
model.add(Dense(256, activation='relu', kernel_initializer='he_normal'))
model.add(Dropout(0.4))
model.add(Dense(128, activation='relu', kernel_initializer='he_normal'))
model.add(Dropout(0.3))
model.add(BatchNormalization())
model.add(Dense(64, activation='relu', kernel_initializer='he_normal'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax', kernel_initializer='glorot_normal'))
model.summary()
code
34120476/cell_8
[ "text_plain_output_1.png" ]
from keras.preprocessing import image
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
import cv2
import matplotlib.pyplot as plt
import os
import numpy as np
import pandas as pd

imagePaths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        if filename[-3:] == 'png':
            imagePaths.append(os.path.join(dirname, filename))
image = cv2.imread(imagePaths[0])
image.shape

Data = []
Target = []
resize = 224
for imagePath in tqdm(imagePaths):
    label = imagePath.split(os.path.sep)[-2]
    image = cv2.imread(imagePath)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (resize, resize)) / 255
    Data.append(image)
    Target.append(label)

encoder = LabelEncoder()
encoder.fit(Target)
encoded_Target = encoder.transform(Target)
encoded_Target = np_utils.to_categorical(encoded_Target)
encoder.classes_
code
34120476/cell_3
[ "text_plain_output_1.png" ]
from keras.preprocessing import image
import cv2
import matplotlib.pyplot as plt
import os
import numpy as np
import pandas as pd

imagePaths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        if filename[-3:] == 'png':
            imagePaths.append(os.path.join(dirname, filename))
image = cv2.imread(imagePaths[0])
plt.imshow(image)
code
34120476/cell_17
[ "application_vnd.jupyter.stderr_output_1.png" ]
X_train.shape
X_cv.shape
X_test.shape

img_width = img_height = 224
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
    X_train = X_train.reshape(X_train.shape[0], 3, img_width, img_height)
    X_cv = X_cv.reshape(X_cv.shape[0], 3, img_width, img_height)
    X_test = X_test.reshape(X_test.shape[0], 3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
    X_train = X_train.reshape(X_train.shape[0], img_width, img_height, 3)
    X_cv = X_cv.reshape(X_cv.shape[0], img_width, img_height, 3)
    X_test = X_test.reshape(X_test.shape[0], img_width, img_height, 3)

X_train[0]
code
34120476/cell_24
[ "text_plain_output_1.png" ]
from keras.preprocessing import image
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
import cv2
import matplotlib.pyplot as plt
import os
import numpy as np
import pandas as pd

imagePaths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        if filename[-3:] == 'png':
            imagePaths.append(os.path.join(dirname, filename))
image = cv2.imread(imagePaths[0])
image.shape

Data = []
Target = []
resize = 224
for imagePath in tqdm(imagePaths):
    label = imagePath.split(os.path.sep)[-2]
    image = cv2.imread(imagePath)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (resize, resize)) / 255
    Data.append(image)
    Target.append(label)

encoder = LabelEncoder()
encoder.fit(Target)
encoded_Target = encoder.transform(Target)
encoded_Target = np_utils.to_categorical(encoded_Target)
encoder.classes_
encoder.classes_
code
34120476/cell_14
[ "text_plain_output_1.png" ]
X_test.shape
code
34120476/cell_22
[ "text_plain_output_1.png" ]
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D, Conv2D, MaxPooling2D, BatchNormalization
from keras.models import Sequential, Model

X_train.shape
X_cv.shape
X_test.shape

img_width = img_height = 224
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
    X_train = X_train.reshape(X_train.shape[0], 3, img_width, img_height)
    X_cv = X_cv.reshape(X_cv.shape[0], 3, img_width, img_height)
    X_test = X_test.reshape(X_test.shape[0], 3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
    X_train = X_train.reshape(X_train.shape[0], img_width, img_height, 3)
    X_cv = X_cv.reshape(X_cv.shape[0], img_width, img_height, 3)
    X_test = X_test.reshape(X_test.shape[0], img_width, img_height, 3)

epoch = 25
batch = 32
num_classes = 3
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape, kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', kernel_initializer='he_normal'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_initializer='he_normal'))
model.add(Dropout(0.25))
model.add(Dense(256, activation='relu', kernel_initializer='he_normal'))
model.add(Dropout(0.4))
model.add(Dense(128, activation='relu', kernel_initializer='he_normal'))
model.add(Dropout(0.3))
model.add(BatchNormalization())
model.add(Dense(64, activation='relu', kernel_initializer='he_normal'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax', kernel_initializer='glorot_normal'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
his = model.fit(X_train, y_train, batch_size=batch, epochs=epoch, verbose=1, validation_data=(X_cv, y_cv))
score = model.evaluate(X_test, y_test, verbose=0)
print('The test accuracy for the model is %f ' % (score[1] * 100))
code
34120476/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
X_train.shape
code
34120476/cell_5
[ "image_output_1.png" ]
from keras.preprocessing import image
from tqdm import tqdm
import cv2
import matplotlib.pyplot as plt
import os
import numpy as np
import pandas as pd

imagePaths = []
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        if filename[-3:] == 'png':
            imagePaths.append(os.path.join(dirname, filename))
image = cv2.imread(imagePaths[0])
image.shape

Data = []
Target = []
resize = 224
for imagePath in tqdm(imagePaths):
    label = imagePath.split(os.path.sep)[-2]
    image = cv2.imread(imagePath)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (resize, resize)) / 255
    Data.append(image)
    Target.append(label)
code
73093128/cell_21
[ "text_html_output_1.png" ]
from geopandas.tools import geocode
import folium
import numpy as np
import pandas as pd
import geopandas as gpd

agent = 'my_colls_app'

def geo_short(location):
    """
    Take address, cross-street, etc. and return a geocoded point
    at which lat/long can conveniently be accessed. Uses Nominatim.
    """
    pt = geocode(location, provider='nominatim', user_agent=agent)
    return pt.geometry.iloc[0]

def basemap_with_buffer(location, buffer_radius_miles):
    centerpoint = geo_short(location)
    basemap = folium.Map(location=[centerpoint.y, centerpoint.x], tiles='openstreetmap', zoom_start=15)
    buffer_radius_meters = buffer_radius_miles * 5280 / 3.28084  # miles -> feet -> meters
    basemap_buffer = folium.Circle(location=[centerpoint.y, centerpoint.x], radius=buffer_radius_meters).add_to(basemap)
    return basemap

patco_address = '100 Lees Ave, Collingswood, NJ 08108'
patco_base = basemap_with_buffer(patco_address, 0.5)
patco_base
code
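Folium maps render inline only inside a notebook. To inspect the result elsewhere, the map object can be written to standalone HTML; the filename below is an assumption.

# Hedged sketch: persist the interactive map to a standalone HTML file.
patco_base.save('patco_base_map.html')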
73093128/cell_23
[ "text_html_output_1.png" ]
from geopandas.tools import geocode
import folium
import numpy as np
import pandas as pd
import geopandas as gpd
import math

agent = 'my_colls_app'
ramp_path = '../input/rampdatalog/RampDataLog.xlsm'
ramp_df = pd.read_excel(ramp_path, sheet_name='data')
ramp_df.rename(columns={'Cross Street 1': 'CS_1', 'Cross Street 2': 'CS_2'}, inplace=True)
ramp_df.replace(' NE', 'NE', inplace=True)
ramp_df.Notes.fillna('None', inplace=True)

def cs_comb_str(cs1, cs2):
    """
    Take an intersection's cross streets as arguments and return
    a string of the complete location.
    """
    inter = ''
    suffixes = ['Ave', 'Ln', 'Terr']
    if 'btw' in cs1:
        inter = cs1.split('btw')[0] + 'Ave btw ' + cs2
    elif type(cs2) != str:
        inter = cs1
    elif '(' in cs2:
        inter = cs1 + ' Ave and ' + cs2.split()[0] + ' Ave ' + cs2.split()[1]
    else:
        if not any([suf in cs1 for suf in suffixes]):
            cs1 += ' Ave'
        if not any([suf in cs2 for suf in suffixes]) and (not any([landmark in cs2 for landmark in ['alleyway', 'exit']])) and (len(cs2.split()[-1]) > 1):
            cs2 += ' Ave'
        inter = ' and '.join([cs1, cs2])
    return inter

ramp_df['Inter'] = ramp_df.apply(lambda row: cs_comb_str(row['CS_1'], row['CS_2']), axis=1)
cols = ramp_df.columns.tolist()
cols = cols[:3] + cols[-2:] + cols[3:-2]
ramp_df = ramp_df[cols]
ramp_df.to_csv('CollingswoodADA_LatLong_checkpoint.csv', index=False)
ramp_df = pd.read_csv('../input/collingswoodada-clean-latlong/CollingswoodADA_LatLong_checkpoint.csv')

def geo_short(location):
    """
    Take address, cross-street, etc. and return a geocoded point
    at which lat/long can conveniently be accessed. Uses Nominatim.
    """
    pt = geocode(location, provider='nominatim', user_agent=agent)
    return pt.geometry.iloc[0]

def basemap_with_buffer(location, buffer_radius_miles):
    centerpoint = geo_short(location)
    basemap = folium.Map(location=[centerpoint.y, centerpoint.x], tiles='openstreetmap', zoom_start=15)
    buffer_radius_meters = buffer_radius_miles * 5280 / 3.28084  # miles -> feet -> meters
    basemap_buffer = folium.Circle(location=[centerpoint.y, centerpoint.x], radius=buffer_radius_meters).add_to(basemap)
    return basemap

patco_address = '100 Lees Ave, Collingswood, NJ 08108'
patco_base = basemap_with_buffer(patco_address, 0.5)
patco_base

patco_base = basemap_with_buffer(patco_address, 0.5)
for i, location in ramp_df.iterrows():
    if not np.isnan(location.Lat) and (not np.isnan(location.Long)):
        folium.Marker(location=[location.Lat, location.Long], tooltip=location.Inter).add_to(patco_base)
patco_base

from folium.plugins import MarkerCluster
patco_base = basemap_with_buffer(patco_address, 0.5)
marker_cluster = folium.plugins.MarkerCluster()
for i, location in ramp_df.iterrows():
    if not np.isnan(location.Lat) and (not np.isnan(location.Long)):
        marker_cluster.add_child(folium.Marker([location.Lat, location.Long], tooltip=location.Inter))
patco_base.add_child(marker_cluster)
patco_base
code
73093128/cell_1
[ "text_plain_output_1.png" ]
!pip install openpyxl
code
73093128/cell_16
[ "text_html_output_1.png" ]
import pandas as pd
import math

ramp_path = '../input/rampdatalog/RampDataLog.xlsm'
ramp_df = pd.read_excel(ramp_path, sheet_name='data')
ramp_df.rename(columns={'Cross Street 1': 'CS_1', 'Cross Street 2': 'CS_2'}, inplace=True)
ramp_df.replace(' NE', 'NE', inplace=True)
ramp_df.Notes.fillna('None', inplace=True)

def cs_comb_str(cs1, cs2):
    """
    Take an intersection's cross streets as arguments and return
    a string of the complete location.
    """
    inter = ''
    suffixes = ['Ave', 'Ln', 'Terr']
    if 'btw' in cs1:
        inter = cs1.split('btw')[0] + 'Ave btw ' + cs2
    elif type(cs2) != str:
        inter = cs1
    elif '(' in cs2:
        inter = cs1 + ' Ave and ' + cs2.split()[0] + ' Ave ' + cs2.split()[1]
    else:
        if not any([suf in cs1 for suf in suffixes]):
            cs1 += ' Ave'
        if not any([suf in cs2 for suf in suffixes]) and (not any([landmark in cs2 for landmark in ['alleyway', 'exit']])) and (len(cs2.split()[-1]) > 1):
            cs2 += ' Ave'
        inter = ' and '.join([cs1, cs2])
    return inter

ramp_df['Inter'] = ramp_df.apply(lambda row: cs_comb_str(row['CS_1'], row['CS_2']), axis=1)
cols = ramp_df.columns.tolist()
cols = cols[:3] + cols[-2:] + cols[3:-2]
ramp_df = ramp_df[cols]
ramp_df.to_csv('CollingswoodADA_LatLong_checkpoint.csv', index=False)
ramp_df = pd.read_csv('../input/collingswoodada-clean-latlong/CollingswoodADA_LatLong_checkpoint.csv')
ramp_df.head()
code
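To make cs_comb_str's branching concrete, a small illustrative check; the street names are invented, not taken from the ramp data.

# Hedged usage sketch with made-up cross streets:
print(cs_comb_str('Haddon', 'Collings'))    # -> 'Haddon Ave and Collings Ave'
print(cs_comb_str('Haddon', float('nan')))  # -> 'Haddon' (no second cross street)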
73093128/cell_24
[ "text_html_output_1.png" ]
from geopandas.tools import geocode
import folium
import numpy as np
import pandas as pd
import geopandas as gpd
import math

agent = 'my_colls_app'
ramp_path = '../input/rampdatalog/RampDataLog.xlsm'
ramp_df = pd.read_excel(ramp_path, sheet_name='data')
ramp_df.rename(columns={'Cross Street 1': 'CS_1', 'Cross Street 2': 'CS_2'}, inplace=True)
ramp_df.replace(' NE', 'NE', inplace=True)
ramp_df.Notes.fillna('None', inplace=True)

def cs_comb_str(cs1, cs2):
    """
    Take an intersection's cross streets as arguments and return
    a string of the complete location.
    """
    inter = ''
    suffixes = ['Ave', 'Ln', 'Terr']
    if 'btw' in cs1:
        inter = cs1.split('btw')[0] + 'Ave btw ' + cs2
    elif type(cs2) != str:
        inter = cs1
    elif '(' in cs2:
        inter = cs1 + ' Ave and ' + cs2.split()[0] + ' Ave ' + cs2.split()[1]
    else:
        if not any([suf in cs1 for suf in suffixes]):
            cs1 += ' Ave'
        if not any([suf in cs2 for suf in suffixes]) and (not any([landmark in cs2 for landmark in ['alleyway', 'exit']])) and (len(cs2.split()[-1]) > 1):
            cs2 += ' Ave'
        inter = ' and '.join([cs1, cs2])
    return inter

ramp_df['Inter'] = ramp_df.apply(lambda row: cs_comb_str(row['CS_1'], row['CS_2']), axis=1)
cols = ramp_df.columns.tolist()
cols = cols[:3] + cols[-2:] + cols[3:-2]
ramp_df = ramp_df[cols]
ramp_df.to_csv('CollingswoodADA_LatLong_checkpoint.csv', index=False)
ramp_df = pd.read_csv('../input/collingswoodada-clean-latlong/CollingswoodADA_LatLong_checkpoint.csv')

def geo_short(location):
    """
    Take address, cross-street, etc. and return a geocoded point
    at which lat/long can conveniently be accessed. Uses Nominatim.
    """
    pt = geocode(location, provider='nominatim', user_agent=agent)
    return pt.geometry.iloc[0]

def basemap_with_buffer(location, buffer_radius_miles):
    centerpoint = geo_short(location)
    basemap = folium.Map(location=[centerpoint.y, centerpoint.x], tiles='openstreetmap', zoom_start=15)
    buffer_radius_meters = buffer_radius_miles * 5280 / 3.28084  # miles -> feet -> meters
    basemap_buffer = folium.Circle(location=[centerpoint.y, centerpoint.x], radius=buffer_radius_meters).add_to(basemap)
    return basemap

patco_address = '100 Lees Ave, Collingswood, NJ 08108'
patco_base = basemap_with_buffer(patco_address, 0.5)
patco_base

patco_base = basemap_with_buffer(patco_address, 0.5)
for i, location in ramp_df.iterrows():
    if not np.isnan(location.Lat) and (not np.isnan(location.Long)):
        folium.Marker(location=[location.Lat, location.Long], tooltip=location.Inter).add_to(patco_base)
patco_base

from folium.plugins import MarkerCluster
patco_base = basemap_with_buffer(patco_address, 0.5)
marker_cluster = folium.plugins.MarkerCluster()
for i, location in ramp_df.iterrows():
    if not np.isnan(location.Lat) and (not np.isnan(location.Long)):
        marker_cluster.add_child(folium.Marker([location.Lat, location.Long], tooltip=location.Inter))
patco_base.add_child(marker_cluster)
patco_base

patco_base = basemap_with_buffer(patco_address, 0.5)
for i, location in ramp_df.iterrows():
    if not np.isnan(location.Lat) and (not np.isnan(location.Long)):
        color = 'green' if location.Compliance == 'Y' else 'red'
        folium.Circle(location=[location.Lat, location.Long], radius=10, color=color, tooltip=location.Inter).add_to(patco_base)
patco_base
code
73093128/cell_22
[ "text_html_output_1.png" ]
from geopandas.tools import geocode
import folium
import numpy as np
import pandas as pd
import geopandas as gpd
import math

agent = 'my_colls_app'
ramp_path = '../input/rampdatalog/RampDataLog.xlsm'
ramp_df = pd.read_excel(ramp_path, sheet_name='data')
ramp_df.rename(columns={'Cross Street 1': 'CS_1', 'Cross Street 2': 'CS_2'}, inplace=True)
ramp_df.replace(' NE', 'NE', inplace=True)
ramp_df.Notes.fillna('None', inplace=True)

def cs_comb_str(cs1, cs2):
    """
    Take an intersection's cross streets as arguments and return
    a string of the complete location.
    """
    inter = ''
    suffixes = ['Ave', 'Ln', 'Terr']
    if 'btw' in cs1:
        inter = cs1.split('btw')[0] + 'Ave btw ' + cs2
    elif type(cs2) != str:
        inter = cs1
    elif '(' in cs2:
        inter = cs1 + ' Ave and ' + cs2.split()[0] + ' Ave ' + cs2.split()[1]
    else:
        if not any([suf in cs1 for suf in suffixes]):
            cs1 += ' Ave'
        if not any([suf in cs2 for suf in suffixes]) and (not any([landmark in cs2 for landmark in ['alleyway', 'exit']])) and (len(cs2.split()[-1]) > 1):
            cs2 += ' Ave'
        inter = ' and '.join([cs1, cs2])
    return inter

ramp_df['Inter'] = ramp_df.apply(lambda row: cs_comb_str(row['CS_1'], row['CS_2']), axis=1)
cols = ramp_df.columns.tolist()
cols = cols[:3] + cols[-2:] + cols[3:-2]
ramp_df = ramp_df[cols]
ramp_df.to_csv('CollingswoodADA_LatLong_checkpoint.csv', index=False)
ramp_df = pd.read_csv('../input/collingswoodada-clean-latlong/CollingswoodADA_LatLong_checkpoint.csv')

def geo_short(location):
    """
    Take address, cross-street, etc. and return a geocoded point
    at which lat/long can conveniently be accessed. Uses Nominatim.
    """
    pt = geocode(location, provider='nominatim', user_agent=agent)
    return pt.geometry.iloc[0]

def basemap_with_buffer(location, buffer_radius_miles):
    centerpoint = geo_short(location)
    basemap = folium.Map(location=[centerpoint.y, centerpoint.x], tiles='openstreetmap', zoom_start=15)
    buffer_radius_meters = buffer_radius_miles * 5280 / 3.28084  # miles -> feet -> meters
    basemap_buffer = folium.Circle(location=[centerpoint.y, centerpoint.x], radius=buffer_radius_meters).add_to(basemap)
    return basemap

patco_address = '100 Lees Ave, Collingswood, NJ 08108'
patco_base = basemap_with_buffer(patco_address, 0.5)
patco_base

patco_base = basemap_with_buffer(patco_address, 0.5)
for i, location in ramp_df.iterrows():
    if not np.isnan(location.Lat) and (not np.isnan(location.Long)):
        folium.Marker(location=[location.Lat, location.Long], tooltip=location.Inter).add_to(patco_base)
patco_base
code
18161386/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

print(os.listdir('../input'))
code
18161386/cell_3
[ "image_output_11.png", "text_plain_output_100.png", "image_output_98.png", "text_plain_output_201.png", "text_plain_output_84.png", "text_plain_output_56.png", "text_plain_output_158.png", "image_output_74.png", "text_plain_output_181.png", "text_plain_output_137.png", "text_plain_output_139.png", "text_plain_output_35.png", "text_plain_output_130.png", "image_output_82.png", "text_plain_output_117.png", "image_output_24.png", "text_plain_output_98.png", "text_plain_output_195.png", "text_plain_output_43.png", "image_output_46.png", "text_plain_output_187.png", "text_plain_output_78.png", "text_plain_output_143.png", "text_plain_output_106.png", "text_plain_output_37.png", "text_plain_output_138.png", "text_plain_output_192.png", "image_output_85.png", "text_plain_output_184.png", "text_plain_output_172.png", "text_plain_output_147.png", "text_plain_output_90.png", "text_plain_output_79.png", "image_output_25.png", "text_plain_output_5.png", "text_plain_output_75.png", "text_plain_output_48.png", "text_plain_output_116.png", "image_output_77.png", "text_plain_output_128.png", "text_plain_output_30.png", "text_plain_output_167.png", "text_plain_output_73.png", "text_plain_output_126.png", "image_output_47.png", "text_plain_output_115.png", "text_plain_output_15.png", "text_plain_output_133.png", "text_plain_output_198.png", "text_plain_output_178.png", "image_output_78.png", "text_plain_output_154.png", "image_output_17.png", "text_plain_output_114.png", "text_plain_output_157.png", "image_output_30.png", "text_plain_output_70.png", "text_plain_output_9.png", "text_plain_output_44.png", "image_output_73.png", "text_plain_output_119.png", "text_plain_output_86.png", "image_output_72.png", "text_plain_output_118.png", "image_output_14.png", "image_output_59.png", "image_output_39.png", "image_output_97.png", "text_plain_output_131.png", "text_plain_output_40.png", "text_plain_output_123.png", "text_plain_output_74.png", "image_output_28.png", "text_plain_output_190.png", "text_plain_output_31.png", "text_plain_output_20.png", "image_output_86.png", "text_plain_output_102.png", "text_plain_output_111.png", "image_output_84.png", "image_output_81.png", "text_plain_output_101.png", "text_plain_output_169.png", "text_plain_output_144.png", "text_plain_output_161.png", "image_output_23.png", "text_plain_output_132.png", "text_plain_output_60.png", "image_output_34.png", "image_output_64.png", "text_plain_output_155.png", "text_plain_output_68.png", "text_plain_output_4.png", "text_plain_output_65.png", "text_plain_output_64.png", "text_plain_output_189.png", "text_plain_output_13.png", "text_plain_output_200.png", "text_plain_output_107.png", "image_output_13.png", "text_plain_output_52.png", "text_plain_output_66.png", "text_plain_output_45.png", "image_output_40.png", "image_output_5.png", "image_output_48.png", "image_output_68.png", "text_plain_output_171.png", "image_output_75.png", "text_plain_output_14.png", "image_output_18.png", "text_plain_output_159.png", "text_plain_output_32.png", "text_plain_output_88.png", "text_plain_output_29.png", "image_output_58.png", "text_plain_output_140.png", "text_plain_output_129.png", "text_plain_output_160.png", "text_plain_output_58.png", "image_output_92.png", "image_output_21.png", "text_plain_output_49.png", "text_plain_output_63.png", "text_plain_output_27.png", "text_plain_output_177.png", "image_output_52.png", "text_plain_output_76.png", "text_plain_output_108.png", "text_plain_output_54.png", "text_plain_output_142.png", 
"text_plain_output_10.png", "image_output_60.png", "text_plain_output_6.png", "image_output_7.png", "text_plain_output_153.png", "text_plain_output_170.png", "text_plain_output_92.png", "text_plain_output_57.png", "text_plain_output_120.png", "image_output_62.png", "image_output_96.png", "text_plain_output_24.png", "text_plain_output_21.png", "text_plain_output_104.png", "image_output_56.png", "image_output_31.png", "text_plain_output_47.png", "text_plain_output_121.png", "text_plain_output_25.png", "text_plain_output_134.png", "text_plain_output_77.png", "image_output_65.png", "image_output_20.png", "text_plain_output_18.png", "text_plain_output_183.png", "image_output_69.png", "text_plain_output_149.png", "text_plain_output_50.png", "text_plain_output_36.png", "image_output_32.png", "image_output_53.png", "text_plain_output_96.png", "text_plain_output_87.png", "text_plain_output_3.png", "image_output_4.png", "text_plain_output_180.png", "text_plain_output_141.png", "text_plain_output_112.png", "text_plain_output_152.png", "text_plain_output_191.png", "image_output_51.png", "text_plain_output_113.png", "text_plain_output_22.png", "text_plain_output_188.png", "text_plain_output_81.png", "text_plain_output_69.png", "image_output_83.png", "text_plain_output_175.png", "text_plain_output_165.png", "text_plain_output_146.png", "text_plain_output_145.png", "text_plain_output_125.png", "image_output_42.png", "image_output_35.png", "text_plain_output_197.png", "image_output_90.png", "text_plain_output_38.png", "image_output_41.png", "image_output_57.png", "text_plain_output_7.png", "text_plain_output_166.png", "image_output_36.png", "text_plain_output_91.png", "image_output_8.png", "image_output_37.png", "image_output_66.png", "text_plain_output_16.png", "text_plain_output_174.png", "image_output_16.png", "image_output_91.png", "text_plain_output_59.png", "image_output_70.png", "text_plain_output_103.png", "text_plain_output_71.png", "text_plain_output_8.png", "text_plain_output_122.png", "text_plain_output_182.png", "text_plain_output_26.png", "image_output_67.png", "image_output_27.png", "image_output_54.png", "text_plain_output_109.png", "image_output_6.png", "text_plain_output_41.png", "text_plain_output_34.png", "image_output_45.png", "text_plain_output_168.png", "text_plain_output_185.png", "text_plain_output_85.png", "text_plain_output_42.png", "image_output_63.png", "image_output_71.png", "text_plain_output_110.png", "text_plain_output_67.png", "text_plain_output_53.png", "image_output_80.png", "image_output_95.png", "text_plain_output_193.png", "text_plain_output_23.png", "text_plain_output_173.png", "text_plain_output_151.png", "text_plain_output_89.png", "image_output_93.png", "text_plain_output_51.png", "image_output_12.png", "text_plain_output_28.png", "text_plain_output_72.png", "text_plain_output_99.png", "text_plain_output_163.png", "text_plain_output_179.png", "image_output_22.png", "text_plain_output_162.png", "text_plain_output_136.png", "text_plain_output_2.png", "text_plain_output_127.png", "image_output_89.png", "image_output_55.png", "text_plain_output_196.png", "text_plain_output_97.png", "text_plain_output_1.png", "text_plain_output_33.png", "text_plain_output_150.png", "image_output_94.png", "text_plain_output_39.png", "image_output_3.png", "text_plain_output_176.png", "text_plain_output_186.png", "image_output_29.png", "text_plain_output_55.png", "text_plain_output_199.png", "text_plain_output_82.png", "image_output_44.png", "text_plain_output_93.png", 
"image_output_43.png", "text_plain_output_19.png", "image_output_2.png", "image_output_1.png", "text_plain_output_105.png", "text_plain_output_80.png", "image_output_10.png", "text_plain_output_94.png", "text_plain_output_164.png", "text_plain_output_124.png", "text_plain_output_17.png", "text_plain_output_148.png", "text_plain_output_11.png", "image_output_88.png", "text_plain_output_12.png", "image_output_33.png", "text_plain_output_194.png", "text_plain_output_62.png", "image_output_87.png", "image_output_50.png", "text_plain_output_95.png", "image_output_15.png", "image_output_99.png", "image_output_49.png", "image_output_100.png", "text_plain_output_156.png", "text_plain_output_61.png", "image_output_76.png", "image_output_9.png", "text_plain_output_83.png", "image_output_19.png", "image_output_79.png", "image_output_61.png", "image_output_38.png", "text_plain_output_135.png", "image_output_26.png", "text_plain_output_46.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/data.csv')
data.info()
code
18161386/cell_14
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data = pd.read_csv('../input/data.csv')
data.drop(['id', 'Unnamed: 32'], axis=1, inplace=True)
x_data = data.drop(['diagnosis'], axis=1)
data.diagnosis = [1 if each == 'M' else 0 for each in data.diagnosis]
x = ((x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data))).values
y = data.diagnosis.values

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=42)
x_train = x_train.T
x_test = x_test.T
y_train = y_train.T
y_test = y_test.T

def initialize_parameters(dimension):
    theta0 = 0.0
    thetaLeft = np.full((dimension, 1), 0.01)
    return (theta0, thetaLeft)

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def forward_backward_propagation(theta0, thetaLeft, x_train, y_train):
    z = theta0 + np.dot(thetaLeft.T, x_train)
    y_head = sigmoid(z)
    # Binary cross-entropy: the second term is weighted by (1 - y_train),
    # not (1 - y_head).
    cost_function = -y_train * np.log(y_head) - (1 - y_train) * np.log(1 - y_head)
    cost = np.sum(cost_function) / x_train.shape[1]
    derivative_theta0 = np.sum(y_head - y_train) / x_train.shape[1]
    derivative_thetaLeft = np.dot(x_train, (y_head - y_train).T) / x_train.shape[1]
    gradients = {'derivative_theta0': derivative_theta0, 'derivative_thetaLeft': derivative_thetaLeft}
    return (cost, gradients)

def update(theta0, thetaLeft, x_train, y_train, learning_rate, number_of_iteration):
    cost_list = []
    cost_list_to_plot = []
    index_to_plot = []
    for i in range(number_of_iteration):
        cost, gradients = forward_backward_propagation(theta0, thetaLeft, x_train, y_train)
        cost_list.append(cost)
        theta0 = theta0 - learning_rate * gradients['derivative_theta0']
        thetaLeft = thetaLeft - learning_rate * gradients['derivative_thetaLeft']
        if i % 50 == 0:
            cost_list_to_plot.append(cost)
            index_to_plot.append(i)
    parameters = {'theta0': theta0, 'thetaLeft': thetaLeft}
    plt.xticks(index_to_plot, rotation='vertical')
    return (parameters, gradients, cost_list)

def predict(theta0, thetaLeft, x_test):
    z = sigmoid(theta0 + np.dot(thetaLeft.T, x_test))
    Y_prediction = np.zeros((1, x_test.shape[1]))
    for i in range(z.shape[1]):
        if z[0, i] <= 0.5:
            Y_prediction[0, i] = 0
        else:
            Y_prediction[0, i] = 1
    return Y_prediction

# These accumulators are referenced below; they come from an earlier
# notebook cell not captured in this dump.
cost_list_global_train = []
cost_list_global_test = []

def logistic_regression(x_train, y_train, x_test, y_test, learning_rate, number_of_iteration):
    dimension = x_train.shape[0]
    theta0, thetaLeft = initialize_parameters(dimension)
    parameters, gradients, cost_list = update(theta0, thetaLeft, x_train, y_train, learning_rate, number_of_iteration)
    y_prediction_test = predict(parameters['theta0'], parameters['thetaLeft'], x_test)
    y_prediction_train = predict(parameters['theta0'], parameters['thetaLeft'], x_train)
    cost_list_global_train.append(100 - np.mean(np.abs(y_prediction_train - y_train)) * 100)
    cost_list_global_test.append(100 - np.mean(np.abs(y_prediction_test - y_test)) * 100)

logistic_regression(x_train, y_train, x_test, y_test, learning_rate=0.01, number_of_iteration=500)
code
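As a sanity check on the from-scratch gradient descent above, one might compare against scikit-learn on the same normalized arrays. A minimal sketch; max_iter is an assumption, and the transposes simply undo the ones applied earlier.

# Hedged sketch: scikit-learn baseline on the same min-max-scaled data.
from sklearn.linear_model import LogisticRegression

clf = LogisticRegression(max_iter=1000)  # max_iter is illustrative
clf.fit(x_train.T, y_train.T)            # undo the transposes used above
print('sklearn test accuracy: %.2f%%' % (clf.score(x_test.T, y_test.T) * 100))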
32070024/cell_9
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

suicide = pd.read_csv('/kaggle/input/who-suicide-statistics/who_suicide_statistics.csv')
suicide['suicide_rate'] = suicide.suicides_no / suicide.population
suicide.columns.tolist()
suicide.groupby('sex').suicide_rate.mean()
suicide.groupby('year').suicide_rate.mean().idxmax()
code
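suicide_rate above is a raw fraction, so the groupby means are tiny numbers. Rates are conventionally reported per 100,000 population; a hedged one-liner (the new column name is an assumption).

# Hedged sketch: express the rate per 100,000 population, the usual unit.
suicide['rate_per_100k'] = suicide.suicides_no / suicide.population * 100000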
32070024/cell_23
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

suicide = pd.read_csv('/kaggle/input/who-suicide-statistics/who_suicide_statistics.csv')
suicide['suicide_rate'] = suicide.suicides_no / suicide.population
suicide.columns.tolist()
suicide.groupby('sex').suicide_rate.mean()
suicide.groupby('year').suicide_rate.mean().idxmax()
suicide.groupby('age').suicide_rate.mean().sort_values().plot.barh()
suicide.groupby('country').size()
suicide.groupby('country').suicide_rate.mean().sort_values(ascending=False).head(10)
Hungary = suicide.set_index('country').loc['Hungary']
Hungary
suicide.groupby('year').suicide_rate.mean().plot.bar()
code
32070024/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

suicide = pd.read_csv('/kaggle/input/who-suicide-statistics/who_suicide_statistics.csv')
suicide['suicide_rate'] = suicide.suicides_no / suicide.population
suicide.columns.tolist()
suicide.groupby('sex').suicide_rate.mean()
suicide.groupby('year').suicide_rate.mean().idxmax()
suicide.groupby('age').suicide_rate.mean().sort_values().plot.barh()
suicide.groupby('country').size()
suicide.groupby('country').suicide_rate.mean().sort_values(ascending=False).head(10)
Hungary = suicide.set_index('country').loc['Hungary']
Hungary
Hungary.groupby(['age', 'year', 'sex']).suicide_rate.mean().sort_values().tail(10).plot.barh()
code
32070024/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32070024/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

suicide = pd.read_csv('/kaggle/input/who-suicide-statistics/who_suicide_statistics.csv')
suicide['suicide_rate'] = suicide.suicides_no / suicide.population
suicide.columns.tolist()
suicide.groupby('sex').suicide_rate.mean()
suicide.groupby('year').suicide_rate.mean().idxmax()
suicide.groupby('age').suicide_rate.mean().sort_values().plot.barh()
suicide.groupby('country').size()
suicide.groupby('country').suicide_rate.mean().sort_values(ascending=False).head(10)
Hungary = suicide.set_index('country').loc['Hungary']
Hungary
code
32070024/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

suicide = pd.read_csv('/kaggle/input/who-suicide-statistics/who_suicide_statistics.csv')
suicide['suicide_rate'] = suicide.suicides_no / suicide.population
suicide.columns.tolist()
suicide.groupby('sex').suicide_rate.mean()
code
32070024/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

suicide = pd.read_csv('/kaggle/input/who-suicide-statistics/who_suicide_statistics.csv')
suicide['suicide_rate'] = suicide.suicides_no / suicide.population
suicide.columns.tolist()
suicide.groupby('sex').suicide_rate.mean()
suicide.groupby('year').suicide_rate.mean().idxmax()
suicide.groupby('age').suicide_rate.mean().sort_values().plot.barh()
suicide.groupby('country').size()
suicide.groupby('country').suicide_rate.mean().sort_values(ascending=False).head(10)
code
32070024/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

suicide = pd.read_csv('/kaggle/input/who-suicide-statistics/who_suicide_statistics.csv')
suicide.head()
code
32070024/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

suicide = pd.read_csv('/kaggle/input/who-suicide-statistics/who_suicide_statistics.csv')
suicide['suicide_rate'] = suicide.suicides_no / suicide.population
suicide.columns.tolist()
suicide.groupby('sex').suicide_rate.mean()
suicide.groupby('year').suicide_rate.mean().idxmax()
suicide.groupby('age').suicide_rate.mean().sort_values().plot.barh()
code
32070024/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

suicide = pd.read_csv('/kaggle/input/who-suicide-statistics/who_suicide_statistics.csv')
suicide['suicide_rate'] = suicide.suicides_no / suicide.population
suicide.columns.tolist()
suicide.groupby('sex').suicide_rate.mean()
suicide.groupby('year').suicide_rate.mean().idxmax()
suicide.groupby('age').suicide_rate.mean().sort_values().plot.barh()
suicide.groupby('country').size()
code
32070024/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

suicide = pd.read_csv('/kaggle/input/who-suicide-statistics/who_suicide_statistics.csv')
suicide['suicide_rate'] = suicide.suicides_no / suicide.population
suicide.columns.tolist()
code
74052380/cell_21
[ "text_plain_output_1.png" ]
from keras.layers import Dense
from keras.layers import Dropout
from keras.models import Sequential
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt

sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

classifier = Sequential()
classifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu'))
classifier.add(Dropout(0.1))
classifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu'))
classifier.add(Dropout(0.1))
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
y_test = y_test.astype('float64')
y_train = y_train.astype('float64')
classifierHistory = classifier.fit(X_train, y_train, batch_size=64, epochs=70, validation_data=(X_test, y_test))

classifier = Sequential()
classifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu'))
classifier.add(Dropout(0.1))
classifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu'))
classifier.add(Dropout(0.1))
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
classifierHistory = classifier.fit(X_train, y_train, batch_size=64, epochs=70, validation_data=(X_test, y_test))

plt.plot(classifierHistory.history['accuracy'])
plt.plot(classifierHistory.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
code
74052380/cell_4
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

dataset = pd.read_csv('/kaggle/input/heart-disease-uci/heart.csv')
dataset.head()
code
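The training cells below consume X_train, X_test, y_train and y_test that are built in cells missing from this dump. A plausible reconstruction from the heart.csv just loaded; 'target' is the usual label column in the UCI heart-disease file, and the split ratio and seed are assumptions.

# Hedged sketch of the missing feature/target split. 'target' is the usual
# label column in heart.csv; the split ratio and seed are assumptions.
from sklearn.model_selection import train_test_split

X = dataset.drop('target', axis=1).values
y = dataset['target'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)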
74052380/cell_23
[ "image_output_1.png" ]
from keras.layers import Dense
from keras.layers import Dropout
from keras.models import Sequential
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt

sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

classifier = Sequential()
classifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu'))
classifier.add(Dropout(0.1))
classifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu'))
classifier.add(Dropout(0.1))
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
y_test = y_test.astype('float64')
y_train = y_train.astype('float64')
classifierHistory = classifier.fit(X_train, y_train, batch_size=64, epochs=70, validation_data=(X_test, y_test))

classifier = Sequential()
classifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu'))
classifier.add(Dropout(0.1))
classifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu'))
classifier.add(Dropout(0.1))
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
classifierHistory = classifier.fit(X_train, y_train, batch_size=64, epochs=70, validation_data=(X_test, y_test))

plt.plot(classifierHistory.history['loss'])
plt.plot(classifierHistory.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
code
74052380/cell_19
[ "text_plain_output_1.png" ]
from keras.layers import Dense
from keras.layers import Dropout
from keras.models import Sequential
from sklearn.preprocessing import StandardScaler

sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

classifier = Sequential()
classifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu'))
classifier.add(Dropout(0.1))
classifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu'))
classifier.add(Dropout(0.1))
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
y_test = y_test.astype('float64')
y_train = y_train.astype('float64')
classifierHistory = classifier.fit(X_train, y_train, batch_size=64, epochs=70, validation_data=(X_test, y_test))

classifier = Sequential()
classifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu'))
classifier.add(Dropout(0.1))
classifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu'))
classifier.add(Dropout(0.1))
classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid'))
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
classifierHistory = classifier.fit(X_train, y_train, batch_size=64, epochs=70, validation_data=(X_test, y_test))

scores = classifier.evaluate(X_test, y_test)
print('Accuracy: %.2f%%' % (scores[1] * 100))
code
74052380/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
74052380/cell_15
[ "text_html_output_1.png" ]
from keras.layers import Dense from keras.layers import Dropout from keras.models import Sequential from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) classifier = Sequential() classifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu')) classifier.add(Dropout(0.1)) classifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu')) classifier.add(Dropout(0.1)) classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid')) classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) y_test = y_test.astype('float64') y_train = y_train.astype('float64') classifierHistory = classifier.fit(X_train, y_train, batch_size=64, epochs=70, validation_data=(X_test, y_test))
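# Added sketch: an EarlyStopping callback is a common alternative to a fixed
# 70 epochs. Defining it here has no side effects; it would be passed to fit
# via callbacks=[early_stop]. This is a suggestion, not the author's setup.
from keras.callbacks import EarlyStopping
early_stop = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)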
code
74052380/cell_17
[ "text_plain_output_1.png" ]
from keras.layers import Dense from keras.layers import Dropout from keras.models import Sequential from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import StandardScaler sc = StandardScaler() X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) classifier = Sequential() classifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu')) classifier.add(Dropout(0.1)) classifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu')) classifier.add(Dropout(0.1)) classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid')) classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) y_test = y_test.astype('float64') y_train = y_train.astype('float64') classifierHistory = classifier.fit(X_train, y_train, batch_size=64, epochs=70, validation_data=(X_test, y_test)) classifier = Sequential() classifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu')) classifier.add(Dropout(0.1)) classifier.add(Dense(units=12, kernel_initializer='uniform', activation='relu')) classifier.add(Dropout(0.1)) classifier.add(Dense(units=1, kernel_initializer='uniform', activation='sigmoid')) classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']) classifierHistory = classifier.fit(X_train, y_train, batch_size=64, epochs=70, validation_data=(X_test, y_test))
code
16157465/cell_4
[ "image_output_2.png", "image_output_1.png" ]
import pandas as pd
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')  # fixed: test set was being read from train.csv

def read_xyz(path, filename):
    return pd.read_csv(path + filename, skiprows=2, header=None, sep=' ', usecols=[0, 1, 2, 3], names=['atom', 'x', 'y', 'z'])
path = '../input/structures/'
filename = 'dsgdb9nsd_000001.xyz'
read_xyz(path, filename)
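# Illustrative addition (not in the original cell): each .xyz row holds an atom
# symbol plus Cartesian coordinates, so pairwise distances follow directly.
mol = read_xyz(path, filename)
d01 = np.sqrt(np.sum(np.square(mol[['x', 'y', 'z']].iloc[0] - mol[['x', 'y', 'z']].iloc[1])))
print('distance between atoms 0 and 1: %.3f Angstrom' % d01)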
code
16157465/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
""" atom_list = [] for filename in os.listdir("../input/structures"): atom_list = atom_list + list(read_xyz(path, filename)['atom']) atom_list = set(atom_list) print(atom_list) """ print("{'O', 'H', 'C', 'F', 'N'}")
code
16157465/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')  # fixed: test set was being read from train.csv
train.head()
code
16157465/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')  # fixed: test set was being read from train.csv

def read_xyz(path, filename):
    return pd.read_csv(path + filename, skiprows=2, header=None, sep=' ', usecols=[0, 1, 2, 3], names=['atom', 'x', 'y', 'z'])
path = '../input/structures/'
filename = 'dsgdb9nsd_000001.xyz'
read_xyz(path, filename)
x_list = []
y_list = []
z_list = []
for filename in os.listdir('../input/structures'):
    x_list = x_list + list(read_xyz(path, filename)['x'])
    y_list = y_list + list(read_xyz(path, filename)['y'])
    z_list = z_list + list(read_xyz(path, filename)['z'])
dimfig, dimaxes = plt.subplots(3, 1, figsize=(6, 6))
sns.distplot(x_list, ax=dimaxes[0])
sns.distplot(y_list, ax=dimaxes[1])
sns.distplot(z_list, ax=dimaxes[2])
print('x max: ' + str(np.max(x_list)) + ' x min : ' + str(np.min(x_list)))
print('y max: ' + str(np.max(y_list)) + ' y min : ' + str(np.min(y_list)))
print('z max: ' + str(np.max(z_list)) + ' z min : ' + str(np.min(z_list)))
code
16157465/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')  # fixed: test set was being read from train.csv

def read_xyz(path, filename):
    return pd.read_csv(path + filename, skiprows=2, header=None, sep=' ', usecols=[0, 1, 2, 3], names=['atom', 'x', 'y', 'z'])
path = '../input/structures/'
filename = 'dsgdb9nsd_000001.xyz'
read_xyz(path, filename)
x_list = []
y_list = []
z_list = []
for filename in os.listdir('../input/structures'):
    x_list = x_list + list(read_xyz(path, filename)['x'])
    y_list = y_list + list(read_xyz(path, filename)['y'])
    z_list = z_list + list(read_xyz(path, filename)['z'])
dimfig, dimaxes = plt.subplots(3, 1, figsize=(6, 6))
sns.distplot(x_list, ax=dimaxes[0])
sns.distplot(y_list, ax=dimaxes[1])
sns.distplot(z_list, ax=dimaxes[2])
print('x max: ' + str(np.max(x_list)) + ' x min : ' + str(np.min(x_list)))
print('y max: ' + str(np.max(y_list)) + ' y min : ' + str(np.min(y_list)))
print('z max: ' + str(np.max(z_list)) + ' z min : ' + str(np.min(z_list)))
coupling_types = set(train['type'])
coupling_types = list(coupling_types)
totals = [np.sum(train['type'] == x) for x in coupling_types]
subsets = dict()
for x in coupling_types:
    subsets[x] = train.loc[train['type'] == x]
bar_fig, bar_axis = plt.subplots()
sns.barplot(coupling_types, totals, ax=bar_axis)
dist_fig, dist_axes = plt.subplots(len(subsets), 1, figsize=(6, 12))
for x, y in zip(dist_axes, coupling_types):
    sns.distplot(subsets[y]['scalar_coupling_constant'], ax=x)
    x.set_title(y)
dist_fig.tight_layout()

def length(data, index1, index2):
    """Takes an xyz file imported by read_xyz and calculates the distance between two points"""
    return np.sqrt(np.sum(np.square(data[['x', 'y', 'z']].loc[index1] - data[['x', 'y', 'z']].loc[index2])))

def neighbours(data, index):
    """Takes an xyz file imported by read_xyz and calculates the number of neighbours within sqrt(3) Å of the indexed atom"""
    l2 = np.array([np.sum(np.square(data[['x', 'y', 'z']].loc[index] - data[['x', 'y', 'z']].loc[x])) for x in range(len(data))])
    return np.sum(l2 < 3) - 1

def nearest(data, index):
    """Takes an xyz file imported by read_xyz and finds the index of the nearest non-hydrogen atom"""
    point = data.loc[index][['x', 'y', 'z']]
    data = data[data['atom'] != 'H'][['x', 'y', 'z']]
    data[['x', 'y', 'z']] = data[['x', 'y', 'z']] - point
    data[['x', 'y', 'z']] = np.square(data[['x', 'y', 'z']])
    data = np.sum(data, axis=1)
    if index in data.index:
        data[index] = 999  # exclude the query atom itself
    return np.argmin(data)  # positional index of the closest heavy atom

def magnitude(vector):
    """Calculates the magnitude of a vector"""
    return np.sqrt(np.sum(np.square(vector)))

def dihedral(point1, point2, point3, point4):
    """Calculates the dihedral angle between two bonds"""
    b1 = point1 - point2
    b2 = point2 - point3
    b3 = point3 - point4
    n1 = np.cross(b1, b2)
    n1 = n1 / magnitude(n1)
    n2 = np.cross(b2, b3)
    n2 = n2 / magnitude(n2)
    m1 = np.cross(n1, b2 / magnitude(b2))
    x = np.dot(n1, n2)
    y = np.dot(m1, n2)
    return np.arctan2(y, x)  # fixed: the standard dihedral convention is arctan2(y, x)

def single_bond(coupling_type):
    feature_list = []
    for x in range(1000):
        current = subsets[coupling_type].iloc[x]
        index0 = current['atom_index_0']
        index1 = current['atom_index_1']
        filename = current['molecule_name'] + '.xyz'
        data = read_xyz(path, filename)
        feature_list.append((length(data, index0, index1), neighbours(data, index1), current['scalar_coupling_constant']))
    return pd.DataFrame(feature_list, columns=['length', 'hybrid', 'coupling'])

def two_bond(coupling_type):
    feature_list = []
    for x in range(1000):
        current = subsets[coupling_type].iloc[x]
        data = read_xyz(path, current['molecule_name'] + '.xyz')
        index_0 = current['atom_index_0']
        index_1 = current['atom_index_1']
        shared = nearest(data, index_0)
        length1 = length(data, index_0, shared)
        length2 = length(data, index_1, shared)
        vector1 = data[['x', 'y', 'z']].loc[index_0] - data[['x', 'y', 'z']].loc[shared]
        vector2 = data[['x', 'y', 'z']].loc[index_1] - data[['x', 'y', 'z']].loc[shared]
        cosine = np.dot(vector1, vector2) / (length1 * length2)
        shared_hybrid = neighbours(data, shared)
        carbon_hybrid = neighbours(data, index_1)
        feature_list.append((length1, length2, cosine, data['atom'].iloc[shared], shared_hybrid, carbon_hybrid, current['scalar_coupling_constant']))
    return pd.DataFrame(feature_list, columns=['length1', 'length2', 'cosine', 'atom', 'hybrid1', 'hybrid2', 'coupling'])

def three_bond(coupling_type):
    feature_list = []
    for x in range(1000):
        current = subsets[coupling_type].iloc[x]
        data = read_xyz(path, current['molecule_name'] + '.xyz')
        index_0 = current['atom_index_0']
        index_1 = current['atom_index_1']
        shared1 = nearest(data, index_0)
        shared2 = nearest(data, index_1)
        length1 = length(data, index_0, shared1)
        length2 = length(data, index_1, shared2)
        length_shared = length(data, index_0, index_1)
        angle = dihedral(data[['x', 'y', 'z']].loc[index_0], data[['x', 'y', 'z']].loc[shared1], data[['x', 'y', 'z']].loc[shared2], data[['x', 'y', 'z']].loc[index_1])
        shared1_hybrid = neighbours(data, shared1)
        shared2_hybrid = neighbours(data, shared2)
        terminal_hybrid = neighbours(data, index_1)
        feature_list.append((length1, length2, length_shared, angle, data['atom'].iloc[shared1], data['atom'].iloc[shared2], shared1_hybrid, shared2_hybrid, terminal_hybrid, current['scalar_coupling_constant']))
    return pd.DataFrame(feature_list, columns=['length1', 'length2', 'length_shared', 'angle', 'atom1', 'atom2', 'hybrid1', 'hybrid2', 'terminal_hybrid', 'coupling'])
function_dict = {'1': single_bond, '2': two_bond, '3': three_bond}
engineered = {x: function_dict[x[0]](x) for x in coupling_types}
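# Illustrative usage (added): summarise the engineered features for one
# coupling type, whichever comes first in the dictionary.
some_type = next(iter(engineered))
print(some_type)
print(engineered[some_type].describe())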
code
16157465/cell_10
[ "text_html_output_1.png" ]
import pandas as pd
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')  # fixed: test set was being read from train.csv
coupling_types = set(train['type'])
print(coupling_types)
code
16157465/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')  # fixed: test set was being read from train.csv

def read_xyz(path, filename):
    return pd.read_csv(path + filename, skiprows=2, header=None, sep=' ', usecols=[0, 1, 2, 3], names=['atom', 'x', 'y', 'z'])
path = '../input/structures/'
filename = 'dsgdb9nsd_000001.xyz'
read_xyz(path, filename)
x_list = []
y_list = []
z_list = []
for filename in os.listdir('../input/structures'):
    x_list = x_list + list(read_xyz(path, filename)['x'])
    y_list = y_list + list(read_xyz(path, filename)['y'])
    z_list = z_list + list(read_xyz(path, filename)['z'])
dimfig, dimaxes = plt.subplots(3, 1, figsize=(6, 6))
sns.distplot(x_list, ax=dimaxes[0])
sns.distplot(y_list, ax=dimaxes[1])
sns.distplot(z_list, ax=dimaxes[2])
print('x max: ' + str(np.max(x_list)) + ' x min : ' + str(np.min(x_list)))
print('y max: ' + str(np.max(y_list)) + ' y min : ' + str(np.min(y_list)))
print('z max: ' + str(np.max(z_list)) + ' z min : ' + str(np.min(z_list)))
coupling_types = set(train['type'])
coupling_types = list(coupling_types)
totals = [np.sum(train['type'] == x) for x in coupling_types]
subsets = dict()
for x in coupling_types:
    subsets[x] = train.loc[train['type'] == x]
bar_fig, bar_axis = plt.subplots()
sns.barplot(coupling_types, totals, ax=bar_axis)
dist_fig, dist_axes = plt.subplots(len(subsets), 1, figsize=(6, 12))
for x, y in zip(dist_axes, coupling_types):
    sns.distplot(subsets[y]['scalar_coupling_constant'], ax=x)
    x.set_title(y)
dist_fig.tight_layout()
code
105187012/cell_42
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import StandardScaler sc = StandardScaler(with_mean=False) X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) X_train
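# Added note: with_mean=False keeps StandardScaler compatible with the sparse
# matrix that OneHotEncoder can produce — subtracting the mean would densify
# it, so only scaling by the standard deviation is applied.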
code
105187012/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/heart-failure-prediction/heart.csv') data.shape data.nunique() data.dropna(inplace=True) data.isnull().sum() data.shape data.head(10)
code
105187012/cell_25
[ "image_output_1.png" ]
from sklearn.compose import ColumnTransformer from sklearn.preprocessing import OneHotEncoder import matplotlib.pyplot as plt import pandas as pd data = pd.read_csv('../input/heart-failure-prediction/heart.csv') data.shape data.nunique() data.dropna(inplace=True) data.isnull().sum() data.shape X = data.iloc[:, :-1] y = data.iloc[:, -1] ct1 = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1, 2, 6, 8, 10])], remainder='passthrough') X = ct1.fit_transform(X) X = pd.DataFrame(X) X.plot() plt.show()
code
105187012/cell_57
[ "text_plain_output_1.png" ]
from sklearn import linear_model from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import StandardScaler sc = StandardScaler(with_mean=False) X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) from sklearn import linear_model Lasso_reg = linear_model.Lasso(alpha=50, max_iter=100, tol=0.1) Lasso_reg.fit(X_train, y_train)
code
105187012/cell_34
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer from sklearn.feature_selection import RFE from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import OneHotEncoder import matplotlib.pyplot as plt import pandas as pd data = pd.read_csv('../input/heart-failure-prediction/heart.csv') data.shape data.nunique() data.dropna(inplace=True) data.isnull().sum() data.shape X = data.iloc[:, :-1] y = data.iloc[:, -1] ct1 = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1, 2, 6, 8, 10])], remainder='passthrough') X = ct1.fit_transform(X) X = pd.DataFrame(X) model = LogisticRegression() rfe = RFE(model, n_features_to_select=5) fit = rfe.fit(X, y) fit.n_features_ features = fit.transform(X) d = pd.DataFrame(features) d.hist(figsize=(10, 10)) plt.show()
code
105187012/cell_30
[ "image_output_1.png" ]
from sklearn.compose import ColumnTransformer from sklearn.feature_selection import RFE from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import OneHotEncoder import matplotlib.pyplot as plt import pandas as pd data = pd.read_csv('../input/heart-failure-prediction/heart.csv') data.shape data.nunique() data.dropna(inplace=True) data.isnull().sum() data.shape X = data.iloc[:, :-1] y = data.iloc[:, -1] ct1 = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1, 2, 6, 8, 10])], remainder='passthrough') X = ct1.fit_transform(X) X = pd.DataFrame(X) model = LogisticRegression() rfe = RFE(model, n_features_to_select=5) fit = rfe.fit(X, y) fit.n_features_
code
105187012/cell_44
[ "image_output_1.png" ]
from sklearn.preprocessing import StandardScaler from sklearn.tree import DecisionTreeClassifier from sklearn.preprocessing import StandardScaler sc = StandardScaler(with_mean=False) X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) from sklearn.tree import DecisionTreeClassifier classifier = DecisionTreeClassifier(criterion='entropy', random_state=0) classifier.fit(X_train, y_train)
code
105187012/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/heart-failure-prediction/heart.csv') data.shape
code
105187012/cell_29
[ "image_output_1.png" ]
from sklearn.compose import ColumnTransformer from sklearn.feature_selection import RFE from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import OneHotEncoder import matplotlib.pyplot as plt import pandas as pd data = pd.read_csv('../input/heart-failure-prediction/heart.csv') data.shape data.nunique() data.dropna(inplace=True) data.isnull().sum() data.shape X = data.iloc[:, :-1] y = data.iloc[:, -1] ct1 = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1, 2, 6, 8, 10])], remainder='passthrough') X = ct1.fit_transform(X) X = pd.DataFrame(X) model = LogisticRegression() rfe = RFE(model, n_features_to_select=5) fit = rfe.fit(X, y) fit
code
105187012/cell_48
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import StandardScaler from sklearn.tree import DecisionTreeClassifier import numpy as np from sklearn.preprocessing import StandardScaler sc = StandardScaler(with_mean=False) X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) from sklearn.tree import DecisionTreeClassifier classifier = DecisionTreeClassifier(criterion='entropy', random_state=0) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) y_test = np.array(y_test) y_pred = np.array(y_pred) print('DecisionTreeClassifierModel Train Score is : ', classifier.score(X_train, y_train)) print('DecisionTreeClassifierModel Test Score is : ', classifier.score(X_test, y_test))
code
105187012/cell_54
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import StandardScaler from sklearn.tree import DecisionTreeClassifier import numpy as np from sklearn.preprocessing import StandardScaler sc = StandardScaler(with_mean=False) X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) from sklearn.tree import DecisionTreeClassifier classifier = DecisionTreeClassifier(criterion='entropy', random_state=0) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) y_test = np.array(y_test) y_pred = np.array(y_pred) from sklearn.neighbors import KNeighborsClassifier kNN = KNeighborsClassifier(n_neighbors=20) kNN.fit(X_train, y_train) print(kNN.score(X_train, y_train)) print(kNN.score(X_test, y_test))
code
105187012/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/heart-failure-prediction/heart.csv') data.shape data.nunique() data.dropna(inplace=True) data.isnull().sum() data.shape
code
105187012/cell_50
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer from sklearn.feature_selection import RFE from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import StandardScaler from sklearn.tree import DecisionTreeClassifier import matplotlib.pyplot as plt import matplotlib.pyplot as plt import numpy as np import pandas as pd data = pd.read_csv('../input/heart-failure-prediction/heart.csv') data.shape data.nunique() data.dropna(inplace=True) data.isnull().sum() data.shape X = data.iloc[:, :-1] y = data.iloc[:, -1] ct1 = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1, 2, 6, 8, 10])], remainder='passthrough') X = ct1.fit_transform(X) X = pd.DataFrame(X) model = LogisticRegression() rfe = RFE(model, n_features_to_select=5) fit = rfe.fit(X, y) fit.n_features_ features = fit.transform(X) d = pd.DataFrame(features) from sklearn.preprocessing import StandardScaler sc = StandardScaler(with_mean=False) X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) from sklearn.tree import DecisionTreeClassifier classifier = DecisionTreeClassifier(criterion='entropy', random_state=0) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) y_test = np.array(y_test) y_pred = np.array(y_pred) import matplotlib.pyplot as plt importance = classifier.feature_importances_ for i, v in enumerate(importance): print('Feature: %0d, Score: %.5f' % (i, v)) plt.bar([x for x in range(len(importance))], importance) plt.show()
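# Added: rank the importances to surface the strongest predictors (indices
# refer to the columns of the scaled training matrix above).
top5 = sorted(enumerate(importance), key=lambda pair: pair[1], reverse=True)[:5]
print('Top 5 features by importance:', top5)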
code
105187012/cell_52
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import StandardScaler from sklearn.tree import DecisionTreeClassifier import numpy as np from sklearn.preprocessing import StandardScaler sc = StandardScaler(with_mean=False) X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) from sklearn.tree import DecisionTreeClassifier classifier = DecisionTreeClassifier(criterion='entropy', random_state=0) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) y_test = np.array(y_test) y_pred = np.array(y_pred) from sklearn.ensemble import RandomForestClassifier from sklearn.metrics import mean_absolute_error forest_model = RandomForestClassifier(n_estimators=1000, max_depth=25) forest_model.fit(X_train, y_train) y_pred = forest_model.predict(X_test) print('RandomForestRegressor Train Score is : ', forest_model.score(X_train, y_train)) print('RandomForestRegressor Test Score is : ', forest_model.score(X_test, y_test))
code
105187012/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/heart-failure-prediction/heart.csv') data.shape data.info()
code
105187012/cell_49
[ "text_plain_output_1.png" ]
from sklearn.metrics import confusion_matrix, accuracy_score, recall_score, precision_score, f1_score from sklearn.preprocessing import StandardScaler from sklearn.tree import DecisionTreeClassifier import numpy as np from sklearn.preprocessing import StandardScaler sc = StandardScaler(with_mean=False) X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) from sklearn.tree import DecisionTreeClassifier classifier = DecisionTreeClassifier(criterion='entropy', random_state=0) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) y_test = np.array(y_test) y_pred = np.array(y_pred) recall_score(y_test, y_pred)
code
105187012/cell_32
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.compose import ColumnTransformer from sklearn.feature_selection import RFE from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import OneHotEncoder import matplotlib.pyplot as plt import pandas as pd data = pd.read_csv('../input/heart-failure-prediction/heart.csv') data.shape data.nunique() data.dropna(inplace=True) data.isnull().sum() data.shape X = data.iloc[:, :-1] y = data.iloc[:, -1] ct1 = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1, 2, 6, 8, 10])], remainder='passthrough') X = ct1.fit_transform(X) X = pd.DataFrame(X) model = LogisticRegression() rfe = RFE(model, n_features_to_select=5) fit = rfe.fit(X, y) fit.n_features_ features = fit.transform(X) print(features[0:5, :])
code
105187012/cell_59
[ "text_plain_output_1.png" ]
from sklearn import linear_model from sklearn.preprocessing import StandardScaler from sklearn.tree import DecisionTreeClassifier import numpy as np from sklearn.preprocessing import StandardScaler sc = StandardScaler(with_mean=False) X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) from sklearn.tree import DecisionTreeClassifier classifier = DecisionTreeClassifier(criterion='entropy', random_state=0) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) y_test = np.array(y_test) y_pred = np.array(y_pred) from sklearn import linear_model Lasso_reg = linear_model.Lasso(alpha=50, max_iter=100, tol=0.1) Lasso_reg.fit(X_train, y_train) Lasso_reg.score(X_train, y_train) Lasso_reg.score(X_test, y_test)
code
105187012/cell_58
[ "text_plain_output_1.png" ]
from sklearn import linear_model from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import StandardScaler sc = StandardScaler(with_mean=False) X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) from sklearn import linear_model Lasso_reg = linear_model.Lasso(alpha=50, max_iter=100, tol=0.1) Lasso_reg.fit(X_train, y_train) Lasso_reg.score(X_train, y_train)
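# Added note: Lasso is a linear regressor, so its .score() on a 0/1 target is
# an R^2-style value rather than accuracy. A hedged classification analogue
# (not the author's method) is L1-regularised logistic regression:
from sklearn.linear_model import LogisticRegression
l1_clf = LogisticRegression(penalty='l1', solver='liblinear')
l1_clf.fit(X_train, y_train)
print('L1 logistic train accuracy:', l1_clf.score(X_train, y_train))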
code
105187012/cell_28
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.compose import ColumnTransformer from sklearn.feature_selection import RFE from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import OneHotEncoder import matplotlib.pyplot as plt import pandas as pd data = pd.read_csv('../input/heart-failure-prediction/heart.csv') data.shape data.nunique() data.dropna(inplace=True) data.isnull().sum() data.shape X = data.iloc[:, :-1] y = data.iloc[:, -1] ct1 = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1, 2, 6, 8, 10])], remainder='passthrough') X = ct1.fit_transform(X) X = pd.DataFrame(X) model = LogisticRegression() rfe = RFE(model, n_features_to_select=5) fit = rfe.fit(X, y) print('Num Features: %s' % fit.n_features_) print('Selected Features: %s' % fit.support_) print('Feature Ranking: %s' % fit.ranking_)
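# Illustrative addition: map the RFE mask back to the (encoded) column labels.
# Assumes X and fit from above; after OneHotEncoder the labels are integers.
selected_columns = X.columns[fit.support_]
print('Columns kept by RFE:', list(selected_columns))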
code
105187012/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/heart-failure-prediction/heart.csv') data.shape data.nunique()
code
105187012/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd data = pd.read_csv('../input/heart-failure-prediction/heart.csv') data.shape data.nunique() data.dropna(inplace=True) data.isnull().sum() data.shape data.plot() plt.show()
code
105187012/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd data = pd.read_csv('../input/heart-failure-prediction/heart.csv') data.shape data.nunique() data.dropna(inplace=True) data.isnull().sum() data.shape data['HeartDisease'].value_counts().plot(kind='bar')
code
105187012/cell_38
[ "text_plain_output_1.png" ]
from sklearn.compose import ColumnTransformer from sklearn.feature_selection import RFE from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import OneHotEncoder import matplotlib.pyplot as plt import pandas as pd data = pd.read_csv('../input/heart-failure-prediction/heart.csv') data.shape data.nunique() data.dropna(inplace=True) data.isnull().sum() data.shape X = data.iloc[:, :-1] y = data.iloc[:, -1] ct1 = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1, 2, 6, 8, 10])], remainder='passthrough') X = ct1.fit_transform(X) X = pd.DataFrame(X) model = LogisticRegression() rfe = RFE(model, n_features_to_select=5) fit = rfe.fit(X, y) fit.n_features_ features = fit.transform(X) d = pd.DataFrame(features) X_train_d = pd.DataFrame(X_train) X_train_d.head()
code
105187012/cell_47
[ "text_html_output_1.png" ]
from sklearn.metrics import confusion_matrix, accuracy_score, recall_score, precision_score, f1_score from sklearn.preprocessing import StandardScaler from sklearn.tree import DecisionTreeClassifier import numpy as np from sklearn.preprocessing import StandardScaler sc = StandardScaler(with_mean=False) X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) from sklearn.tree import DecisionTreeClassifier classifier = DecisionTreeClassifier(criterion='entropy', random_state=0) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) y_test = np.array(y_test) y_pred = np.array(y_pred) from sklearn.metrics import confusion_matrix, accuracy_score, recall_score, precision_score, f1_score cm = confusion_matrix(y_test, y_pred) print(cm) accuracy_score(y_test, y_pred)
code
105187012/cell_24
[ "text_html_output_1.png" ]
from sklearn.compose import ColumnTransformer from sklearn.preprocessing import OneHotEncoder import matplotlib.pyplot as plt import pandas as pd data = pd.read_csv('../input/heart-failure-prediction/heart.csv') data.shape data.nunique() data.dropna(inplace=True) data.isnull().sum() data.shape X = data.iloc[:, :-1] y = data.iloc[:, -1] ct1 = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1, 2, 6, 8, 10])], remainder='passthrough') X = ct1.fit_transform(X) X = pd.DataFrame(X) X.hist(figsize=(15, 15)) plt.show()
code
105187012/cell_53
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression from sklearn.linear_model import LogisticRegression from sklearn.preprocessing import StandardScaler from sklearn.tree import DecisionTreeClassifier import numpy as np from sklearn.preprocessing import StandardScaler sc = StandardScaler(with_mean=False) X_train = sc.fit_transform(X_train) X_test = sc.transform(X_test) from sklearn.tree import DecisionTreeClassifier classifier = DecisionTreeClassifier(criterion='entropy', random_state=0) classifier.fit(X_train, y_train) y_pred = classifier.predict(X_test) y_test = np.array(y_test) y_pred = np.array(y_pred) from sklearn.linear_model import LogisticRegression lr = LogisticRegression() lr.fit(X_train, y_train) print(lr.score(X_train, y_train)) print(lr.score(X_test, y_test))
code
105187012/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd data = pd.read_csv('../input/heart-failure-prediction/heart.csv') data.shape data.nunique() data.dropna(inplace=True) data.isnull().sum()
code
73081571/cell_21
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import matplotlib.pyplot as plt import matplotlib.pyplot as plt import matplotlib.pyplot as plt import pandas as pd import pandas as pd import pandas as pd import pandas as pd import pandas as pd import pandas as pd import pandas as pd import pandas as pd days_of_week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'] calory_intake = [2385, 1543, 1293, 2019, 4201, 1203, 2309] weekly_calory_count = pd.DataFrame({'Days': days_of_week, 'Calories': calory_intake}) import pandas as pd import matplotlib.pyplot as plt ser = pd.Series([71, 67, 78, 90], index=('Q1', 'Q2', 'Q3', 'Q4')) import pandas as pd import matplotlib.pyplot as plt ser = pd.Series([77, 65, 34, 93], index=('Math', 'Science', 'English', 'Business')) import pandas as pd import matplotlib.pyplot as plt df = pd.DataFrame({'Group': ['A', 'B', 'C', 'D', 'E'], 'Value': [4, 2, 5, 10, 9]}) plt.barh(y=df.Group, width=df.Value) import pandas as pd import matplotlib.pyplot as plt df = pd.DataFrame({'Group': ['Students', 'Teachers', 'Supervisors', 'Employees', 'Assistants'], 'Value': [2000, 300, 54, 450, 23]}) import pandas as pd value = pd.DataFrame({'Length': [3.4, 6.39, 3.2, 6.5, 1.3], 'Width': [7.6, 3.6, 0.45, 23.5, 3.2]}) hist = value.hist(bins=5)
code
73081571/cell_26
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import matplotlib.pyplot as plt import matplotlib.pyplot as plt import matplotlib.pyplot as plt import pandas as pd import pandas as pd import pandas as pd import pandas as pd import pandas as pd import pandas as pd import pandas as pd import pandas as pd import pandas as pd import pandas as pd days_of_week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'] calory_intake = [2385, 1543, 1293, 2019, 4201, 1203, 2309] weekly_calory_count = pd.DataFrame({'Days': days_of_week, 'Calories': calory_intake}) import pandas as pd import matplotlib.pyplot as plt ser = pd.Series([71, 67, 78, 90], index=('Q1', 'Q2', 'Q3', 'Q4')) import pandas as pd import matplotlib.pyplot as plt ser = pd.Series([77, 65, 34, 93], index=('Math', 'Science', 'English', 'Business')) import pandas as pd import matplotlib.pyplot as plt df = pd.DataFrame({'Group': ['A', 'B', 'C', 'D', 'E'], 'Value': [4, 2, 5, 10, 9]}) plt.barh(y=df.Group, width=df.Value) import pandas as pd import matplotlib.pyplot as plt df = pd.DataFrame({'Group': ['Students', 'Teachers', 'Supervisors', 'Employees', 'Assistants'], 'Value': [2000, 300, 54, 450, 23]}) import pandas as pd # Creating a Data frame value = pd.DataFrame({ 'Length': [3.40, 6.39, 3.20, 6.50, 1.3], 'Width': [7.6, 3.6, 0.45, 23.5, 3.2] }) # Creating Histograms of columns 'Length' and 'Width' using the pandas histogram function : .hist() # function hist = value.hist(bins=5) import pandas as pd values = pd.DataFrame({'Length': [2.7, 8.7, 3.4, 2.4, 1.9, 3.4, 5.6], 'Breadth': [4.24, 2.67, 7.6, 7.1, 4.9, 6.5, 3.4]}) import pandas as pd nba_champions = pd.Series(index=[2015, 2016, 2017, 2018, 2019, 2021], data=['Golden State Warriors', 'Golden State Warriors', 'Golden State Warriors', 'Toronto Raptors', 'Los Angeles Lakers', 'Milwaukee Bucks'], name='Winners') print(nba_champions) nba_champions_counter = nba_champions.value_counts() print(nba_champions_counter) nba_champions_counter.plot(kind='pie')
code
73081571/cell_7
[ "image_output_1.png" ]
import pandas as pd import pandas as pd days_of_week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'] calory_intake = [2385, 1543, 1293, 2019, 4201, 1203, 2309] weekly_calory_count = pd.DataFrame({'Days': days_of_week, 'Calories': calory_intake}) weekly_calory_count.plot('Days', 'Calories') print(weekly_calory_count)
code
73081571/cell_16
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import matplotlib.pyplot as plt import matplotlib.pyplot as plt import pandas as pd import pandas as pd import pandas as pd import pandas as pd import pandas as pd import pandas as pd days_of_week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'] calory_intake = [2385, 1543, 1293, 2019, 4201, 1203, 2309] weekly_calory_count = pd.DataFrame({'Days': days_of_week, 'Calories': calory_intake}) import pandas as pd import matplotlib.pyplot as plt ser = pd.Series([71, 67, 78, 90], index=('Q1', 'Q2', 'Q3', 'Q4')) import pandas as pd import matplotlib.pyplot as plt ser = pd.Series([77, 65, 34, 93], index=('Math', 'Science', 'English', 'Business')) import pandas as pd import matplotlib.pyplot as plt df = pd.DataFrame({'Group': ['A', 'B', 'C', 'D', 'E'], 'Value': [4, 2, 5, 10, 9]}) plt.barh(y=df.Group, width=df.Value)
code
73081571/cell_31
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import matplotlib.pyplot as plt import matplotlib.pyplot as plt import matplotlib.pyplot as plt import pandas as pd import pandas as pd import pandas as pd import pandas as pd import pandas as pd import pandas as pd import pandas as pd import pandas as pd import pandas as pd import pandas as pd import pandas as pd import pandas as pd days_of_week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'] calory_intake = [2385, 1543, 1293, 2019, 4201, 1203, 2309] weekly_calory_count = pd.DataFrame({'Days': days_of_week, 'Calories': calory_intake}) import pandas as pd import matplotlib.pyplot as plt ser = pd.Series([71, 67, 78, 90], index=('Q1', 'Q2', 'Q3', 'Q4')) import pandas as pd import matplotlib.pyplot as plt ser = pd.Series([77, 65, 34, 93], index=('Math', 'Science', 'English', 'Business')) import pandas as pd import matplotlib.pyplot as plt df = pd.DataFrame({'Group': ['A', 'B', 'C', 'D', 'E'], 'Value': [4, 2, 5, 10, 9]}) plt.barh(y=df.Group, width=df.Value) import pandas as pd import matplotlib.pyplot as plt df = pd.DataFrame({'Group': ['Students', 'Teachers', 'Supervisors', 'Employees', 'Assistants'], 'Value': [2000, 300, 54, 450, 23]}) import pandas as pd # Creating a Data frame value = pd.DataFrame({ 'Length': [3.40, 6.39, 3.20, 6.50, 1.3], 'Width': [7.6, 3.6, 0.45, 23.5, 3.2] }) # Creating Histograms of columns 'Length' and 'Width' using the pandas histogram function : .hist() # function hist = value.hist(bins=5) import pandas as pd values = pd.DataFrame({'Length': [2.7, 8.7, 3.4, 2.4, 1.9, 3.4, 5.6], 'Breadth': [4.24, 2.67, 7.6, 7.1, 4.9, 6.5, 3.4]}) import pandas as pd nba_champions = pd.Series(index=[2015, 2016, 2017, 2018, 2019, 2021], data=['Golden State Warriors', 'Golden State Warriors', 'Golden State Warriors', 'Toronto Raptors', 'Los Angeles Lakers', 'Milwaukee Bucks'], name='Winners') nba_champions_counter = nba_champions.value_counts() import pandas as pd wimbledon_winners = pd.Series(index=[2015, 2016, 2017, 2018, 2019], data=['Novak Djokovic', 'Andy Murray', 'Roger Federer', 'Novak Djokovic', 'Novak Djokovic'], name='Winners') import pandas as pd data = {'Name': ['Ben', 'Sally', 'Joseph', 'Penny', 'Jackson', 'Elizabeth'], 'Age': [20, 18, 27, 50, 12, 15]} df = pd.DataFrame(data=data) df.plot.scatter(x='Name', y='Age', s=100)
code
73081571/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd import pandas as pd import pandas as pd days_of_week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'] calory_intake = [2385, 1543, 1293, 2019, 4201, 1203, 2309] weekly_calory_count = pd.DataFrame({'Days': days_of_week, 'Calories': calory_intake}) import pandas as pd import matplotlib.pyplot as plt ser = pd.Series([71, 67, 78, 90], index=('Q1', 'Q2', 'Q3', 'Q4')) ser.plot.bar(rot=5, title='Quarterly Sales(in Millions)') plt.show(block=True)
code
90138608/cell_9
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns data = pd.read_csv('../input/healthy-lifestyle-cities-report-2021/healthy_lifestyle_city_2021.csv') df = data.copy() df.columns sns.pairplot(df)
code
90138608/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/healthy-lifestyle-cities-report-2021/healthy_lifestyle_city_2021.csv') df = data.copy() df.info()
code
90138608/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/healthy-lifestyle-cities-report-2021/healthy_lifestyle_city_2021.csv') df = data.copy() df.columns len(df)
code
90138608/cell_2
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/healthy-lifestyle-cities-report-2021/healthy_lifestyle_city_2021.csv') df = data.copy() df.head()
code
90138608/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import seaborn as sns import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
90138608/cell_7
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns data = pd.read_csv('../input/healthy-lifestyle-cities-report-2021/healthy_lifestyle_city_2021.csv') df = data.copy() df.columns sns.catplot(x='Happiness levels(Country)', y='City', kind='bar', data=df.nlargest(10, 'Happiness levels(Country)'))
code
90138608/cell_8
[ "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns data = pd.read_csv('../input/healthy-lifestyle-cities-report-2021/healthy_lifestyle_city_2021.csv') df = data.copy() df.columns sns.kdeplot(x='Life expectancy(years) (Country)', data=df, shade=True)
code
90138608/cell_3
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/healthy-lifestyle-cities-report-2021/healthy_lifestyle_city_2021.csv') df = data.copy() df.tail()
code
90138608/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('../input/healthy-lifestyle-cities-report-2021/healthy_lifestyle_city_2021.csv') df = data.copy() df.columns
code
72088106/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os

def getFiles():
    """Dictionary to get the right files."""
    files = {}  # renamed from 'dict' to avoid shadowing the built-in
    for dirname, _, filenames in os.walk('/kaggle/input'):
        for filename in filenames:
            files[filename[0:filename.find('.')]] = pd.read_csv(os.path.join(dirname, filename))
    return files

def preprocessing():
    """Provide the training data for the model. Define the features (X) and the target (y) 'target'."""
    files = getFiles()
    train_data = files['train']
    y = train_data.target
    train_features = train_data.columns[1:-1]
    X = train_data[train_features]
    return (X, y)
train_data = getFiles()['train']  # fixed: train_data was not defined at module level
train_data.head()
code
72088106/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
72088106/cell_5
[ "text_html_output_1.png" ]
import os
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
import pandas as pd
import os

def getFiles():
    """Dictionary to get the right files."""
    files = {}  # renamed from 'dict' to avoid shadowing the built-in
    for dirname, _, filenames in os.walk('/kaggle/input'):
        for filename in filenames:
            files[filename[0:filename.find('.')]] = pd.read_csv(os.path.join(dirname, filename))
    return files

def preprocessing():
    """Provide the training data for the model. Define the features (X) and the target (y) 'target'."""
    files = getFiles()
    train_data = files['train']
    y = train_data.target
    train_features = train_data.columns[1:-1]
    X = train_data[train_features]
    return (X, y)
X, y = preprocessing()  # fixed: X was not defined at module level
X.head()
code
2035143/cell_1
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import numpy as np
import pandas as pd
from sklearn import preprocessing, model_selection, neighbors  # fixed: cross_validation was removed in scikit-learn 0.20
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
from sklearn import tree
import graphviz
from sklearn.model_selection import cross_val_score
df = pd.read_csv('../input/glass.csv')
print(df.head())
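# Illustrative sketch (added): a minimal KNN baseline with the imports above.
# Column names are assumed from the standard UCI glass dataset, where 'Type'
# is the class label — treat this as a hypothetical, not the author's model.
features = df.drop('Type', axis=1)
target = df['Type']
X_train, X_test, y_train, y_test = model_selection.train_test_split(features, target, test_size=0.2, random_state=0)
knn = neighbors.KNeighborsClassifier()
knn.fit(X_train, y_train)
print('KNN accuracy: %.3f' % knn.score(X_test, y_test))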
code