Dataset columns:
    path: string (length 13 to 17)
    screenshot_names: sequence (length 1 to 873)
    code: string (length 0 to 40.4k)
    cell_type: string (1 class)
18153807/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')

object_list = train.select_dtypes(include=['object']).columns
display(train[object_list].sample(10).T)
for f in object_list:
    print('Unique in column ', f, ' is -> ', len(train[f].unique()))

float_list = train.select_dtypes(include=['float64']).columns
display(train[float_list].sample(10).T)

int_list = train.select_dtypes(include=['int64']).columns
one_columns = []
for f in int_list:
    if len(train[f].unique()) == 1:
        one_columns.append(f)
train.drop(columns=one_columns, inplace=True)
test.drop(columns=one_columns, inplace=True)
code
18153807/cell_6
[ "text_html_output_2.png", "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')

object_list = train.select_dtypes(include=['object']).columns
float_list = train.select_dtypes(include=['float64']).columns
int_list = train.select_dtypes(include=['int64']).columns
one_columns = []
for f in int_list:
    if len(train[f].unique()) == 1:
        one_columns.append(f)
train.drop(columns=one_columns, inplace=True)
test.drop(columns=one_columns, inplace=True)

for f in object_list:
    le = LabelEncoder()
    le.fit(list(train[f].values) + list(test[f].values))
    train[f] = le.transform(list(train[f].values))
    test[f] = le.transform(list(test[f].values))

Y = train['y']
train.drop(columns=['y'], inplace=True, axis=1)

combine = pd.concat([train, test])
for f in object_list:
    temp = pd.get_dummies(combine[f])
    combine = pd.concat([combine, temp], axis=1)

train = combine[:train.shape[0]]
test = combine[train.shape[0]:]
print(train.shape)
print(test.shape)
print(Y.shape)
train_columns = train.columns
code
18153807/cell_1
[ "text_plain_output_1.png" ]
import os
import warnings
import numpy as np
import pandas as pd

import os
print(os.listdir('../input'))

import warnings
warnings.filterwarnings('ignore')
code
18153807/cell_8
[ "text_html_output_1.png" ]
from sklearn.preprocessing import LabelEncoder
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')

object_list = train.select_dtypes(include=['object']).columns
float_list = train.select_dtypes(include=['float64']).columns
int_list = train.select_dtypes(include=['int64']).columns
one_columns = []
for f in int_list:
    if len(train[f].unique()) == 1:
        one_columns.append(f)
train.drop(columns=one_columns, inplace=True)
test.drop(columns=one_columns, inplace=True)

for f in object_list:
    le = LabelEncoder()
    le.fit(list(train[f].values) + list(test[f].values))
    train[f] = le.transform(list(train[f].values))
    test[f] = le.transform(list(test[f].values))

Y = train['y']
train.drop(columns=['y'], inplace=True, axis=1)

combine = pd.concat([train, test])
for f in object_list:
    temp = pd.get_dummies(combine[f])
    combine = pd.concat([combine, temp], axis=1)

train = combine[:train.shape[0]]
test = combine[train.shape[0]:]
train_columns = train.columns

def df_column_uniquify(df):
    df_columns = df.columns
    new_columns = []
    for item in df_columns:
        counter = 0
        newitem = item
        while newitem in new_columns:
            counter += 1
            newitem = '{}_{}'.format(item, counter)
        new_columns.append(newitem)
    df.columns = new_columns
    return df

train = df_column_uniquify(train)
test = df_column_uniquify(test)
train['y'] = Y
original_col = list(test.drop(columns=object_list).columns)
display(train.head())
display(test.head())
code
130011087/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

coffee_code_df = pd.read_csv('/kaggle/input/coffee-and-code-dataset/CoffeeAndCodeLT2018 - CoffeeAndCodeLT2018.csv')
coffee_code_df = coffee_code_df.dropna(how='any')

sns.scatterplot(data=coffee_code_df, x='CodingHours', y='CoffeeCupsPerDay')
plt.xlabel('Coding Hours')
plt.ylabel('Coffee Cups per Day')
plt.title('Coding Hours vs Coffee Cups per Day')
plt.show()
code
130011087/cell_11
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

coffee_code_df = pd.read_csv('/kaggle/input/coffee-and-code-dataset/CoffeeAndCodeLT2018 - CoffeeAndCodeLT2018.csv')
coffee_code_df = coffee_code_df.dropna(how='any')

sns.countplot(data=coffee_code_df, x='Gender')
plt.xlabel('Gender')
plt.ylabel('Count')
plt.title('Gender Distribution')
plt.show()
code
130011087/cell_19
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

coffee_code_df = pd.read_csv('/kaggle/input/coffee-and-code-dataset/CoffeeAndCodeLT2018 - CoffeeAndCodeLT2018.csv')
coffee_code_df = coffee_code_df.dropna(how='any')

plt.xticks(rotation=45)
sns.boxplot(data=coffee_code_df, x='AgeRange', y='CodingHours')
plt.xlabel('Age Range')
plt.ylabel('Coding Hours')
plt.title('Distribution of Coding Hours by Age Range')
plt.show()
code
130011087/cell_1
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
import seaborn as sns

import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
130011087/cell_7
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

coffee_code_df = pd.read_csv('/kaggle/input/coffee-and-code-dataset/CoffeeAndCodeLT2018 - CoffeeAndCodeLT2018.csv')
coffee_code_df.describe()
coffee_code_df.info()
code
130011087/cell_15
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

coffee_code_df = pd.read_csv('/kaggle/input/coffee-and-code-dataset/CoffeeAndCodeLT2018 - CoffeeAndCodeLT2018.csv')
coffee_code_df = coffee_code_df.dropna(how='any')

sns.countplot(data=coffee_code_df, x='CoffeeTime')
plt.xlabel('Coffee Time')
plt.ylabel('Frequency')
plt.title('Frequency of Coffee Time')
plt.xticks(rotation=45)
plt.show()
code
130011087/cell_17
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

coffee_code_df = pd.read_csv('/kaggle/input/coffee-and-code-dataset/CoffeeAndCodeLT2018 - CoffeeAndCodeLT2018.csv')
coffee_code_df = coffee_code_df.dropna(how='any')

plt.xticks(rotation=45)
sns.barplot(data=coffee_code_df, x='Gender', y='CoffeeCupsPerDay')
plt.xlabel('Gender')
plt.ylabel('Average Coffee Cups per Day')
plt.title('Average Coffee Cups per Day by Gender')
plt.show()
code
130011087/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

coffee_code_df = pd.read_csv('/kaggle/input/coffee-and-code-dataset/CoffeeAndCodeLT2018 - CoffeeAndCodeLT2018.csv')
coffee_code_df.head(5)
code
18142262/cell_4
[ "text_html_output_1.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import numpy as np
import pandas as pd
import re
import nltk
import spacy

full_df = pd.read_csv('../input/twcs/twcs.csv', nrows=5000)
df = full_df[['text']]
df['text_lower'] = df['text'].str.lower()
df.head()
code
18142262/cell_2
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np
import pandas as pd
import re
import nltk
import spacy

full_df = pd.read_csv('../input/twcs/twcs.csv', nrows=5000)
df = full_df[['text']]
full_df.head()
code
33120214/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
33120214/cell_12
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from datetime import timedelta
from matplotlib.dates import WeekdayLocator, DateFormatter
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

covid19 = pd.read_csv('/kaggle/input/hospital-resources-during-covid19-pandemic/Hospitalization_all_locs.csv', parse_dates=['date'], usecols=['location_name', 'date', 'allbed_mean', 'ICUbed_mean', 'InvVen_mean'])
covid19.rename(columns={'location_name': 'state'}, inplace=True)

states_list = ['New York', 'Louisiana', 'Washington', 'California', 'Alabama']
covid19 = covid19[covid19['state'].isin(states_list)].copy()
covid19['Resources'] = covid19.loc[:, ['allbed_mean', 'ICUbed_mean', 'InvVen_mean']].sum(axis=1).div(1000)

fig, ax = plt.subplots(figsize=(20, 10))
for st in states_list:
    ax.plot(covid19[covid19.state == st].date, covid19[covid19.state == st].Resources, label=st)

ax.xaxis.set_major_locator(WeekdayLocator())
ax.xaxis.set_major_formatter(DateFormatter('%b %d'))
min_date = covid19.date[covid19.Resources != 0].min().date() - timedelta(days=7)
max_date = covid19.date[covid19.Resources != 0].max().date() + timedelta(days=7)
ax.set_xlim(min_date, max_date)
fig.autofmt_xdate()
ax.legend()

font_size = 14
plt.title('The hospital resources needed for COVID-19 patients across 5 different US States', fontsize=font_size + 2)
plt.ylabel('Total Resource Count (k)', fontsize=font_size)
plt.xlabel('Date', fontsize=font_size)
plt.show()
fig.savefig('Hospital_resource_use.png', bbox_inches='tight')
code
34117774/cell_20
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Dense, Flatten, Activation
import tensorflow as tf

configuration = tf.compat.v1.ConfigProto()
configuration.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=configuration)

img_rows, img_cols, channels = (28, 28, 1)
num_classes = 10

X_train = X_train / 255
X_test = X_test / 255
X_train = X_train.reshape((-1, img_rows, img_cols, channels))
X_test = X_test.reshape((-1, img_rows, img_cols, channels))
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)

def create_model(img_rows, img_cols, channels):
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), strides=(3, 3), padding='same', activation='relu', input_shape=(img_rows, img_cols, channels)))
    model.add(Conv2D(64, kernel_size=(3, 3), strides=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(64, kernel_size=(3, 3), strides=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(32))
    model.add(Dropout(0.2))
    model.add(Dense(32))
    model.add(Dropout(0.2))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
    return model

model = create_model(img_rows, img_cols, channels)
model.fit(X_train, y_train, batch_size=32, epochs=32, validation_data=(X_val, y_val))
print('Base accuracy on regular images: ', model.evaluate(X_test, y_test, verbose=0))
code
34117774/cell_26
[ "text_plain_output_1.png" ]
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Dense, Flatten, Activation
import tensorflow as tf

configuration = tf.compat.v1.ConfigProto()
configuration.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=configuration)

labels = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
img_rows, img_cols, channels = (28, 28, 1)
num_classes = 10

X_train = X_train / 255
X_test = X_test / 255
X_train = X_train.reshape((-1, img_rows, img_cols, channels))
X_test = X_test.reshape((-1, img_rows, img_cols, channels))
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)

def create_model(img_rows, img_cols, channels):
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), strides=(3, 3), padding='same', activation='relu', input_shape=(img_rows, img_cols, channels)))
    model.add(Conv2D(64, kernel_size=(3, 3), strides=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(64, kernel_size=(3, 3), strides=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(32))
    model.add(Dropout(0.2))
    model.add(Dense(32))
    model.add(Dropout(0.2))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
    return model

model = create_model(img_rows, img_cols, channels)
model.fit(X_train, y_train, batch_size=32, epochs=32, validation_data=(X_val, y_val))

def adversarial_pattern(image, label):
    image = tf.cast(image, tf.float32)
    with tf.GradientTape() as tape:
        tape.watch(image)
        prediction = model(image)
        loss = tf.keras.losses.MSE(label, prediction)
    gradient = tape.gradient(loss, image)
    signed_grad = tf.sign(gradient)
    return signed_grad

image = X_train[11]
image_label = y_train[11]
perturbations = adversarial_pattern(image.reshape((1, img_rows, img_cols, channels)), image_label).numpy()
adversarial = image + perturbations * 0.1
print('The true label was: {}'.format(labels[model.predict(image.reshape((1, img_rows, img_cols, channels))).argmax()]))
print('The prediction after the attack is: {}'.format(labels[model.predict(adversarial).argmax()]))
code
34117774/cell_11
[ "text_plain_output_1.png" ]
import tensorflow as tf

configuration = tf.compat.v1.ConfigProto()
configuration.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=configuration)

img_rows, img_cols, channels = (28, 28, 1)
num_classes = 10

X_train = X_train / 255
X_test = X_test / 255
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)

print('y_train set shape of {}'.format(y_train.shape))
print('y_test set shape of {}'.format(y_test.shape))
code
34117774/cell_19
[ "text_plain_output_1.png" ]
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Dense, Flatten, Activation
import tensorflow as tf

configuration = tf.compat.v1.ConfigProto()
configuration.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=configuration)

img_rows, img_cols, channels = (28, 28, 1)
num_classes = 10

X_train = X_train / 255
X_test = X_test / 255
X_train = X_train.reshape((-1, img_rows, img_cols, channels))
X_test = X_test.reshape((-1, img_rows, img_cols, channels))
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)

def create_model(img_rows, img_cols, channels):
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), strides=(3, 3), padding='same', activation='relu', input_shape=(img_rows, img_cols, channels)))
    model.add(Conv2D(64, kernel_size=(3, 3), strides=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(64, kernel_size=(3, 3), strides=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(32))
    model.add(Dropout(0.2))
    model.add(Dense(32))
    model.add(Dropout(0.2))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
    return model

model = create_model(img_rows, img_cols, channels)
model.fit(X_train, y_train, batch_size=32, epochs=32, validation_data=(X_val, y_val))
code
34117774/cell_32
[ "text_plain_output_1.png", "image_output_1.png" ]
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Dense, Flatten, Activation
import tensorflow as tf

configuration = tf.compat.v1.ConfigProto()
configuration.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=configuration)

labels = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
img_rows, img_cols, channels = (28, 28, 1)
num_classes = 10

X_train = X_train / 255
X_test = X_test / 255
X_train = X_train.reshape((-1, img_rows, img_cols, channels))
X_test = X_test.reshape((-1, img_rows, img_cols, channels))
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)

def create_model(img_rows, img_cols, channels):
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), strides=(3, 3), padding='same', activation='relu', input_shape=(img_rows, img_cols, channels)))
    model.add(Conv2D(64, kernel_size=(3, 3), strides=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(64, kernel_size=(3, 3), strides=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(32))
    model.add(Dropout(0.2))
    model.add(Dense(32))
    model.add(Dropout(0.2))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
    return model

model = create_model(img_rows, img_cols, channels)
model.fit(X_train, y_train, batch_size=32, epochs=32, validation_data=(X_val, y_val))

def adversarial_pattern(image, label):
    image = tf.cast(image, tf.float32)
    with tf.GradientTape() as tape:
        tape.watch(image)
        prediction = model(image)
        loss = tf.keras.losses.MSE(label, prediction)
    gradient = tape.gradient(loss, image)
    signed_grad = tf.sign(gradient)
    return signed_grad

image = X_train[11]
image_label = y_train[11]
perturbations = adversarial_pattern(image.reshape((1, img_rows, img_cols, channels)), image_label).numpy()
adversarial = image + perturbations * 0.1
print('Base accuracy on adversarial images: {}'.format(model.evaluate(X_adversarial_test, y_adversarial_test, verbose=0)))
code
34117774/cell_28
[ "image_output_1.png" ]
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Dense, Flatten, Activation
import matplotlib.pyplot as plt
import tensorflow as tf

configuration = tf.compat.v1.ConfigProto()
configuration.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=configuration)

labels = ['zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine']
img_rows, img_cols, channels = (28, 28, 1)
num_classes = 10

X_train = X_train / 255
X_test = X_test / 255
X_train = X_train.reshape((-1, img_rows, img_cols, channels))
X_test = X_test.reshape((-1, img_rows, img_cols, channels))
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)

def create_model(img_rows, img_cols, channels):
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), strides=(3, 3), padding='same', activation='relu', input_shape=(img_rows, img_cols, channels)))
    model.add(Conv2D(64, kernel_size=(3, 3), strides=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(64, kernel_size=(3, 3), strides=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(32))
    model.add(Dropout(0.2))
    model.add(Dense(32))
    model.add(Dropout(0.2))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
    return model

model = create_model(img_rows, img_cols, channels)
model.fit(X_train, y_train, batch_size=32, epochs=32, validation_data=(X_val, y_val))

def adversarial_pattern(image, label):
    image = tf.cast(image, tf.float32)
    with tf.GradientTape() as tape:
        tape.watch(image)
        prediction = model(image)
        loss = tf.keras.losses.MSE(label, prediction)
    gradient = tape.gradient(loss, image)
    signed_grad = tf.sign(gradient)
    return signed_grad

image = X_train[11]
image_label = y_train[11]
perturbations = adversarial_pattern(image.reshape((1, img_rows, img_cols, channels)), image_label).numpy()
adversarial = image + perturbations * 0.1
plt.imshow(image.reshape((img_rows, img_cols)))
code
34117774/cell_8
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
print('X_train set shape of {}'.format(X_train.shape))
print('X_test set shape of {}'.format(X_test.shape))
print('y_train set shape of {}'.format(y_train.shape))
print('y_test set shape of {}'.format(y_test.shape))
code
34117774/cell_14
[ "text_plain_output_1.png" ]
import tensorflow as tf

configuration = tf.compat.v1.ConfigProto()
configuration.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=configuration)

img_rows, img_cols, channels = (28, 28, 1)
num_classes = 10

X_train = X_train / 255
X_test = X_test / 255
X_train = X_train.reshape((-1, img_rows, img_cols, channels))
X_test = X_test.reshape((-1, img_rows, img_cols, channels))
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)

print('X_train set shape of {}'.format(X_train.shape))
print('X_val set shape of {}'.format(X_val.shape))
print('y_train set shape of {}'.format(y_train.shape))
print('y_val set shape of {}'.format(y_val.shape))
code
34117774/cell_10
[ "text_plain_output_1.png" ]
img_rows, img_cols, channels = (28, 28, 1)
num_classes = 10

X_train = X_train / 255
X_test = X_test / 255
X_train = X_train.reshape((-1, img_rows, img_cols, channels))
X_test = X_test.reshape((-1, img_rows, img_cols, channels))

print('X_train set shape of {}'.format(X_train.shape))
print('X_test set shape of {}'.format(X_test.shape))
code
34117774/cell_27
[ "text_plain_output_1.png" ]
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Dense, Flatten, Activation
import matplotlib.pyplot as plt
import tensorflow as tf

configuration = tf.compat.v1.ConfigProto()
configuration.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=configuration)

img_rows, img_cols, channels = (28, 28, 1)
num_classes = 10

X_train = X_train / 255
X_test = X_test / 255
X_train = X_train.reshape((-1, img_rows, img_cols, channels))
X_test = X_test.reshape((-1, img_rows, img_cols, channels))
y_train = tf.keras.utils.to_categorical(y_train, num_classes)
y_test = tf.keras.utils.to_categorical(y_test, num_classes)

def create_model(img_rows, img_cols, channels):
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), strides=(3, 3), padding='same', activation='relu', input_shape=(img_rows, img_cols, channels)))
    model.add(Conv2D(64, kernel_size=(3, 3), strides=(3, 3), padding='same', activation='relu'))
    model.add(Conv2D(64, kernel_size=(3, 3), strides=(3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(32))
    model.add(Dropout(0.2))
    model.add(Dense(32))
    model.add(Dropout(0.2))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
    return model

model = create_model(img_rows, img_cols, channels)
model.fit(X_train, y_train, batch_size=32, epochs=32, validation_data=(X_val, y_val))

def adversarial_pattern(image, label):
    image = tf.cast(image, tf.float32)
    with tf.GradientTape() as tape:
        tape.watch(image)
        prediction = model(image)
        loss = tf.keras.losses.MSE(label, prediction)
    gradient = tape.gradient(loss, image)
    signed_grad = tf.sign(gradient)
    return signed_grad

image = X_train[11]
image_label = y_train[11]
perturbations = adversarial_pattern(image.reshape((1, img_rows, img_cols, channels)), image_label).numpy()
adversarial = image + perturbations * 0.1
if channels == 1:
    plt.imshow(adversarial.reshape((img_rows, img_cols)))
else:
    plt.imshow(adversarial.reshape((img_rows, img_cols, channels)))
code
121148913/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

raw_behaviour = pd.read_csv('/kaggle/input/mind-news-dataset/MINDsmall_train/behaviors.tsv', sep='\t', names=['impressionId', 'userId', 'timestamp', 'click_history', 'impressions'])
print(f'The dataset consist of {len(raw_behaviour)} number of interactions.')
raw_behaviour.head()
code
121148913/cell_23
[ "text_html_output_1.png", "text_plain_output_1.png" ]
from collections import Counter
from torch.utils.data import Dataset, DataLoader
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F

raw_behaviour = pd.read_csv('/kaggle/input/mind-news-dataset/MINDsmall_train/behaviors.tsv', sep='\t', names=['impressionId', 'userId', 'timestamp', 'click_history', 'impressions'])
news = pd.read_csv('/kaggle/input/mind-news-dataset/MINDsmall_train/news.tsv', sep='\t', names=['itemId', 'category', 'subcategory', 'title', 'abstract', 'url', 'title_entities', 'abstract_entities'])

def process_impression(impression_list):
    list_of_strings = impression_list.split()
    click = [x.split('-')[0] for x in list_of_strings if x.split('-')[1] == '1']
    non_click = [x.split('-')[0] for x in list_of_strings if x.split('-')[1] == '0']
    return (click, non_click)

raw_behaviour['click'], raw_behaviour['noclicks'] = zip(*raw_behaviour['impressions'].map(process_impression))
raw_behaviour['epochhrs'] = pd.to_datetime(raw_behaviour['timestamp']).values.astype(np.int64) / 1000000.0 / 1000 / 3600
raw_behaviour['epochhrs'] = raw_behaviour['epochhrs'].round()
raw_behaviour = raw_behaviour.explode('click').reset_index(drop=True)

click_history = raw_behaviour[['userId', 'click_history']].drop_duplicates().dropna()
click_history['click_history'] = click_history.click_history.map(lambda x: x.split())
click_history = click_history.explode('click_history').rename(columns={'click_history': 'click'})
click_history['epochhrs'] = raw_behaviour.epochhrs.min()
click_history['noclicks'] = pd.Series([[] for _ in range(len(click_history.index))])
raw_behaviour = pd.concat([raw_behaviour, click_history], axis=0).reset_index(drop=True)

min_click_cutoff = 100
raw_behaviour = raw_behaviour[raw_behaviour.groupby('click')['userId'].transform('size') >= min_click_cutoff].reset_index(drop=True)
click_set = set(raw_behaviour['click'].unique())
raw_behaviour['noclicks'] = raw_behaviour['noclicks'].apply(lambda impressions: [impression for impression in impressions if impression in click_set])
behaviour = raw_behaviour[['epochhrs', 'userId', 'click', 'noclicks']].copy()

test_time_th = behaviour['epochhrs'].quantile(0.9)
train = behaviour[behaviour['epochhrs'] < test_time_th].copy()
ind2item = {idx + 1: itemid for idx, itemid in enumerate(train.click.unique())}
item2ind = {itemid: idx for idx, itemid in ind2item.items()}
train['noclicks'] = train['noclicks'].map(lambda list_of_items: [item2ind.get(l, 0) for l in list_of_items])
train['click'] = train['click'].map(lambda item: item2ind.get(item, 0))
ind2user = {idx + 1: userid for idx, userid in enumerate(train['userId'].unique())}
user2ind = {userid: idx for idx, userid in ind2user.items()}
train['userIdx'] = train['userId'].map(lambda x: user2ind.get(x, 0))

valid = behaviour[behaviour['epochhrs'] >= test_time_th].copy()
valid['click'] = valid['click'].map(lambda item: item2ind.get(item, 0))
valid['noclicks'] = valid['noclicks'].map(lambda list_of_items: [item2ind.get(l, 0) for l in list_of_items])
valid['userIdx'] = valid['userId'].map(lambda x: user2ind.get(x, 0))

class MindDataset(Dataset):

    def __init__(self, df):
        self.data = {'userIdx': torch.tensor(df.userIdx.values.astype(np.int64)), 'click': torch.tensor(df.click.values.astype(np.int64))}

    def __len__(self):
        return len(self.data['userIdx'])

    def __getitem__(self, idx):
        return {key: val[idx] for key, val in self.data.items()}

bs = 1024
ds_train = MindDataset(train)
train_loader = DataLoader(ds_train, batch_size=bs, shuffle=True)
ds_valid = MindDataset(valid)
valid_loader = DataLoader(ds_valid, batch_size=bs, shuffle=False)
batch = next(iter(train_loader))

class NewsMF(pl.LightningModule):

    def __init__(self, num_users, num_items, dim=10):
        super().__init__()
        self.dim = dim
        self.useremb = nn.Embedding(num_embeddings=num_users, embedding_dim=dim)
        self.itememb = nn.Embedding(num_embeddings=num_items, embedding_dim=dim)
        self.num_users = num_users
        self.num_items = num_items

    def step(self, batch, batch_idx, phase='train'):
        batch_size = batch['userIdx'].size(0)
        uservec = self.useremb(batch['userIdx'])
        itemvec_click = self.itememb(batch['click'])
        neg_sample = torch.randint_like(batch['click'], 1, self.num_items)
        itemvec_noclick = self.itememb(neg_sample)
        score_click = torch.sigmoid((uservec * itemvec_click).sum(-1).unsqueeze(-1))
        score_noclick = torch.sigmoid((uservec * itemvec_noclick).sum(-1).unsqueeze(-1))
        scores_all = torch.concat((score_click, score_noclick), dim=1)
        target_all = torch.concat((torch.ones_like(score_click), torch.zeros_like(score_noclick)), dim=1)
        loss = F.binary_cross_entropy(scores_all, target_all)
        return loss

    def training_step(self, batch, batch_idx):
        return self.step(batch, batch_idx, 'train')

    def validation_step(self, batch, batch_idx):
        return self.step(batch, batch_idx, 'val')

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=0.001)
        return optimizer

mf_model = NewsMF(num_users=len(ind2user) + 1, num_items=len(ind2item) + 1, dim=50)
trainer = pl.Trainer(max_epochs=50, accelerator='gpu')
trainer.fit(model=mf_model, train_dataloaders=train_loader)

itememb = mf_model.itememb.weight.detach()
news['ind'] = news['itemId'].map(item2ind)
news = news.sort_values('ind').reset_index(drop=True)
news['n_click_training'] = news['ind'].map(dict(Counter(train.click)))
news.sort_values('n_click_training', ascending=False).head(15)
code
121148913/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

raw_behaviour = pd.read_csv('/kaggle/input/mind-news-dataset/MINDsmall_train/behaviors.tsv', sep='\t', names=['impressionId', 'userId', 'timestamp', 'click_history', 'impressions'])
news = pd.read_csv('/kaggle/input/mind-news-dataset/MINDsmall_train/news.tsv', sep='\t', names=['itemId', 'category', 'subcategory', 'title', 'abstract', 'url', 'title_entities', 'abstract_entities'])
print(f'The article data consist in total of {len(news)} number of articles.')
news.head()
code
121148913/cell_15
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

raw_behaviour = pd.read_csv('/kaggle/input/mind-news-dataset/MINDsmall_train/behaviors.tsv', sep='\t', names=['impressionId', 'userId', 'timestamp', 'click_history', 'impressions'])
news = pd.read_csv('/kaggle/input/mind-news-dataset/MINDsmall_train/news.tsv', sep='\t', names=['itemId', 'category', 'subcategory', 'title', 'abstract', 'url', 'title_entities', 'abstract_entities'])

def process_impression(impression_list):
    list_of_strings = impression_list.split()
    click = [x.split('-')[0] for x in list_of_strings if x.split('-')[1] == '1']
    non_click = [x.split('-')[0] for x in list_of_strings if x.split('-')[1] == '0']
    return (click, non_click)

raw_behaviour['click'], raw_behaviour['noclicks'] = zip(*raw_behaviour['impressions'].map(process_impression))
raw_behaviour['epochhrs'] = pd.to_datetime(raw_behaviour['timestamp']).values.astype(np.int64) / 1000000.0 / 1000 / 3600
raw_behaviour['epochhrs'] = raw_behaviour['epochhrs'].round()
raw_behaviour = raw_behaviour.explode('click').reset_index(drop=True)

click_history = raw_behaviour[['userId', 'click_history']].drop_duplicates().dropna()
click_history['click_history'] = click_history.click_history.map(lambda x: x.split())
click_history = click_history.explode('click_history').rename(columns={'click_history': 'click'})
click_history['epochhrs'] = raw_behaviour.epochhrs.min()
click_history['noclicks'] = pd.Series([[] for _ in range(len(click_history.index))])
raw_behaviour = pd.concat([raw_behaviour, click_history], axis=0).reset_index(drop=True)

min_click_cutoff = 100
raw_behaviour = raw_behaviour[raw_behaviour.groupby('click')['userId'].transform('size') >= min_click_cutoff].reset_index(drop=True)
click_set = set(raw_behaviour['click'].unique())
raw_behaviour['noclicks'] = raw_behaviour['noclicks'].apply(lambda impressions: [impression for impression in impressions if impression in click_set])
behaviour = raw_behaviour[['epochhrs', 'userId', 'click', 'noclicks']].copy()

print('Number of interactions in the behaviour dataset:', behaviour.shape[0])
print('Number of users in the behaviour dataset:', behaviour.userId.nunique())
print('Number of articles in the behaviour dataset:', behaviour.click.nunique())
behaviour.head()
code
121148913/cell_22
[ "text_plain_output_1.png" ]
from torch.utils.data import Dataset, DataLoader
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F

raw_behaviour = pd.read_csv('/kaggle/input/mind-news-dataset/MINDsmall_train/behaviors.tsv', sep='\t', names=['impressionId', 'userId', 'timestamp', 'click_history', 'impressions'])
news = pd.read_csv('/kaggle/input/mind-news-dataset/MINDsmall_train/news.tsv', sep='\t', names=['itemId', 'category', 'subcategory', 'title', 'abstract', 'url', 'title_entities', 'abstract_entities'])

def process_impression(impression_list):
    list_of_strings = impression_list.split()
    click = [x.split('-')[0] for x in list_of_strings if x.split('-')[1] == '1']
    non_click = [x.split('-')[0] for x in list_of_strings if x.split('-')[1] == '0']
    return (click, non_click)

raw_behaviour['click'], raw_behaviour['noclicks'] = zip(*raw_behaviour['impressions'].map(process_impression))
raw_behaviour['epochhrs'] = pd.to_datetime(raw_behaviour['timestamp']).values.astype(np.int64) / 1000000.0 / 1000 / 3600
raw_behaviour['epochhrs'] = raw_behaviour['epochhrs'].round()
raw_behaviour = raw_behaviour.explode('click').reset_index(drop=True)

click_history = raw_behaviour[['userId', 'click_history']].drop_duplicates().dropna()
click_history['click_history'] = click_history.click_history.map(lambda x: x.split())
click_history = click_history.explode('click_history').rename(columns={'click_history': 'click'})
click_history['epochhrs'] = raw_behaviour.epochhrs.min()
click_history['noclicks'] = pd.Series([[] for _ in range(len(click_history.index))])
raw_behaviour = pd.concat([raw_behaviour, click_history], axis=0).reset_index(drop=True)

min_click_cutoff = 100
raw_behaviour = raw_behaviour[raw_behaviour.groupby('click')['userId'].transform('size') >= min_click_cutoff].reset_index(drop=True)
click_set = set(raw_behaviour['click'].unique())
raw_behaviour['noclicks'] = raw_behaviour['noclicks'].apply(lambda impressions: [impression for impression in impressions if impression in click_set])
behaviour = raw_behaviour[['epochhrs', 'userId', 'click', 'noclicks']].copy()

test_time_th = behaviour['epochhrs'].quantile(0.9)
train = behaviour[behaviour['epochhrs'] < test_time_th].copy()
ind2item = {idx + 1: itemid for idx, itemid in enumerate(train.click.unique())}
item2ind = {itemid: idx for idx, itemid in ind2item.items()}
train['noclicks'] = train['noclicks'].map(lambda list_of_items: [item2ind.get(l, 0) for l in list_of_items])
train['click'] = train['click'].map(lambda item: item2ind.get(item, 0))
ind2user = {idx + 1: userid for idx, userid in enumerate(train['userId'].unique())}
user2ind = {userid: idx for idx, userid in ind2user.items()}
train['userIdx'] = train['userId'].map(lambda x: user2ind.get(x, 0))

valid = behaviour[behaviour['epochhrs'] >= test_time_th].copy()
valid['click'] = valid['click'].map(lambda item: item2ind.get(item, 0))
valid['noclicks'] = valid['noclicks'].map(lambda list_of_items: [item2ind.get(l, 0) for l in list_of_items])
valid['userIdx'] = valid['userId'].map(lambda x: user2ind.get(x, 0))

class MindDataset(Dataset):

    def __init__(self, df):
        self.data = {'userIdx': torch.tensor(df.userIdx.values.astype(np.int64)), 'click': torch.tensor(df.click.values.astype(np.int64))}

    def __len__(self):
        return len(self.data['userIdx'])

    def __getitem__(self, idx):
        return {key: val[idx] for key, val in self.data.items()}

bs = 1024
ds_train = MindDataset(train)
train_loader = DataLoader(ds_train, batch_size=bs, shuffle=True)
ds_valid = MindDataset(valid)
valid_loader = DataLoader(ds_valid, batch_size=bs, shuffle=False)
batch = next(iter(train_loader))

class NewsMF(pl.LightningModule):

    def __init__(self, num_users, num_items, dim=10):
        super().__init__()
        self.dim = dim
        self.useremb = nn.Embedding(num_embeddings=num_users, embedding_dim=dim)
        self.itememb = nn.Embedding(num_embeddings=num_items, embedding_dim=dim)
        self.num_users = num_users
        self.num_items = num_items

    def step(self, batch, batch_idx, phase='train'):
        batch_size = batch['userIdx'].size(0)
        uservec = self.useremb(batch['userIdx'])
        itemvec_click = self.itememb(batch['click'])
        neg_sample = torch.randint_like(batch['click'], 1, self.num_items)
        itemvec_noclick = self.itememb(neg_sample)
        score_click = torch.sigmoid((uservec * itemvec_click).sum(-1).unsqueeze(-1))
        score_noclick = torch.sigmoid((uservec * itemvec_noclick).sum(-1).unsqueeze(-1))
        scores_all = torch.concat((score_click, score_noclick), dim=1)
        target_all = torch.concat((torch.ones_like(score_click), torch.zeros_like(score_noclick)), dim=1)
        loss = F.binary_cross_entropy(scores_all, target_all)
        return loss

    def training_step(self, batch, batch_idx):
        return self.step(batch, batch_idx, 'train')

    def validation_step(self, batch, batch_idx):
        return self.step(batch, batch_idx, 'val')

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=0.001)
        return optimizer

mf_model = NewsMF(num_users=len(ind2user) + 1, num_items=len(ind2item) + 1, dim=50)
trainer = pl.Trainer(max_epochs=50, accelerator='gpu')
trainer.fit(model=mf_model, train_dataloaders=train_loader)
code
121148913/cell_12
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

raw_behaviour = pd.read_csv('/kaggle/input/mind-news-dataset/MINDsmall_train/behaviors.tsv', sep='\t', names=['impressionId', 'userId', 'timestamp', 'click_history', 'impressions'])
news = pd.read_csv('/kaggle/input/mind-news-dataset/MINDsmall_train/news.tsv', sep='\t', names=['itemId', 'category', 'subcategory', 'title', 'abstract', 'url', 'title_entities', 'abstract_entities'])

def process_impression(impression_list):
    list_of_strings = impression_list.split()
    click = [x.split('-')[0] for x in list_of_strings if x.split('-')[1] == '1']
    non_click = [x.split('-')[0] for x in list_of_strings if x.split('-')[1] == '0']
    return (click, non_click)

raw_behaviour['click'], raw_behaviour['noclicks'] = zip(*raw_behaviour['impressions'].map(process_impression))
raw_behaviour['epochhrs'] = pd.to_datetime(raw_behaviour['timestamp']).values.astype(np.int64) / 1000000.0 / 1000 / 3600
raw_behaviour['epochhrs'] = raw_behaviour['epochhrs'].round()
raw_behaviour = raw_behaviour.explode('click').reset_index(drop=True)

click_history = raw_behaviour[['userId', 'click_history']].drop_duplicates().dropna()
click_history['click_history'] = click_history.click_history.map(lambda x: x.split())
click_history = click_history.explode('click_history').rename(columns={'click_history': 'click'})
click_history['epochhrs'] = raw_behaviour.epochhrs.min()
click_history['noclicks'] = pd.Series([[] for _ in range(len(click_history.index))])
raw_behaviour = pd.concat([raw_behaviour, click_history], axis=0).reset_index(drop=True)

min_click_cutoff = 100
print(f'Number of items that have less than {min_click_cutoff} clicks make up', np.round(np.mean(raw_behaviour.groupby('click').size() < min_click_cutoff) * 100, 3), '% of the total, and these will be removed.')
code
49124155/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
target = train['Survived']
m = pd.DataFrame(test['PassengerId'])
print('Shape of train:', train.shape)
print('Shape of test:', test.shape)
code
49124155/cell_2
[ "text_plain_output_1.png" ]
test
code
49124155/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
49124155/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
target = train['Survived']
m = pd.DataFrame(test['PassengerId'])

import seaborn as sns
import matplotlib.pyplot as plt

dataset = pd.concat([train.drop('Survived', axis=1), test])
dataset.isnull().sum()

a = dataset.groupby('Pclass')['Age'].median()
dataset['Age'] = dataset['Age'].fillna(dataset['Pclass'].map(a))
a = dataset.groupby('Pclass')['Fare'].median()
dataset['Fare'] = dataset['Fare'].fillna(dataset['Pclass'].map(a))
dataset['Embarked'].fillna('S', inplace=True)

dataset['Passenger'] = dataset['SibSp'] + dataset['Parch'] + 1
dataset
code
49124155/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import GradientBoostingClassifier
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
target = train['Survived']
m = pd.DataFrame(test['PassengerId'])

import seaborn as sns
import matplotlib.pyplot as plt

dataset = pd.concat([train.drop('Survived', axis=1), test])
dataset.isnull().sum()

a = dataset.groupby('Pclass')['Age'].median()
dataset['Age'] = dataset['Age'].fillna(dataset['Pclass'].map(a))
a = dataset.groupby('Pclass')['Fare'].median()
dataset['Fare'] = dataset['Fare'].fillna(dataset['Pclass'].map(a))
dataset['Embarked'].fillna('S', inplace=True)

dataset['Name'].iloc[3].split()[1]
a = []
for i in range(len(dataset)):
    a.append(dataset['Name'].iloc[i].split()[1])
a = pd.Series(a)
dataset['Title'] = a

a = [i for i in dataset.columns if dataset[i].dtypes == 'object']
b = dataset[a]
for i in b.columns:
    dataset[i] = dataset[i].factorize()[0]

train = dataset[:len(train)]
test = dataset[len(train):]

from sklearn.ensemble import GradientBoostingClassifier
sky = GradientBoostingClassifier()
sky.fit(train, target)
code
49124155/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('/kaggle/input/titanic/train.csv')
test = pd.read_csv('/kaggle/input/titanic/test.csv')
target = train['Survived']
m = pd.DataFrame(test['PassengerId'])

import seaborn as sns
import matplotlib.pyplot as plt

dataset = pd.concat([train.drop('Survived', axis=1), test])
dataset.isnull().sum()
code
72082831/cell_4
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/tabulardata-kfolds-created/train_folds.csv')
df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv')
sample_submission = pd.read_csv('../input/tabular-playground-series-aug-2021/sample_submission.csv')

useful_features = [c for c in df_train.columns if c not in ('id', 'loss', 'kfold')]
df_train[useful_features]
code
72082831/cell_6
[ "text_plain_output_1.png" ]
from sklearn.metrics import mean_squared_error
from xgboost import XGBRegressor
import pandas as pd

df_train = pd.read_csv('../input/tabulardata-kfolds-created/train_folds.csv')
df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv')
sample_submission = pd.read_csv('../input/tabular-playground-series-aug-2021/sample_submission.csv')

useful_features = [c for c in df_train.columns if c not in ('id', 'loss', 'kfold')]
df_train[useful_features]

from xgboost import XGBRegressor

xtrain = df_train[df_train.kfold != 3]
xvalid = df_train[df_train.kfold == 3]
ytrain = xtrain['loss']
xtrain = xtrain[useful_features]
yvalid = xvalid['loss']
xvalid = xvalid[useful_features]

model = XGBRegressor(n_estimators=500, random_state=3)
model.fit(xtrain, ytrain, early_stopping_rounds=5, eval_set=[(xvalid, yvalid)], verbose=False)
preds_valid = model.predict(xvalid)
print(mean_squared_error(yvalid, preds_valid, squared=False))
code
72082831/cell_3
[ "text_html_output_1.png" ]
import pandas as pd

df_train = pd.read_csv('../input/tabulardata-kfolds-created/train_folds.csv')
df_test = pd.read_csv('../input/tabular-playground-series-aug-2021/test.csv')
sample_submission = pd.read_csv('../input/tabular-playground-series-aug-2021/sample_submission.csv')
sample_submission.head()
code
90133716/cell_13
[ "text_html_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import classification_report, precision_score
from sklearn.model_selection import train_test_split
import pandas as pd
import pandas as pd

df = pd.read_csv('../input/HR_comma_sep.csv')
sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False)
sales_salary = sales_salary[['low', 'medium', 'high']]
sales_salary['temp'] = sales_salary.index.values
sales_salary.iloc[0, 3] = 'it'
sales_salary.iloc[1, 3] = 'rand_d'
sales_salary.sort_values(by='temp', inplace=True)
sales_salary.set_index('temp', inplace=True)
sales_salary.index.name = 'sales'
sales_salary
df = df.join(pd.get_dummies(df['salary']))
df = df.join(pd.get_dummies(df['sales']), rsuffix='d')
df.drop(labels=['sales', 'salary'], inplace=True, axis=1)
y = df['left']
df.drop(labels='left', axis=1, inplace=True)

from sklearn.model_selection import train_test_split
xtr, xte, ytr, yte = train_test_split(df, y, test_size=0.25)
(xtr.shape, xte.shape, ytr.shape, yte.shape)

from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier()
rf.fit(xtr, ytr)
print(precision_score(ytr, rf.predict(xtr)))
print(classification_report(yte, rf.predict(xte)))
code
90133716/cell_9
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, precision_score
from sklearn.model_selection import train_test_split
import pandas as pd
import pandas as pd

df = pd.read_csv('../input/HR_comma_sep.csv')
sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False)
sales_salary = sales_salary[['low', 'medium', 'high']]
sales_salary['temp'] = sales_salary.index.values
sales_salary.iloc[0, 3] = 'it'
sales_salary.iloc[1, 3] = 'rand_d'
sales_salary.sort_values(by='temp', inplace=True)
sales_salary.set_index('temp', inplace=True)
sales_salary.index.name = 'sales'
sales_salary
df = df.join(pd.get_dummies(df['salary']))
df = df.join(pd.get_dummies(df['sales']), rsuffix='d')
df.drop(labels=['sales', 'salary'], inplace=True, axis=1)
y = df['left']
df.drop(labels='left', axis=1, inplace=True)

from sklearn.model_selection import train_test_split
xtr, xte, ytr, yte = train_test_split(df, y, test_size=0.25)
(xtr.shape, xte.shape, ytr.shape, yte.shape)

from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, precision_score
lr = LogisticRegression()
lr.fit(xtr, ytr)
precision_score(ytr, lr.predict(xtr))
code
90133716/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np
import matplotlib.pyplot as plt

colormap = plt.cm.get_cmap('Greens')
fig, ax = plt.subplots(figsize=(12, 3))
plot = ax.pcolor(sales_salary.T, cmap=colormap, edgecolor='black')
ax.set_xlabel('sales')
ax.set_xticks(np.arange(len(sales_salary.index.values)) + 0.5)
ax.set_xticklabels(sales_salary.index.values)
ax.set_ylabel('salary')
ax.set_yticks(np.arange(len(sales_salary.columns.values)) + 0.5)
ax.set_yticklabels(sales_salary.columns.values)
cbar = fig.colorbar(plot)
cbar.ax.set_ylabel('quantity', rotation=360)
cbar.ax.get_yaxis().labelpad = 25
code
90133716/cell_6
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd

df = pd.read_csv('../input/HR_comma_sep.csv')
sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False)
sales_salary = sales_salary[['low', 'medium', 'high']]
sales_salary['temp'] = sales_salary.index.values
sales_salary.iloc[0, 3] = 'it'
sales_salary.iloc[1, 3] = 'rand_d'
sales_salary.sort_values(by='temp', inplace=True)
sales_salary.set_index('temp', inplace=True)
sales_salary.index.name = 'sales'
sales_salary
df = df.join(pd.get_dummies(df['salary']))
df = df.join(pd.get_dummies(df['sales']), rsuffix='d')
df.drop(labels=['sales', 'salary'], inplace=True, axis=1)
df['left'].describe()
code
90133716/cell_2
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import pandas as pd

df = pd.read_csv('../input/HR_comma_sep.csv')
df.head()
code
90133716/cell_11
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegressionCV
from sklearn.metrics import classification_report, precision_score
from sklearn.model_selection import train_test_split
import pandas as pd
import pandas as pd

df = pd.read_csv('../input/HR_comma_sep.csv')
sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False)
sales_salary = sales_salary[['low', 'medium', 'high']]
sales_salary['temp'] = sales_salary.index.values
sales_salary.iloc[0, 3] = 'it'
sales_salary.iloc[1, 3] = 'rand_d'
sales_salary.sort_values(by='temp', inplace=True)
sales_salary.set_index('temp', inplace=True)
sales_salary.index.name = 'sales'
sales_salary
df = df.join(pd.get_dummies(df['salary']))
df = df.join(pd.get_dummies(df['sales']), rsuffix='d')
df.drop(labels=['sales', 'salary'], inplace=True, axis=1)
y = df['left']
df.drop(labels='left', axis=1, inplace=True)

from sklearn.model_selection import train_test_split
xtr, xte, ytr, yte = train_test_split(df, y, test_size=0.25)
(xtr.shape, xte.shape, ytr.shape, yte.shape)

from sklearn.linear_model import LogisticRegressionCV
lr_cv = LogisticRegressionCV()
lr_cv.fit(xtr, ytr)
precision_score(ytr, lr_cv.predict(xtr))
pd.DataFrame(lr_cv.scores_[1]).T.plot()
code
90133716/cell_8
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd
import pandas as pd

df = pd.read_csv('../input/HR_comma_sep.csv')
sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False)
sales_salary = sales_salary[['low', 'medium', 'high']]
sales_salary['temp'] = sales_salary.index.values
sales_salary.iloc[0, 3] = 'it'
sales_salary.iloc[1, 3] = 'rand_d'
sales_salary.sort_values(by='temp', inplace=True)
sales_salary.set_index('temp', inplace=True)
sales_salary.index.name = 'sales'
sales_salary
df = df.join(pd.get_dummies(df['salary']))
df = df.join(pd.get_dummies(df['sales']), rsuffix='d')
df.drop(labels=['sales', 'salary'], inplace=True, axis=1)
y = df['left']
df.drop(labels='left', axis=1, inplace=True)

from sklearn.model_selection import train_test_split
xtr, xte, ytr, yte = train_test_split(df, y, test_size=0.25)
(xtr.shape, xte.shape, ytr.shape, yte.shape)
code
90133716/cell_16
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegressionCV
from sklearn.metrics import classification_report
from sklearn.metrics import classification_report, precision_score
from sklearn.model_selection import train_test_split
import pandas as pd
import pandas as pd

df = pd.read_csv('../input/HR_comma_sep.csv')
sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False)
sales_salary = sales_salary[['low', 'medium', 'high']]
sales_salary['temp'] = sales_salary.index.values
sales_salary.iloc[0, 3] = 'it'
sales_salary.iloc[1, 3] = 'rand_d'
sales_salary.sort_values(by='temp', inplace=True)
sales_salary.set_index('temp', inplace=True)
sales_salary.index.name = 'sales'
sales_salary
df = df.join(pd.get_dummies(df['salary']))
df = df.join(pd.get_dummies(df['sales']), rsuffix='d')
df.drop(labels=['sales', 'salary'], inplace=True, axis=1)
y = df['left']
df.drop(labels='left', axis=1, inplace=True)

from sklearn.model_selection import train_test_split
xtr, xte, ytr, yte = train_test_split(df, y, test_size=0.25)
(xtr.shape, xte.shape, ytr.shape, yte.shape)

from sklearn.linear_model import LogisticRegressionCV
lr_cv = LogisticRegressionCV()
lr_cv.fit(xtr, ytr)
precision_score(ytr, lr_cv.predict(xtr))

from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier()
rf.fit(xtr, ytr)
print(rf.feature_importances_)
pd.DataFrame(rf.feature_importances_).plot(kind='barh')
code
90133716/cell_3
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd
import pandas as pd

df = pd.read_csv('../input/HR_comma_sep.csv')
sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False)
sales_salary = sales_salary[['low', 'medium', 'high']]
sales_salary['temp'] = sales_salary.index.values
sales_salary.iloc[0, 3] = 'it'
sales_salary.iloc[1, 3] = 'rand_d'
sales_salary.sort_values(by='temp', inplace=True)
sales_salary.set_index('temp', inplace=True)
sales_salary.index.name = 'sales'
sales_salary
code
90133716/cell_14
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.metrics import classification_report, precision_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import PolynomialFeatures
import pandas as pd
import pandas as pd

df = pd.read_csv('../input/HR_comma_sep.csv')
sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False)
sales_salary = sales_salary[['low', 'medium', 'high']]
sales_salary['temp'] = sales_salary.index.values
sales_salary.iloc[0, 3] = 'it'
sales_salary.iloc[1, 3] = 'rand_d'
sales_salary.sort_values(by='temp', inplace=True)
sales_salary.set_index('temp', inplace=True)
sales_salary.index.name = 'sales'
sales_salary
df = df.join(pd.get_dummies(df['salary']))
df = df.join(pd.get_dummies(df['sales']), rsuffix='d')
df.drop(labels=['sales', 'salary'], inplace=True, axis=1)
y = df['left']
df.drop(labels='left', axis=1, inplace=True)

from sklearn.model_selection import train_test_split
xtr, xte, ytr, yte = train_test_split(df, y, test_size=0.25)
(xtr.shape, xte.shape, ytr.shape, yte.shape)

from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(degree=3)
poly.fit(df)
xtr2 = poly.transform(xtr)
lr_cv2 = LogisticRegression()
lr_cv2.fit(xtr2, ytr)
xte2 = poly.transform(xte)
print(precision_score(ytr, lr_cv2.predict(xtr2)))
print(classification_report(yte, lr_cv2.predict(xte2)))
code
90133716/cell_10
[ "text_html_output_1.png" ]
from sklearn.linear_model import LogisticRegressionCV
from sklearn.metrics import classification_report, precision_score
from sklearn.model_selection import train_test_split
import pandas as pd
import pandas as pd

df = pd.read_csv('../input/HR_comma_sep.csv')
sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False)
sales_salary = sales_salary[['low', 'medium', 'high']]
sales_salary['temp'] = sales_salary.index.values
sales_salary.iloc[0, 3] = 'it'
sales_salary.iloc[1, 3] = 'rand_d'
sales_salary.sort_values(by='temp', inplace=True)
sales_salary.set_index('temp', inplace=True)
sales_salary.index.name = 'sales'
sales_salary
df = df.join(pd.get_dummies(df['salary']))
df = df.join(pd.get_dummies(df['sales']), rsuffix='d')
df.drop(labels=['sales', 'salary'], inplace=True, axis=1)
y = df['left']
df.drop(labels='left', axis=1, inplace=True)

from sklearn.model_selection import train_test_split
xtr, xte, ytr, yte = train_test_split(df, y, test_size=0.25)
(xtr.shape, xte.shape, ytr.shape, yte.shape)

from sklearn.linear_model import LogisticRegressionCV
lr_cv = LogisticRegressionCV()
lr_cv.fit(xtr, ytr)
precision_score(ytr, lr_cv.predict(xtr))
code
90133716/cell_12
[ "image_output_1.png" ]
from sklearn.linear_model import LogisticRegressionCV
from sklearn.metrics import classification_report
from sklearn.metrics import classification_report, precision_score
from sklearn.model_selection import train_test_split
import pandas as pd
import pandas as pd

df = pd.read_csv('../input/HR_comma_sep.csv')
sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False)
sales_salary = sales_salary[['low', 'medium', 'high']]
sales_salary['temp'] = sales_salary.index.values
sales_salary.iloc[0, 3] = 'it'
sales_salary.iloc[1, 3] = 'rand_d'
sales_salary.sort_values(by='temp', inplace=True)
sales_salary.set_index('temp', inplace=True)
sales_salary.index.name = 'sales'
sales_salary
df = df.join(pd.get_dummies(df['salary']))
df = df.join(pd.get_dummies(df['sales']), rsuffix='d')
df.drop(labels=['sales', 'salary'], inplace=True, axis=1)
y = df['left']
df.drop(labels='left', axis=1, inplace=True)

from sklearn.model_selection import train_test_split
xtr, xte, ytr, yte = train_test_split(df, y, test_size=0.25)
(xtr.shape, xte.shape, ytr.shape, yte.shape)

from sklearn.linear_model import LogisticRegressionCV
lr_cv = LogisticRegressionCV()
lr_cv.fit(xtr, ytr)
precision_score(ytr, lr_cv.predict(xtr))

from sklearn.metrics import classification_report
print(classification_report(yte, lr_cv.predict(xte)))
code
90133716/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd df = pd.read_csv('../input/HR_comma_sep.csv') sales_salary = pd.crosstab(df['sales'], df['salary'], normalize=False) sales_salary = sales_salary[['low', 'medium', 'high']] sales_salary['temp'] = sales_salary.index.values sales_salary.iloc[0, 3] = 'it' sales_salary.iloc[1, 3] = 'rand_d' sales_salary.sort_values(by='temp', inplace=True) sales_salary.set_index('temp', inplace=True) sales_salary.index.name = 'sales' sales_salary df = df.join(pd.get_dummies(df['salary'])) df = df.join(pd.get_dummies(df['sales']), rsuffix='d') df.drop(labels=['sales', 'salary'], inplace=True, axis=1) df.head()
code
33100747/cell_6
[ "image_output_1.png" ]
from dateutil.relativedelta import relativedelta from keras.layers.core import Dense from keras.layers.recurrent import LSTM from keras.models import Sequential from sklearn.preprocessing import MinMaxScaler import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/hourly-energy-consumption/AEP_hourly.csv') from sklearn.preprocessing import MinMaxScaler from dateutil.relativedelta import relativedelta import datetime data = data.sort_values(by=['Datetime']) data['Datetime'] = data['Datetime'].astype('datetime64') data = data.loc[data['Datetime'] >= data['Datetime'][len(data['Datetime']) - 1] - relativedelta(years=3)] data.reset_index(inplace=True) scaler = MinMaxScaler() consumption = scaler.fit_transform(np.reshape(data['AEP_MW'].values, (-1, 1)))[:, 0] ratio = 0.8 split = int(np.floor(ratio * len(data))) input_length = 20 x_train = [consumption[i - input_length:i] for i in range(input_length, split)] x_test = [consumption[i - input_length:i] for i in range(input_length + split, len(consumption))] y_train = consumption[input_length:split] y_test = consumption[input_length + split:] x_train_lstm = np.reshape(x_train, (np.shape(x_train)[0], np.shape(x_train)[1], 1)) x_test_lstm = np.reshape(x_test, (np.shape(x_test)[0], np.shape(x_test)[1], 1)) from keras.models import Sequential from keras.layers.recurrent import LSTM from keras.layers.core import Dense lstm = Sequential() layers = [LSTM(units=128, input_shape=(input_length, 1), activation='sigmoid', return_sequences=True), LSTM(units=128, activation='sigmoid'), Dense(1)] for layer in layers: lstm.add(layer) lstm.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy']) history_lstm = lstm.fit(x_train_lstm, y_train, validation_data=(x_test_lstm, y_test), epochs=3, batch_size=32) import matplotlib.pyplot as plt plt.figure() plt.subplot(121) plt.plot(history_lstm.history['loss']) plt.plot(history_lstm.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epochs') plt.legend(['train', 'val'], loc='upper left') plt.subplot(122) plt.plot(history_lstm.history['accuracy']) plt.plot(history_lstm.history['val_accuracy']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epochs') plt.legend(['train', 'val'], loc='upper left') plt.show()
code
33100747/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/hourly-energy-consumption/AEP_hourly.csv') print(data.columns) print(data.head())
code
33100747/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
33100747/cell_7
[ "image_output_1.png" ]
from dateutil.relativedelta import relativedelta from keras.layers.core import Dense from keras.layers.recurrent import LSTM from keras.models import Sequential from sklearn.preprocessing import MinMaxScaler import datetime import matplotlib.pyplot as plt import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/hourly-energy-consumption/AEP_hourly.csv') from sklearn.preprocessing import MinMaxScaler from dateutil.relativedelta import relativedelta import datetime data = data.sort_values(by=['Datetime']) data['Datetime'] = data['Datetime'].astype('datetime64') data = data.loc[data['Datetime'] >= data['Datetime'][len(data['Datetime']) - 1] - relativedelta(years=3)] data.reset_index(inplace=True) scaler = MinMaxScaler() consumption = scaler.fit_transform(np.reshape(data['AEP_MW'].values, (-1, 1)))[:, 0] ratio = 0.8 split = int(np.floor(ratio * len(data))) input_length = 20 x_train = [consumption[i - input_length:i] for i in range(input_length, split)] x_test = [consumption[i - input_length:i] for i in range(input_length + split, len(consumption))] y_train = consumption[input_length:split] y_test = consumption[input_length + split:] x_train_lstm = np.reshape(x_train, (np.shape(x_train)[0], np.shape(x_train)[1], 1)) x_test_lstm = np.reshape(x_test, (np.shape(x_test)[0], np.shape(x_test)[1], 1)) from keras.models import Sequential from keras.layers.recurrent import LSTM from keras.layers.core import Dense lstm = Sequential() layers = [LSTM(units=128, input_shape=(input_length, 1), activation='sigmoid', return_sequences=True), LSTM(units=128, activation='sigmoid'), Dense(1)] for layer in layers: lstm.add(layer) lstm.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy']) history_lstm = lstm.fit(x_train_lstm, y_train, validation_data=(x_test_lstm, y_test), epochs=3, batch_size=32) import matplotlib.pyplot as plt predictions = lstm.predict(x_test_lstm) first_date = data['Datetime'][len(data) - len(y_test)] predicted_dates = [first_date + datetime.timedelta(hours=i) for i in range(len(x_test))] plt.figure() plt.plot(data['Datetime'], scaler.inverse_transform(np.reshape(consumption, (-1, 1))), color='b', alpha=0.7) plt.plot(predicted_dates, scaler.inverse_transform(np.reshape(predictions, (-1, 1))), color='r', alpha=0.4) plt.xlabel('Datetime') plt.ylabel('Energy consumption in MegaWatt') plt.title('American energy consumption evolution over time') plt.legend(['true data', 'prediction']) plt.show()
code
33100747/cell_3
[ "text_plain_output_1.png" ]
from dateutil.relativedelta import relativedelta from sklearn.preprocessing import MinMaxScaler import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/hourly-energy-consumption/AEP_hourly.csv') from sklearn.preprocessing import MinMaxScaler from dateutil.relativedelta import relativedelta import datetime data = data.sort_values(by=['Datetime']) data['Datetime'] = data['Datetime'].astype('datetime64') data = data.loc[data['Datetime'] >= data['Datetime'][len(data['Datetime']) - 1] - relativedelta(years=3)] data.reset_index(inplace=True) print(data) scaler = MinMaxScaler() consumption = scaler.fit_transform(np.reshape(data['AEP_MW'].values, (-1, 1)))[:, 0] print(consumption)
code
33100747/cell_5
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from dateutil.relativedelta import relativedelta from keras.layers.core import Dense from keras.layers.recurrent import LSTM from keras.models import Sequential from sklearn.preprocessing import MinMaxScaler import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data = pd.read_csv('/kaggle/input/hourly-energy-consumption/AEP_hourly.csv') from sklearn.preprocessing import MinMaxScaler from dateutil.relativedelta import relativedelta import datetime data = data.sort_values(by=['Datetime']) data['Datetime'] = data['Datetime'].astype('datetime64') data = data.loc[data['Datetime'] >= data['Datetime'][len(data['Datetime']) - 1] - relativedelta(years=3)] data.reset_index(inplace=True) scaler = MinMaxScaler() consumption = scaler.fit_transform(np.reshape(data['AEP_MW'].values, (-1, 1)))[:, 0] ratio = 0.8 split = int(np.floor(ratio * len(data))) input_length = 20 x_train = [consumption[i - input_length:i] for i in range(input_length, split)] x_test = [consumption[i - input_length:i] for i in range(input_length + split, len(consumption))] y_train = consumption[input_length:split] y_test = consumption[input_length + split:] x_train_lstm = np.reshape(x_train, (np.shape(x_train)[0], np.shape(x_train)[1], 1)) x_test_lstm = np.reshape(x_test, (np.shape(x_test)[0], np.shape(x_test)[1], 1)) from keras.models import Sequential from keras.layers.recurrent import LSTM from keras.layers.core import Dense lstm = Sequential() layers = [LSTM(units=128, input_shape=(input_length, 1), activation='sigmoid', return_sequences=True), LSTM(units=128, activation='sigmoid'), Dense(1)] for layer in layers: lstm.add(layer) lstm.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy']) history_lstm = lstm.fit(x_train_lstm, y_train, validation_data=(x_test_lstm, y_test), epochs=3, batch_size=32)
code
2013148/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tflearn df = pd.read_csv('../input/train.csv') X = df.copy() columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Survived'] X = X[columns] for i in columns: X = X[~X[i].isnull()] for i in range(1, 4): X['Pclass_' + str(i)] = X['Pclass'] == i del X['Pclass'] for i in X.Embarked.unique(): X[i] = X['Embarked'] == i del X['Embarked'] for i in X.Sex.unique(): X[i] = X['Sex'] == i del X['Sex'] y = pd.DataFrame({'Survived': X['Survived'], 'Not Survived': 1 - X['Survived']}) y.shape del X['Survived'] X = np.array(X, dtype=np.float32) y = np.array(y, dtype=np.float32) net = tflearn.input_data(shape=[None, 9]) net = tflearn.fully_connected(net, 32) net = tflearn.fully_connected(net, 32) net = tflearn.fully_connected(net, 2, activation='softmax') net = tflearn.regression(net) model = tflearn.DNN(net) model.fit(X, y, n_epoch=200, batch_size=16, show_metric=True)
code
2013148/cell_11
[ "application_vnd.jupyter.stderr_output_2.png", "text_plain_output_1.png" ]
from sklearn.preprocessing import Imputer import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tflearn df = pd.read_csv('../input/train.csv') X = df.copy() columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Survived'] X = X[columns] for i in columns: X = X[~X[i].isnull()] for i in range(1, 4): X['Pclass_' + str(i)] = X['Pclass'] == i del X['Pclass'] for i in X.Embarked.unique(): X[i] = X['Embarked'] == i del X['Embarked'] for i in X.Sex.unique(): X[i] = X['Sex'] == i del X['Sex'] y = pd.DataFrame({'Survived': X['Survived'], 'Not Survived': 1 - X['Survived']}) y.shape del X['Survived'] X = np.array(X, dtype=np.float32) y = np.array(y, dtype=np.float32) net = tflearn.input_data(shape=[None, 9]) net = tflearn.fully_connected(net, 32) net = tflearn.fully_connected(net, 32) net = tflearn.fully_connected(net, 2, activation='softmax') net = tflearn.regression(net) model = tflearn.DNN(net) model.fit(X, y, n_epoch=200, batch_size=16, show_metric=True) df = pd.read_csv('../input/test.csv') X_test = df.copy() columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked'] X_test = X_test[columns] for i in range(1, 4): X_test['Pclass_' + str(i)] = X_test['Pclass'] == i del X_test['Pclass'] for i in X_test.Embarked.unique(): X_test[i] = X_test['Embarked'] == i del X_test['Embarked'] for i in X_test.Sex.unique(): X_test[i] = X_test['Sex'] == i del X_test['Sex'] X_test = np.array(X_test, dtype=np.float32) from sklearn.preprocessing import Imputer imputer = Imputer() X_test = imputer.fit_transform(X_test) pred = model.predict(X_test)
code
2013148/cell_1
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from subprocess import check_output import numpy as np import pandas as pd import tflearn from subprocess import check_output print(check_output(['ls', '../input']).decode('utf8'))
code
2013148/cell_15
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.preprocessing import Imputer import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tflearn df = pd.read_csv('../input/train.csv') X = df.copy() columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Survived'] X = X[columns] for i in columns: X = X[~X[i].isnull()] for i in range(1, 4): X['Pclass_' + str(i)] = X['Pclass'] == i del X['Pclass'] for i in X.Embarked.unique(): X[i] = X['Embarked'] == i del X['Embarked'] for i in X.Sex.unique(): X[i] = X['Sex'] == i del X['Sex'] y = pd.DataFrame({'Survived': X['Survived'], 'Not Survived': 1 - X['Survived']}) y.shape del X['Survived'] X = np.array(X, dtype=np.float32) y = np.array(y, dtype=np.float32) net = tflearn.input_data(shape=[None, 9]) net = tflearn.fully_connected(net, 32) net = tflearn.fully_connected(net, 32) net = tflearn.fully_connected(net, 2, activation='softmax') net = tflearn.regression(net) model = tflearn.DNN(net) model.fit(X, y, n_epoch=200, batch_size=16, show_metric=True) df = pd.read_csv('../input/test.csv') X_test = df.copy() columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked'] X_test = X_test[columns] for i in range(1, 4): X_test['Pclass_' + str(i)] = X_test['Pclass'] == i del X_test['Pclass'] for i in X_test.Embarked.unique(): X_test[i] = X_test['Embarked'] == i del X_test['Embarked'] for i in X_test.Sex.unique(): X_test[i] = X_test['Sex'] == i del X_test['Sex'] X_test = np.array(X_test, dtype=np.float32) from sklearn.preprocessing import Imputer imputer = Imputer() X_test = imputer.fit_transform(X_test) pred = model.predict(X_test) predict = np.zeros(len(pred)) for i in range(len(pred)): if pred[i][1] >= 0.5: predict[i] = 1 y_test = pd.read_csv('../input/gender_submission.csv') y_test = pd.DataFrame({'Survived': y_test['Survived'], 'Not Survived': 1 - y_test['Survived']}) y_test = np.array(y_test, dtype=np.float32) test = pd.read_csv('../input/test.csv') my_submission = pd.DataFrame({'PassengerId': test.PassengerId, 'Survived': predict.astype(int)}) my_submission.to_csv('submission.csv', index=False)
code
2013148/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.preprocessing import Imputer import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tflearn df = pd.read_csv('../input/train.csv') X = df.copy() columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Survived'] X = X[columns] for i in columns: X = X[~X[i].isnull()] for i in range(1, 4): X['Pclass_' + str(i)] = X['Pclass'] == i del X['Pclass'] for i in X.Embarked.unique(): X[i] = X['Embarked'] == i del X['Embarked'] for i in X.Sex.unique(): X[i] = X['Sex'] == i del X['Sex'] y = pd.DataFrame({'Survived': X['Survived'], 'Not Survived': 1 - X['Survived']}) y.shape del X['Survived'] X = np.array(X, dtype=np.float32) y = np.array(y, dtype=np.float32) net = tflearn.input_data(shape=[None, 9]) net = tflearn.fully_connected(net, 32) net = tflearn.fully_connected(net, 32) net = tflearn.fully_connected(net, 2, activation='softmax') net = tflearn.regression(net) model = tflearn.DNN(net) model.fit(X, y, n_epoch=200, batch_size=16, show_metric=True) df = pd.read_csv('../input/test.csv') X_test = df.copy() columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked'] X_test = X_test[columns] for i in range(1, 4): X_test['Pclass_' + str(i)] = X_test['Pclass'] == i del X_test['Pclass'] for i in X_test.Embarked.unique(): X_test[i] = X_test['Embarked'] == i del X_test['Embarked'] for i in X_test.Sex.unique(): X_test[i] = X_test['Sex'] == i del X_test['Sex'] X_test = np.array(X_test, dtype=np.float32) from sklearn.preprocessing import Imputer imputer = Imputer() X_test = imputer.fit_transform(X_test) pred = model.predict(X_test) predict = np.zeros(len(pred)) for i in range(len(pred)): if pred[i][1] >= 0.5: predict[i] = 1 y_test = pd.read_csv('../input/gender_submission.csv') y_test = pd.DataFrame({'Survived': y_test['Survived'], 'Not Survived': 1 - y_test['Survived']}) y_test = np.array(y_test, dtype=np.float32) model.evaluate(X_test, y_test, batch_size=16)
code
2013148/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.preprocessing import Imputer import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import tflearn df = pd.read_csv('../input/train.csv') X = df.copy() columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked', 'Survived'] X = X[columns] for i in columns: X = X[~X[i].isnull()] for i in range(1, 4): X['Pclass_' + str(i)] = X['Pclass'] == i del X['Pclass'] for i in X.Embarked.unique(): X[i] = X['Embarked'] == i del X['Embarked'] for i in X.Sex.unique(): X[i] = X['Sex'] == i del X['Sex'] y = pd.DataFrame({'Survived': X['Survived'], 'Not Survived': 1 - X['Survived']}) y.shape del X['Survived'] X = np.array(X, dtype=np.float32) y = np.array(y, dtype=np.float32) net = tflearn.input_data(shape=[None, 9]) net = tflearn.fully_connected(net, 32) net = tflearn.fully_connected(net, 32) net = tflearn.fully_connected(net, 2, activation='softmax') net = tflearn.regression(net) model = tflearn.DNN(net) model.fit(X, y, n_epoch=200, batch_size=16, show_metric=True) df = pd.read_csv('../input/test.csv') X_test = df.copy() columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked'] X_test = X_test[columns] for i in range(1, 4): X_test['Pclass_' + str(i)] = X_test['Pclass'] == i del X_test['Pclass'] for i in X_test.Embarked.unique(): X_test[i] = X_test['Embarked'] == i del X_test['Embarked'] for i in X_test.Sex.unique(): X_test[i] = X_test['Sex'] == i del X_test['Sex'] X_test = np.array(X_test, dtype=np.float32) from sklearn.preprocessing import Imputer imputer = Imputer() X_test = imputer.fit_transform(X_test) pred = model.predict(X_test) predict = np.zeros(len(pred)) for i in range(len(pred)): if pred[i][1] >= 0.5: predict[i] = 1
code
130020137/cell_13
[ "text_plain_output_1.png" ]
from collections import deque from collections import deque antrian = deque([1, 2, 3, 4, 5]) print('Jumlah Antrian : ', antrian) antrian.append(6) print('Nasabah ke ', 6) print('Jumlah Antrian :', antrian) antrian.append(7) print('Nasabah ke ', 7) print('Jumlah Antrian :', antrian) out = antrian.popleft() print('Nasabah yang keluar', out) print('Jumlah Nasabah Sekarang :', antrian) out = antrian.popleft() print('Nasabah yang keluar', out) print('Jumlah Nasabah Sekarang :', antrian) out = antrian.popleft() print('Nasabah yang keluar', out) print('Jumlah Nasabah Sekarang :', antrian) antrian.append(8) print('Nasabah ke ', 8) print('Jumlah Antrian :', antrian)
code
130020137/cell_4
[ "text_plain_output_1.png" ]
batubata = [1, 2, 3, 4, 5] print(batubata) batubata.append(6) print('Batu Bata yang ditambah menjadi', 6) print('Batu Bata yang diangkut', batubata) batubata.append(7) print('Batu Bata yang ditambah menjadi', 7) print('Batu Bata yang diangkut', batubata) batubatalelah = batubata.pop() print('Batu bata yang dikeluarkan adalah :', batubatalelah) print('Jumlah Batu Bata yang diangkut :', batubata)
code
130020137/cell_6
[ "text_plain_output_1.png" ]
sepedamotor = [1, 2, 3] print('Jumlah Sepeda Motor :', sepedamotor) sepedamotor.append(4) print('Penambahan Sepeda motor menjadi', 4) print('Jumlah Sepeda Motor : ', sepedamotor) sepedamotor.pop() print('Pengambilan Sepeda Motor', sepedamotor) print('Jumlah Sepeda Motor : ', sepedamotor) sepedamotor.pop() print('Pengambilan Sepeda Motor ', sepedamotor) print('Jumlah Sepeda Motor : ', sepedamotor)
code
130020137/cell_2
[ "text_plain_output_1.png" ]
buku = [1, 2, 3, 4, 5, 6] print('Jumlah Buku Awal:', buku) buku.append(7) print('Penambahan Buku', 7) print('Jumlah Buku : ', buku) buku.append(8) print('Penambahan Buku', 8) print('Jumlah Buku : ', buku) buku.pop() print('Pengambilan Buku oleh Pelanggan', buku) print('Jumlah Buku : ', buku)
code
130020137/cell_19
[ "text_plain_output_1.png" ]
from collections import deque from collections import deque from collections import deque from collections import deque from collections import deque antrian = deque([1, 2, 3, 4, 5]) antrian.append(6) antrian.append(7) out = antrian.popleft() out = antrian.popleft() out = antrian.popleft() antrian.append(8) from collections import deque antrian = deque([1, 2, 3, 4, 5]) npm = deque([200902001, 200902002, 200902003, 200902004, 200902005]) antrian.append(6) antrian.append(7) out = antrian.popleft() outnpm = npm.popleft() from collections import deque antrian = deque([5, 6, 7, 8, 9]) antrian.append(10) antrian.append(11) out = antrian.popleft() out = antrian.popleft() from collections import deque antrian = deque([1, 2, 3, 4, 5]) print('jumlah antrian :', antrian) antrian.append(6) print('antrian ke ', 6) print('jumlah antrian : ', antrian) out = antrian.popleft() print('antrian yang keluar ', out) print('jumlah antrian :', antrian) antrian.append(7) print('antrian ke', 7) print('jumlah antrian :', antrian) out = antrian.popleft() print('antrian yang keluar ', out) print('jumlah antrian :', antrian)
code
130020137/cell_8
[ "text_plain_output_1.png" ]
baju = [1, 2, 3, 4, 5] print('jumlah baju awal:', baju) baju.append(6) print('penambahan baju', 6) print('jumlah baju : ', baju) baju.pop() print('pengambilan baju oleh sibapak', baju) print('jumlah baju : ', baju)
code
130020137/cell_15
[ "text_plain_output_1.png" ]
from collections import deque from collections import deque from collections import deque antrian = deque([1, 2, 3, 4, 5]) antrian.append(6) antrian.append(7) out = antrian.popleft() out = antrian.popleft() out = antrian.popleft() antrian.append(8) from collections import deque antrian = deque([1, 2, 3, 4, 5]) npm = deque([200902001, 200902002, 200902003, 200902004, 200902005]) print('Jumlah Mahasiswa : ', antrian) print('Nomor Antrian Mahasiswa :', npm) antrian.append(6) print('Mahasiswa ke : ', 6) print('Nomor Npm : ', 200902006) print('Jumlah Mahasiswa:', antrian) antrian.append(7) print('Mahasiswa ke : ', 7) print('Nomor Npm : ', 200902007) print('Jumlah Mahasiswa: ', antrian) out = antrian.popleft() outnpm = npm.popleft() print('Mahasiswa yang ke: ', out) print(npm) print('Jumlah Mahasiswa Sekarang : ', antrian)
code
130020137/cell_17
[ "text_plain_output_1.png" ]
from collections import deque from collections import deque from collections import deque from collections import deque antrian = deque([1, 2, 3, 4, 5]) antrian.append(6) antrian.append(7) out = antrian.popleft() out = antrian.popleft() out = antrian.popleft() antrian.append(8) from collections import deque antrian = deque([1, 2, 3, 4, 5]) npm = deque([200902001, 200902002, 200902003, 200902004, 200902005]) antrian.append(6) antrian.append(7) out = antrian.popleft() outnpm = npm.popleft() from collections import deque antrian = deque([5, 6, 7, 8, 9]) print('Jumlah Antrian : ', antrian) antrian.append(10) print('Pembeli ke ', 10) print('Jumlah Antrian :', antrian) antrian.append(11) print('Pembeli ke ', 11) print('Jumlah Antrian :', antrian) out = antrian.popleft() print('Pembeli yang keluar', out) print('Jumlah Pembeli :', antrian) out = antrian.popleft() print('Pembeli yang keluar', out) print('Jumlah Pembeli :', antrian)
code
130020137/cell_10
[ "text_plain_output_1.png" ]
baju = [1, 2, 3, 4, 5] baju.append(6) baju.pop() baju = [5, 6, 7, 8, 9, 10] print('jumlah baju awal:', baju) lipatan = [5, 6, 7, 8, 9] print('jumlah baju yang sudah dilipat:', lipatan) lipatan.pop(4) print('pengambilan baju oleh siadik : ', lipatan) lipatan.append(10) print('Akhir jumlah baju : ', lipatan)
code
105204964/cell_21
[ "image_output_1.png" ]
import arviz as az import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import os import seaborn as sns import matplotlib import matplotlib.pyplot as plt import arviz as az data = pd.read_csv('../input/global-disaster-risk-index-time-series-dataset/world_risk_index.csv') (len(data['Year'].unique()), np.sort(data['Year'].unique()), len(data['Region'].unique())) data.isna().sum().sum() def summary(col1, col2, year): pivot = pd.pivot_table(data, values=[col1], index=['Year', col2], aggfunc={col1: [min, max, np.mean, np.std]}) pivot = pivot.sort_values(by=(col1, 'mean')).reindex(np.sort(data['Year'].unique()), level=0) return pivot.loc[year, :] data.isna().sum().sum() data.loc[1292, 'WRI Category'] = 'Medium' data.loc[1193, 'Vulnerability Category'] = 'Very Low' data.loc[1202, 'Vulnerability Category'] = 'Very Low' data.loc[1205, 'Vulnerability Category'] = 'Very Low' data.loc[1858, 'Vulnerability Category'] = 'Very Low' data.loc[1858, ' Lack of Adaptive Capacities'] = np.mean(data[' Lack of Adaptive Capacities']) ys = ['WRI','Exposure','Vulnerability','Susceptibility','Lack of Coping Capabilities',' Lack of Adaptive Capacities'] fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(15, 12)) for ax,i in zip(axs.ravel(),ys): az.plot_kde(data[i],fill_kwargs={"alpha":0.5},ax=ax) ax.set_title(i) sns.despine() plt.show() import warnings warnings.filterwarnings("ignore") ys = ['WRI','Exposure','Vulnerability','Susceptibility','Lack of Coping Capabilities',' Lack of Adaptive Capacities'] fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(15, 12)) for ax,i in zip(axs.ravel(),ys): sns.boxplot(data[i],ax=ax) ax.set_title(i) sns.despine() plt.show() sns.despine() ys = ['WRI','Exposure','Vulnerability','Susceptibility'] fig, ((a,b),(c,d)) = plt.subplots (2, 2, figsize=(12, 12)) for i,t in zip(ys,[a,b,c,d]): sns.stripplot(x='Year', y=i, hue=i+' Category',data=data, ax = t) t.legend(ncol=3) t.set_title(i) sns.despine() ys = ['WRI', 'Exposure', 'Vulnerability', 'Susceptibility'] fig, ((a, b), (c, d)) = plt.subplots(2, 2, figsize=(12, 12)) for i, t in zip(ys, [a, b, c, d]): sns.violinplot(x=i + ' Category', y=i, data=data.sort_values(by=i), ax=t) t.legend(ncol=3) t.set_title(i) sns.despine()
code
105204964/cell_13
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd import numpy as np import pandas as pd import os import seaborn as sns import matplotlib import matplotlib.pyplot as plt import arviz as az data = pd.read_csv('../input/global-disaster-risk-index-time-series-dataset/world_risk_index.csv') (len(data['Year'].unique()), np.sort(data['Year'].unique()), len(data['Region'].unique())) data.isna().sum().sum() def summary(col1, col2, year): pivot = pd.pivot_table(data, values=[col1], index=['Year', col2], aggfunc={col1: [min, max, np.mean, np.std]}) pivot = pivot.sort_values(by=(col1, 'mean')).reindex(np.sort(data['Year'].unique()), level=0) return pivot.loc[year, :] summary('Vulnerability', 'Vulnerability Category', 2016)
code
105204964/cell_9
[ "image_output_1.png" ]
import pandas as pd import numpy as np import pandas as pd import os import seaborn as sns import matplotlib import matplotlib.pyplot as plt import arviz as az data = pd.read_csv('../input/global-disaster-risk-index-time-series-dataset/world_risk_index.csv') data.isna().sum().sum() data.isna().sum().sum()
code
105204964/cell_4
[ "image_output_1.png" ]
import numpy as np import pandas as pd import numpy as np import pandas as pd import os import seaborn as sns import matplotlib import matplotlib.pyplot as plt import arviz as az data = pd.read_csv('../input/global-disaster-risk-index-time-series-dataset/world_risk_index.csv') (len(data['Year'].unique()), np.sort(data['Year'].unique()), len(data['Region'].unique()))
code
105204964/cell_20
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
import arviz as az import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import os import seaborn as sns import matplotlib import matplotlib.pyplot as plt import arviz as az data = pd.read_csv('../input/global-disaster-risk-index-time-series-dataset/world_risk_index.csv') (len(data['Year'].unique()), np.sort(data['Year'].unique()), len(data['Region'].unique())) data.isna().sum().sum() def summary(col1, col2, year): pivot = pd.pivot_table(data, values=[col1], index=['Year', col2], aggfunc={col1: [min, max, np.mean, np.std]}) pivot = pivot.sort_values(by=(col1, 'mean')).reindex(np.sort(data['Year'].unique()), level=0) return pivot.loc[year, :] data.isna().sum().sum() data.loc[1292, 'WRI Category'] = 'Medium' data.loc[1193, 'Vulnerability Category'] = 'Very Low' data.loc[1202, 'Vulnerability Category'] = 'Very Low' data.loc[1205, 'Vulnerability Category'] = 'Very Low' data.loc[1858, 'Vulnerability Category'] = 'Very Low' data.loc[1858, ' Lack of Adaptive Capacities'] = np.mean(data[' Lack of Adaptive Capacities']) ys = ['WRI','Exposure','Vulnerability','Susceptibility','Lack of Coping Capabilities',' Lack of Adaptive Capacities'] fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(15, 12)) for ax,i in zip(axs.ravel(),ys): az.plot_kde(data[i],fill_kwargs={"alpha":0.5},ax=ax) ax.set_title(i) sns.despine() plt.show() import warnings warnings.filterwarnings("ignore") ys = ['WRI','Exposure','Vulnerability','Susceptibility','Lack of Coping Capabilities',' Lack of Adaptive Capacities'] fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(15, 12)) for ax,i in zip(axs.ravel(),ys): sns.boxplot(data[i],ax=ax) ax.set_title(i) sns.despine() plt.show() sns.despine() ys = ['WRI', 'Exposure', 'Vulnerability', 'Susceptibility'] fig, ((a, b), (c, d)) = plt.subplots(2, 2, figsize=(12, 12)) for i, t in zip(ys, [a, b, c, d]): sns.stripplot(x='Year', y=i, hue=i + ' Category', data=data, ax=t) t.legend(ncol=3) t.set_title(i) sns.despine()
code
105204964/cell_2
[ "text_html_output_1.png" ]
import pandas as pd import numpy as np import pandas as pd import os import seaborn as sns import matplotlib import matplotlib.pyplot as plt import arviz as az data = pd.read_csv('../input/global-disaster-risk-index-time-series-dataset/world_risk_index.csv') data.head()
code
105204964/cell_11
[ "text_html_output_1.png" ]
import numpy as np import pandas as pd import numpy as np import pandas as pd import os import seaborn as sns import matplotlib import matplotlib.pyplot as plt import arviz as az data = pd.read_csv('../input/global-disaster-risk-index-time-series-dataset/world_risk_index.csv') (len(data['Year'].unique()), np.sort(data['Year'].unique()), len(data['Region'].unique())) data.isna().sum().sum() def summary(col1, col2, year): pivot = pd.pivot_table(data, values=[col1], index=['Year', col2], aggfunc={col1: [min, max, np.mean, np.std]}) pivot = pivot.sort_values(by=(col1, 'mean')).reindex(np.sort(data['Year'].unique()), level=0) return pivot.loc[year, :] summary('WRI', 'WRI Category', 2020)
code
105204964/cell_19
[ "text_html_output_1.png" ]
import arviz as az import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import os import seaborn as sns import matplotlib import matplotlib.pyplot as plt import arviz as az data = pd.read_csv('../input/global-disaster-risk-index-time-series-dataset/world_risk_index.csv') (len(data['Year'].unique()), np.sort(data['Year'].unique()), len(data['Region'].unique())) data.isna().sum().sum() def summary(col1, col2, year): pivot = pd.pivot_table(data, values=[col1], index=['Year', col2], aggfunc={col1: [min, max, np.mean, np.std]}) pivot = pivot.sort_values(by=(col1, 'mean')).reindex(np.sort(data['Year'].unique()), level=0) return pivot.loc[year, :] data.isna().sum().sum() data.loc[1292, 'WRI Category'] = 'Medium' data.loc[1193, 'Vulnerability Category'] = 'Very Low' data.loc[1202, 'Vulnerability Category'] = 'Very Low' data.loc[1205, 'Vulnerability Category'] = 'Very Low' data.loc[1858, 'Vulnerability Category'] = 'Very Low' data.loc[1858, ' Lack of Adaptive Capacities'] = np.mean(data[' Lack of Adaptive Capacities']) ys = ['WRI','Exposure','Vulnerability','Susceptibility','Lack of Coping Capabilities',' Lack of Adaptive Capacities'] fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(15, 12)) for ax,i in zip(axs.ravel(),ys): az.plot_kde(data[i],fill_kwargs={"alpha":0.5},ax=ax) ax.set_title(i) sns.despine() plt.show() import warnings warnings.filterwarnings("ignore") ys = ['WRI','Exposure','Vulnerability','Susceptibility','Lack of Coping Capabilities',' Lack of Adaptive Capacities'] fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(15, 12)) for ax,i in zip(axs.ravel(),ys): sns.boxplot(data[i],ax=ax) ax.set_title(i) sns.despine() plt.show() sns.catplot(x='Year', y='WRI', hue='WRI' + ' Category', data=data, kind='point') sns.despine()
code
105204964/cell_18
[ "text_html_output_1.png" ]
import arviz as az import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import os import seaborn as sns import matplotlib import matplotlib.pyplot as plt import arviz as az data = pd.read_csv('../input/global-disaster-risk-index-time-series-dataset/world_risk_index.csv') (len(data['Year'].unique()), np.sort(data['Year'].unique()), len(data['Region'].unique())) data.isna().sum().sum() def summary(col1, col2, year): pivot = pd.pivot_table(data, values=[col1], index=['Year', col2], aggfunc={col1: [min, max, np.mean, np.std]}) pivot = pivot.sort_values(by=(col1, 'mean')).reindex(np.sort(data['Year'].unique()), level=0) return pivot.loc[year, :] data.isna().sum().sum() data.loc[1292, 'WRI Category'] = 'Medium' data.loc[1193, 'Vulnerability Category'] = 'Very Low' data.loc[1202, 'Vulnerability Category'] = 'Very Low' data.loc[1205, 'Vulnerability Category'] = 'Very Low' data.loc[1858, 'Vulnerability Category'] = 'Very Low' data.loc[1858, ' Lack of Adaptive Capacities'] = np.mean(data[' Lack of Adaptive Capacities']) ys = ['WRI','Exposure','Vulnerability','Susceptibility','Lack of Coping Capabilities',' Lack of Adaptive Capacities'] fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(15, 12)) for ax,i in zip(axs.ravel(),ys): az.plot_kde(data[i],fill_kwargs={"alpha":0.5},ax=ax) ax.set_title(i) sns.despine() plt.show() import warnings warnings.filterwarnings("ignore") ys = ['WRI','Exposure','Vulnerability','Susceptibility','Lack of Coping Capabilities',' Lack of Adaptive Capacities'] fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(15, 12)) for ax,i in zip(axs.ravel(),ys): sns.boxplot(data[i],ax=ax) ax.set_title(i) sns.despine() plt.show() sns.pairplot(data=data) plt.show()
code
105204964/cell_16
[ "text_plain_output_1.png" ]
import arviz as az import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import numpy as np import pandas as pd import os import seaborn as sns import matplotlib import matplotlib.pyplot as plt import arviz as az data = pd.read_csv('../input/global-disaster-risk-index-time-series-dataset/world_risk_index.csv') (len(data['Year'].unique()), np.sort(data['Year'].unique()), len(data['Region'].unique())) data.isna().sum().sum() def summary(col1, col2, year): pivot = pd.pivot_table(data, values=[col1], index=['Year', col2], aggfunc={col1: [min, max, np.mean, np.std]}) pivot = pivot.sort_values(by=(col1, 'mean')).reindex(np.sort(data['Year'].unique()), level=0) return pivot.loc[year, :] data.isna().sum().sum() data.loc[1292, 'WRI Category'] = 'Medium' data.loc[1193, 'Vulnerability Category'] = 'Very Low' data.loc[1202, 'Vulnerability Category'] = 'Very Low' data.loc[1205, 'Vulnerability Category'] = 'Very Low' data.loc[1858, 'Vulnerability Category'] = 'Very Low' data.loc[1858, ' Lack of Adaptive Capacities'] = np.mean(data[' Lack of Adaptive Capacities']) ys = ['WRI', 'Exposure', 'Vulnerability', 'Susceptibility', 'Lack of Coping Capabilities', ' Lack of Adaptive Capacities'] fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(15, 12)) for ax, i in zip(axs.ravel(), ys): az.plot_kde(data[i], fill_kwargs={'alpha': 0.5}, ax=ax) ax.set_title(i) sns.despine() plt.show()
code
105204964/cell_3
[ "image_output_1.png" ]
import pandas as pd import numpy as np import pandas as pd import os import seaborn as sns import matplotlib import matplotlib.pyplot as plt import arviz as az data = pd.read_csv('../input/global-disaster-risk-index-time-series-dataset/world_risk_index.csv') data.describe()
code
105204964/cell_17
[ "text_html_output_1.png" ]
import arviz as az import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns import warnings import numpy as np import pandas as pd import os import seaborn as sns import matplotlib import matplotlib.pyplot as plt import arviz as az data = pd.read_csv('../input/global-disaster-risk-index-time-series-dataset/world_risk_index.csv') (len(data['Year'].unique()), np.sort(data['Year'].unique()), len(data['Region'].unique())) data.isna().sum().sum() def summary(col1, col2, year): pivot = pd.pivot_table(data, values=[col1], index=['Year', col2], aggfunc={col1: [min, max, np.mean, np.std]}) pivot = pivot.sort_values(by=(col1, 'mean')).reindex(np.sort(data['Year'].unique()), level=0) return pivot.loc[year, :] data.isna().sum().sum() data.loc[1292, 'WRI Category'] = 'Medium' data.loc[1193, 'Vulnerability Category'] = 'Very Low' data.loc[1202, 'Vulnerability Category'] = 'Very Low' data.loc[1205, 'Vulnerability Category'] = 'Very Low' data.loc[1858, 'Vulnerability Category'] = 'Very Low' data.loc[1858, ' Lack of Adaptive Capacities'] = np.mean(data[' Lack of Adaptive Capacities']) ys = ['WRI','Exposure','Vulnerability','Susceptibility','Lack of Coping Capabilities',' Lack of Adaptive Capacities'] fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(15, 12)) for ax,i in zip(axs.ravel(),ys): az.plot_kde(data[i],fill_kwargs={"alpha":0.5},ax=ax) ax.set_title(i) sns.despine() plt.show() import warnings warnings.filterwarnings('ignore') ys = ['WRI', 'Exposure', 'Vulnerability', 'Susceptibility', 'Lack of Coping Capabilities', ' Lack of Adaptive Capacities'] fig, axs = plt.subplots(nrows=2, ncols=3, figsize=(15, 12)) for ax, i in zip(axs.ravel(), ys): sns.boxplot(data[i], ax=ax) ax.set_title(i) sns.despine() plt.show()
code
105204964/cell_10
[ "text_html_output_1.png" ]
import numpy as np import pandas as pd import numpy as np import pandas as pd import os import seaborn as sns import matplotlib import matplotlib.pyplot as plt import arviz as az data = pd.read_csv('../input/global-disaster-risk-index-time-series-dataset/world_risk_index.csv') (len(data['Year'].unique()), np.sort(data['Year'].unique()), len(data['Region'].unique())) data.isna().sum().sum() def summary(col1, col2, year): pivot = pd.pivot_table(data, values=[col1], index=['Year', col2], aggfunc={col1: [min, max, np.mean, np.std]}) pivot = pivot.sort_values(by=(col1, 'mean')).reindex(np.sort(data['Year'].unique()), level=0) return pivot.loc[year, :] data.isna().sum().sum() data[pd.isnull(data).any(axis=1)]
code
105204964/cell_12
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd import numpy as np import pandas as pd import os import seaborn as sns import matplotlib import matplotlib.pyplot as plt import arviz as az data = pd.read_csv('../input/global-disaster-risk-index-time-series-dataset/world_risk_index.csv') (len(data['Year'].unique()), np.sort(data['Year'].unique()), len(data['Region'].unique())) data.isna().sum().sum() def summary(col1, col2, year): pivot = pd.pivot_table(data, values=[col1], index=['Year', col2], aggfunc={col1: [min, max, np.mean, np.std]}) pivot = pivot.sort_values(by=(col1, 'mean')).reindex(np.sort(data['Year'].unique()), level=0) return pivot.loc[year, :] summary('Vulnerability', 'Vulnerability Category', 2019)
code
105204964/cell_5
[ "image_output_1.png" ]
import pandas as pd import numpy as np import pandas as pd import os import seaborn as sns import matplotlib import matplotlib.pyplot as plt import arviz as az data = pd.read_csv('../input/global-disaster-risk-index-time-series-dataset/world_risk_index.csv') data.isna().sum().sum()
code
90155584/cell_13
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/bank-marketing/bank-full.csv') marketing = df[['campaign', 'y']] marketing.sample(5) df_bank = marketing.groupby(['y']).apply(lambda x: x.sample(n=199, random_state=123)) df_bank.drop(columns='y', axis=1, inplace=True) df_bank.reset_index(inplace=True) df_bank.drop(columns='level_1', axis=1, inplace=True) df_bank.groupby('y')['campaign'].count() df_bank.groupby('y').agg({'campaign': ['median', 'mean']})
code
90155584/cell_9
[ "text_plain_output_1.png" ]
N = 45211 e = 0.05 n = N / (1 + N * e ** 2) n
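# This appears to be Slovin's formula, n = N / (1 + N * e**2): the minimum sample size for a population of N = 45211 at a 5% margin of error (roughly 397 records).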
code
90155584/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/bank-marketing/bank-full.csv') marketing = df[['campaign', 'y']] marketing.sample(5)
code
90155584/cell_6
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/bank-marketing/bank-full.csv') marketing = df[['campaign', 'y']] marketing.sample(5) marketing.describe()
code
90155584/cell_7
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns df = pd.read_csv('../input/bank-marketing/bank-full.csv') marketing = df[['campaign', 'y']] marketing.sample(5) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 8)) sns.histplot(data=marketing, x='y', ax=ax1) sns.boxplot(data=marketing, x='y', y='campaign', ax=ax2) plt.show()
code
90155584/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd import scipy.stats as st df = pd.read_csv('../input/bank-marketing/bank-full.csv') marketing = df[['campaign', 'y']] marketing.sample(5) df_bank = marketing.groupby(['y']).apply(lambda x: x.sample(n=199, random_state=123)) df_bank.drop(columns='y', axis=1, inplace=True) df_bank.reset_index(inplace=True) df_bank.drop(columns='level_1', axis=1, inplace=True) df_bank.groupby('y')['campaign'].count() df_bank.groupby('y').agg({'campaign': ['median', 'mean']}) yes = df_bank[df_bank['y'] == 'yes'] no = df_bank[df_bank['y'] == 'no'] mannwhitneyu = st.mannwhitneyu(yes['campaign'], no['campaign']) p_value = mannwhitneyu.pvalue print('P-Value :', p_value) if p_value >= 0.05: print('Accept H0') else: print('Reject H0, Accept Ha')
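# Non-parametric Mann-Whitney U test on 'campaign' (number of contacts) between the balanced yes/no samples; per the decision rule above, H0 (no difference between the groups) is rejected when p < 0.05.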
code
90155584/cell_14
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns df = pd.read_csv('../input/bank-marketing/bank-full.csv') marketing = df[['campaign', 'y']] marketing.sample(5) #Visualize the data fig, (ax1, ax2) = plt.subplots(1,2, figsize=(8,8)) sns.histplot(data=marketing, x='y', ax=ax1) sns.boxplot(data=marketing, x='y', y='campaign', ax=ax2) plt.show() df_bank = marketing.groupby(['y']).apply(lambda x: x.sample(n=199, random_state=123)) df_bank.drop(columns='y', axis=1, inplace=True) df_bank.reset_index(inplace=True) df_bank.drop(columns='level_1', axis=1, inplace=True) df_bank.groupby('y')['campaign'].count() df_bank.groupby('y').agg({'campaign': ['median', 'mean']}) fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 8)) sns.barplot(data=df_bank, x='y', y='campaign', estimator=np.median, ax=ax1) sns.barplot(data=df_bank, x='y', y='campaign', ax=ax2) plt.show()
code
90155584/cell_12
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/bank-marketing/bank-full.csv') marketing = df[['campaign', 'y']] marketing.sample(5) df_bank = marketing.groupby(['y']).apply(lambda x: x.sample(n=199, random_state=123)) df_bank.drop(columns='y', axis=1, inplace=True) df_bank.reset_index(inplace=True) df_bank.drop(columns='level_1', axis=1, inplace=True) df_bank.groupby('y')['campaign'].count()
code
32067430/cell_4
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) team_stats = pd.read_csv('/kaggle/input/college-basketball-dataset/cbb.csv') team_stats.head(5)
code
32067430/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) team_stats = pd.read_csv('/kaggle/input/college-basketball-dataset/cbb.csv') avg_off = team_stats['ADJOE'].mean() avg_def = team_stats['ADJDE'].mean() team_stats[team_stats['POSTSEASON'] == 'Champions']['ADJOE'].mean() - avg_off
code