Dataset columns:
- path: string, length 13 to 17
- screenshot_names: sequence, length 1 to 873
- code: string, length 0 to 40.4k
- cell_type: string, 1 distinct value ("code")
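Each record below pairs a notebook cell path (notebook id / cell id) with the names of its rendered output screenshots, the cell's source code, and its cell type. As a minimal sketch of how such a dump might be consumed, the snippet below assumes the records have been exported to a JSON Lines file (the name cells.jsonl is hypothetical) with exactly these four fields, and groups the cells by notebook:

import json
from collections import defaultdict

# Hypothetical export of the records below: one JSON object per line with
# the fields path, screenshot_names, code and cell_type.
notebooks = defaultdict(list)
with open('cells.jsonl', encoding='utf-8') as f:
    for line in f:
        record = json.loads(line)
        notebook_id, cell_id = record['path'].split('/')
        notebooks[notebook_id].append((cell_id, record['code'], record['screenshot_names']))

# Summarise how many code cells each notebook contributes.
for notebook_id, cells in sorted(notebooks.items()):
    print(notebook_id, len(cells), 'cells')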
90108519/cell_20
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv') df.columns = ['year', 'total', 'male', 'female', 'urban', 'rural'] df.sort_values(by='year', ignore_index=True, inplace=True) tmp_mask = df['total'] - df['male'] - df['female'] != 0 df[tmp_mask] df.loc[tmp_mask, 'total'] = df.loc[tmp_mask, 'male'] + df.loc[tmp_mask, 'female'] df.describe()
code
90108519/cell_29
[ "image_output_1.png" ]
import matplotlib.pyplot as plt import matplotlib.ticker as mtick import pandas as pd df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv') df.columns = ['year', 'total', 'male', 'female', 'urban', 'rural'] df.sort_values(by='year', ignore_index=True, inplace=True) tmp_mask = df['total'] - df['male'] - df['female'] != 0 df[tmp_mask] df.loc[tmp_mask, 'total'] = df.loc[tmp_mask, 'male'] + df.loc[tmp_mask, 'female'] fig, ax1 = plt.subplots(figsize=(12, 5)) plt.title('The difference between the genders in total and percentage terms', fontweight='bold', fontsize = 12) ax1.set_xlabel('year', fontsize = 10,fontweight='bold') ax1.set_ylabel('total',fontweight='bold', fontsize = 10, color = 'green') plt.plot(df['year'], df['male'] - df['female'], linewidth=3,label= 'total', color = 'green') ax1.tick_params(axis='y') ax2 = ax1.twinx() ax2.set_ylabel('percent', fontweight='bold', fontsize = 10) plt.plot(df['year'], (df['male'] - df['female'])/df['total']*100, linewidth=3, color = 'black', label= 'percent') ax2.tick_params(axis='y') ax2.yaxis.set_major_formatter(mtick.PercentFormatter()) fig.tight_layout() fig, ax1 = plt.subplots(figsize=(12, 5)) plt.title('Changing of population growth', fontweight='bold', fontsize=12) ax1.set_xlabel('year', fontsize=10, fontweight='bold') ax1.set_ylabel('total number', fontweight='bold', fontsize=10, color='green') plt.plot(df['year'], df['total'], linewidth=3, label='total', color='green') ax1.tick_params(axis='y') ax2 = ax1.twinx() ax2.set_ylabel('total growth', fontweight='bold', fontsize=10) plt.plot(df['year'], df['total'] - df['total'].shift(), linewidth=3, color='black', label='percent') ax2.tick_params(axis='y') plt.axhline(y=0, color='red', linestyle='--') fig.tight_layout()
code
90108519/cell_26
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import matplotlib.ticker as mtick import pandas as pd df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv') df.columns = ['year', 'total', 'male', 'female', 'urban', 'rural'] df.sort_values(by='year', ignore_index=True, inplace=True) tmp_mask = df['total'] - df['male'] - df['female'] != 0 df[tmp_mask] df.loc[tmp_mask, 'total'] = df.loc[tmp_mask, 'male'] + df.loc[tmp_mask, 'female'] fig, ax1 = plt.subplots(figsize=(12, 5)) plt.title('The difference between the genders in total and percentage terms', fontweight='bold', fontsize=12) ax1.set_xlabel('year', fontsize=10, fontweight='bold') ax1.set_ylabel('total', fontweight='bold', fontsize=10, color='green') plt.plot(df['year'], df['male'] - df['female'], linewidth=3, label='total', color='green') ax1.tick_params(axis='y') ax2 = ax1.twinx() ax2.set_ylabel('percent', fontweight='bold', fontsize=10) plt.plot(df['year'], (df['male'] - df['female']) / df['total'] * 100, linewidth=3, color='black', label='percent') ax2.tick_params(axis='y') ax2.yaxis.set_major_formatter(mtick.PercentFormatter()) fig.tight_layout()
code
90108519/cell_11
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv') df.columns = ['year', 'total', 'male', 'female', 'urban', 'rural'] df.sort_values(by='year', ignore_index=True, inplace=True) df.info()
code
90108519/cell_7
[ "image_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv') df.head()
code
90108519/cell_18
[ "text_html_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv') df.columns = ['year', 'total', 'male', 'female', 'urban', 'rural'] df.sort_values(by='year', ignore_index=True, inplace=True) tmp_mask = df['total'] - df['male'] - df['female'] != 0 df[tmp_mask] df.loc[tmp_mask, 'total'] = df.loc[tmp_mask, 'male'] + df.loc[tmp_mask, 'female'] print('Cases of nonconformity by territory: {}'.format(sum(df['total'] - df['urban'] - df['rural'])))
code
90108519/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv') df.columns = ['year', 'total', 'male', 'female', 'urban', 'rural'] df.sort_values(by='year', ignore_index=True, inplace=True) tmp_mask = df['total'] - df['male'] - df['female'] != 0 df[tmp_mask]
code
90108519/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd df = pd.read_csv('../input/chinas-population-by-gender-and-urbanrural/Chinas Population En.csv') df.columns = ['year', 'total', 'male', 'female', 'urban', 'rural'] df.sort_values(by='year', ignore_index=True, inplace=True) tmp_mask = df['total'] - df['male'] - df['female'] != 0 df[tmp_mask] df.loc[tmp_mask, 'total'] = df.loc[tmp_mask, 'male'] + df.loc[tmp_mask, 'female'] print('Cases of nonconformity by gender: {}'.format(sum(df['total'] - df['male'] - df['female'])))
code
16119977/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) dataf = pd.read_csv('../input/bike_share.csv') dataf.describe().T dataf.duplicated().sum() dataf.shape dataf.drop_duplicates(inplace=True) dataf.duplicated().sum() dataf.isna().sum() dataf.windspeed.plot(kind='box')
code
16119977/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) dataf = pd.read_csv('../input/bike_share.csv') dataf.describe().T dataf.duplicated().sum() dataf.shape dataf.drop_duplicates(inplace=True) dataf.duplicated().sum()
code
16119977/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) dataf = pd.read_csv('../input/bike_share.csv') dataf.info()
code
16119977/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) dataf = pd.read_csv('../input/bike_share.csv') dataf.describe().T dataf.duplicated().sum()
code
16119977/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) dataf = pd.read_csv('../input/bike_share.csv') dataf.describe().T dataf.duplicated().sum() dataf.shape dataf.drop_duplicates(inplace=True) dataf.duplicated().sum() dataf.isna().sum() dataf['temp'].unique()
code
16119977/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os print(os.listdir('../input'))
code
16119977/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) dataf = pd.read_csv('../input/bike_share.csv') dataf.describe().T dataf.duplicated().sum() dataf.shape
code
16119977/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) dataf = pd.read_csv('../input/bike_share.csv') dataf.describe().T dataf.duplicated().sum() dataf.shape dataf.drop_duplicates(inplace=True) dataf.duplicated().sum() dataf.isna().sum() dataf.registered.plot(kind='box')
code
16119977/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) dataf = pd.read_csv('../input/bike_share.csv') dataf.describe().T dataf.duplicated().sum() dataf.shape dataf.drop_duplicates(inplace=True) dataf.duplicated().sum() dataf.isna().sum() dataf['count'].plot(kind='box')
code
16119977/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) dataf = pd.read_csv('../input/bike_share.csv') dataf.head()
code
16119977/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) dataf = pd.read_csv('../input/bike_share.csv') dataf.describe().T dataf.duplicated().sum() dataf.shape dataf.drop_duplicates(inplace=True) dataf.duplicated().sum() dataf.isna().sum() dataf['count'].value_counts()
code
16119977/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) dataf = pd.read_csv('../input/bike_share.csv') dataf.describe().T dataf.duplicated().sum() dataf.shape dataf.drop_duplicates(inplace=True) dataf.duplicated().sum() dataf.isna().sum() dataf.casual.plot(kind='box')
code
16119977/cell_10
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) dataf = pd.read_csv('../input/bike_share.csv') dataf.describe().T dataf.duplicated().sum() dataf.shape dataf.drop_duplicates(inplace=True) dataf.duplicated().sum() dataf.isna().sum()
code
16119977/cell_12
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) dataf = pd.read_csv('../input/bike_share.csv') dataf.describe().T dataf.duplicated().sum() dataf.shape dataf.drop_duplicates(inplace=True) dataf.duplicated().sum() dataf.isna().sum() dataf.season.plot(kind='box')
code
16119977/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) dataf = pd.read_csv('../input/bike_share.csv') dataf.describe().T
code
2016288/cell_9
[ "text_plain_output_1.png" ]
from keras.layers import Conv2D, MaxPooling2D from keras.layers import Dense, Dropout, Flatten from keras.models import Sequential from keras.utils import to_categorical from sklearn.model_selection import train_test_split import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) fashion_train = pd.read_csv('../input/fashion-mnist_train.csv') fashion_test = pd.read_csv('../input/fashion-mnist_test.csv') from keras.utils import to_categorical from sklearn.model_selection import train_test_split img_rows, img_cols = (28, 28) input_shape = (img_rows, img_cols, 1) X = fashion_train.drop(['label'], axis=1).values y = to_categorical(fashion_train['label'].values) X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42) X_test = fashion_test.drop(['label'], axis=1).values y_test = to_categorical(fashion_test['label'].values) from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.layers.normalization import BatchNormalization batch_size = 256 num_classes = 10 epochs = 50 img_rows, img_cols = (28, 28) model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', kernel_initializer='he_normal', input_shape=input_shape)) model.add(MaxPooling2D((2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(128, (3, 3), activation='relu')) model.add(Dropout(0.4)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.3)) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) model.summary()
code
2016288/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) fashion_train = pd.read_csv('../input/fashion-mnist_train.csv') fashion_test = pd.read_csv('../input/fashion-mnist_test.csv') print(fashion_train.isnull().sum().sum()) print(fashion_test.isnull().sum().sum())
code
2016288/cell_11
[ "text_plain_output_1.png" ]
from keras.layers import Conv2D, MaxPooling2D from keras.layers import Dense, Dropout, Flatten from keras.models import Sequential from keras.utils import to_categorical from sklearn.model_selection import train_test_split import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) fashion_train = pd.read_csv('../input/fashion-mnist_train.csv') fashion_test = pd.read_csv('../input/fashion-mnist_test.csv') from keras.utils import to_categorical from sklearn.model_selection import train_test_split img_rows, img_cols = (28, 28) input_shape = (img_rows, img_cols, 1) X = fashion_train.drop(['label'], axis=1).values y = to_categorical(fashion_train['label'].values) X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42) X_test = fashion_test.drop(['label'], axis=1).values y_test = to_categorical(fashion_test['label'].values) X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1) X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1) X_val = X_val.reshape(X_val.shape[0], img_rows, img_cols, 1) X_train = X_train.astype('float32') X_test = X_test.astype('float32') X_val = X_val.astype('float32') X_train /= 255 X_test /= 255 X_val /= 255 from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.layers.normalization import BatchNormalization batch_size = 256 num_classes = 10 epochs = 50 img_rows, img_cols = (28, 28) model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', kernel_initializer='he_normal', input_shape=input_shape)) model.add(MaxPooling2D((2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(128, (3, 3), activation='relu')) model.add(Dropout(0.4)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.3)) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) model.summary() history = model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(X_val, y_val)) score = model.evaluate(X_test, y_test, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1])
code
2016288/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output import numpy as np import pandas as pd from subprocess import check_output print(check_output(['ls', '../input']).decode('utf8'))
code
2016288/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) fashion_train = pd.read_csv('../input/fashion-mnist_train.csv') fashion_test = pd.read_csv('../input/fashion-mnist_test.csv') fashion_train.head()
code
2016288/cell_10
[ "text_html_output_1.png" ]
from keras.layers import Conv2D, MaxPooling2D from keras.layers import Dense, Dropout, Flatten from keras.models import Sequential from keras.utils import to_categorical from sklearn.model_selection import train_test_split import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) fashion_train = pd.read_csv('../input/fashion-mnist_train.csv') fashion_test = pd.read_csv('../input/fashion-mnist_test.csv') from keras.utils import to_categorical from sklearn.model_selection import train_test_split img_rows, img_cols = (28, 28) input_shape = (img_rows, img_cols, 1) X = fashion_train.drop(['label'], axis=1).values y = to_categorical(fashion_train['label'].values) X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42) X_test = fashion_test.drop(['label'], axis=1).values y_test = to_categorical(fashion_test['label'].values) X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1) X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1) X_val = X_val.reshape(X_val.shape[0], img_rows, img_cols, 1) X_train = X_train.astype('float32') X_test = X_test.astype('float32') X_val = X_val.astype('float32') X_train /= 255 X_test /= 255 X_val /= 255 from keras.models import Sequential from keras.layers import Dense, Dropout, Flatten from keras.layers import Conv2D, MaxPooling2D from keras.layers.normalization import BatchNormalization batch_size = 256 num_classes = 10 epochs = 50 img_rows, img_cols = (28, 28) model = Sequential() model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', kernel_initializer='he_normal', input_shape=input_shape)) model.add(MaxPooling2D((2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(64, (3, 3), activation='relu')) model.add(MaxPooling2D(pool_size=(2, 2))) model.add(Dropout(0.25)) model.add(Conv2D(128, (3, 3), activation='relu')) model.add(Dropout(0.4)) model.add(Flatten()) model.add(Dense(128, activation='relu')) model.add(Dropout(0.3)) model.add(Dense(num_classes, activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) model.summary() history = model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(X_val, y_val)) score = model.evaluate(X_test, y_test, verbose=0)
code
2016288/cell_5
[ "text_plain_output_1.png" ]
from keras.utils import to_categorical from sklearn.model_selection import train_test_split import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) fashion_train = pd.read_csv('../input/fashion-mnist_train.csv') fashion_test = pd.read_csv('../input/fashion-mnist_test.csv') from keras.utils import to_categorical from sklearn.model_selection import train_test_split img_rows, img_cols = (28, 28) input_shape = (img_rows, img_cols, 1) X = fashion_train.drop(['label'], axis=1).values y = to_categorical(fashion_train['label'].values) X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42) X_test = fashion_test.drop(['label'], axis=1).values y_test = to_categorical(fashion_test['label'].values)
code
326868/cell_4
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import pandas as pd import numpy as np import pandas as pd import matplotlib.pyplot as plt from collections import Counter import string from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import LabelEncoder from sklearn.linear_model import LinearRegression pd.options.mode.chained_assignment = None def get_title(name): name = name.split(',')[1] name = name.split('.')[0] return name.strip() def get_title_grouped(name): title = get_title(name) if title in ['Rev', 'Dr', 'Col', 'Major', 'the Countess', 'Sir', 'Lady', 'Jonkheer', 'Capt', 'Dona', 'Don']: title = 'Rare' elif title in ['Ms', 'Mlle']: title = 'Miss' elif title == 'Mme': title = 'Mrs' return title def get_deck(cabin): if isinstance(cabin, str): if cabin[0] == 'T': return np.nan return cabin[0] return cabin train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') full = pd.concat([train, test]) # feature engineering described in previous notebooks full['Embarked'].fillna('C', inplace=True) full['Fare'].fillna(8.05, inplace=True) full['Title'] = full['Name'].apply(get_title_grouped) full['Deck'] = full['Cabin'].apply(get_deck) full['Family size'] = full['Parch'] + full['SibSp'] ticket_nums = [int(n.split()[-1]) for n in full['Ticket'].values if n.split()[-1].isdigit()] plt.hist(ticket_nums, 50) plt.xlabel('Ticket number') plt.ylabel('Count') plt.show()
code
326868/cell_20
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LinearRegression from sklearn.preprocessing import LabelEncoder import matplotlib.pyplot as plt import numpy as np import pandas as pd import string import numpy as np import pandas as pd import matplotlib.pyplot as plt from collections import Counter import string from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import LabelEncoder from sklearn.linear_model import LinearRegression pd.options.mode.chained_assignment = None def get_title(name): name = name.split(',')[1] name = name.split('.')[0] return name.strip() def get_title_grouped(name): title = get_title(name) if title in ['Rev', 'Dr', 'Col', 'Major', 'the Countess', 'Sir', 'Lady', 'Jonkheer', 'Capt', 'Dona', 'Don']: title = 'Rare' elif title in ['Ms', 'Mlle']: title = 'Miss' elif title == 'Mme': title = 'Mrs' return title def get_deck(cabin): if isinstance(cabin, str): if cabin[0] == 'T': return np.nan return cabin[0] return cabin train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') full = pd.concat([train, test]) # feature engineering described in previous notebooks full['Embarked'].fillna('C', inplace=True) full['Fare'].fillna(8.05, inplace=True) full['Title'] = full['Name'].apply(get_title_grouped) full['Deck'] = full['Cabin'].apply(get_deck) full['Family size'] = full['Parch'] + full['SibSp'] ticket_nums = [int(n.split()[-1]) for n in full['Ticket'].values if n.split()[-1].isdigit()] ticket_nums = [num for num in ticket_nums if num < 2000000] def get_ticket_num(ticket): ticket_num = ticket.split() ticket_num = ''.join((char for char in ticket_num[-1].strip() if char not in string.punctuation)) if not ticket_num.isdigit(): return np.nan return int(ticket_num) full['Ticket number'] = full['Ticket'].apply(get_ticket_num) full['Ticket number'].fillna(np.nanmedian(full['Ticket number'].values), inplace=True) full.drop(['Name', 'Ticket', 'Cabin', 'Parch', 'SibSp'], axis=1, inplace=True) encoders = {} to_encode = ['Embarked', 'Sex', 'Title'] for col in to_encode: encoders[col] = LabelEncoder() encoders[col].fit(full[col]) full[col] = full[col].apply(encoders[col].transform) age_train = full[full['Age'].notnull()] age_predict = full[~full['Age'].notnull()] lr = LinearRegression() lr.fit(age_train.drop(['Deck', 'Survived', 'PassengerId', 'Age'], axis=1), age_train['Age']) predicted_ages = lr.predict(age_predict.drop(['Deck', 'Survived', 'PassengerId', 'Age'], axis=1)) age_predict['Age'] = [max(0.0, age) for age in predicted_ages] full = pd.concat([age_train, age_predict]).sort_values('PassengerId') ages = age_train.Age ages.plot.kde(label='Original') ages = full.Age ages.plot.kde(label='With predicted missing values') train = full[full.PassengerId < 892] test = full[full.PassengerId >= 892] rf = RandomForestClassifier(n_estimators=100, oob_score=True) rf.fit(train.drop(['Survived', 'PassengerId'], axis=1), train['Survived']) rf.score(train.drop(['Survived', 'PassengerId'], axis=1), train['Survived'])
code
326868/cell_6
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt import numpy as np import pandas as pd import numpy as np import pandas as pd import matplotlib.pyplot as plt from collections import Counter import string from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import LabelEncoder from sklearn.linear_model import LinearRegression pd.options.mode.chained_assignment = None def get_title(name): name = name.split(',')[1] name = name.split('.')[0] return name.strip() def get_title_grouped(name): title = get_title(name) if title in ['Rev', 'Dr', 'Col', 'Major', 'the Countess', 'Sir', 'Lady', 'Jonkheer', 'Capt', 'Dona', 'Don']: title = 'Rare' elif title in ['Ms', 'Mlle']: title = 'Miss' elif title == 'Mme': title = 'Mrs' return title def get_deck(cabin): if isinstance(cabin, str): if cabin[0] == 'T': return np.nan return cabin[0] return cabin train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') full = pd.concat([train, test]) # feature engineering described in previous notebooks full['Embarked'].fillna('C', inplace=True) full['Fare'].fillna(8.05, inplace=True) full['Title'] = full['Name'].apply(get_title_grouped) full['Deck'] = full['Cabin'].apply(get_deck) full['Family size'] = full['Parch'] + full['SibSp'] ticket_nums = [int(n.split()[-1]) for n in full['Ticket'].values if n.split()[-1].isdigit()] ticket_nums = [num for num in ticket_nums if num < 2000000] plt.hist(ticket_nums, 50) plt.xlabel('Ticket number') plt.ylabel('Count') plt.show()
code
326868/cell_2
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np import pandas as pd import numpy as np import pandas as pd import matplotlib.pyplot as plt from collections import Counter import string from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import LabelEncoder from sklearn.linear_model import LinearRegression pd.options.mode.chained_assignment = None def get_title(name): name = name.split(',')[1] name = name.split('.')[0] return name.strip() def get_title_grouped(name): title = get_title(name) if title in ['Rev', 'Dr', 'Col', 'Major', 'the Countess', 'Sir', 'Lady', 'Jonkheer', 'Capt', 'Dona', 'Don']: title = 'Rare' elif title in ['Ms', 'Mlle']: title = 'Miss' elif title == 'Mme': title = 'Mrs' return title def get_deck(cabin): if isinstance(cabin, str): if cabin[0] == 'T': return np.nan return cabin[0] return cabin train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') full = pd.concat([train, test]) full['Embarked'].fillna('C', inplace=True) full['Fare'].fillna(8.05, inplace=True) full['Title'] = full['Name'].apply(get_title_grouped) full['Deck'] = full['Cabin'].apply(get_deck) full['Family size'] = full['Parch'] + full['SibSp']
code
326868/cell_18
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.preprocessing import LabelEncoder import matplotlib.pyplot as plt import numpy as np import pandas as pd import string import numpy as np import pandas as pd import matplotlib.pyplot as plt from collections import Counter import string from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import LabelEncoder from sklearn.linear_model import LinearRegression pd.options.mode.chained_assignment = None def get_title(name): name = name.split(',')[1] name = name.split('.')[0] return name.strip() def get_title_grouped(name): title = get_title(name) if title in ['Rev', 'Dr', 'Col', 'Major', 'the Countess', 'Sir', 'Lady', 'Jonkheer', 'Capt', 'Dona', 'Don']: title = 'Rare' elif title in ['Ms', 'Mlle']: title = 'Miss' elif title == 'Mme': title = 'Mrs' return title def get_deck(cabin): if isinstance(cabin, str): if cabin[0] == 'T': return np.nan return cabin[0] return cabin train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') full = pd.concat([train, test]) # feature engineering described in previous notebooks full['Embarked'].fillna('C', inplace=True) full['Fare'].fillna(8.05, inplace=True) full['Title'] = full['Name'].apply(get_title_grouped) full['Deck'] = full['Cabin'].apply(get_deck) full['Family size'] = full['Parch'] + full['SibSp'] ticket_nums = [int(n.split()[-1]) for n in full['Ticket'].values if n.split()[-1].isdigit()] ticket_nums = [num for num in ticket_nums if num < 2000000] def get_ticket_num(ticket): ticket_num = ticket.split() ticket_num = ''.join((char for char in ticket_num[-1].strip() if char not in string.punctuation)) if not ticket_num.isdigit(): return np.nan return int(ticket_num) full['Ticket number'] = full['Ticket'].apply(get_ticket_num) full['Ticket number'].fillna(np.nanmedian(full['Ticket number'].values), inplace=True) full.drop(['Name', 'Ticket', 'Cabin', 'Parch', 'SibSp'], axis=1, inplace=True) encoders = {} to_encode = ['Embarked', 'Sex', 'Title'] for col in to_encode: encoders[col] = LabelEncoder() encoders[col].fit(full[col]) full[col] = full[col].apply(encoders[col].transform) age_train = full[full['Age'].notnull()] age_predict = full[~full['Age'].notnull()] lr = LinearRegression() lr.fit(age_train.drop(['Deck', 'Survived', 'PassengerId', 'Age'], axis=1), age_train['Age']) predicted_ages = lr.predict(age_predict.drop(['Deck', 'Survived', 'PassengerId', 'Age'], axis=1)) age_predict['Age'] = [max(0.0, age) for age in predicted_ages] full = pd.concat([age_train, age_predict]).sort_values('PassengerId') ages = age_train.Age ages.plot.kde(label='Original') ages = full.Age ages.plot.kde(label='With predicted missing values') full['Deck'].fillna('N', inplace=True) encoders['Deck'] = LabelEncoder() encoders['Deck'].fit(full['Deck']) full['Deck'] = full['Deck'].apply(encoders['Deck'].transform)
code
326868/cell_8
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np import pandas as pd import string import numpy as np import pandas as pd import matplotlib.pyplot as plt from collections import Counter import string from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import LabelEncoder from sklearn.linear_model import LinearRegression pd.options.mode.chained_assignment = None def get_title(name): name = name.split(',')[1] name = name.split('.')[0] return name.strip() def get_title_grouped(name): title = get_title(name) if title in ['Rev', 'Dr', 'Col', 'Major', 'the Countess', 'Sir', 'Lady', 'Jonkheer', 'Capt', 'Dona', 'Don']: title = 'Rare' elif title in ['Ms', 'Mlle']: title = 'Miss' elif title == 'Mme': title = 'Mrs' return title def get_deck(cabin): if isinstance(cabin, str): if cabin[0] == 'T': return np.nan return cabin[0] return cabin train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') full = pd.concat([train, test]) # feature engineering described in previous notebooks full['Embarked'].fillna('C', inplace=True) full['Fare'].fillna(8.05, inplace=True) full['Title'] = full['Name'].apply(get_title_grouped) full['Deck'] = full['Cabin'].apply(get_deck) full['Family size'] = full['Parch'] + full['SibSp'] def get_ticket_num(ticket): ticket_num = ticket.split() ticket_num = ''.join((char for char in ticket_num[-1].strip() if char not in string.punctuation)) if not ticket_num.isdigit(): return np.nan return int(ticket_num) full['Ticket number'] = full['Ticket'].apply(get_ticket_num) full['Ticket number'].fillna(np.nanmedian(full['Ticket number'].values), inplace=True) full.drop(['Name', 'Ticket', 'Cabin', 'Parch', 'SibSp'], axis=1, inplace=True)
code
326868/cell_16
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.preprocessing import LabelEncoder import matplotlib.pyplot as plt import numpy as np import pandas as pd import string import numpy as np import pandas as pd import matplotlib.pyplot as plt from collections import Counter import string from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import LabelEncoder from sklearn.linear_model import LinearRegression pd.options.mode.chained_assignment = None def get_title(name): name = name.split(',')[1] name = name.split('.')[0] return name.strip() def get_title_grouped(name): title = get_title(name) if title in ['Rev', 'Dr', 'Col', 'Major', 'the Countess', 'Sir', 'Lady', 'Jonkheer', 'Capt', 'Dona', 'Don']: title = 'Rare' elif title in ['Ms', 'Mlle']: title = 'Miss' elif title == 'Mme': title = 'Mrs' return title def get_deck(cabin): if isinstance(cabin, str): if cabin[0] == 'T': return np.nan return cabin[0] return cabin train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') full = pd.concat([train, test]) # feature engineering described in previous notebooks full['Embarked'].fillna('C', inplace=True) full['Fare'].fillna(8.05, inplace=True) full['Title'] = full['Name'].apply(get_title_grouped) full['Deck'] = full['Cabin'].apply(get_deck) full['Family size'] = full['Parch'] + full['SibSp'] ticket_nums = [int(n.split()[-1]) for n in full['Ticket'].values if n.split()[-1].isdigit()] ticket_nums = [num for num in ticket_nums if num < 2000000] def get_ticket_num(ticket): ticket_num = ticket.split() ticket_num = ''.join((char for char in ticket_num[-1].strip() if char not in string.punctuation)) if not ticket_num.isdigit(): return np.nan return int(ticket_num) full['Ticket number'] = full['Ticket'].apply(get_ticket_num) full['Ticket number'].fillna(np.nanmedian(full['Ticket number'].values), inplace=True) full.drop(['Name', 'Ticket', 'Cabin', 'Parch', 'SibSp'], axis=1, inplace=True) encoders = {} to_encode = ['Embarked', 'Sex', 'Title'] for col in to_encode: encoders[col] = LabelEncoder() encoders[col].fit(full[col]) full[col] = full[col].apply(encoders[col].transform) age_train = full[full['Age'].notnull()] age_predict = full[~full['Age'].notnull()] lr = LinearRegression() lr.fit(age_train.drop(['Deck', 'Survived', 'PassengerId', 'Age'], axis=1), age_train['Age']) predicted_ages = lr.predict(age_predict.drop(['Deck', 'Survived', 'PassengerId', 'Age'], axis=1)) age_predict['Age'] = [max(0.0, age) for age in predicted_ages] full = pd.concat([age_train, age_predict]).sort_values('PassengerId') ages = age_train.Age ages.plot.kde(label='Original') ages = full.Age ages.plot.kde(label='With predicted missing values') full_with_deck = full[full['Deck'].notnull()] full_without_deck = full[~full['Deck'].notnull()] full_with_deck_means, full_without_deck_means = ([], []) for col in full_with_deck: if col not in ['Deck', 'PassengerId']: sum_means = np.nanmean(full_with_deck[col].values) + np.nanmean(full_without_deck[col].values) full_with_deck_means.append(np.nanmean(full_with_deck[col].values) / sum_means) full_without_deck_means.append(np.nanmean(full_without_deck[col].values) / sum_means) bar_width = 0.35 opacity = 0.4 x_index = np.arange(len(full_with_deck_means)) plt.bar(x_index, full_with_deck_means, bar_width, alpha=opacity, color='b', label='With deck value') plt.bar(x_index + bar_width, full_without_deck_means, bar_width, alpha=opacity, color='r', label='Missing deck value') plt.legend() plt.ylabel('Ratio of means') plt.xticks(x_index + bar_width, [col for col in full_with_deck if col not in ['PassengerId', 'Deck']]) plt.show()
code
326868/cell_24
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LinearRegression from sklearn.preprocessing import LabelEncoder import matplotlib.pyplot as plt import numpy as np import pandas as pd import string import numpy as np import pandas as pd import matplotlib.pyplot as plt from collections import Counter import string from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import LabelEncoder from sklearn.linear_model import LinearRegression pd.options.mode.chained_assignment = None def get_title(name): name = name.split(',')[1] name = name.split('.')[0] return name.strip() def get_title_grouped(name): title = get_title(name) if title in ['Rev', 'Dr', 'Col', 'Major', 'the Countess', 'Sir', 'Lady', 'Jonkheer', 'Capt', 'Dona', 'Don']: title = 'Rare' elif title in ['Ms', 'Mlle']: title = 'Miss' elif title == 'Mme': title = 'Mrs' return title def get_deck(cabin): if isinstance(cabin, str): if cabin[0] == 'T': return np.nan return cabin[0] return cabin train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') full = pd.concat([train, test]) # feature engineering described in previous notebooks full['Embarked'].fillna('C', inplace=True) full['Fare'].fillna(8.05, inplace=True) full['Title'] = full['Name'].apply(get_title_grouped) full['Deck'] = full['Cabin'].apply(get_deck) full['Family size'] = full['Parch'] + full['SibSp'] ticket_nums = [int(n.split()[-1]) for n in full['Ticket'].values if n.split()[-1].isdigit()] ticket_nums = [num for num in ticket_nums if num < 2000000] def get_ticket_num(ticket): ticket_num = ticket.split() ticket_num = ''.join((char for char in ticket_num[-1].strip() if char not in string.punctuation)) if not ticket_num.isdigit(): return np.nan return int(ticket_num) full['Ticket number'] = full['Ticket'].apply(get_ticket_num) full['Ticket number'].fillna(np.nanmedian(full['Ticket number'].values), inplace=True) full.drop(['Name', 'Ticket', 'Cabin', 'Parch', 'SibSp'], axis=1, inplace=True) encoders = {} to_encode = ['Embarked', 'Sex', 'Title'] for col in to_encode: encoders[col] = LabelEncoder() encoders[col].fit(full[col]) full[col] = full[col].apply(encoders[col].transform) age_train = full[full['Age'].notnull()] age_predict = full[~full['Age'].notnull()] lr = LinearRegression() lr.fit(age_train.drop(['Deck', 'Survived', 'PassengerId', 'Age'], axis=1), age_train['Age']) predicted_ages = lr.predict(age_predict.drop(['Deck', 'Survived', 'PassengerId', 'Age'], axis=1)) age_predict['Age'] = [max(0.0, age) for age in predicted_ages] full = pd.concat([age_train, age_predict]).sort_values('PassengerId') ages = age_train.Age ages.plot.kde(label='Original') ages = full.Age ages.plot.kde(label='With predicted missing values') full_with_deck = full[full['Deck'].notnull()] full_without_deck = full[~full['Deck'].notnull()] full_with_deck_means, full_without_deck_means = ([], []) for col in full_with_deck: if col not in ['Deck', 'PassengerId']: sum_means = np.nanmean(full_with_deck[col].values) + np.nanmean(full_without_deck[col].values) full_with_deck_means.append(np.nanmean(full_with_deck[col].values) / sum_means) full_without_deck_means.append(np.nanmean(full_without_deck[col].values) / sum_means) bar_width = 0.35 opacity = 0.4 x_index = np.arange(len(full_with_deck_means)) plt.xticks(x_index + bar_width, [col for col in full_with_deck if col not in ['PassengerId', 'Deck']]) train = full[full.PassengerId < 892] test = full[full.PassengerId >= 892] rf = RandomForestClassifier(n_estimators=100, oob_score=True) rf.fit(train.drop(['Survived', 'PassengerId'], axis=1), train['Survived']) rf.score(train.drop(['Survived', 'PassengerId'], axis=1), train['Survived']) rf.oob_score_ features = list(zip(train.drop(['Survived', 'PassengerId'], axis=1).columns.values, rf.feature_importances_)) features.sort(key=lambda f: f[1]) names = [f[0] for f in features] lengths = [f[1] for f in features] pos = np.arange(len(features)) + 0.5 plt.barh(pos, lengths, align='center', color='r', alpha=opacity) plt.yticks(pos, names) plt.xlabel('Gini importance') plt.show()
code
326868/cell_14
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from collections import Counter from sklearn.linear_model import LinearRegression from sklearn.preprocessing import LabelEncoder import matplotlib.pyplot as plt import numpy as np import pandas as pd import string import numpy as np import pandas as pd import matplotlib.pyplot as plt from collections import Counter import string from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import LabelEncoder from sklearn.linear_model import LinearRegression pd.options.mode.chained_assignment = None def get_title(name): name = name.split(',')[1] name = name.split('.')[0] return name.strip() def get_title_grouped(name): title = get_title(name) if title in ['Rev', 'Dr', 'Col', 'Major', 'the Countess', 'Sir', 'Lady', 'Jonkheer', 'Capt', 'Dona', 'Don']: title = 'Rare' elif title in ['Ms', 'Mlle']: title = 'Miss' elif title == 'Mme': title = 'Mrs' return title def get_deck(cabin): if isinstance(cabin, str): if cabin[0] == 'T': return np.nan return cabin[0] return cabin train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') full = pd.concat([train, test]) # feature engineering described in previous notebooks full['Embarked'].fillna('C', inplace=True) full['Fare'].fillna(8.05, inplace=True) full['Title'] = full['Name'].apply(get_title_grouped) full['Deck'] = full['Cabin'].apply(get_deck) full['Family size'] = full['Parch'] + full['SibSp'] ticket_nums = [int(n.split()[-1]) for n in full['Ticket'].values if n.split()[-1].isdigit()] ticket_nums = [num for num in ticket_nums if num < 2000000] def get_ticket_num(ticket): ticket_num = ticket.split() ticket_num = ''.join((char for char in ticket_num[-1].strip() if char not in string.punctuation)) if not ticket_num.isdigit(): return np.nan return int(ticket_num) full['Ticket number'] = full['Ticket'].apply(get_ticket_num) full['Ticket number'].fillna(np.nanmedian(full['Ticket number'].values), inplace=True) full.drop(['Name', 'Ticket', 'Cabin', 'Parch', 'SibSp'], axis=1, inplace=True) encoders = {} to_encode = ['Embarked', 'Sex', 'Title'] for col in to_encode: encoders[col] = LabelEncoder() encoders[col].fit(full[col]) full[col] = full[col].apply(encoders[col].transform) age_train = full[full['Age'].notnull()] age_predict = full[~full['Age'].notnull()] lr = LinearRegression() lr.fit(age_train.drop(['Deck', 'Survived', 'PassengerId', 'Age'], axis=1), age_train['Age']) predicted_ages = lr.predict(age_predict.drop(['Deck', 'Survived', 'PassengerId', 'Age'], axis=1)) age_predict['Age'] = [max(0.0, age) for age in predicted_ages] full = pd.concat([age_train, age_predict]).sort_values('PassengerId') ages = age_train.Age ages.plot.kde(label='Original') ages = full.Age ages.plot.kde(label='With predicted missing values') Counter(full['Deck'].values)
code
326868/cell_22
[ "application_vnd.jupyter.stderr_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LinearRegression from sklearn.preprocessing import LabelEncoder import matplotlib.pyplot as plt import numpy as np import pandas as pd import string import numpy as np import pandas as pd import matplotlib.pyplot as plt from collections import Counter import string from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import LabelEncoder from sklearn.linear_model import LinearRegression pd.options.mode.chained_assignment = None def get_title(name): name = name.split(',')[1] name = name.split('.')[0] return name.strip() def get_title_grouped(name): title = get_title(name) if title in ['Rev', 'Dr', 'Col', 'Major', 'the Countess', 'Sir', 'Lady', 'Jonkheer', 'Capt', 'Dona', 'Don']: title = 'Rare' elif title in ['Ms', 'Mlle']: title = 'Miss' elif title == 'Mme': title = 'Mrs' return title def get_deck(cabin): if isinstance(cabin, str): if cabin[0] == 'T': return np.nan return cabin[0] return cabin train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') full = pd.concat([train, test]) # feature engineering described in previous notebooks full['Embarked'].fillna('C', inplace=True) full['Fare'].fillna(8.05, inplace=True) full['Title'] = full['Name'].apply(get_title_grouped) full['Deck'] = full['Cabin'].apply(get_deck) full['Family size'] = full['Parch'] + full['SibSp'] ticket_nums = [int(n.split()[-1]) for n in full['Ticket'].values if n.split()[-1].isdigit()] ticket_nums = [num for num in ticket_nums if num < 2000000] def get_ticket_num(ticket): ticket_num = ticket.split() ticket_num = ''.join((char for char in ticket_num[-1].strip() if char not in string.punctuation)) if not ticket_num.isdigit(): return np.nan return int(ticket_num) full['Ticket number'] = full['Ticket'].apply(get_ticket_num) full['Ticket number'].fillna(np.nanmedian(full['Ticket number'].values), inplace=True) full.drop(['Name', 'Ticket', 'Cabin', 'Parch', 'SibSp'], axis=1, inplace=True) encoders = {} to_encode = ['Embarked', 'Sex', 'Title'] for col in to_encode: encoders[col] = LabelEncoder() encoders[col].fit(full[col]) full[col] = full[col].apply(encoders[col].transform) age_train = full[full['Age'].notnull()] age_predict = full[~full['Age'].notnull()] lr = LinearRegression() lr.fit(age_train.drop(['Deck', 'Survived', 'PassengerId', 'Age'], axis=1), age_train['Age']) predicted_ages = lr.predict(age_predict.drop(['Deck', 'Survived', 'PassengerId', 'Age'], axis=1)) age_predict['Age'] = [max(0.0, age) for age in predicted_ages] full = pd.concat([age_train, age_predict]).sort_values('PassengerId') ages = age_train.Age ages.plot.kde(label='Original') ages = full.Age ages.plot.kde(label='With predicted missing values') train = full[full.PassengerId < 892] test = full[full.PassengerId >= 892] rf = RandomForestClassifier(n_estimators=100, oob_score=True) rf.fit(train.drop(['Survived', 'PassengerId'], axis=1), train['Survived']) rf.score(train.drop(['Survived', 'PassengerId'], axis=1), train['Survived']) rf.oob_score_
code
326868/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.preprocessing import LabelEncoder import numpy as np import pandas as pd import string import numpy as np import pandas as pd import matplotlib.pyplot as plt from collections import Counter import string from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import LabelEncoder from sklearn.linear_model import LinearRegression pd.options.mode.chained_assignment = None def get_title(name): name = name.split(',')[1] name = name.split('.')[0] return name.strip() def get_title_grouped(name): title = get_title(name) if title in ['Rev', 'Dr', 'Col', 'Major', 'the Countess', 'Sir', 'Lady', 'Jonkheer', 'Capt', 'Dona', 'Don']: title = 'Rare' elif title in ['Ms', 'Mlle']: title = 'Miss' elif title == 'Mme': title = 'Mrs' return title def get_deck(cabin): if isinstance(cabin, str): if cabin[0] == 'T': return np.nan return cabin[0] return cabin train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') full = pd.concat([train, test]) # feature engineering described in previous notebooks full['Embarked'].fillna('C', inplace=True) full['Fare'].fillna(8.05, inplace=True) full['Title'] = full['Name'].apply(get_title_grouped) full['Deck'] = full['Cabin'].apply(get_deck) full['Family size'] = full['Parch'] + full['SibSp'] def get_ticket_num(ticket): ticket_num = ticket.split() ticket_num = ''.join((char for char in ticket_num[-1].strip() if char not in string.punctuation)) if not ticket_num.isdigit(): return np.nan return int(ticket_num) full['Ticket number'] = full['Ticket'].apply(get_ticket_num) full['Ticket number'].fillna(np.nanmedian(full['Ticket number'].values), inplace=True) full.drop(['Name', 'Ticket', 'Cabin', 'Parch', 'SibSp'], axis=1, inplace=True) encoders = {} to_encode = ['Embarked', 'Sex', 'Title'] for col in to_encode: encoders[col] = LabelEncoder() encoders[col].fit(full[col]) full[col] = full[col].apply(encoders[col].transform) age_train = full[full['Age'].notnull()] age_predict = full[~full['Age'].notnull()] lr = LinearRegression() lr.fit(age_train.drop(['Deck', 'Survived', 'PassengerId', 'Age'], axis=1), age_train['Age']) predicted_ages = lr.predict(age_predict.drop(['Deck', 'Survived', 'PassengerId', 'Age'], axis=1)) age_predict['Age'] = [max(0.0, age) for age in predicted_ages] full = pd.concat([age_train, age_predict]).sort_values('PassengerId')
code
326868/cell_12
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.preprocessing import LabelEncoder import matplotlib.pyplot as plt import numpy as np import pandas as pd import string import numpy as np import pandas as pd import matplotlib.pyplot as plt from collections import Counter import string from sklearn.ensemble import RandomForestClassifier from sklearn.preprocessing import LabelEncoder from sklearn.linear_model import LinearRegression pd.options.mode.chained_assignment = None def get_title(name): name = name.split(',')[1] name = name.split('.')[0] return name.strip() def get_title_grouped(name): title = get_title(name) if title in ['Rev', 'Dr', 'Col', 'Major', 'the Countess', 'Sir', 'Lady', 'Jonkheer', 'Capt', 'Dona', 'Don']: title = 'Rare' elif title in ['Ms', 'Mlle']: title = 'Miss' elif title == 'Mme': title = 'Mrs' return title def get_deck(cabin): if isinstance(cabin, str): if cabin[0] == 'T': return np.nan return cabin[0] return cabin train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') full = pd.concat([train, test]) # feature engineering described in previous notebooks full['Embarked'].fillna('C', inplace=True) full['Fare'].fillna(8.05, inplace=True) full['Title'] = full['Name'].apply(get_title_grouped) full['Deck'] = full['Cabin'].apply(get_deck) full['Family size'] = full['Parch'] + full['SibSp'] ticket_nums = [int(n.split()[-1]) for n in full['Ticket'].values if n.split()[-1].isdigit()] ticket_nums = [num for num in ticket_nums if num < 2000000] def get_ticket_num(ticket): ticket_num = ticket.split() ticket_num = ''.join((char for char in ticket_num[-1].strip() if char not in string.punctuation)) if not ticket_num.isdigit(): return np.nan return int(ticket_num) full['Ticket number'] = full['Ticket'].apply(get_ticket_num) full['Ticket number'].fillna(np.nanmedian(full['Ticket number'].values), inplace=True) full.drop(['Name', 'Ticket', 'Cabin', 'Parch', 'SibSp'], axis=1, inplace=True) encoders = {} to_encode = ['Embarked', 'Sex', 'Title'] for col in to_encode: encoders[col] = LabelEncoder() encoders[col].fit(full[col]) full[col] = full[col].apply(encoders[col].transform) age_train = full[full['Age'].notnull()] age_predict = full[~full['Age'].notnull()] lr = LinearRegression() lr.fit(age_train.drop(['Deck', 'Survived', 'PassengerId', 'Age'], axis=1), age_train['Age']) predicted_ages = lr.predict(age_predict.drop(['Deck', 'Survived', 'PassengerId', 'Age'], axis=1)) age_predict['Age'] = [max(0.0, age) for age in predicted_ages] full = pd.concat([age_train, age_predict]).sort_values('PassengerId') ages = age_train.Age ages.plot.kde(label='Original') ages = full.Age ages.plot.kde(label='With predicted missing values') plt.xlabel('Age') plt.legend() plt.show()
code
50227272/cell_21
[ "text_html_output_1.png" ]
import numpy as np import pandas as pd import plotly.graph_objects as go import pycountry import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import warnings warnings.filterwarnings(action='ignore') import plotly as py import plotly.graph_objects as go from plotly import tools def drop(df): df = df.drop(df.index[0]) return df df_2017 = pd.read_csv('../input/kaggle-survey-2017/multipleChoiceResponses.csv', encoding='latin1') df_2018 = pd.read_csv('../input/kaggle-survey-2018/multipleChoiceResponses.csv', encoding='latin1') df_2019 = pd.read_csv('../input/kaggle-survey-2019/multiple_choice_responses.csv') df_2020 = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv') df_2018 = drop(df_2018) df_2019 = drop(df_2019) df_2020 = drop(df_2020) num_qn = df_2020.columns stat_2017 = df_2017['GenderSelect'][0:].value_counts() f_2017 = round(stat_2017['Female'] / np.sum(stat_2017) * 100, 2) stat_2018 = df_2018['Q1'][0:].value_counts() f_2018 = round(stat_2018['Female'] / np.sum(stat_2018) * 100, 2) stat_2019 = df_2019['Q2'][0:].value_counts() f_2019 = round(stat_2019['Female'] / np.sum(stat_2019) * 100, 2) stat_2020 = df_2020['Q2'][0:].value_counts() f_2020 = round(stat_2020['Woman'] / np.sum(stat_2020) * 100, 2) color = ['rgb(49,130,189)'] color_m = ['rgb(49,130,189)', '#de6560'] mode_size = [12] line_size = [5] x_data = np.vstack((np.arange(2017, 2021),) * 1) y_data = np.array([[f_2017, f_2018, f_2019, f_2020]]) fig = go.Figure() for i in range(0, 1): fig.add_trace(go.Scatter(x=x_data[i], y=y_data[i], mode='lines', line=dict(color=color[i], width=line_size[i]), connectgaps=True)) fig.add_trace(go.Scatter(x=[x_data[i][0], x_data[i][-1]], y=[y_data[i][0], y_data[i][-1]], mode='markers', marker=dict(color=color_m[i], size=mode_size[i]))) fig.update_layout(title="<span style='color:#000; font-size:25px; font-family:PT Sans'>Relative number of female participants</span>", xaxis=dict(showline=True, showgrid=False, showticklabels=True, linecolor='rgb(204, 204, 204)', linewidth=2, ticks='outside', tickfont=dict(family='PT sans', size=15, color='rgb(82, 82, 82)')), yaxis=dict(showgrid=False, zeroline=False, showline=False, showticklabels=False), autosize=False, margin=dict(autoexpand=True, l=200, r=20, t=100), width=600, height=400, showlegend=False, plot_bgcolor='white') annotations = [] for y_trace, color in zip(y_data, color): annotations.append(dict(xref='paper', x=0.04, y=y_trace[0], xanchor='left', yanchor='bottom', text='{}%'.format(y_trace[0]), font=dict(family='PT sans', size=18, color='rgb(82, 82, 82)'), showarrow=False)) annotations.append(dict(xref='paper', x=0.9, y=y_trace[3], xanchor='right', yanchor='middle', text='{}%'.format(y_trace[3]), font=dict(family='PT sans', size=18, color='rgb(82, 82, 82)'), showarrow=False)) annotations.append(dict(xref='paper', yref='paper', x=0.5, y=-0.2, xanchor='center', yanchor='top', text='Source: 2017 - 2020 Kaggle Machine Learning & ' + 'Data Science Survey', font=dict(family='PT sans', size=12, color='rgb(150,150,150)'), showarrow=False)) fig.update_layout(annotations=annotations) age_2020 = df_2020[df_2020['Q2'] == 'Woman']['Q1'].value_counts() age = [] percent_age = [] for i, j in enumerate(age_2020.index): age.append(j) percent_age.append(round(age_2020[i] / np.sum(age_2020) * 100, 2)) color_first = '#de6560' color_rest = '#98c1d9' x_data = age y_data = percent_age white = "<b style='color: #fff; font-size:15px; font-family:PT Sans'> %{text}% </b>" black = "<b style='color: #000; font-size:15px; font-family:PT Sans'> %{text}% </b>" trace = go.Bar(y=x_data, x=y_data, orientation='h', marker=dict(color=[color_first] + [color_rest] * 10), text=y_data, texttemplate=[white] * 6 + [black] * 5, textposition=['inside'] * 6 + ['outside'] * 5) layout = dict(title="<span style='color:#000; font-size:25px; font-family:PT Sans'>Age groups</span><br><span style='color:#969696; font-size: 20px; font-family:PT Sans'>female paricipants</span><br>", margin=dict(t=150), legend=dict(orientation='h', yanchor='top', xanchor='center', y=1.06, x=0.5, font=dict(size=16)), xaxis=dict(side='top', showline=True, showgrid=True, showticklabels=True, linecolor='rgb(204, 204, 204)', linewidth=2, ticks='outside', tickfont=dict(family='PT sans', size=14, color='rgb(82, 82, 82)')), yaxis=dict(autorange='reversed', tickfont=dict(family='PT sans', size=18), color='rgb(82, 82, 82)'), barmode='group', bargap=0.05, bargroupgap=0.1, width=800, height=600, plot_bgcolor='white') fig = go.Figure(data=trace, layout=layout) fig.add_annotation(dict(xref='paper', yref='paper', x=0.5, y=0, xanchor='center', yanchor='top', text='Source: 2020 Kaggle Machine Learning & ' + 'Data Science Survey', font=dict(family='PT sans', size=12, color='rgb(150,150,150)'), showarrow=False)) import pycountry country_2020 = df_2020[df_2020['Q2'] == 'Woman']['Q3'] country_2020 = country_2020.value_counts() country = [] percent = [] for i, j in enumerate(country_2020.index): country.append(j) percent.append(round(country_2020[i] / np.sum(country_2020) * 100, 3)) country[1] = 'United States' country[4] = 'United Kingdom' country[7] = 'Russian Federation' country[12] = 'Iran, Islamic Republic of' country[13] = 'Taiwan, Province of China' country[20] = 'Korea, Republic of' country[-1] = "Korea, Democratic People's Republic of" input_countries = country countries = {} for cntry in pycountry.countries: countries[cntry.name] = cntry.alpha_3 codes = [countries.get(cntry, 'Unknown code') for cntry in input_countries] del codes[2:3] del percent[2:3] fig = go.Figure(data=go.Choropleth(locations=codes, z=percent, text=percent, colorscale='Reds', autocolorscale=False, reversescale=False, marker_line_color='darkgray', marker_line_width=0.5, colorbar_title="<span style='color:#000; font-size:16px; font-family:PT Sans'>Percentage</span><br>")) fig.update_layout(title="<span style='color:#000; font-size:25px; font-family:PT Sans'>Location</span><br><span style='color:#969696; font-size: 20px; font-family:PT Sans'>female participants</span><br>", margin=dict(t=150), geo=dict(showframe=False, showcoastlines=False, projection_type='equirectangular'), width=700, height=600, annotations=[dict(xref='paper', yref='paper', x=0.5, y=0, xanchor='center', yanchor='top', text='Source: 2020 Kaggle Machine Learning & ' + 'Data Science Survey', font=dict(family='PT sans', size=12, color='rgb(150,150,150)'), showarrow=False)]) fig.show()
code
50227272/cell_25
[ "text_html_output_1.png" ]
import numpy as np import pandas as pd import plotly.graph_objects as go import pycountry import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import warnings warnings.filterwarnings(action='ignore') import plotly as py import plotly.graph_objects as go from plotly import tools def drop(df): df = df.drop(df.index[0]) return df df_2017 = pd.read_csv('../input/kaggle-survey-2017/multipleChoiceResponses.csv', encoding='latin1') df_2018 = pd.read_csv('../input/kaggle-survey-2018/multipleChoiceResponses.csv', encoding='latin1') df_2019 = pd.read_csv('../input/kaggle-survey-2019/multiple_choice_responses.csv') df_2020 = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv') df_2018 = drop(df_2018) df_2019 = drop(df_2019) df_2020 = drop(df_2020) num_qn = df_2020.columns stat_2017 = df_2017['GenderSelect'][0:].value_counts() f_2017 = round(stat_2017['Female'] / np.sum(stat_2017) * 100, 2) stat_2018 = df_2018['Q1'][0:].value_counts() f_2018 = round(stat_2018['Female'] / np.sum(stat_2018) * 100, 2) stat_2019 = df_2019['Q2'][0:].value_counts() f_2019 = round(stat_2019['Female'] / np.sum(stat_2019) * 100, 2) stat_2020 = df_2020['Q2'][0:].value_counts() f_2020 = round(stat_2020['Woman'] / np.sum(stat_2020) * 100, 2) color = ['rgb(49,130,189)'] color_m = ['rgb(49,130,189)', '#de6560'] mode_size = [12] line_size = [5] x_data = np.vstack((np.arange(2017, 2021),) * 1) y_data = np.array([[f_2017, f_2018, f_2019, f_2020]]) fig = go.Figure() for i in range(0, 1): fig.add_trace(go.Scatter(x=x_data[i], y=y_data[i], mode='lines', line=dict(color=color[i], width=line_size[i]), connectgaps=True)) fig.add_trace(go.Scatter(x=[x_data[i][0], x_data[i][-1]], y=[y_data[i][0], y_data[i][-1]], mode='markers', marker=dict(color=color_m[i], size=mode_size[i]))) fig.update_layout(title="<span style='color:#000; font-size:25px; font-family:PT Sans'>Relative number of female participants</span>", xaxis=dict(showline=True, showgrid=False, showticklabels=True, linecolor='rgb(204, 204, 204)', linewidth=2, ticks='outside', tickfont=dict(family='PT sans', size=15, color='rgb(82, 82, 82)')), yaxis=dict(showgrid=False, zeroline=False, showline=False, showticklabels=False), autosize=False, margin=dict(autoexpand=True, l=200, r=20, t=100), width=600, height=400, showlegend=False, plot_bgcolor='white') annotations = [] for y_trace, color in zip(y_data, color): annotations.append(dict(xref='paper', x=0.04, y=y_trace[0], xanchor='left', yanchor='bottom', text='{}%'.format(y_trace[0]), font=dict(family='PT sans', size=18, color='rgb(82, 82, 82)'), showarrow=False)) annotations.append(dict(xref='paper', x=0.9, y=y_trace[3], xanchor='right', yanchor='middle', text='{}%'.format(y_trace[3]), font=dict(family='PT sans', size=18, color='rgb(82, 82, 82)'), showarrow=False)) annotations.append(dict(xref='paper', yref='paper', x=0.5, y=-0.2, xanchor='center', yanchor='top', text='Source: 2017 - 2020 Kaggle Machine Learning & ' + 'Data Science Survey', font=dict(family='PT sans', size=12, color='rgb(150,150,150)'), showarrow=False)) fig.update_layout(annotations=annotations) age_2020 = df_2020[df_2020['Q2'] == 'Woman']['Q1'].value_counts() age = [] percent_age = [] for i, j in enumerate(age_2020.index): age.append(j) percent_age.append(round(age_2020[i] / np.sum(age_2020) * 100, 2)) color_first = '#de6560' color_rest = '#98c1d9' x_data = age y_data = percent_age white = "<b style='color: #fff; font-size:15px; font-family:PT Sans'> %{text}% </b>" black = "<b style='color: #000; font-size:15px; font-family:PT Sans'> %{text}% </b>" trace = go.Bar(y=x_data, x=y_data, orientation='h', marker=dict(color=[color_first] + [color_rest] * 10), text=y_data, texttemplate=[white] * 6 + [black] * 5, textposition=['inside'] * 6 + ['outside'] * 5) layout = dict(title="<span style='color:#000; font-size:25px; font-family:PT Sans'>Age groups</span><br><span style='color:#969696; font-size: 20px; font-family:PT Sans'>female participants</span><br>", margin=dict(t=150), legend=dict(orientation='h', yanchor='top', xanchor='center', y=1.06, x=0.5, font=dict(size=16)), xaxis=dict(side='top', showline=True, showgrid=True, showticklabels=True, linecolor='rgb(204, 204, 204)', linewidth=2, ticks='outside', tickfont=dict(family='PT sans', size=14, color='rgb(82, 82, 82)')), yaxis=dict(autorange='reversed', tickfont=dict(family='PT sans', size=18), color='rgb(82, 82, 82)'), barmode='group', bargap=0.05, bargroupgap=0.1, width=800, height=600, plot_bgcolor='white') fig = go.Figure(data=trace, layout=layout) fig.add_annotation(dict(xref='paper', yref='paper', x=0.5, y=0, xanchor='center', yanchor='top', text='Source: 2020 Kaggle Machine Learning & ' + 'Data Science Survey', font=dict(family='PT sans', size=12, color='rgb(150,150,150)'), showarrow=False)) import pycountry country_2020 = df_2020[df_2020['Q2'] == 'Woman']['Q3'] country_2020 = country_2020.value_counts() country = [] percent = [] for i, j in enumerate(country_2020.index): country.append(j) percent.append(round(country_2020[i] / np.sum(country_2020) * 100, 3)) country[1] = 'United States' country[4] = 'United Kingdom' country[7] = 'Russian Federation' country[12] = 'Iran, Islamic Republic of' country[13] = 'Taiwan, Province of China' country[20] = 'Korea, Republic of' country[-1] = "Korea, Democratic People's Republic of" input_countries = country countries = {} for cntry in pycountry.countries: countries[cntry.name] = cntry.alpha_3 codes = [countries.get(cntry, 'Unknown code') for cntry in input_countries] del codes[2:3] del percent[2:3] fig = go.Figure(data=go.Choropleth(locations=codes, z=percent, text=percent, colorscale='Reds', autocolorscale=False, reversescale=False, marker_line_color='darkgray', marker_line_width=0.5, colorbar_title="<span style='color:#000; font-size:16px; font-family:PT Sans'>Percentage</span><br>")) fig.update_layout(title="<span style='color:#000; font-size:25px; font-family:PT Sans'>Location</span><br><span style='color:#969696; font-size: 20px; font-family:PT Sans'>female participants</span><br>", margin=dict(t=150), geo=dict(showframe=False, showcoastlines=False, projection_type='equirectangular'), width=700, height=600, annotations=[dict(xref='paper', yref='paper', x=0.5, y=0, xanchor='center', yanchor='top', text='Source: 2020 Kaggle Machine Learning & ' + 'Data Science Survey', font=dict(family='PT sans', size=12, color='rgb(150,150,150)'), showarrow=False)]) education_2020 = df_2020[df_2020['Q2'] == 'Woman']['Q4'].value_counts() education = [] percent_edu = [] for i, j in enumerate(education_2020.index): education.append(j) percent_edu.append(round(education_2020[i] / np.sum(education_2020) * 100, 2)) color_first = '#de6560' color_rest = '#98c1d9' x_data = percent_edu y_data = education white = "<b style='color: #fff; font-size:15px; font-family:PT Sans'> %{text}% </b>" black = "<b style='color: #000; font-size:15px; font-family:PT Sans'> %{text}% </b>" trace = go.Bar(x=x_data, y=y_data, orientation='h', marker=dict(color=[color_first] + [color_rest] * 6), text=x_data, texttemplate=[white] * 2 + [black] * 5, textposition=['inside'] * 2 + ['outside'] * 5) layout = dict(title="<span style='color:#000; font-size:25px; font-family:PT Sans'>Level of education</span><br><span style='color:#969696; font-size: 20px; font-family:PT Sans'>female participants</span><br>", margin=dict(t=150), legend=dict(orientation='h', yanchor='top', xanchor='center', y=1.06, x=0.5, font=dict(size=16)), xaxis=dict(autorange='reversed', side='right', showline=True, showgrid=True, showticklabels=True, linecolor='rgb(204, 204, 204)', linewidth=2, ticks='outside', tickfont=dict(family='PT sans', size=14, color='rgb(82, 82, 82)')), yaxis=dict(autorange='reversed', side='right', tickfont=dict(family='PT sans', size=18, color='rgb(82, 82, 82)')), barmode='group', bargap=0.05, bargroupgap=0.1, width=700, height=600, plot_bgcolor='white') fig = go.Figure(data=trace, layout=layout) fig.add_annotation(dict(xref='paper', yref='paper', x=1, y=-0.11, xanchor='center', yanchor='top', text='Source: 2020 Kaggle Machine Learning & ' + 'Data Science Survey', font=dict(family='PT sans', size=12, color='rgb(150,150,150)'), showarrow=False)) fig.show()
code
50227272/cell_29
[ "text_html_output_1.png" ]
import numpy as np import pandas as pd import plotly.graph_objects as go import pycountry import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import warnings warnings.filterwarnings(action='ignore') import plotly as py import plotly.graph_objects as go from plotly import tools def drop(df): df = df.drop(df.index[0]) return df df_2017 = pd.read_csv('../input/kaggle-survey-2017/multipleChoiceResponses.csv', encoding='latin1') df_2018 = pd.read_csv('../input/kaggle-survey-2018/multipleChoiceResponses.csv', encoding='latin1') df_2019 = pd.read_csv('../input/kaggle-survey-2019/multiple_choice_responses.csv') df_2020 = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv') df_2018 = drop(df_2018) df_2019 = drop(df_2019) df_2020 = drop(df_2020) num_qn = df_2020.columns stat_2017 = df_2017['GenderSelect'][0:].value_counts() f_2017 = round(stat_2017['Female'] / np.sum(stat_2017) * 100, 2) stat_2018 = df_2018['Q1'][0:].value_counts() f_2018 = round(stat_2018['Female'] / np.sum(stat_2018) * 100, 2) stat_2019 = df_2019['Q2'][0:].value_counts() f_2019 = round(stat_2019['Female'] / np.sum(stat_2019) * 100, 2) stat_2020 = df_2020['Q2'][0:].value_counts() f_2020 = round(stat_2020['Woman'] / np.sum(stat_2020) * 100, 2) color = ['rgb(49,130,189)'] color_m = ['rgb(49,130,189)', '#de6560'] mode_size = [12] line_size = [5] x_data = np.vstack((np.arange(2017, 2021),) * 1) y_data = np.array([[f_2017, f_2018, f_2019, f_2020]]) fig = go.Figure() for i in range(0, 1): fig.add_trace(go.Scatter(x=x_data[i], y=y_data[i], mode='lines', line=dict(color=color[i], width=line_size[i]), connectgaps=True)) fig.add_trace(go.Scatter(x=[x_data[i][0], x_data[i][-1]], y=[y_data[i][0], y_data[i][-1]], mode='markers', marker=dict(color=color_m[i], size=mode_size[i]))) fig.update_layout(title="<span style='color:#000; font-size:25px; font-family:PT Sans'>Relative number of female participants</span>", xaxis=dict(showline=True, showgrid=False, showticklabels=True, linecolor='rgb(204, 204, 204)', linewidth=2, ticks='outside', tickfont=dict(family='PT sans', size=15, color='rgb(82, 82, 82)')), yaxis=dict(showgrid=False, zeroline=False, showline=False, showticklabels=False), autosize=False, margin=dict(autoexpand=True, l=200, r=20, t=100), width=600, height=400, showlegend=False, plot_bgcolor='white') annotations = [] for y_trace, color in zip(y_data, color): annotations.append(dict(xref='paper', x=0.04, y=y_trace[0], xanchor='left', yanchor='bottom', text='{}%'.format(y_trace[0]), font=dict(family='PT sans', size=18, color='rgb(82, 82, 82)'), showarrow=False)) annotations.append(dict(xref='paper', x=0.9, y=y_trace[3], xanchor='right', yanchor='middle', text='{}%'.format(y_trace[3]), font=dict(family='PT sans', size=18, color='rgb(82, 82, 82)'), showarrow=False)) annotations.append(dict(xref='paper', yref='paper', x=0.5, y=-0.2, xanchor='center', yanchor='top', text='Source: 2017 - 2020 Kaggle Machine Learning & ' + 'Data Science Survey', font=dict(family='PT sans', size=12, color='rgb(150,150,150)'), showarrow=False)) fig.update_layout(annotations=annotations) age_2020 = df_2020[df_2020['Q2'] == 'Woman']['Q1'].value_counts() age = [] percent_age = [] for i, j in enumerate(age_2020.index): age.append(j) percent_age.append(round(age_2020[i] / np.sum(age_2020) * 100, 2)) color_first = '#de6560' color_rest = '#98c1d9' x_data = age y_data = percent_age white = "<b style='color: #fff; font-size:15px; font-family:PT Sans'> %{text}% </b>" black = "<b style='color: #000; font-size:15px; font-family:PT Sans'> %{text}% </b>" trace = go.Bar(y=x_data, x=y_data, orientation='h', marker=dict(color=[color_first] + [color_rest] * 10), text=y_data, texttemplate=[white] * 6 + [black] * 5, textposition=['inside'] * 6 + ['outside'] * 5) layout = dict(title="<span style='color:#000; font-size:25px; font-family:PT Sans'>Age groups</span><br><span style='color:#969696; font-size: 20px; font-family:PT Sans'>female participants</span><br>", margin=dict(t=150), legend=dict(orientation='h', yanchor='top', xanchor='center', y=1.06, x=0.5, font=dict(size=16)), xaxis=dict(side='top', showline=True, showgrid=True, showticklabels=True, linecolor='rgb(204, 204, 204)', linewidth=2, ticks='outside', tickfont=dict(family='PT sans', size=14, color='rgb(82, 82, 82)')), yaxis=dict(autorange='reversed', tickfont=dict(family='PT sans', size=18), color='rgb(82, 82, 82)'), barmode='group', bargap=0.05, bargroupgap=0.1, width=800, height=600, plot_bgcolor='white') fig = go.Figure(data=trace, layout=layout) fig.add_annotation(dict(xref='paper', yref='paper', x=0.5, y=0, xanchor='center', yanchor='top', text='Source: 2020 Kaggle Machine Learning & ' + 'Data Science Survey', font=dict(family='PT sans', size=12, color='rgb(150,150,150)'), showarrow=False)) import pycountry country_2020 = df_2020[df_2020['Q2'] == 'Woman']['Q3'] country_2020 = country_2020.value_counts() country = [] percent = [] for i, j in enumerate(country_2020.index): country.append(j) percent.append(round(country_2020[i] / np.sum(country_2020) * 100, 3)) country[1] = 'United States' country[4] = 'United Kingdom' country[7] = 'Russian Federation' country[12] = 'Iran, Islamic Republic of' country[13] = 'Taiwan, Province of China' country[20] = 'Korea, Republic of' country[-1] = "Korea, Democratic People's Republic of" input_countries = country countries = {} for cntry in pycountry.countries: countries[cntry.name] = cntry.alpha_3 codes = [countries.get(cntry, 'Unknown code') for cntry in input_countries] del codes[2:3] del percent[2:3] fig = go.Figure(data=go.Choropleth(locations=codes, z=percent, text=percent, colorscale='Reds', autocolorscale=False, reversescale=False, marker_line_color='darkgray', marker_line_width=0.5, colorbar_title="<span style='color:#000; font-size:16px; font-family:PT Sans'>Percentage</span><br>")) fig.update_layout(title="<span style='color:#000; font-size:25px; font-family:PT Sans'>Location</span><br><span style='color:#969696; font-size: 20px; font-family:PT Sans'>female participants</span><br>", margin=dict(t=150), geo=dict(showframe=False, showcoastlines=False, projection_type='equirectangular'), width=700, height=600, annotations=[dict(xref='paper', yref='paper', x=0.5, y=0, xanchor='center', yanchor='top', text='Source: 2020 Kaggle Machine Learning & ' + 'Data Science Survey', font=dict(family='PT sans', size=12, color='rgb(150,150,150)'), showarrow=False)]) education_2020 = df_2020[df_2020['Q2'] == 'Woman']['Q4'].value_counts() education = [] percent_edu = [] for i, j in enumerate(education_2020.index): education.append(j) percent_edu.append(round(education_2020[i] / np.sum(education_2020) * 100, 2)) color_first = '#de6560' color_rest = '#98c1d9' x_data = percent_edu y_data = education white = "<b style='color: #fff; font-size:15px; font-family:PT Sans'> %{text}% </b>" black = "<b style='color: #000; font-size:15px; font-family:PT Sans'> %{text}% </b>" trace = go.Bar(x=x_data, y=y_data, orientation='h', marker=dict(color=[color_first] + [color_rest] * 6), text=x_data, texttemplate=[white] * 2 + [black] * 5, textposition=['inside'] * 2 + ['outside'] * 5) layout = dict(title="<span style='color:#000; font-size:25px; font-family:PT Sans'>Level of education</span><br><span style='color:#969696; font-size: 20px; font-family:PT Sans'>female participants</span><br>", margin=dict(t=150), legend=dict(orientation='h', yanchor='top', xanchor='center', y=1.06, x=0.5, font=dict(size=16)), xaxis=dict(autorange='reversed', side='right', showline=True, showgrid=True, showticklabels=True, linecolor='rgb(204, 204, 204)', linewidth=2, ticks='outside', tickfont=dict(family='PT sans', size=14, color='rgb(82, 82, 82)')), yaxis=dict(autorange='reversed', side='right', tickfont=dict(family='PT sans', size=18, color='rgb(82, 82, 82)')), barmode='group', bargap=0.05, bargroupgap=0.1, width=700, height=600, plot_bgcolor='white') fig = go.Figure(data=trace, layout=layout) fig.add_annotation(dict(xref='paper', yref='paper', x=1, y=-0.11, xanchor='center', yanchor='top', text='Source: 2020 Kaggle Machine Learning & ' + 'Data Science Survey', font=dict(family='PT sans', size=12, color='rgb(150,150,150)'), showarrow=False)) qns = [x for x in num_qn] def data(qnn): qn = [x for x in qns if qnn in x] name = [] for q in qn: for x in df_2020[q].unique(): name.append(x) name = [x for x in name if str(x) != 'nan'] name = [x.strip(' ') for x in name] name_percent = (df_2020.shape[0] - df_2020[qn].isnull().sum()) / df_2020.shape[0] name_percent.index = name name_percent = name_percent.sort_values(ascending=False) name = [] percent_name = [] for i, j in enumerate(name_percent.index): name.append(j) percent_name.append(round(name_percent[i] * 100, 2)) return (name, percent_name) language, percent_lng = data('Q7') color_first = '#de6560' color_rest = '#98c1d9' x_data = percent_lng y_data = language white = "<b style='color: #fff; font-size:15px; font-family:PT Sans'> %{text}% </b>" black = "<b style='color: #000; font-size:15px; font-family:PT Sans'> %{text}% </b>" trace = go.Bar(x=x_data, y=y_data, orientation='h', marker=dict(color=[color_first] + [color_rest] * 12), text=x_data, texttemplate=[white] * 7 + [black] * 6, textposition=['inside'] * 7 + ['outside'] * 6) layout = dict(title="<span style='color:#000; font-size:25px; font-family:PT Sans'>Programming language</span><br><span style='color:#969696; font-size: 20px; font-family:PT Sans'>all participants</span><br>", margin=dict(t=150), legend=dict(orientation='h', yanchor='top', xanchor='center', y=1.06, x=0.5, font=dict(size=16)), xaxis=dict(side='top', showline=True, showgrid=True, showticklabels=True, linecolor='rgb(204, 204, 204)', linewidth=2, ticks='outside', tickfont=dict(family='PT sans', size=14, color='rgb(82, 82, 82)')), yaxis=dict(autorange='reversed', tickfont=dict(family='PT sans', size=18, color='rgb(82, 82, 82)')), barmode='group', bargap=0.05, bargroupgap=0.1, width=700, height=600, plot_bgcolor='white') fig = go.Figure(data=trace, layout=layout) fig.add_annotation(dict(xref='paper', yref='paper', x=0.5, y=0, xanchor='center', yanchor='top', text='Source: 2020 Kaggle Machine Learning & ' + 'Data Science Survey', font=dict(family='PT sans', size=12, color='rgb(150,150,150)'), showarrow=False)) fig.show()
code
50227272/cell_17
[ "text_html_output_1.png" ]
import numpy as np import pandas as pd import plotly.graph_objects as go import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import warnings warnings.filterwarnings(action='ignore') import plotly as py import plotly.graph_objects as go from plotly import tools def drop(df): df = df.drop(df.index[0]) return df df_2017 = pd.read_csv('../input/kaggle-survey-2017/multipleChoiceResponses.csv', encoding='latin1') df_2018 = pd.read_csv('../input/kaggle-survey-2018/multipleChoiceResponses.csv', encoding='latin1') df_2019 = pd.read_csv('../input/kaggle-survey-2019/multiple_choice_responses.csv') df_2020 = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv') df_2018 = drop(df_2018) df_2019 = drop(df_2019) df_2020 = drop(df_2020) num_qn = df_2020.columns stat_2017 = df_2017['GenderSelect'][0:].value_counts() f_2017 = round(stat_2017['Female'] / np.sum(stat_2017) * 100, 2) stat_2018 = df_2018['Q1'][0:].value_counts() f_2018 = round(stat_2018['Female'] / np.sum(stat_2018) * 100, 2) stat_2019 = df_2019['Q2'][0:].value_counts() f_2019 = round(stat_2019['Female'] / np.sum(stat_2019) * 100, 2) stat_2020 = df_2020['Q2'][0:].value_counts() f_2020 = round(stat_2020['Woman'] / np.sum(stat_2020) * 100, 2) color = ['rgb(49,130,189)'] color_m = ['rgb(49,130,189)', '#de6560'] mode_size = [12] line_size = [5] x_data = np.vstack((np.arange(2017, 2021),) * 1) y_data = np.array([[f_2017, f_2018, f_2019, f_2020]]) fig = go.Figure() for i in range(0, 1): fig.add_trace(go.Scatter(x=x_data[i], y=y_data[i], mode='lines', line=dict(color=color[i], width=line_size[i]), connectgaps=True)) fig.add_trace(go.Scatter(x=[x_data[i][0], x_data[i][-1]], y=[y_data[i][0], y_data[i][-1]], mode='markers', marker=dict(color=color_m[i], size=mode_size[i]))) fig.update_layout(title="<span style='color:#000; font-size:25px; font-family:PT Sans'>Relative number of female participants</span>", xaxis=dict(showline=True, showgrid=False, showticklabels=True, linecolor='rgb(204, 204, 204)', linewidth=2, ticks='outside', tickfont=dict(family='PT sans', size=15, color='rgb(82, 82, 82)')), yaxis=dict(showgrid=False, zeroline=False, showline=False, showticklabels=False), autosize=False, margin=dict(autoexpand=True, l=200, r=20, t=100), width=600, height=400, showlegend=False, plot_bgcolor='white') annotations = [] for y_trace, color in zip(y_data, color): annotations.append(dict(xref='paper', x=0.04, y=y_trace[0], xanchor='left', yanchor='bottom', text='{}%'.format(y_trace[0]), font=dict(family='PT sans', size=18, color='rgb(82, 82, 82)'), showarrow=False)) annotations.append(dict(xref='paper', x=0.9, y=y_trace[3], xanchor='right', yanchor='middle', text='{}%'.format(y_trace[3]), font=dict(family='PT sans', size=18, color='rgb(82, 82, 82)'), showarrow=False)) annotations.append(dict(xref='paper', yref='paper', x=0.5, y=-0.2, xanchor='center', yanchor='top', text='Source: 2017 - 2020 Kaggle Machine Learning & ' + 'Data Science Survey', font=dict(family='PT sans', size=12, color='rgb(150,150,150)'), showarrow=False)) fig.update_layout(annotations=annotations) age_2020 = df_2020[df_2020['Q2'] == 'Woman']['Q1'].value_counts() age = [] percent_age = [] for i, j in enumerate(age_2020.index): age.append(j) percent_age.append(round(age_2020[i] / np.sum(age_2020) * 100, 2)) color_first = '#de6560' color_rest = '#98c1d9' x_data = age y_data = percent_age white = "<b style='color: #fff; font-size:15px; font-family:PT Sans'> %{text}% </b>" black = "<b style='color: #000; font-size:15px; font-family:PT Sans'> %{text}% </b>" trace = go.Bar(y=x_data, x=y_data, orientation='h', marker=dict(color=[color_first] + [color_rest] * 10), text=y_data, texttemplate=[white] * 6 + [black] * 5, textposition=['inside'] * 6 + ['outside'] * 5) layout = dict(title="<span style='color:#000; font-size:25px; font-family:PT Sans'>Age groups</span><br><span style='color:#969696; font-size: 20px; font-family:PT Sans'>female participants</span><br>", margin=dict(t=150), legend=dict(orientation='h', yanchor='top', xanchor='center', y=1.06, x=0.5, font=dict(size=16)), xaxis=dict(side='top', showline=True, showgrid=True, showticklabels=True, linecolor='rgb(204, 204, 204)', linewidth=2, ticks='outside', tickfont=dict(family='PT sans', size=14, color='rgb(82, 82, 82)')), yaxis=dict(autorange='reversed', tickfont=dict(family='PT sans', size=18), color='rgb(82, 82, 82)'), barmode='group', bargap=0.05, bargroupgap=0.1, width=800, height=600, plot_bgcolor='white') fig = go.Figure(data=trace, layout=layout) fig.add_annotation(dict(xref='paper', yref='paper', x=0.5, y=0, xanchor='center', yanchor='top', text='Source: 2020 Kaggle Machine Learning & ' + 'Data Science Survey', font=dict(family='PT sans', size=12, color='rgb(150,150,150)'), showarrow=False)) fig.show()
code
50227272/cell_14
[ "text_html_output_2.png" ]
import numpy as np import pandas as pd import plotly.graph_objects as go import warnings import numpy as np import pandas as pd import matplotlib.pyplot as plt import warnings warnings.filterwarnings(action='ignore') import plotly as py import plotly.graph_objects as go from plotly import tools def drop(df): df = df.drop(df.index[0]) return df df_2017 = pd.read_csv('../input/kaggle-survey-2017/multipleChoiceResponses.csv', encoding='latin1') df_2018 = pd.read_csv('../input/kaggle-survey-2018/multipleChoiceResponses.csv', encoding='latin1') df_2019 = pd.read_csv('../input/kaggle-survey-2019/multiple_choice_responses.csv') df_2020 = pd.read_csv('../input/kaggle-survey-2020/kaggle_survey_2020_responses.csv') df_2018 = drop(df_2018) df_2019 = drop(df_2019) df_2020 = drop(df_2020) num_qn = df_2020.columns stat_2017 = df_2017['GenderSelect'][0:].value_counts() f_2017 = round(stat_2017['Female'] / np.sum(stat_2017) * 100, 2) stat_2018 = df_2018['Q1'][0:].value_counts() f_2018 = round(stat_2018['Female'] / np.sum(stat_2018) * 100, 2) stat_2019 = df_2019['Q2'][0:].value_counts() f_2019 = round(stat_2019['Female'] / np.sum(stat_2019) * 100, 2) stat_2020 = df_2020['Q2'][0:].value_counts() f_2020 = round(stat_2020['Woman'] / np.sum(stat_2020) * 100, 2) color = ['rgb(49,130,189)'] color_m = ['rgb(49,130,189)', '#de6560'] mode_size = [12] line_size = [5] x_data = np.vstack((np.arange(2017, 2021),) * 1) y_data = np.array([[f_2017, f_2018, f_2019, f_2020]]) fig = go.Figure() for i in range(0, 1): fig.add_trace(go.Scatter(x=x_data[i], y=y_data[i], mode='lines', line=dict(color=color[i], width=line_size[i]), connectgaps=True)) fig.add_trace(go.Scatter(x=[x_data[i][0], x_data[i][-1]], y=[y_data[i][0], y_data[i][-1]], mode='markers', marker=dict(color=color_m[i], size=mode_size[i]))) fig.update_layout(title="<span style='color:#000; font-size:25px; font-family:PT Sans'>Relative number of female participants</span>", xaxis=dict(showline=True, showgrid=False, showticklabels=True, linecolor='rgb(204, 204, 204)', linewidth=2, ticks='outside', tickfont=dict(family='PT sans', size=15, color='rgb(82, 82, 82)')), yaxis=dict(showgrid=False, zeroline=False, showline=False, showticklabels=False), autosize=False, margin=dict(autoexpand=True, l=200, r=20, t=100), width=600, height=400, showlegend=False, plot_bgcolor='white') annotations = [] for y_trace, color in zip(y_data, color): annotations.append(dict(xref='paper', x=0.04, y=y_trace[0], xanchor='left', yanchor='bottom', text='{}%'.format(y_trace[0]), font=dict(family='PT sans', size=18, color='rgb(82, 82, 82)'), showarrow=False)) annotations.append(dict(xref='paper', x=0.9, y=y_trace[3], xanchor='right', yanchor='middle', text='{}%'.format(y_trace[3]), font=dict(family='PT sans', size=18, color='rgb(82, 82, 82)'), showarrow=False)) annotations.append(dict(xref='paper', yref='paper', x=0.5, y=-0.2, xanchor='center', yanchor='top', text='Source: 2017 - 2020 Kaggle Machine Learning & ' + 'Data Science Survey', font=dict(family='PT sans', size=12, color='rgb(150,150,150)'), showarrow=False)) fig.update_layout(annotations=annotations) fig.show()
code
18135360/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd adult = pd.read_csv('adult.csv', na_values=['?', ',', 'NaN']) adult.dtypes adult.shape adult.columns adult = adult.drop('fnlwgt', axis=1) adult = adult.replace('>50K', 1) adult = adult.replace('<=50K', 0) adult.isnull().sum() adult.workclass.value_counts() adult = adult.fillna({'workclass': 'Private'})
code
18135360/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd adult = pd.read_csv('adult.csv', na_values=['?', ',', 'NaN']) adult.dtypes adult.shape adult.columns adult = adult.drop('fnlwgt', axis=1) adult.head()
code
18135360/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd adult = pd.read_csv('adult.csv', na_values=['?', ',', 'NaN']) adult.dtypes
code
18135360/cell_20
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd adult = pd.read_csv('adult.csv', na_values=['?', ',', 'NaN']) adult.dtypes adult.shape adult.columns adult = adult.drop('fnlwgt', axis=1) adult = adult.replace('>50K', 1) adult = adult.replace('<=50K', 0) adult.isnull().sum() adult.workclass.value_counts() adult = adult.fillna({'workclass': 'Private'}) adult.occupation.value_counts() adult = adult.fillna({'occupation': 'Prof-specialty'}) adult.dropna(axis=0, inplace=True) adult.isnull().sum() adult.dtypes adult = pd.get_dummies(adult) y = adult['income'] X = adult.drop('income', axis=1) X
code
18135360/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd adult = pd.read_csv('adult.csv', na_values=['?', ',', 'NaN']) adult.dtypes adult.shape
code
18135360/cell_2
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd adult = pd.read_csv('adult.csv', na_values=['?', ',', 'NaN'])
code
18135360/cell_11
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd adult = pd.read_csv('adult.csv', na_values=['?', ',', 'NaN']) adult.dtypes adult.shape adult.columns adult = adult.drop('fnlwgt', axis=1) adult = adult.replace('>50K', 1) adult = adult.replace('<=50K', 0) adult.head()
code
18135360/cell_19
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd adult = pd.read_csv('adult.csv', na_values=['?', ',', 'NaN']) adult.dtypes adult.shape adult.columns adult = adult.drop('fnlwgt', axis=1) adult = adult.replace('>50K', 1) adult = adult.replace('<=50K', 0) adult.isnull().sum() adult.workclass.value_counts() adult = adult.fillna({'workclass': 'Private'}) adult.occupation.value_counts() adult = adult.fillna({'occupation': 'Prof-specialty'}) adult.dropna(axis=0, inplace=True) adult.isnull().sum() adult.dtypes adult = pd.get_dummies(adult) y = adult['income'] X = adult.drop('income', axis=1)
code
18135360/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd adult = pd.read_csv('adult.csv', na_values=['?', ',', 'NaN']) adult.dtypes adult.shape adult.columns
code
18135360/cell_18
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd adult = pd.read_csv('adult.csv', na_values=['?', ',', 'NaN']) adult.dtypes adult.shape adult.columns adult = adult.drop('fnlwgt', axis=1) adult = adult.replace('>50K', 1) adult = adult.replace('<=50K', 0) adult.isnull().sum() adult.workclass.value_counts() adult = adult.fillna({'workclass': 'Private'}) adult.occupation.value_counts() adult = adult.fillna({'occupation': 'Prof-specialty'}) adult.dropna(axis=0, inplace=True) adult.isnull().sum() adult.dtypes adult = pd.get_dummies(adult)
code
18135360/cell_8
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd adult = pd.read_csv('adult.csv', na_values=['?', ',', 'NaN']) adult.dtypes adult.shape adult.columns adult = adult.drop('fnlwgt', axis=1)
code
18135360/cell_15
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd adult = pd.read_csv('adult.csv', na_values=['?', ',', 'NaN']) adult.dtypes adult.shape adult.columns adult = adult.drop('fnlwgt', axis=1) adult = adult.replace('>50K', 1) adult = adult.replace('<=50K', 0) adult.isnull().sum() adult.workclass.value_counts() adult = adult.fillna({'workclass': 'Private'}) adult.occupation.value_counts() adult = adult.fillna({'occupation': 'Prof-specialty'}) adult.dropna(axis=0, inplace=True)
code
18135360/cell_16
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd adult = pd.read_csv('adult.csv', na_values=['?', ',', 'NaN']) adult.dtypes adult.shape adult.columns adult = adult.drop('fnlwgt', axis=1) adult = adult.replace('>50K', 1) adult = adult.replace('<=50K', 0) adult.isnull().sum() adult.workclass.value_counts() adult = adult.fillna({'workclass': 'Private'}) adult.occupation.value_counts() adult = adult.fillna({'occupation': 'Prof-specialty'}) adult.dropna(axis=0, inplace=True) adult.isnull().sum()
code
18135360/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd adult = pd.read_csv('adult.csv', na_values=['?', ',', 'NaN']) adult.head()
code
18135360/cell_17
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd adult = pd.read_csv('adult.csv', na_values=['?', ',', 'NaN']) adult.dtypes adult.shape adult.columns adult = adult.drop('fnlwgt', axis=1) adult = adult.replace('>50K', 1) adult = adult.replace('<=50K', 0) adult.isnull().sum() adult.workclass.value_counts() adult = adult.fillna({'workclass': 'Private'}) adult.occupation.value_counts() adult = adult.fillna({'occupation': 'Prof-specialty'}) adult.dropna(axis=0, inplace=True) adult.isnull().sum() adult.dtypes
code
18135360/cell_14
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd adult = pd.read_csv('adult.csv', na_values=['?', ',', 'NaN']) adult.dtypes adult.shape adult.columns adult = adult.drop('fnlwgt', axis=1) adult = adult.replace('>50K', 1) adult = adult.replace('<=50K', 0) adult.isnull().sum() adult.workclass.value_counts() adult = adult.fillna({'workclass': 'Private'}) adult.occupation.value_counts() adult = adult.fillna({'occupation': 'Prof-specialty'})
code
18135360/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd adult = pd.read_csv('adult.csv', na_values=['?', ',', 'NaN']) adult.dtypes adult.shape adult.columns adult = adult.drop('fnlwgt', axis=1) adult = adult.replace('>50K', 1) adult = adult.replace('<=50K', 0)
code
18135360/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd adult = pd.read_csv('adult.csv', na_values=['?', ',', 'NaN']) adult.dtypes adult.shape adult.columns adult = adult.drop('fnlwgt', axis=1) adult = adult.replace('>50K', 1) adult = adult.replace('<=50K', 0) adult.isnull().sum()
code
18135360/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd adult = pd.read_csv('adult.csv', na_values=['?', ',', 'NaN']) adult.dtypes adult.head()
code
34123573/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv') x_k = df['Annual Income (k$)'].values y_k = df['Spending Score (1-100)'].values y_k
code
34123573/cell_25
[ "text_plain_output_1.png" ]
from sklearn.cluster import KMeans import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv') x_k = df['Annual Income (k$)'].values y_k = df['Spending Score (1-100)'].values x_k2 = list(zip(x_k, y_k)) x_k2 = np.array(x_k2) x_k2 model = KMeans(n_clusters=5) model.fit(x_k2) y_kmeans = model.predict(x_k2) y_kmeans label = model.labels_ label
code
34123573/cell_4
[ "image_output_1.png" ]
import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import numpy as np import plotly import plotly.express as px import cufflinks as cf import plotly.offline as pyo from plotly.offline import init_notebook_mode, plot, iplot pyo.init_notebook_mode(connected=True) cf.go_offline()
code
34123573/cell_33
[ "text_plain_output_1.png" ]
from sklearn.cluster import KMeans import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv') f,axes=plt.subplots(1,3,figsize=(20,20)) sns.distplot(df['Annual Income (k$)'],color='red',label="nannualincome",ax=axes[0]) sns.distplot(df['Age'],color='yellow',label="age",ax=axes[1]) sns.distplot(df['Spending Score (1-100)'],color="skyblue", label="Spending Score",ax=axes[2]) f, axes = plt.subplots(1, 3, figsize=(20, 5)) #sharex=True) sns.distplot(df['Annual Income (k$)'][df['Gender']=="Male"],color="salmon", label="Annual Income (k$)",ax=axes[0]) sns.distplot(df['Annual Income (k$)'][df['Gender']=="Female"],color="skyblue", label="Annual Income (k$)",ax=axes[0]) sns.distplot(df['Age'][df['Gender']=="Male"],color="salmon", label="Age",ax=axes[1]) sns.distplot(df['Age'][df['Gender']=="Female"],color="skyblue", label="Age",ax=axes[1]) sns.distplot(df['Spending Score (1-100)'][df['Gender']=="Male"],color="salmon", label="Spending Score",ax=axes[2]) sns.distplot(df['Spending Score (1-100)'][df['Gender']=="Female"],color="skyblue", label="Spending Score",ax=axes[2]) plt.show() f, axes = plt.subplots(1, 2, figsize=(20, 10)) #sharex=True) sns.scatterplot(x="Age", y="Spending Score (1-100)",hue="Gender", data=df, ax=axes[0]) sns.scatterplot(x="Age", y="Annual Income (k$)",hue="Gender", data=df, ax=axes[1]) x_k = df['Annual Income (k$)'].values y_k = df['Spending Score (1-100)'].values x_k2 = list(zip(x_k, y_k)) x_k2 = np.array(x_k2) x_k2 model = KMeans(n_clusters=5) model.fit(x_k2) y_kmeans = model.predict(x_k2) y_kmeans label = model.labels_ label unique_labels = set(model.labels_) unique_labels target = pd.DataFrame({'target': model.labels_}) df_new = pd.concat([df, target], axis=1, sort=False) df_new = df_new.drop(['CustomerID'], axis=1) df_new
code
34123573/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv') x_k = df['Annual Income (k$)'].values y_k = df['Spending Score (1-100)'].values x_k
code
34123573/cell_6
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv') print(df.head())
code
34123573/cell_29
[ "text_plain_output_1.png" ]
from sklearn.cluster import KMeans import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv') f,axes=plt.subplots(1,3,figsize=(20,20)) sns.distplot(df['Annual Income (k$)'],color='red',label="nannualincome",ax=axes[0]) sns.distplot(df['Age'],color='yellow',label="age",ax=axes[1]) sns.distplot(df['Spending Score (1-100)'],color="skyblue", label="Spending Score",ax=axes[2]) f, axes = plt.subplots(1, 3, figsize=(20, 5)) #sharex=True) sns.distplot(df['Annual Income (k$)'][df['Gender']=="Male"],color="salmon", label="Annual Income (k$)",ax=axes[0]) sns.distplot(df['Annual Income (k$)'][df['Gender']=="Female"],color="skyblue", label="Annual Income (k$)",ax=axes[0]) sns.distplot(df['Age'][df['Gender']=="Male"],color="salmon", label="Age",ax=axes[1]) sns.distplot(df['Age'][df['Gender']=="Female"],color="skyblue", label="Age",ax=axes[1]) sns.distplot(df['Spending Score (1-100)'][df['Gender']=="Male"],color="salmon", label="Spending Score",ax=axes[2]) sns.distplot(df['Spending Score (1-100)'][df['Gender']=="Female"],color="skyblue", label="Spending Score",ax=axes[2]) plt.show() f, axes = plt.subplots(1, 2, figsize=(20, 10)) #sharex=True) sns.scatterplot(x="Age", y="Spending Score (1-100)",hue="Gender", data=df, ax=axes[0]) sns.scatterplot(x="Age", y="Annual Income (k$)",hue="Gender", data=df, ax=axes[1]) x_k = df['Annual Income (k$)'].values y_k = df['Spending Score (1-100)'].values x_k2 = list(zip(x_k, y_k)) x_k2 = np.array(x_k2) x_k2 model = KMeans(n_clusters=5) model.fit(x_k2) y_kmeans = model.predict(x_k2) y_kmeans label = model.labels_ label unique_labels = set(model.labels_) unique_labels for c in unique_labels: plt.scatter(x_k2[model.labels_ == c, 0], x_k2[model.labels_ == c, 1], label='cluster{}'.format(c)) plt.scatter(model.cluster_centers_[:, 0], model.cluster_centers_[:, 1], s=300, c='red', label='Centroids') plt.xlabel('Income') plt.ylabel('Spending Score') plt.legend() plt.show()
code
34123573/cell_8
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv') df.describe()
code
34123573/cell_15
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv') f,axes=plt.subplots(1,3,figsize=(20,20)) sns.distplot(df['Annual Income (k$)'],color='red',label="nannualincome",ax=axes[0]) sns.distplot(df['Age'],color='yellow',label="age",ax=axes[1]) sns.distplot(df['Spending Score (1-100)'],color="skyblue", label="Spending Score",ax=axes[2]) f, axes = plt.subplots(1, 3, figsize=(20, 5)) #sharex=True) sns.distplot(df['Annual Income (k$)'][df['Gender']=="Male"],color="salmon", label="Annual Income (k$)",ax=axes[0]) sns.distplot(df['Annual Income (k$)'][df['Gender']=="Female"],color="skyblue", label="Annual Income (k$)",ax=axes[0]) sns.distplot(df['Age'][df['Gender']=="Male"],color="salmon", label="Age",ax=axes[1]) sns.distplot(df['Age'][df['Gender']=="Female"],color="skyblue", label="Age",ax=axes[1]) sns.distplot(df['Spending Score (1-100)'][df['Gender']=="Male"],color="salmon", label="Spending Score",ax=axes[2]) sns.distplot(df['Spending Score (1-100)'][df['Gender']=="Female"],color="skyblue", label="Spending Score",ax=axes[2]) plt.show() f, axes = plt.subplots(1, 2, figsize=(20, 10)) sns.scatterplot(x='Age', y='Spending Score (1-100)', hue='Gender', data=df, ax=axes[0]) sns.scatterplot(x='Age', y='Annual Income (k$)', hue='Gender', data=df, ax=axes[1])
code
34123573/cell_3
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname, filename))
code
34123573/cell_31
[ "text_plain_output_1.png" ]
from sklearn.cluster import KMeans import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv') f,axes=plt.subplots(1,3,figsize=(20,20)) sns.distplot(df['Annual Income (k$)'],color='red',label="nannualincome",ax=axes[0]) sns.distplot(df['Age'],color='yellow',label="age",ax=axes[1]) sns.distplot(df['Spending Score (1-100)'],color="skyblue", label="Spending Score",ax=axes[2]) f, axes = plt.subplots(1, 3, figsize=(20, 5)) #sharex=True) sns.distplot(df['Annual Income (k$)'][df['Gender']=="Male"],color="salmon", label="Annual Income (k$)",ax=axes[0]) sns.distplot(df['Annual Income (k$)'][df['Gender']=="Female"],color="skyblue", label="Annual Income (k$)",ax=axes[0]) sns.distplot(df['Age'][df['Gender']=="Male"],color="salmon", label="Age",ax=axes[1]) sns.distplot(df['Age'][df['Gender']=="Female"],color="skyblue", label="Age",ax=axes[1]) sns.distplot(df['Spending Score (1-100)'][df['Gender']=="Male"],color="salmon", label="Spending Score",ax=axes[2]) sns.distplot(df['Spending Score (1-100)'][df['Gender']=="Female"],color="skyblue", label="Spending Score",ax=axes[2]) plt.show() f, axes = plt.subplots(1, 2, figsize=(20, 10)) #sharex=True) sns.scatterplot(x="Age", y="Spending Score (1-100)",hue="Gender", data=df, ax=axes[0]) sns.scatterplot(x="Age", y="Annual Income (k$)",hue="Gender", data=df, ax=axes[1]) x_k = df['Annual Income (k$)'].values y_k = df['Spending Score (1-100)'].values x_k2 = list(zip(x_k, y_k)) x_k2 = np.array(x_k2) x_k2 model = KMeans(n_clusters=5) model.fit(x_k2) y_kmeans = model.predict(x_k2) y_kmeans label = model.labels_ label unique_labels = set(model.labels_) unique_labels plt.figure(1, figsize=(17, 8)) plt.scatter(x_k2[y_kmeans == 0, 0], x_k2[y_kmeans == 0, 1], s=100, c='red', label='Standard people') plt.scatter(x_k2[y_kmeans == 1, 0], x_k2[y_kmeans == 1, 1], s=100, c='yellow', label='Tightwad people') plt.scatter(x_k2[y_kmeans == 2, 0], x_k2[y_kmeans == 2, 1], s=100, c='aqua', label='Normal people') plt.scatter(x_k2[y_kmeans == 3, 0], x_k2[y_kmeans == 3, 1], s=100, c='violet', label='Careless people(TARGET)') plt.scatter(x_k2[y_kmeans == 4, 0], x_k2[y_kmeans == 4, 1], s=100, c='lightgreen', label='Rich people(TARGET)') plt.scatter(model.cluster_centers_[:, 0], model.cluster_centers_[:, 1], s=300, c='black', label='Centroids') plt.title('Clusters of customers') plt.xlabel('Annual Income (k$)') plt.ylabel('Spending Score (1-100)') plt.legend() plt.show()
code
34123573/cell_24
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.cluster import KMeans import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv') x_k = df['Annual Income (k$)'].values y_k = df['Spending Score (1-100)'].values x_k2 = list(zip(x_k, y_k)) x_k2 = np.array(x_k2) x_k2 model = KMeans(n_clusters=5) model.fit(x_k2) y_kmeans = model.predict(x_k2) y_kmeans
code
34123573/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv') f,axes=plt.subplots(1,3,figsize=(20,20)) sns.distplot(df['Annual Income (k$)'],color='red',label="nannualincome",ax=axes[0]) sns.distplot(df['Age'],color='yellow',label="age",ax=axes[1]) sns.distplot(df['Spending Score (1-100)'],color="skyblue", label="Spending Score",ax=axes[2]) f, axes = plt.subplots(1, 3, figsize=(20, 5)) sns.distplot(df['Annual Income (k$)'][df['Gender'] == 'Male'], color='salmon', label='Annual Income (k$)', ax=axes[0]) sns.distplot(df['Annual Income (k$)'][df['Gender'] == 'Female'], color='skyblue', label='Annual Income (k$)', ax=axes[0]) sns.distplot(df['Age'][df['Gender'] == 'Male'], color='salmon', label='Age', ax=axes[1]) sns.distplot(df['Age'][df['Gender'] == 'Female'], color='skyblue', label='Age', ax=axes[1]) sns.distplot(df['Spending Score (1-100)'][df['Gender'] == 'Male'], color='salmon', label='Spending Score', ax=axes[2]) sns.distplot(df['Spending Score (1-100)'][df['Gender'] == 'Female'], color='skyblue', label='Spending Score', ax=axes[2]) plt.show()
code
34123573/cell_22
[ "image_output_1.png" ]
import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv') x_k = df['Annual Income (k$)'].values y_k = df['Spending Score (1-100)'].values x_k2 = list(zip(x_k, y_k)) x_k2 = np.array(x_k2) x_k2
code
34123573/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv') sns.heatmap(df.drop(['CustomerID'], axis=1).corr(), annot=True)
code
34123573/cell_27
[ "text_plain_output_1.png" ]
from sklearn.cluster import KMeans import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv') x_k = df['Annual Income (k$)'].values y_k = df['Spending Score (1-100)'].values x_k2 = list(zip(x_k, y_k)) x_k2 = np.array(x_k2) x_k2 model = KMeans(n_clusters=5) model.fit(x_k2) y_kmeans = model.predict(x_k2) y_kmeans label = model.labels_ label unique_labels = set(model.labels_) unique_labels
code
88104888/cell_3
[ "text_html_output_1.png" ]
import pandas as pd data_path = '../input/nuclio10-dsc-1121/sales_train_merged.csv' df = pd.read_csv(data_path, index_col=0) df.head()
code
72115608/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.preprocessing import OrdinalEncoder import pandas as pd import pandas as pd train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) y = train['target'] features = train.drop(['target'], axis=1) object_cols = [col for col in features.columns if 'cat' in col] X = features.copy() X_test = test.copy() ordinal_encoder = OrdinalEncoder() X[object_cols] = ordinal_encoder.fit_transform(features[object_cols]) X_test[object_cols] = ordinal_encoder.transform(test[object_cols]) X.head()
code
72115608/cell_4
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) train.head()
code
72115608/cell_23
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
from keras.layers import Dense from sklearn.preprocessing import OrdinalEncoder import pandas as pd import pandas as pd train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) y = train['target'] features = train.drop(['target'], axis=1) object_cols = [col for col in features.columns if 'cat' in col] X = features.copy() X_test = test.copy() ordinal_encoder = OrdinalEncoder() X[object_cols] = ordinal_encoder.fit_transform(features[object_cols]) X_test[object_cols] = ordinal_encoder.transform(test[object_cols]) def create_model(): model = Sequential() model.add(Dense(320, input_dim=X_train.shape[1], activation='relu')) model.add(Dense(384, activation='relu')) model.add(Dense(352, activation='relu')) model.add(Dense(448, activation='relu')) model.add(Dense(160, activation='relu')) model.add(Dense(160, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(1)) model.compile(optimizer=Adam(learning_rate=0.0001), loss='mse') return model model = create_model() model.summary() X_train.shape early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10) history = model.fit(x=X_train, y=y_train, validation_split=0.1, batch_size=128, epochs=150, callbacks=[early_stop]) losses = pd.DataFrame(model.history.history) result = model.predict(X_test) result = pd.DataFrame(result, columns=['target']) result = result[['id', 'target']] result.head()
code
72115608/cell_20
[ "text_plain_output_1.png" ]
from keras.layers import Dense from sklearn.preprocessing import OrdinalEncoder import pandas as pd import pandas as pd train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) y = train['target'] features = train.drop(['target'], axis=1) object_cols = [col for col in features.columns if 'cat' in col] X = features.copy() X_test = test.copy() ordinal_encoder = OrdinalEncoder() X[object_cols] = ordinal_encoder.fit_transform(features[object_cols]) X_test[object_cols] = ordinal_encoder.transform(test[object_cols]) def create_model(): model = Sequential() model.add(Dense(320, input_dim=X_train.shape[1], activation='relu')) model.add(Dense(384, activation='relu')) model.add(Dense(352, activation='relu')) model.add(Dense(448, activation='relu')) model.add(Dense(160, activation='relu')) model.add(Dense(160, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(1)) model.compile(optimizer=Adam(learning_rate=0.0001), loss='mse') return model model = create_model() model.summary() X_train.shape early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10) history = model.fit(x=X_train, y=y_train, validation_split=0.1, batch_size=128, epochs=150, callbacks=[early_stop]) losses = pd.DataFrame(model.history.history) result = model.predict(X_test) result = pd.DataFrame(result, columns=['target']) result
code
72115608/cell_29
[ "text_html_output_1.png" ]
from keras.layers import Dense from sklearn.preprocessing import OrdinalEncoder import pandas as pd import pandas as pd train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) y = train['target'] features = train.drop(['target'], axis=1) object_cols = [col for col in features.columns if 'cat' in col] X = features.copy() X_test = test.copy() ordinal_encoder = OrdinalEncoder() X[object_cols] = ordinal_encoder.fit_transform(features[object_cols]) X_test[object_cols] = ordinal_encoder.transform(test[object_cols]) def create_model(): model = Sequential() model.add(Dense(320, input_dim=X_train.shape[1], activation='relu')) model.add(Dense(384, activation='relu')) model.add(Dense(352, activation='relu')) model.add(Dense(448, activation='relu')) model.add(Dense(160, activation='relu')) model.add(Dense(160, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(1)) model.compile(optimizer=Adam(learning_rate=0.0001), loss='mse') return model model = create_model() model.summary() X_train.shape early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10) history = model.fit(x=X_train, y=y_train, validation_split=0.1, batch_size=128, epochs=150, callbacks=[early_stop]) losses = pd.DataFrame(model.history.history) result = model.predict(X_test) result = pd.DataFrame(result, columns=['target']) result['id'] = X_test.index result = result[['id', 'target']] result.to_csv('submission.csv', index=False) output = result[['id', 'target']] output.to_csv('submission.csv', index=False) output = pd.DataFrame({'Id': X_test.index, 'target': predictions}) output.to_csv('submission.csv', index=False)
code
72115608/cell_11
[ "text_html_output_1.png" ]
from sklearn.preprocessing import OrdinalEncoder import pandas as pd import pandas as pd train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) y = train['target'] features = train.drop(['target'], axis=1) object_cols = [col for col in features.columns if 'cat' in col] X = features.copy() X_test = test.copy() ordinal_encoder = OrdinalEncoder() X[object_cols] = ordinal_encoder.fit_transform(features[object_cols]) X_test[object_cols] = ordinal_encoder.transform(test[object_cols]) X_test.head()
code
72115608/cell_7
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) y = train['target'] features = train.drop(['target'], axis=1) features.head()
code
72115608/cell_18
[ "text_html_output_1.png" ]
from keras.layers import Dense from sklearn.preprocessing import OrdinalEncoder import pandas as pd import pandas as pd train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) y = train['target'] features = train.drop(['target'], axis=1) object_cols = [col for col in features.columns if 'cat' in col] X = features.copy() X_test = test.copy() ordinal_encoder = OrdinalEncoder() X[object_cols] = ordinal_encoder.fit_transform(features[object_cols]) X_test[object_cols] = ordinal_encoder.transform(test[object_cols]) def create_model(): model = Sequential() model.add(Dense(320, input_dim=X_train.shape[1], activation='relu')) model.add(Dense(384, activation='relu')) model.add(Dense(352, activation='relu')) model.add(Dense(448, activation='relu')) model.add(Dense(160, activation='relu')) model.add(Dense(160, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(1)) model.compile(optimizer=Adam(learning_rate=0.0001), loss='mse') return model model = create_model() model.summary() X_train.shape early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10) history = model.fit(x=X_train, y=y_train, validation_split=0.1, batch_size=128, epochs=150, callbacks=[early_stop]) losses = pd.DataFrame(model.history.history) result = model.predict(X_test) result
code
72115608/cell_15
[ "text_html_output_1.png" ]
from keras.layers import Dense def create_model(): model = Sequential() model.add(Dense(320, input_dim=X_train.shape[1], activation='relu')) model.add(Dense(384, activation='relu')) model.add(Dense(352, activation='relu')) model.add(Dense(448, activation='relu')) model.add(Dense(160, activation='relu')) model.add(Dense(160, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(1)) model.compile(optimizer=Adam(learning_rate=0.0001), loss='mse') return model model = create_model() model.summary() X_train.shape
code
72115608/cell_16
[ "text_html_output_1.png" ]
from keras.layers import Dense import pandas as pd import pandas as pd train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) def create_model(): model = Sequential() model.add(Dense(320, input_dim=X_train.shape[1], activation='relu')) model.add(Dense(384, activation='relu')) model.add(Dense(352, activation='relu')) model.add(Dense(448, activation='relu')) model.add(Dense(160, activation='relu')) model.add(Dense(160, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(1)) model.compile(optimizer=Adam(learning_rate=0.0001), loss='mse') return model model = create_model() model.summary() X_train.shape early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10) history = model.fit(x=X_train, y=y_train, validation_split=0.1, batch_size=128, epochs=150, callbacks=[early_stop]) losses = pd.DataFrame(model.history.history) losses.plot()
code
72115608/cell_14
[ "text_plain_output_1.png" ]
from keras.layers import Dense def create_model(): model = Sequential() model.add(Dense(320, input_dim=X_train.shape[1], activation='relu')) model.add(Dense(384, activation='relu')) model.add(Dense(352, activation='relu')) model.add(Dense(448, activation='relu')) model.add(Dense(160, activation='relu')) model.add(Dense(160, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(1)) model.compile(optimizer=Adam(learning_rate=0.0001), loss='mse') return model model = create_model() model.summary()
code
72115608/cell_22
[ "text_plain_output_1.png" ]
from keras.layers import Dense from sklearn.preprocessing import OrdinalEncoder import pandas as pd import pandas as pd train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) y = train['target'] features = train.drop(['target'], axis=1) object_cols = [col for col in features.columns if 'cat' in col] X = features.copy() X_test = test.copy() ordinal_encoder = OrdinalEncoder() X[object_cols] = ordinal_encoder.fit_transform(features[object_cols]) X_test[object_cols] = ordinal_encoder.transform(test[object_cols]) def create_model(): model = Sequential() model.add(Dense(320, input_dim=X_train.shape[1], activation='relu')) model.add(Dense(384, activation='relu')) model.add(Dense(352, activation='relu')) model.add(Dense(448, activation='relu')) model.add(Dense(160, activation='relu')) model.add(Dense(160, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(1)) model.compile(optimizer=Adam(learning_rate=0.0001), loss='mse') return model model = create_model() model.summary() X_train.shape early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10) history = model.fit(x=X_train, y=y_train, validation_split=0.1, batch_size=128, epochs=150, callbacks=[early_stop]) losses = pd.DataFrame(model.history.history) result = model.predict(X_test) result = pd.DataFrame(result, columns=['target']) result
code
72115608/cell_27
[ "text_plain_output_1.png" ]
from keras.layers import Dense from sklearn.preprocessing import OrdinalEncoder import pandas as pd import pandas as pd train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) y = train['target'] features = train.drop(['target'], axis=1) object_cols = [col for col in features.columns if 'cat' in col] X = features.copy() X_test = test.copy() ordinal_encoder = OrdinalEncoder() X[object_cols] = ordinal_encoder.fit_transform(features[object_cols]) X_test[object_cols] = ordinal_encoder.transform(test[object_cols]) def create_model(): model = Sequential() model.add(Dense(320, input_dim=X_train.shape[1], activation='relu')) model.add(Dense(384, activation='relu')) model.add(Dense(352, activation='relu')) model.add(Dense(448, activation='relu')) model.add(Dense(160, activation='relu')) model.add(Dense(160, activation='relu')) model.add(Dense(32, activation='relu')) model.add(Dense(1)) model.compile(optimizer=Adam(learning_rate=0.0001), loss='mse') return model model = create_model() model.summary() X_train.shape early_stop = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=10) history = model.fit(x=X_train, y=y_train, validation_split=0.1, batch_size=128, epochs=150, callbacks=[early_stop]) losses = pd.DataFrame(model.history.history) result = model.predict(X_test) result = pd.DataFrame(result, columns=['target']) result = result[['id', 'target']] result.to_csv('submission.csv', index=False) output = result[['id', 'target']] output
code
72115608/cell_5
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd train = pd.read_csv('../input/30-days-of-ml/train.csv', index_col=0) test = pd.read_csv('../input/30-days-of-ml/test.csv', index_col=0) train.info()
code
105197097/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv') submission_df = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv') train_df.shape train_df.isna().sum().sum() train_df.values
code
105197097/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv') submission_df = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv') train_df.head()
code
105197097/cell_23
[ "text_plain_output_1.png" ]
import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train_df = pd.read_csv('/kaggle/input/digit-recognizer/train.csv') test_df = pd.read_csv('/kaggle/input/digit-recognizer/test.csv') submission_df = pd.read_csv('/kaggle/input/digit-recognizer/sample_submission.csv') train_df.shape train_df.isna().sum().sum() train_df.values train_df.values[0] train_df.values[:, :1] train_df.values[:, 1:] X = train_df.values[:, 1:] y = train_df.values[:, :1] X.shape X_scale = X / 255.0 dim = int(np.sqrt(X_scale.shape[1])) dim N = X_scale.shape[0] N X_scale = X_scale.reshape((N, dim, dim, 1)) X_scale[0]
code