Columns in this preview:
path: stringlengths 13 to 17
screenshot_names: sequencelengths 1 to 873
code: stringlengths 0 to 40.4k
cell_type: stringclasses, 1 value
17105701/cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # visualization tool
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
happy.corr
# correlation map
f, ax = plt.subplots(figsize=(12, 12))
sns.heatmap(happy.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
# plt.show()
happy.head()
code
17105701/cell_26
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # visualization tool
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
happy.corr
# correlation map
f, ax = plt.subplots(figsize=(12, 12))
sns.heatmap(happy.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
# plt.show()
happy.columns
happy.columns = [each.split()[0] + '_' + each.split()[1] if len(each.split()) > 1 else each for each in happy.columns]
happy.columns
plt.clf()
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
x = happy['Corruption'] > 140
happy[x]
x = 2

def f():
    x = 3
    return x

x = 5

def f():
    y = 2 * x
    return y

def square():
    """ return square of value """

    def add():
        """ add two local variable """
        x = 2
        y = 3
        z = x + y
        return z
    return add() ** 2

print(square())
code
17105701/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # visualization tool
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
happy.corr
# correlation map
f, ax = plt.subplots(figsize=(12, 12))
sns.heatmap(happy.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
# plt.show()
happy.columns
happy.columns = [each.split()[0] + '_' + each.split()[1] if len(each.split()) > 1 else each for each in happy.columns]
happy.columns
happy.plot(kind='scatter', x='Ladder', y='Log_of', alpha=0.5, color='red')
plt.xlabel('Ladder')
plt.ylabel('Log_of_GDP\nper_capita')
plt.title('Ladder & Log_of_GDP\nper_capita Scatter Plot')
plt.show()
code
17105701/cell_19
[ "image_output_1.png" ]
print(3 > 2)
print(3 != 2)
print(True and False)
print(True or False)
code
17105701/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
17105701/cell_7
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # visualization tool
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
happy.corr
# correlation map
f, ax = plt.subplots(figsize=(12, 12))
sns.heatmap(happy.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
# plt.show()
happy.tail()
code
17105701/cell_18
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # visualization tool
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
happy.corr
# correlation map
f, ax = plt.subplots(figsize=(12, 12))
sns.heatmap(happy.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
# plt.show()
happy.columns
happy.columns = [each.split()[0] + '_' + each.split()[1] if len(each.split()) > 1 else each for each in happy.columns]
happy.columns
plt.clf()
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
series = happy['Corruption']
print(type(series))
dataFrame = happy[['Corruption']]
print(type(dataFrame))
code
17105701/cell_8
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # visualization tool
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
happy.corr
# correlation map
f, ax = plt.subplots(figsize=(12, 12))
sns.heatmap(happy.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
# plt.show()
happy.columns
code
17105701/cell_15
[ "text_plain_output_1.png" ]
dictionary = {'spain': 'madrid', 'usa': 'vegas'}
dictionary['spain'] = 'barcelona'
print(dictionary)
dictionary['france'] = 'paris'
print(dictionary)
del dictionary['spain']
print(dictionary)
print('france' in dictionary, 'paris' in dictionary)
dictionary.clear()
print(dictionary)
code
17105701/cell_16
[ "text_plain_output_1.png" ]
dictionary = {'spain': 'madrid', 'usa': 'vegas'}
dictionary['spain'] = 'barcelona'
dictionary['france'] = 'paris'
del dictionary['spain']
dictionary.clear()
del dictionary
print(dictionary)
code
17105701/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
happy.info()
code
17105701/cell_24
[ "application_vnd.jupyter.stderr_output_1.png" ]
def tuble_ex():
    """ return defined t tuble"""
    t = (1, 2, 3)
    return t

a, b, c = tuble_ex()
print(a, b, c)
code
17105701/cell_14
[ "text_html_output_1.png" ]
dictionary = {'spain': 'madrid', 'usa': 'vegas'}
print(dictionary.keys())
print(dictionary.values())
code
17105701/cell_22
[ "text_plain_output_1.png" ]
i = 0
while i != 5:
    print('i is: ', i)
    i += 1
print(i, ' is equal to 5')
code
17105701/cell_10
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # visualization tool
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
happy.corr
# correlation map
f, ax = plt.subplots(figsize=(12, 12))
sns.heatmap(happy.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
# plt.show()
happy.columns
happy.columns = [each.split()[0] + '_' + each.split()[1] if len(each.split()) > 1 else each for each in happy.columns]
happy.columns
happy.Freedom.plot(kind='line', color='g', label='Speed', linewidth=1, alpha=0.5, grid=True, linestyle=':')
happy.Generosity.plot(color='r', label='Defense', linewidth=1, alpha=0.5, grid=True, linestyle='-.')
plt.legend(loc='upper right')
plt.xlabel('x axis')
plt.ylabel('y axis')
plt.title('Line Plot')
plt.show()
code
17105701/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # visualization tool
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
happy.corr
# correlation map
f, ax = plt.subplots(figsize=(12, 12))
sns.heatmap(happy.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
# plt.show()
happy.columns
happy.columns = [each.split()[0] + '_' + each.split()[1] if len(each.split()) > 1 else each for each in happy.columns]
happy.columns
happy.Corruption.plot(kind='hist', bins=50, figsize=(12, 12))
plt.show()
code
17105701/cell_5
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns  # visualization tool
happy = pd.read_csv('../input/world-happiness-report-2019.csv')
happy.corr
f, ax = plt.subplots(figsize=(12, 12))
sns.heatmap(happy.corr(), annot=True, linewidths=0.5, fmt='.1f', ax=ax)
code
2015709/cell_13
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_2.png", "image_output_1.png" ]
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Dropout, Flatten, BatchNormalization
from keras.models import Sequential
from keras.utils import np_utils
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
df_train = pd.read_csv('../input/train.csv', encoding='big5')
df_train[:5]
from sklearn.model_selection import train_test_split
from keras.utils import np_utils
X_train = df_train[df_train.columns[1:]].values
y_train = df_train['label'].values
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1) / 255
y_train_oneHot = np_utils.to_categorical(y_train, num_classes=10)
from keras.models import Sequential
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Dropout, Flatten, BatchNormalization
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=(28, 28, 1), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_initializer='normal'))
model.add(Dropout(0.25))
model.add(Dense(1024, activation='relu', kernel_initializer='normal'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train, y_train_oneHot, validation_split=0.2, epochs=20, batch_size=250, verbose=1)
model.evaluate(X_train, y_train_oneHot)
df_test = pd.read_csv('../input/test.csv', encoding='big5')
X_test = df_test.values
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1) / 255
test_img = np.reshape(X_test[:1, :], (28, 28))
prediction = model.predict_classes(X_test)
prediction[:1]
code
2015709/cell_9
[ "text_plain_output_1.png" ]
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Dropout, Flatten, BatchNormalization
from keras.models import Sequential
from keras.utils import np_utils
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import pandas as pd
import pandas as pd
import numpy as np
df_train = pd.read_csv('../input/train.csv', encoding='big5')
df_train[:5]
from sklearn.model_selection import train_test_split
from keras.utils import np_utils
X_train = df_train[df_train.columns[1:]].values
y_train = df_train['label'].values
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1) / 255
y_train_oneHot = np_utils.to_categorical(y_train, num_classes=10)
from keras.models import Sequential
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Dropout, Flatten, BatchNormalization
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=(28, 28, 1), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_initializer='normal'))
model.add(Dropout(0.25))
model.add(Dense(1024, activation='relu', kernel_initializer='normal'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train, y_train_oneHot, validation_split=0.2, epochs=20, batch_size=250, verbose=1)
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
# NOTE: plot_train_history is not defined within this cell's accumulated context
plot_train_history(history, 'loss', 'val_loss')
plt.subplot(1, 2, 2)
plot_train_history(history, 'acc', 'val_acc')
code
2015709/cell_4
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
plt.figure(figsize=(12, 3))
plt.plot(X_train[:1].reshape(-1))
plt.figure(figsize=(6, 6))
plt.matshow(X_train[:1].reshape(28, 28), cmap=plt.get_cmap('binary'))
y_train[:1]
code
2015709/cell_6
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Dropout, Flatten, BatchNormalization
from keras.models import Sequential
from keras.models import Sequential
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Dropout, Flatten, BatchNormalization
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=(28, 28, 1), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_initializer='normal'))
model.add(Dropout(0.25))
model.add(Dense(1024, activation='relu', kernel_initializer='normal'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.summary()
code
2015709/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas as pd
import numpy as np
df_train = pd.read_csv('../input/train.csv', encoding='big5')
df_train[:5]
code
2015709/cell_7
[ "image_output_1.png" ]
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Dropout, Flatten, BatchNormalization
from keras.models import Sequential
from keras.utils import np_utils
import pandas as pd
import pandas as pd
import numpy as np
df_train = pd.read_csv('../input/train.csv', encoding='big5')
df_train[:5]
from sklearn.model_selection import train_test_split
from keras.utils import np_utils
X_train = df_train[df_train.columns[1:]].values
y_train = df_train['label'].values
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1) / 255
y_train_oneHot = np_utils.to_categorical(y_train, num_classes=10)
from keras.models import Sequential
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Dropout, Flatten, BatchNormalization
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=(28, 28, 1), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_initializer='normal'))
model.add(Dropout(0.25))
model.add(Dense(1024, activation='relu', kernel_initializer='normal'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train, y_train_oneHot, validation_split=0.2, epochs=20, batch_size=250, verbose=1)
code
2015709/cell_3
[ "text_plain_output_1.png" ]
from keras.utils import np_utils
import pandas as pd
import pandas as pd
import numpy as np
df_train = pd.read_csv('../input/train.csv', encoding='big5')
df_train[:5]
from sklearn.model_selection import train_test_split
from keras.utils import np_utils
X_train = df_train[df_train.columns[1:]].values
y_train = df_train['label'].values
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1) / 255
y_train_oneHot = np_utils.to_categorical(y_train, num_classes=10)
code
2015709/cell_10
[ "text_html_output_1.png" ]
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Dropout, Flatten, BatchNormalization
from keras.models import Sequential
from keras.utils import np_utils
import pandas as pd
import pandas as pd
import numpy as np
df_train = pd.read_csv('../input/train.csv', encoding='big5')
df_train[:5]
from sklearn.model_selection import train_test_split
from keras.utils import np_utils
X_train = df_train[df_train.columns[1:]].values
y_train = df_train['label'].values
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1) / 255
y_train_oneHot = np_utils.to_categorical(y_train, num_classes=10)
from keras.models import Sequential
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Dropout, Flatten, BatchNormalization
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=(28, 28, 1), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_initializer='normal'))
model.add(Dropout(0.25))
model.add(Dense(1024, activation='relu', kernel_initializer='normal'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train, y_train_oneHot, validation_split=0.2, epochs=20, batch_size=250, verbose=1)
model.evaluate(X_train, y_train_oneHot)
code
2015709/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Dropout, Flatten, BatchNormalization
from keras.models import Sequential
from keras.utils import np_utils
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas as pd
import numpy as np
df_train = pd.read_csv('../input/train.csv', encoding='big5')
df_train[:5]
from sklearn.model_selection import train_test_split
from keras.utils import np_utils
X_train = df_train[df_train.columns[1:]].values
y_train = df_train['label'].values
X_train = X_train.reshape(X_train.shape[0], 28, 28, 1) / 255
y_train_oneHot = np_utils.to_categorical(y_train, num_classes=10)
from keras.models import Sequential
from keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Dropout, Flatten, BatchNormalization
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=(28, 28, 1), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu', kernel_initializer='normal'))
model.add(Dropout(0.25))
model.add(Dense(1024, activation='relu', kernel_initializer='normal'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(X_train, y_train_oneHot, validation_split=0.2, epochs=20, batch_size=250, verbose=1)
df_test = pd.read_csv('../input/test.csv', encoding='big5')
X_test = df_test.values
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1) / 255
test_img = np.reshape(X_test[:1, :], (28, 28))
plt.matshow(test_img, cmap=plt.get_cmap('binary'))
plt.show()
code
1008052/cell_4
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
titanic_train = pd.read_csv('../input/train.csv')
titanic_train['Age'].fillna(titanic_train['Age'].median(), inplace=True)
titanic_train['Embarked'].fillna('S', inplace=True)
titanic_train.describe()
code
1008052/cell_6
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from sklearn.cross_validation import KFold
from sklearn.linear_model import LinearRegression
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
titanic_train = pd.read_csv('../input/train.csv')
titanic_train['Age'].fillna(titanic_train['Age'].median(), inplace=True)
titanic_train['Embarked'].fillna('S', inplace=True)
titanic_train['Sex'].replace({'male': 0, 'female': 1}, inplace=True)
titanic_train['Embarked'].replace({'S': 0, 'C': 1, 'Q': 2}, inplace=True)
from sklearn.linear_model import LinearRegression
from sklearn.cross_validation import KFold
predictors = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
algo = LinearRegression()
kf = KFold(titanic_train.shape[0], n_folds=3, random_state=1)
predictions = []
for train, test in kf:
    train_predictors = titanic_train[predictors].iloc[train, :]
    train_target = titanic_train['Survived'].iloc[train]
    algo.fit(train_predictors, train_target)
    test_prediction = algo.predict(titanic_train[predictors].iloc[test, :])
    predictions.append(test_prediction)
predictions
code
1008052/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd
from subprocess import check_output
print(check_output(['ls', '../input']).decode('utf8'))
code
1008052/cell_7
[ "text_plain_output_1.png" ]
from sklearn.cross_validation import KFold
from sklearn.linear_model import LinearRegression
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
titanic_train = pd.read_csv('../input/train.csv')
titanic_train['Age'].fillna(titanic_train['Age'].median(), inplace=True)
titanic_train['Embarked'].fillna('S', inplace=True)
titanic_train['Sex'].replace({'male': 0, 'female': 1}, inplace=True)
titanic_train['Embarked'].replace({'S': 0, 'C': 1, 'Q': 2}, inplace=True)
from sklearn.linear_model import LinearRegression
from sklearn.cross_validation import KFold
predictors = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
algo = LinearRegression()
kf = KFold(titanic_train.shape[0], n_folds=3, random_state=1)
predictions = []
for train, test in kf:
    train_predictors = titanic_train[predictors].iloc[train, :]
    train_target = titanic_train['Survived'].iloc[train]
    algo.fit(train_predictors, train_target)
    test_prediction = algo.predict(titanic_train[predictors].iloc[test, :])
    predictions.append(test_prediction)
predictions
accuracy = sum([1 if x == True else 0 for x in titanic_train['Survived'] == [1 if p > 0.5 else 0 for p in np.concatenate(predictions, axis=0)]]) / len(titanic_train)
accuracy
code
1008052/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
titanic_train = pd.read_csv('../input/train.csv')
titanic_train.describe()
code
1008052/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
titanic_train = pd.read_csv('../input/train.csv')
titanic_train['Age'].fillna(titanic_train['Age'].median(), inplace=True)
titanic_train['Embarked'].fillna('S', inplace=True)
titanic_train['Sex'].replace({'male': 0, 'female': 1}, inplace=True)
titanic_train['Embarked'].replace({'S': 0, 'C': 1, 'Q': 2}, inplace=True)
titanic_train.describe()
code
90102951/cell_11
[ "text_plain_output_1.png" ]
from colorama import Fore, Style, Back
from dataclasses import dataclass
from functools import reduce
from typing import Any
import math
import pandas as pd
import re
import numpy as np
import pandas as pd
import re
import math
from functools import reduce
from dataclasses import dataclass
data = pd.read_csv('../input/dogsofcambridge2/Dogs_of_Cambridge_2021.csv')
clean_data = pd.read_csv('../input/dogsofcambridge2/clean_dogs_reference.csv')
clean_data.drop('Unnamed: 0', axis=1, inplace=True)

def fix_lat_long(data):
    lat, long = zip(*[map(lambda a: float(a), re.findall('([-\\d\\.]+)', loc)) for loc in data.Location_masked])
    data.Latitude_masked = lat
    data.Longitude_masked = long
    data.drop('Location_masked', axis=1, inplace=True)

def impute_name(data):
    data.Dog_Name = ['Poopsy' if pd.isna(name) else name for name in data.Dog_Name]

impute_name(data)

@dataclass
class Location:
    name: str
    lat: float
    long: float

    def get_distance(self, lat, long):
        return math.sqrt((self.lat - lat) ** 2 + (self.long - long) ** 2)

nhs = [Location('East Cambridge', 42.369204, -71.079015), Location('Area 2/MIT', 42.359145, -71.094415), Location('Wellington-Harrington', 42.371264, -71.092608), Location('The Port', 42.365604, -71.09691), Location('Cambridgeport', 42.3586, -71.109293), Location('Mid-Cambridge', 42.372655, -71.108721), Location('Riverside', 42.36757, -71.1136), Location('Agassiz', 42.380667, -71.116386), Location('Neighborhood Nine', 42.386545, -71.127079), Location('West Cambridge', 42.376936, -71.136375), Location('North Cambridge', 42.394835, -71.132134), Location('Cambridge Highlands', 42.390774, -71.149859), Location('Strawberry Hill', 42.37938, -71.152475)]

def closest_neighborhood(lat, long):
    close = nhs[0]
    for nh in nhs:
        if nh.get_distance(lat, long) < close.get_distance(lat, long):
            close = nh
    return close.name

def impute_neighborhood(data):
    data.Neighborhood = [closest_neighborhood(data.loc[idx, 'Latitude_masked'], data.loc[idx, 'Longitude_masked']) if pd.isna(item) else item for idx, item in data.Neighborhood.items()]

def impute_breed(data):
    bn, i = ({}, {})
    for nh in data.Neighborhood.unique():
        i[nh] = []
        vc = data[data.Neighborhood == nh].Dog_Breed.value_counts()
        res = [[] for x in range(vc[0] + 1)]
        for idx, item in vc.items():
            res[item].append(idx)
        [a.sort() for a in res]
        res = reduce(lambda a, b: b + (a if a else []), res)
        bn[nh] = res
    data.Dog_Breed = [bn[data.loc[idx, 'Neighborhood']][len(i[data.loc[idx, 'Neighborhood']]) + (0 if i[data.loc[idx, 'Neighborhood']].append('') else 0)] if pd.isna(item) else item for idx, item in data.Dog_Breed.items()]

@dataclass
class Pair:
    one: Any
    two: Any

def ascii_histogram(data, width=50):
    res = []
    for k, v in data.items():
        res.append(Pair(k, v))
    res.sort(key=lambda a: a.two, reverse=True)

colors = [None, 'BLACK', 'RED', 'GREEN', 'YELLOW', 'BLUE', 'MAGENTA', 'CYAN']

def ascii_scatterplot(data, height=50):
    global colors
    if len(data[0]) == 2:
        data = list(map(lambda a: [a[0], a[1], 1], data))
    xs, ys, code = map(list, zip(*data))
    xmax, xmin, ymax, ymin = (max(xs), min(xs), max(ys), min(ys))
    height = height * 2
    scale = (height - 1) / (ymax - ymin)
    width = round((xmax - xmin) * scale + 0.5)
    scale_point = lambda x, y: ((x - xmin) * scale, (y - ymin) * scale)
    chars = [[0 for i in range(width)] for i in range(height)]
    for x, y, c in data:
        x, y = scale_point(x, y)
        chars[round(y - 0.5)][round(x - 0.5)] = c
    for i in reversed(range(0, height, 2)):
        res = ''
        bottom, top = chars[i:i + 2]
        for i in range(width):
            if top[i] and bottom[i]:
                if top[i] == bottom[i]:
                    res += f'{getattr(Fore, colors[top[i]])}█{Style.RESET_ALL}'
                else:
                    res += f'{getattr(Fore, colors[bottom[i]])}{getattr(Back, colors[top[i]])}▄{Style.RESET_ALL}'
            elif top[i]:
                res += f'{getattr(Fore, colors[top[i]])}▀{Style.RESET_ALL}'
            elif bottom[i]:
                res += f'{getattr(Fore, colors[bottom[i]])}▄{Style.RESET_ALL}'
            else:
                res += ' '
        print(res)

coords = list(zip(*[data.Longitude_masked, data.Latitude_masked]))
ascii_scatterplot(coords, 35)
code
90102951/cell_10
[ "text_plain_output_1.png" ]
from colorama import Fore, Style, Back
from dataclasses import dataclass
from functools import reduce
from typing import Any
import math
import pandas as pd
import re
import numpy as np
import pandas as pd
import re
import math
from functools import reduce
from dataclasses import dataclass
data = pd.read_csv('../input/dogsofcambridge2/Dogs_of_Cambridge_2021.csv')
clean_data = pd.read_csv('../input/dogsofcambridge2/clean_dogs_reference.csv')
clean_data.drop('Unnamed: 0', axis=1, inplace=True)

def fix_lat_long(data):
    lat, long = zip(*[map(lambda a: float(a), re.findall('([-\\d\\.]+)', loc)) for loc in data.Location_masked])
    data.Latitude_masked = lat
    data.Longitude_masked = long
    data.drop('Location_masked', axis=1, inplace=True)

def impute_name(data):
    data.Dog_Name = ['Poopsy' if pd.isna(name) else name for name in data.Dog_Name]

impute_name(data)

@dataclass
class Location:
    name: str
    lat: float
    long: float

    def get_distance(self, lat, long):
        return math.sqrt((self.lat - lat) ** 2 + (self.long - long) ** 2)

nhs = [Location('East Cambridge', 42.369204, -71.079015), Location('Area 2/MIT', 42.359145, -71.094415), Location('Wellington-Harrington', 42.371264, -71.092608), Location('The Port', 42.365604, -71.09691), Location('Cambridgeport', 42.3586, -71.109293), Location('Mid-Cambridge', 42.372655, -71.108721), Location('Riverside', 42.36757, -71.1136), Location('Agassiz', 42.380667, -71.116386), Location('Neighborhood Nine', 42.386545, -71.127079), Location('West Cambridge', 42.376936, -71.136375), Location('North Cambridge', 42.394835, -71.132134), Location('Cambridge Highlands', 42.390774, -71.149859), Location('Strawberry Hill', 42.37938, -71.152475)]

def closest_neighborhood(lat, long):
    close = nhs[0]
    for nh in nhs:
        if nh.get_distance(lat, long) < close.get_distance(lat, long):
            close = nh
    return close.name

def impute_neighborhood(data):
    data.Neighborhood = [closest_neighborhood(data.loc[idx, 'Latitude_masked'], data.loc[idx, 'Longitude_masked']) if pd.isna(item) else item for idx, item in data.Neighborhood.items()]

def impute_breed(data):
    bn, i = ({}, {})
    for nh in data.Neighborhood.unique():
        i[nh] = []
        vc = data[data.Neighborhood == nh].Dog_Breed.value_counts()
        res = [[] for x in range(vc[0] + 1)]
        for idx, item in vc.items():
            res[item].append(idx)
        [a.sort() for a in res]
        res = reduce(lambda a, b: b + (a if a else []), res)
        bn[nh] = res
    data.Dog_Breed = [bn[data.loc[idx, 'Neighborhood']][len(i[data.loc[idx, 'Neighborhood']]) + (0 if i[data.loc[idx, 'Neighborhood']].append('') else 0)] if pd.isna(item) else item for idx, item in data.Dog_Breed.items()]

@dataclass
class Pair:
    one: Any
    two: Any

def ascii_histogram(data, width=50):
    res = []
    for k, v in data.items():
        res.append(Pair(k, v))
    res.sort(key=lambda a: a.two, reverse=True)
    for i, item in enumerate(res):
        print(f'{(Back.BLACK + Fore.WHITE if i % 2 else Back.CYAN + Fore.BLACK)}' + f"{i + 1}{' ' * (int(item.two * width / res[0].two) - int(math.log(item.two, 10)) - int(math.log(i + 1, 10)))}" + f'{item.two}{Style.RESET_ALL} {item.one}')

ascii_histogram(data.Dog_Name.value_counts()[:10].to_dict())
code
322536/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
date_x = pd.DataFrame()
date_x['Class probability'] = df_train.groupby('date_x')['outcome'].mean()
date_x['Frequency'] = df_train.groupby('date_x')['outcome'].size()
date_y = pd.DataFrame()
date_y['Class probability'] = df_train.groupby('date_y')['outcome'].mean()
date_y['Frequency'] = df_train.groupby('date_y')['outcome'].size()
i = int(len(date_y) / 3)
date_y[:i].plot(secondary_y='Frequency', figsize=(20, 5), title='date_y Year 1')
date_y[i:2 * i].plot(secondary_y='Frequency', figsize=(20, 5), title='date_y Year 2')
date_y[2 * i:].plot(secondary_y='Frequency', figsize=(20, 5), title='date_y Year 3')
code
322536/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
date_x = pd.DataFrame()
date_x['Class probability'] = df_train.groupby('date_x')['outcome'].mean()
date_x['Frequency'] = df_train.groupby('date_x')['outcome'].size()
date_y = pd.DataFrame()
date_y['Class probability'] = df_train.groupby('date_y')['outcome'].mean()
date_y['Frequency'] = df_train.groupby('date_y')['outcome'].size()
i = int(len(date_y) / 3)
date_x_freq = pd.DataFrame()
date_x_freq['Training set'] = df_train.groupby('date_x')['activity_id'].count()
date_x_freq['Testing set'] = df_test.groupby('date_x')['activity_id'].count()
date_x_freq.plot(secondary_y='Testing set', figsize=(20, 8), title='Comparison of date_x distribution between training/testing set')
date_y_freq = pd.DataFrame()
date_y_freq['Training set'] = df_train.groupby('date_y')['activity_id'].count()
date_y_freq['Testing set'] = df_test.groupby('date_y')['activity_id'].count()
date_y_freq[:i].plot(secondary_y='Testing set', figsize=(20, 8), title='Comparison of date_y distribution between training/testing set (first year)')
date_y_freq[2 * i:].plot(secondary_y='Testing set', figsize=(20, 8), title='Comparison of date_y distribution between training/testing set (last year)')
code
322536/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
date_x = pd.DataFrame()
date_x['Class probability'] = df_train.groupby('date_x')['outcome'].mean()
date_x['Frequency'] = df_train.groupby('date_x')['outcome'].size()
date_x.plot(secondary_y='Frequency', figsize=(20, 10))
code
322536/cell_16
[ "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from sklearn.metrics import roc_auc_score
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
date_x = pd.DataFrame()
date_x['Class probability'] = df_train.groupby('date_x')['outcome'].mean()
date_x['Frequency'] = df_train.groupby('date_x')['outcome'].size()
date_y = pd.DataFrame()
date_y['Class probability'] = df_train.groupby('date_y')['outcome'].mean()
date_y['Frequency'] = df_train.groupby('date_y')['outcome'].size()
i = int(len(date_y) / 3)
date_x_freq = pd.DataFrame()
date_x_freq['Training set'] = df_train.groupby('date_x')['activity_id'].count()
date_x_freq['Testing set'] = df_test.groupby('date_x')['activity_id'].count()
date_y_freq = pd.DataFrame()
date_y_freq['Training set'] = df_train.groupby('date_y')['activity_id'].count()
date_y_freq['Testing set'] = df_test.groupby('date_y')['activity_id'].count()
from sklearn.metrics import roc_auc_score
features = pd.DataFrame()
features['date_x_prob'] = df_train.groupby('date_x')['outcome'].transform('mean')
features['date_y_prob'] = df_train.groupby('date_y')['outcome'].transform('mean')
features['date_x_count'] = df_train.groupby('date_x')['outcome'].transform('count')
features['date_y_count'] = df_train.groupby('date_y')['outcome'].transform('count')
_ = [print(f.ljust(12) + ' AUC: ' + str(round(roc_auc_score(df_train['outcome'], features[f]), 6))) for f in features.columns]
code
322536/cell_14
[ "text_plain_output_1.png", "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
date_x = pd.DataFrame()
date_x['Class probability'] = df_train.groupby('date_x')['outcome'].mean()
date_x['Frequency'] = df_train.groupby('date_x')['outcome'].size()
date_y = pd.DataFrame()
date_y['Class probability'] = df_train.groupby('date_y')['outcome'].mean()
date_y['Frequency'] = df_train.groupby('date_y')['outcome'].size()
i = int(len(date_y) / 3)
date_x_freq = pd.DataFrame()
date_x_freq['Training set'] = df_train.groupby('date_x')['activity_id'].count()
date_x_freq['Testing set'] = df_test.groupby('date_x')['activity_id'].count()
date_y_freq = pd.DataFrame()
date_y_freq['Training set'] = df_train.groupby('date_y')['activity_id'].count()
date_y_freq['Testing set'] = df_test.groupby('date_y')['activity_id'].count()
print('date_y correlation in year 1: ' + str(np.corrcoef(date_y_freq[:i].fillna(0).T)[0, 1]))
print('date_y correlation in year 2: ' + str(np.corrcoef(date_y_freq[i:2 * i].fillna(0).T)[0, 1]))
print('date_y correlation in year 3: ' + str(np.corrcoef(date_y_freq[2 * i:].fillna(0).T)[0, 1]))
code
322536/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
date_x = pd.DataFrame()
date_x['Class probability'] = df_train.groupby('date_x')['outcome'].mean()
date_x['Frequency'] = df_train.groupby('date_x')['outcome'].size()
date_y = pd.DataFrame()
date_y['Class probability'] = df_train.groupby('date_y')['outcome'].mean()
date_y['Frequency'] = df_train.groupby('date_y')['outcome'].size()
i = int(len(date_y) / 3)
date_x_freq = pd.DataFrame()
date_x_freq['Training set'] = df_train.groupby('date_x')['activity_id'].count()
date_x_freq['Testing set'] = df_test.groupby('date_x')['activity_id'].count()
date_y_freq = pd.DataFrame()
date_y_freq['Training set'] = df_train.groupby('date_y')['activity_id'].count()
date_y_freq['Testing set'] = df_test.groupby('date_y')['activity_id'].count()
print('Correlation of date_x distribution in training/testing sets: ' + str(np.corrcoef(date_x_freq.T)[0, 1]))
print('Correlation of date_y distribution in training/testing sets: ' + str(np.corrcoef(date_y_freq.fillna(0).T)[0, 1]))
code
322536/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
train = pd.read_csv('../input/act_train.csv', parse_dates=['date'])
test = pd.read_csv('../input/act_test.csv', parse_dates=['date'])
ppl = pd.read_csv('../input/people.csv', parse_dates=['date'])
df_train = pd.merge(train, ppl, on='people_id')
df_test = pd.merge(test, ppl, on='people_id')
del train, test, ppl
for d in ['date_x', 'date_y']:
    print('Start of ' + d + ': ' + str(df_train[d].min().date()))
    print(' End of ' + d + ': ' + str(df_train[d].max().date()))
    print('Range of ' + d + ': ' + str(df_train[d].max() - df_train[d].min()) + '\n')
code
1009501/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_hdf('../input/train.h5')
t16 = data.loc[(data.id == 288) & (data.technical_16 != 0.0) & (~data.technical_16.isnull()), ['timestamp', 'technical_16']]
ax = t16.plot(use_index=False)
ax = t16.technical_16.plot(use_index=False)
t16 = data.loc[(data.id == 1201) & (data.technical_16 != 0.0) & (~data.technical_16.isnull()), ['timestamp', 'technical_16']]
ax = t16.technical_16.plot(use_index=False)
ax = t16.plot(use_index=False)
code
1009501/cell_7
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_hdf('../input/train.h5')
t16 = data.loc[(data.id == 288) & (data.technical_16 != 0.0) & ~data.technical_16.isnull(), ['timestamp', 'technical_16']]
ax = t16.plot(use_index=False)
code
1009501/cell_8
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_hdf('../input/train.h5')
t16 = data.loc[(data.id == 288) & (data.technical_16 != 0.0) & (~data.technical_16.isnull()), ['timestamp', 'technical_16']]
ax = t16.plot(use_index=False)
ax = t16.technical_16.plot(use_index=False)
code
1009501/cell_3
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_hdf('../input/train.h5')
code
1009501/cell_10
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_hdf('../input/train.h5')
t16 = data.loc[(data.id == 288) & (data.technical_16 != 0.0) & (~data.technical_16.isnull()), ['timestamp', 'technical_16']]
ax = t16.plot(use_index=False)
ax = t16.technical_16.plot(use_index=False)
t16 = data.loc[(data.id == 1201) & (data.technical_16 != 0.0) & ~data.technical_16.isnull(), ['timestamp', 'technical_16']]
ax = t16.technical_16.plot(use_index=False)
code
1009501/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
data = pd.read_hdf('../input/train.h5')
data.technical_16.describe()
code
1005437/cell_9
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
uber_data = pd.read_csv('../input/uber-raw-data-janjune-15.csv')
uber_data.shape
Month = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun']
Index = [0, 1, 2, 3, 4, 5]
Monthly_pickup = uber_data.groupby(['Month']).size()
plt.xticks(Index, Month)
month = ['01', '02', '03', '04', '05', '06']
idx = [0, 7, 14, 21, 27]

def daily_pickup_plot(month):
    plot_data = uber_data[uber_data['Month'] == month]
    plot_data = plot_data.groupby(['Date']).size()
    plt.xticks(idx, plot_data.index[idx])

for i in range(0, 6):
    plt.ylim(0, 140000)
Hourly_pickup = uber_data.groupby(['Hour']).size()
mean = Hourly_pickup.mean()
hour = [i for i in range(0, 24)]
plt.xticks(hour, hour)

def hourly_pickup_plot(month):
    plot_data = uber_data[uber_data['Month'] == month]
    plot_data = plot_data.groupby(['Hour']).size()
    plot_data.plot(kind='bar')
    plt.xlabel('')
    plt.xticks(hour, plot_data.index[hour])

plt.figure(1, figsize=(12, 24))
for i in range(0, 6):
    plt.subplot(3, 2, i + 1)
    hourly_pickup_plot(month[i])
    plt.title('Hourly Pickup of ' + Month[i] + ' 2015')
    plt.xlabel('')
    plt.ylim(0, 200000)
code
1005437/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd
uber_data = pd.read_csv('../input/uber-raw-data-janjune-15.csv')
uber_data.shape
code
1005437/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
uber_data = pd.read_csv('../input/uber-raw-data-janjune-15.csv')
uber_data.shape
Month = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun']
Index = [0, 1, 2, 3, 4, 5]
Monthly_pickup = uber_data.groupby(['Month']).size()
plt.figure(1, figsize=(12, 6))
plt.bar(Index, Monthly_pickup)
plt.xticks(Index, Month)
plt.title('UBER Monthly Pickup Summary in NYC')
code
1005437/cell_7
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
uber_data = pd.read_csv('../input/uber-raw-data-janjune-15.csv')
uber_data.shape
Month = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun']
Index = [0, 1, 2, 3, 4, 5]
Monthly_pickup = uber_data.groupby(['Month']).size()
plt.xticks(Index, Month)
month = ['01', '02', '03', '04', '05', '06']
idx = [0, 7, 14, 21, 27]

def daily_pickup_plot(month):
    plot_data = uber_data[uber_data['Month'] == month]
    plot_data = plot_data.groupby(['Date']).size()
    plot_data.plot(kind='bar', rot=45)
    plt.xlabel('')
    plt.xticks(idx, plot_data.index[idx])

plt.figure(1, figsize=(12, 24))
for i in range(0, 6):
    plt.subplot(3, 2, i + 1)
    daily_pickup_plot(month[i])
    plt.ylim(0, 140000)
    plt.title('Daily Pickup of ' + Month[i])
code
1005437/cell_8
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
uber_data = pd.read_csv('../input/uber-raw-data-janjune-15.csv')
uber_data.shape
Month = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun']
Index = [0, 1, 2, 3, 4, 5]
Monthly_pickup = uber_data.groupby(['Month']).size()
plt.xticks(Index, Month)
month = ['01', '02', '03', '04', '05', '06']
idx = [0, 7, 14, 21, 27]

def daily_pickup_plot(month):
    plot_data = uber_data[uber_data['Month'] == month]
    plot_data = plot_data.groupby(['Date']).size()
    plt.xticks(idx, plot_data.index[idx])

for i in range(0, 6):
    plt.ylim(0, 140000)
Hourly_pickup = uber_data.groupby(['Hour']).size()
mean = Hourly_pickup.mean()
hour = [i for i in range(0, 24)]
plt.figure(1, figsize=(12, 6))
plt.bar(hour, Hourly_pickup)
plt.title('UBER Hourly Pickup Summary of NYC, Jan 2015 - Jun 2015')
plt.xlabel('')
plt.xticks(hour, hour)
plt.show()
code
330906/cell_13
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('../input/NationalNames.csv')
df['Decade'] = df['Year'].apply(lambda x: x - x % 10)
df_pivot = df.pivot_table(values='Count', index=['Decade', 'Name', 'Gender'], aggfunc='sum')
new_df = pd.DataFrame()
new_df['Decade'] = df_pivot.index.get_level_values('Decade')
new_df['Name'] = df_pivot.index.get_level_values('Name')
new_df['Gender'] = df_pivot.index.get_level_values('Gender')
new_df['Count'] = df_pivot.values
decadeList = list(new_df['Decade'].unique())
boys_percentileList = []
girls_percentileList = []
boys_df = new_df[new_df['Gender'] == 'M'].copy()
girls_df = new_df[new_df['Gender'] == 'F'].copy()
for i in decadeList:
    scaler = MinMaxScaler()
    boys_percentileList.extend(scaler.fit_transform(boys_df[boys_df['Decade'] == i][['Count']]))
    girls_percentileList.extend(scaler.fit_transform(girls_df[girls_df['Decade'] == i][['Count']]))
boys_df['decade_percentile'] = boys_percentileList
girls_df['decade_percentile'] = girls_percentileList
new_df = boys_df.append(girls_df)
new_df['decade_percentile'] = new_df['decade_percentile'].apply(lambda x: float(x) * 100)
new_df.sort_index(inplace=True)
del boys_df
del girls_df
plt.plot(new_df[(new_df['Name'] == 'John') & (new_df['Gender'] == 'M')]['Decade'], new_df[(new_df['Name'] == 'John') & (new_df['Gender'] == 'M')]['decade_percentile'])
code
330906/cell_9
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/NationalNames.csv')
df['Decade'] = df['Year'].apply(lambda x: x - x % 10)
df_pivot = df.pivot_table(values='Count', index=['Decade', 'Name', 'Gender'], aggfunc='sum')
new_df = pd.DataFrame()
new_df['Decade'] = df_pivot.index.get_level_values('Decade')
new_df['Name'] = df_pivot.index.get_level_values('Name')
new_df['Gender'] = df_pivot.index.get_level_values('Gender')
new_df['Count'] = df_pivot.values
new_df.head()
code
330906/cell_4
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/NationalNames.csv')
df.head()
code
330906/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
df = pd.read_csv('../input/NationalNames.csv')
df['Decade'] = df['Year'].apply(lambda x: x - x % 10)
df_pivot = df.pivot_table(values='Count', index=['Decade', 'Name', 'Gender'], aggfunc='sum')
new_df = pd.DataFrame()
new_df['Decade'] = df_pivot.index.get_level_values('Decade')
new_df['Name'] = df_pivot.index.get_level_values('Name')
new_df['Gender'] = df_pivot.index.get_level_values('Gender')
new_df['Count'] = df_pivot.values
decadeList = list(new_df['Decade'].unique())
boys_percentileList = []
girls_percentileList = []
boys_df = new_df[new_df['Gender'] == 'M'].copy()
girls_df = new_df[new_df['Gender'] == 'F'].copy()
for i in decadeList:
    scaler = MinMaxScaler()
    boys_percentileList.extend(scaler.fit_transform(boys_df[boys_df['Decade'] == i][['Count']]))
    girls_percentileList.extend(scaler.fit_transform(girls_df[girls_df['Decade'] == i][['Count']]))
boys_df['decade_percentile'] = boys_percentileList
girls_df['decade_percentile'] = girls_percentileList
new_df = boys_df.append(girls_df)
new_df['decade_percentile'] = new_df['decade_percentile'].apply(lambda x: float(x) * 100)
new_df.sort_index(inplace=True)
del boys_df
del girls_df

def nameFilter(decade, gender, lowerBound, upperBound, startsWith=None):
    """
    This function helps you find rare/common baby names!
    Inputs:
    decade : integer = Decade as a 4 digit number, e.g. 1980.
    gender : string = Gender as a single letter string, e.g. 'M' for Male
    lowerBound: float = Lower percentage of the names you want to query, e.g. 25 for 25%, NOT 0.25
    upperBound: float = Upper percentage of the names you want to query
    startsWith: str = (Optional) Single letter representing the starting letter of a name
    Returns:
    A dataframe slice fitting your parameters.
    """
    if upperBound < lowerBound:
        raise ValueError('lowerBound needs to be less than upperBound')
    if startsWith != None:
        result_df = new_df[(new_df['Decade'] == decade) & (new_df['Gender'] == gender) & (new_df['decade_percentile'] >= lowerBound) & (new_df['decade_percentile'] <= upperBound) & (new_df['Name'].str[0] == startsWith.upper())]
    else:
        result_df = new_df[(new_df['Decade'] == decade) & (new_df['Gender'] == gender) & (new_df['decade_percentile'] >= lowerBound) & (new_df['decade_percentile'] <= upperBound)]
    return result_df

nameFilter(decade=1980, gender='M', lowerBound=50, upperBound=100, startsWith='C')
code
330906/cell_19
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/NationalNames.csv')
df['Decade'] = df['Year'].apply(lambda x: x - x % 10)
df_pivot = df.pivot_table(values='Count', index=['Decade', 'Name', 'Gender'], aggfunc='sum')
new_df = pd.DataFrame()
new_df['Decade'] = df_pivot.index.get_level_values('Decade')
new_df['Name'] = df_pivot.index.get_level_values('Name')
new_df['Gender'] = df_pivot.index.get_level_values('Gender')
new_df['Count'] = df_pivot.values
decadeList = list(new_df['Decade'].unique())
boys_percentileList = []
girls_percentileList = []
boys_df = new_df[new_df['Gender'] == 'M'].copy()
girls_df = new_df[new_df['Gender'] == 'F'].copy()
for i in decadeList:
    scaler = MinMaxScaler()
    boys_percentileList.extend(scaler.fit_transform(boys_df[boys_df['Decade'] == i][['Count']]))
    girls_percentileList.extend(scaler.fit_transform(girls_df[girls_df['Decade'] == i][['Count']]))
boys_df['decade_percentile'] = boys_percentileList
girls_df['decade_percentile'] = girls_percentileList
new_df = boys_df.append(girls_df)
new_df['decade_percentile'] = new_df['decade_percentile'].apply(lambda x: float(x) * 100)
new_df.sort_index(inplace=True)
del boys_df
del girls_df
plt.figure()
sns.distplot(new_df[new_df['Gender'] == 'M']['decade_percentile'], bins=100)
plt.xlim(xmin=0, xmax=100)
plt.title('Boys Name Popularity Distribution')
plt.figure()
sns.distplot(new_df[new_df['Gender'] == 'F']['decade_percentile'], bins=100)
plt.xlim(xmin=0, xmax=100)
plt.title('Girls Name Popularity Distribution')
plt.show()
code
330906/cell_7
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/NationalNames.csv')
df['Decade'] = df['Year'].apply(lambda x: x - x % 10)
df.tail()
code
330906/cell_15
[ "text_plain_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
df = pd.read_csv('../input/NationalNames.csv')
df['Decade'] = df['Year'].apply(lambda x: x - x % 10)
df_pivot = df.pivot_table(values='Count', index=['Decade', 'Name', 'Gender'], aggfunc='sum')
new_df = pd.DataFrame()
new_df['Decade'] = df_pivot.index.get_level_values('Decade')
new_df['Name'] = df_pivot.index.get_level_values('Name')
new_df['Gender'] = df_pivot.index.get_level_values('Gender')
new_df['Count'] = df_pivot.values
decadeList = list(new_df['Decade'].unique())
boys_percentileList = []
girls_percentileList = []
boys_df = new_df[new_df['Gender'] == 'M'].copy()
girls_df = new_df[new_df['Gender'] == 'F'].copy()
for i in decadeList:
    scaler = MinMaxScaler()
    boys_percentileList.extend(scaler.fit_transform(boys_df[boys_df['Decade'] == i][['Count']]))
    girls_percentileList.extend(scaler.fit_transform(girls_df[girls_df['Decade'] == i][['Count']]))
boys_df['decade_percentile'] = boys_percentileList
girls_df['decade_percentile'] = girls_percentileList
new_df = boys_df.append(girls_df)
new_df['decade_percentile'] = new_df['decade_percentile'].apply(lambda x: float(x) * 100)
new_df.sort_index(inplace=True)
del boys_df
del girls_df
new_df[new_df['decade_percentile'] >= 99.0]
code
330906/cell_17
[ "text_html_output_1.png" ]
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
df = pd.read_csv('../input/NationalNames.csv')
df['Decade'] = df['Year'].apply(lambda x: x - x % 10)
df_pivot = df.pivot_table(values='Count', index=['Decade', 'Name', 'Gender'], aggfunc='sum')
new_df = pd.DataFrame()
new_df['Decade'] = df_pivot.index.get_level_values('Decade')
new_df['Name'] = df_pivot.index.get_level_values('Name')
new_df['Gender'] = df_pivot.index.get_level_values('Gender')
new_df['Count'] = df_pivot.values
decadeList = list(new_df['Decade'].unique())
boys_percentileList = []
girls_percentileList = []
boys_df = new_df[new_df['Gender'] == 'M'].copy()
girls_df = new_df[new_df['Gender'] == 'F'].copy()
for i in decadeList:
    scaler = MinMaxScaler()
    boys_percentileList.extend(scaler.fit_transform(boys_df[boys_df['Decade'] == i][['Count']]))
    girls_percentileList.extend(scaler.fit_transform(girls_df[girls_df['Decade'] == i][['Count']]))
boys_df['decade_percentile'] = boys_percentileList
girls_df['decade_percentile'] = girls_percentileList
new_df = boys_df.append(girls_df)
new_df['decade_percentile'] = new_df['decade_percentile'].apply(lambda x: float(x) * 100)
new_df.sort_index(inplace=True)
del boys_df
del girls_df
new_df[new_df['decade_percentile'] < 1]
code
330906/cell_5
[ "text_html_output_1.png" ]
import pandas as pd
df = pd.read_csv('../input/NationalNames.csv')
print('Data year ranges from {} to {}'.format(min(df['Year']), max(df['Year'])))
code
2004143/cell_13
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
train_full_set = pd.read_csv('../input/train.csv')
full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True)
full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
corr = full_set_initial.corr()
plt.colorbar()
plt.xticks(range(len(corr)), corr.columns, rotation='vertical')
plt.yticks(range(len(corr)), corr.columns)
test_full_set = pd.read_csv('../input/test.csv')
full_set = pd.concat([train_full_set, test_full_set])
full_set = full_set.reset_index(drop=True)
""" We have to drop three columns from our study, namely: Cabin, Ticket, PassengerId.
We dropped Cabin because of two main reasons:
a. Cabin has 687 Null Values out of 891, i.e. almost 77% values are null.
b. Cabin number is directly related to the Class as cabin was allotted based on the level of class. So, cabin can easily be dropped from our analysis.
We dropped Ticket and PassengerId from our analysis because these two could not have affected the survival of the passengers. It is just demographic information."""
full_set.drop(['Ticket', 'Cabin', 'PassengerId'], axis=1, inplace=True)
plt.figure(figsize=(15, 8))
sns.set_style('whitegrid')
ax = sns.countplot(x='Title', data=full_set)
ax.set_ylabel('COUNT', size=20, color='black', alpha=0.5)
ax.set_xlabel('TITLE', size=20, color='black', alpha=0.5)
ax.set_title('COUNT OF TITLES IN EACH CATEGORY BEFORE COMBINATION', size=20, color='black', alpha=0.5)
code
2004143/cell_9
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
train_full_set = pd.read_csv('../input/train.csv')
full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True)
full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
corr = full_set_initial.corr()
plt.colorbar()
plt.xticks(range(len(corr)), corr.columns, rotation='vertical')
plt.yticks(range(len(corr)), corr.columns)
test_full_set = pd.read_csv('../input/test.csv')
full_set = pd.concat([train_full_set, test_full_set])
full_set = full_set.reset_index(drop=True)
""" We have to drop three columns from our study, namely: Cabin, Ticket, PassengerId.
We dropped Cabin because of two main reasons:
a. Cabin has 687 Null Values out of 891, i.e. almost 77% values are null.
b. Cabin number is directly related to the Class as cabin was allotted based on the level of class. So, cabin can easily be dropped from our analysis.
We dropped Ticket and PassengerId from our analysis because these two could not have affected the survival of the passengers. It is just demographic information."""
full_set.drop(['Ticket', 'Cabin', 'PassengerId'], axis=1, inplace=True)
print(full_set.describe())
code
2004143/cell_4
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd train_full_set = pd.read_csv('../input/train.csv') full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True) full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1) corr = full_set_initial.corr() plt.colorbar() plt.xticks(range(len(corr)), corr.columns, rotation='vertical') plt.yticks(range(len(corr)), corr.columns) test_full_set = pd.read_csv('../input/test.csv') print('\n\nInformation about Null/ empty data points in each Column of Test set\n\n') print(test_full_set.info())
code
2004143/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
train_full_set = pd.read_csv('../input/train.csv')
full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True)
full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
corr = full_set_initial.corr()
plt.colorbar()
plt.xticks(range(len(corr)), corr.columns, rotation='vertical')
plt.yticks(range(len(corr)), corr.columns)
test_full_set = pd.read_csv('../input/test.csv')
full_set = pd.concat([train_full_set, test_full_set])
full_set = full_set.reset_index(drop=True)
""" We have to drop three columns from our study,namely: Cabin,Ticket, PassengerId
We dropped Cabin because of two main reasons:
a. Cabin has 687 Null Values out of 891 i.e almost 77% values are null.
b. Cabin number is directly related to the Class as cabin was alotted based on the level of class.
So, cabin can easily be dropped from our analysis.
We dropped Ticket and PassengerId from our analysis because these two could not have affected the survival of the passengers.
It is just a demographic information."""
full_set.drop(['Ticket', 'Cabin', 'PassengerId'], axis=1, inplace=True)
full_set.loc[full_set['Title'].isin(['Dona.', 'Lady.', 'Countess.', 'Capt.', 'Col.', 'Don.', 'Dr.', 'Major.', 'Rev.', 'Sir.', 'Jonkheer.']), 'Title'] = 'Rare'
full_set.loc[full_set['Title'].isin(['Mlle.', 'Ms.', 'Mme.']), 'Title'] = 'Miss.'
""" 1 ---Family Size =1
2 ---Family Size between 2 and 4(included)
3 ---Family Size more than 4"""
family_size = []
for row in full_set.FamilyMembers:
    if row in [1]:
        family_size.append(1)
    elif row in [2, 3, 4]:
        family_size.append(2)
    else:
        family_size.append(3)
full_set['FamilySize'] = family_size
print('\n\n Number of null in each column before imputing:\n')
print(full_set.isnull().sum())
code
2004143/cell_20
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train_full_set = pd.read_csv('../input/train.csv') full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True) full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1) corr = full_set_initial.corr() plt.colorbar() plt.xticks(range(len(corr)), corr.columns, rotation='vertical') plt.yticks(range(len(corr)), corr.columns) test_full_set = pd.read_csv('../input/test.csv') full_set = pd.concat([train_full_set, test_full_set]) full_set = full_set.reset_index(drop=True) """ We have to drop three columns from our study,namely: Cabin,Ticket, PassengerId We dropped Cabin because of two main reasons: a. Cabin has 687 Null Values out of 891 i.e almost 77% values are null. b. Cabin number is directly related to the Class as cabin was alotted based on the level of class. So, cabin can easily be dropped from our analysis. We dropped Ticket and PassengerId from our analysis because these two could not have affected the survival of the passengers. It is just a demographic information.""" full_set.drop(['Ticket', 'Cabin', 'PassengerId'], axis=1, inplace=True) plt.figure(figsize=(15,8)) sns.set_style("whitegrid") ax=sns.countplot(x="Title", data=full_set) ax.set_ylabel("COUNT",size = 20,color="black",alpha=0.5) ax.set_xlabel("TITLE",size = 20,color="black",alpha=0.5) ax.set_title("COUNT OF TITLES IN EACH CATEGORY BEFORE COMBINATION",size = 20,color="black",alpha=0.5) full_set.loc[full_set['Title'].isin(['Dona.', 'Lady.', 'Countess.', 'Capt.', 'Col.', 'Don.', 'Dr.', 'Major.', 'Rev.', 'Sir.', 'Jonkheer.']), 'Title'] = 'Rare' full_set.loc[full_set['Title'].isin(['Mlle.', 'Ms.', 'Mme.']), 'Title'] = 'Miss.' plt.figure(figsize=(15,8)) sns.set_style("whitegrid") ax=sns.countplot(x="Title", data=full_set) ax.set_ylabel("COUNT",size = 20,color="black",alpha=0.5) ax.set_xlabel("TITLE",size = 20,color="black",alpha=0.5) ax.set_title("COUNT OF TITLES IN EACH CATEGORY AFTER COMBINATION",size = 20,color="black",alpha=0.5) family_size_survival = full_set[['FamilyMembers', 'Survived']].groupby(['FamilyMembers'], as_index=False).count().sort_values(by='Survived', ascending=False) plt.figure(figsize=(15, 8)) sns.set_style('whitegrid') ax = sns.barplot(x='FamilyMembers', y='Survived', data=family_size_survival) ax.set_title('SURVIVED PASSENGER COUNT BASED ON FAMILY SIZE', size=20, color='black', alpha=0.5) ax.set_ylabel('NUMBER SURVIVED', size=20, color='black', alpha=0.5) ax.set_xlabel('FAMILY SIZE', size=20, color='black', alpha=0.5)
code
2004143/cell_6
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd train_full_set = pd.read_csv('../input/train.csv') full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True) full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1) corr = full_set_initial.corr() plt.colorbar() plt.xticks(range(len(corr)), corr.columns, rotation='vertical') plt.yticks(range(len(corr)), corr.columns) test_full_set = pd.read_csv('../input/test.csv') full_set = pd.concat([train_full_set, test_full_set]) full_set = full_set.reset_index(drop=True) print(full_set.head(5))
code
2004143/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd
train_full_set = pd.read_csv('../input/train.csv')
print('\n\nInformation about Null/ empty data points in each Column of Training set\n\n')
print(train_full_set.info())
code
2004143/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from scipy import stats import numpy as np from sklearn.cross_validation import cross_val_score
code
2004143/cell_7
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd train_full_set = pd.read_csv('../input/train.csv') full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True) full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1) corr = full_set_initial.corr() plt.colorbar() plt.xticks(range(len(corr)), corr.columns, rotation='vertical') plt.yticks(range(len(corr)), corr.columns) test_full_set = pd.read_csv('../input/test.csv') full_set = pd.concat([train_full_set, test_full_set]) full_set = full_set.reset_index(drop=True) print('Information about Null/ empty data points in each Column\n\n') print(full_set.info())
code
2004143/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd train_full_set = pd.read_csv('../input/train.csv') full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True) full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1) corr = full_set_initial.corr() plt.colorbar() plt.xticks(range(len(corr)), corr.columns, rotation='vertical') plt.yticks(range(len(corr)), corr.columns) test_full_set = pd.read_csv('../input/test.csv') full_set = pd.concat([train_full_set, test_full_set]) full_set = full_set.reset_index(drop=True) """ We have to drop three columns from our study,namely: Cabin,Ticket, PassengerId We dropped Cabin because of two main reasons: a. Cabin has 687 Null Values out of 891 i.e almost 77% values are null. b. Cabin number is directly related to the Class as cabin was alotted based on the level of class. So, cabin can easily be dropped from our analysis. We dropped Ticket and PassengerId from our analysis because these two could not have affected the survival of the passengers. It is just a demographic information.""" full_set.drop(['Ticket', 'Cabin', 'PassengerId'], axis=1, inplace=True) full_set.loc[full_set['Title'].isin(['Dona.', 'Lady.', 'Countess.', 'Capt.', 'Col.', 'Don.', 'Dr.', 'Major.', 'Rev.', 'Sir.', 'Jonkheer.']), 'Title'] = 'Rare' full_set.loc[full_set['Title'].isin(['Mlle.', 'Ms.', 'Mme.']), 'Title'] = 'Miss.' print(full_set.Title.value_counts())
code
2004143/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd train_full_set = pd.read_csv('../input/train.csv') full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True) full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1) corr = full_set_initial.corr() print(corr) plt.figure() plt.imshow(corr, cmap='GnBu') plt.colorbar() plt.xticks(range(len(corr)), corr.columns, rotation='vertical') plt.yticks(range(len(corr)), corr.columns) plt.suptitle('Correlation Matrix', fontsize=15, fontweight='bold') plt.show()
code
2004143/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd import seaborn as sns train_full_set = pd.read_csv('../input/train.csv') full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True) full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1) corr = full_set_initial.corr() plt.colorbar() plt.xticks(range(len(corr)), corr.columns, rotation='vertical') plt.yticks(range(len(corr)), corr.columns) test_full_set = pd.read_csv('../input/test.csv') full_set = pd.concat([train_full_set, test_full_set]) full_set = full_set.reset_index(drop=True) """ We have to drop three columns from our study,namely: Cabin,Ticket, PassengerId We dropped Cabin because of two main reasons: a. Cabin has 687 Null Values out of 891 i.e almost 77% values are null. b. Cabin number is directly related to the Class as cabin was alotted based on the level of class. So, cabin can easily be dropped from our analysis. We dropped Ticket and PassengerId from our analysis because these two could not have affected the survival of the passengers. It is just a demographic information.""" full_set.drop(['Ticket', 'Cabin', 'PassengerId'], axis=1, inplace=True) plt.figure(figsize=(15,8)) sns.set_style("whitegrid") ax=sns.countplot(x="Title", data=full_set) ax.set_ylabel("COUNT",size = 20,color="black",alpha=0.5) ax.set_xlabel("TITLE",size = 20,color="black",alpha=0.5) ax.set_title("COUNT OF TITLES IN EACH CATEGORY BEFORE COMBINATION",size = 20,color="black",alpha=0.5) full_set.loc[full_set['Title'].isin(['Dona.', 'Lady.', 'Countess.', 'Capt.', 'Col.', 'Don.', 'Dr.', 'Major.', 'Rev.', 'Sir.', 'Jonkheer.']), 'Title'] = 'Rare' full_set.loc[full_set['Title'].isin(['Mlle.', 'Ms.', 'Mme.']), 'Title'] = 'Miss.' plt.figure(figsize=(15, 8)) sns.set_style('whitegrid') ax = sns.countplot(x='Title', data=full_set) ax.set_ylabel('COUNT', size=20, color='black', alpha=0.5) ax.set_xlabel('TITLE', size=20, color='black', alpha=0.5) ax.set_title('COUNT OF TITLES IN EACH CATEGORY AFTER COMBINATION', size=20, color='black', alpha=0.5)
code
2004143/cell_24
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
train_full_set = pd.read_csv('../input/train.csv')
full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True)
full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1)
corr = full_set_initial.corr()
plt.colorbar()
plt.xticks(range(len(corr)), corr.columns, rotation='vertical')
plt.yticks(range(len(corr)), corr.columns)
test_full_set = pd.read_csv('../input/test.csv')
full_set = pd.concat([train_full_set, test_full_set])
full_set = full_set.reset_index(drop=True)
""" We have to drop three columns from our study,namely: Cabin,Ticket, PassengerId
We dropped Cabin because of two main reasons:
a. Cabin has 687 Null Values out of 891 i.e almost 77% values are null.
b. Cabin number is directly related to the Class as cabin was alotted based on the level of class.
So, cabin can easily be dropped from our analysis.
We dropped Ticket and PassengerId from our analysis because these two could not have affected the survival of the passengers.
It is just a demographic information."""
full_set.drop(['Ticket', 'Cabin', 'PassengerId'], axis=1, inplace=True)
full_set.loc[full_set['Title'].isin(['Dona.', 'Lady.', 'Countess.', 'Capt.', 'Col.', 'Don.', 'Dr.', 'Major.', 'Rev.', 'Sir.', 'Jonkheer.']), 'Title'] = 'Rare'
full_set.loc[full_set['Title'].isin(['Mlle.', 'Ms.', 'Mme.']), 'Title'] = 'Miss.'
""" 1 ---Family Size =1
2 ---Family Size between 2 and 4(included)
3 ---Family Size more than 4"""
family_size = []
for row in full_set.FamilyMembers:
    if row in [1]:
        family_size.append(1)
    elif row in [2, 3, 4]:
        family_size.append(2)
    else:
        family_size.append(3)
full_set['FamilySize'] = family_size
full_set[full_set['Embarked'].isnull()]
code
2004143/cell_22
[ "text_plain_output_1.png" ]
"""IMPUTING MISSING VALUES"""
code
2004143/cell_10
[ "text_plain_output_1.png" ]
"""Feature Creation""" 'Creating Title'
code
2004143/cell_12
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt import pandas as pd train_full_set = pd.read_csv('../input/train.csv') full_set_initial = pd.get_dummies(data=train_full_set, columns=['Embarked', 'Sex', 'Survived'], drop_first=True) full_set_initial = full_set_initial.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis=1) corr = full_set_initial.corr() plt.colorbar() plt.xticks(range(len(corr)), corr.columns, rotation='vertical') plt.yticks(range(len(corr)), corr.columns) test_full_set = pd.read_csv('../input/test.csv') full_set = pd.concat([train_full_set, test_full_set]) full_set = full_set.reset_index(drop=True) """ We have to drop three columns from our study,namely: Cabin,Ticket, PassengerId We dropped Cabin because of two main reasons: a. Cabin has 687 Null Values out of 891 i.e almost 77% values are null. b. Cabin number is directly related to the Class as cabin was alotted based on the level of class. So, cabin can easily be dropped from our analysis. We dropped Ticket and PassengerId from our analysis because these two could not have affected the survival of the passengers. It is just a demographic information.""" full_set.drop(['Ticket', 'Cabin', 'PassengerId'], axis=1, inplace=True) print(full_set.Title.value_counts())
code
50244377/cell_13
[ "text_plain_output_1.png" ]
import cv2
import matplotlib.image as mpimg
import numpy as np
import os
import numpy as np
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import random
import os
IMAGE_WIDTH = 128
IMAGE_HEIGHT = 128
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
IMAGE_CHANNELS = 3
TRAIN_DIRECTORY = '/kaggle/working/train/'
TEST_DIRECTORY = '/kaggle/working/test1'

def get_filenames(directory):
    filenames = os.listdir(directory)
    return filenames

def load_data(filenames, directory):
    i = 50
    i = len(filenames)
    X = []
    y = []
    for name in filenames:
        img = mpimg.imread(os.path.join(directory, name))
        X.append(cv2.resize(img, IMAGE_SIZE))
        cat = name.split('.')[0]
        if cat == 'dog':
            y.append(0)
        else:
            y.append(1)
        i -= 1
        if i <= 0:
            break
    return (X, y)

def refine_data(X, y):
    X = np.array(X)
    X = X.reshape(X.shape[0], -1)
    X = X.T
    y = np.array(y)
    y = y.reshape((1, y.shape[0]))
    return (X, y)

X, y = refine_data(X, y)
print(X.shape)
print(y.shape)
code
50244377/cell_20
[ "text_plain_output_1.png" ]
import cv2
import matplotlib.image as mpimg
import numpy as np
import os
import numpy as np
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import random
import os
IMAGE_WIDTH = 128
IMAGE_HEIGHT = 128
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
IMAGE_CHANNELS = 3
TRAIN_DIRECTORY = '/kaggle/working/train/'
TEST_DIRECTORY = '/kaggle/working/test1'

def get_filenames(directory):
    filenames = os.listdir(directory)
    return filenames

def load_data(filenames, directory):
    i = 50
    i = len(filenames)
    X = []
    y = []
    for name in filenames:
        img = mpimg.imread(os.path.join(directory, name))
        X.append(cv2.resize(img, IMAGE_SIZE))
        cat = name.split('.')[0]
        if cat == 'dog':
            y.append(0)
        else:
            y.append(1)
        i -= 1
        if i <= 0:
            break
    return (X, y)

def refine_data(X, y):
    X = np.array(X)
    X = X.reshape(X.shape[0], -1)
    X = X.T
    y = np.array(y)
    y = y.reshape((1, y.shape[0]))
    return (X, y)

X, y = refine_data(X, y)
layer_dims = [X.shape[0], 20, 7, 5, 1]

def initialize_parameters(layer_dims):
    np.random.seed(3)
    parameters = {}
    L = len(layer_dims)
    for l in range(1, L):
        parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * 0.01
        parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
    return parameters

parameters = initialize_parameters(layer_dims)
parameters

def linear_fwd(A, W, b):
    Z = np.dot(W, A) + b
    cache = (A, W, b)
    return (Z, cache)

Z, cache = linear_fwd(X, parameters['W1'], parameters['b1'])
Z.shape

def sigmoid(Z):
    A = 1 / (1 + np.exp(-Z))
    cache = Z
    return (A, Z)

def relu(Z):
    A = np.maximum(Z, 0)
    cache = Z
    return (A, Z)

sigmoid(np.array([0, 2]))
code
50244377/cell_11
[ "text_plain_output_1.png", "image_output_1.png" ]
import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import os
import random
import numpy as np
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import random
import os
IMAGE_WIDTH = 128
IMAGE_HEIGHT = 128
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
IMAGE_CHANNELS = 3
TRAIN_DIRECTORY = '/kaggle/working/train/'
TEST_DIRECTORY = '/kaggle/working/test1'

def get_filenames(directory):
    filenames = os.listdir(directory)
    return filenames

def load_data(filenames, directory):
    i = 50
    i = len(filenames)
    X = []
    y = []
    for name in filenames:
        img = mpimg.imread(os.path.join(directory, name))
        X.append(cv2.resize(img, IMAGE_SIZE))
        cat = name.split('.')[0]
        if cat == 'dog':
            y.append(0)
        else:
            y.append(1)
        i -= 1
        if i <= 0:
            break
    return (X, y)

filenames = get_filenames(TRAIN_DIRECTORY)
X, y = load_data(filenames, TRAIN_DIRECTORY)

def show_image(filenames, directory):
    sample = random.choice(filenames)

show_image(filenames, TRAIN_DIRECTORY)
code
50244377/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import zipfile from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt import matplotlib.image as mpimg import cv2 import random import os print(os.listdir('../input/dogs-vs-cats'))
code
50244377/cell_18
[ "text_plain_output_1.png" ]
import cv2
import matplotlib.image as mpimg
import numpy as np
import os
import numpy as np
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import random
import os
IMAGE_WIDTH = 128
IMAGE_HEIGHT = 128
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
IMAGE_CHANNELS = 3
TRAIN_DIRECTORY = '/kaggle/working/train/'
TEST_DIRECTORY = '/kaggle/working/test1'

def get_filenames(directory):
    filenames = os.listdir(directory)
    return filenames

def load_data(filenames, directory):
    i = 50
    i = len(filenames)
    X = []
    y = []
    for name in filenames:
        img = mpimg.imread(os.path.join(directory, name))
        X.append(cv2.resize(img, IMAGE_SIZE))
        cat = name.split('.')[0]
        if cat == 'dog':
            y.append(0)
        else:
            y.append(1)
        i -= 1
        if i <= 0:
            break
    return (X, y)

def refine_data(X, y):
    X = np.array(X)
    X = X.reshape(X.shape[0], -1)
    X = X.T
    y = np.array(y)
    y = y.reshape((1, y.shape[0]))
    return (X, y)

X, y = refine_data(X, y)
layer_dims = [X.shape[0], 20, 7, 5, 1]

def initialize_parameters(layer_dims):
    np.random.seed(3)
    parameters = {}
    L = len(layer_dims)
    for l in range(1, L):
        parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * 0.01
        parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
    return parameters

parameters = initialize_parameters(layer_dims)
parameters

def linear_fwd(A, W, b):
    Z = np.dot(W, A) + b
    cache = (A, W, b)
    return (Z, cache)

Z, cache = linear_fwd(X, parameters['W1'], parameters['b1'])
Z.shape
code
50244377/cell_16
[ "text_plain_output_1.png" ]
import cv2
import matplotlib.image as mpimg
import numpy as np
import os
import numpy as np
import pandas as pd
import zipfile
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import cv2
import random
import os
IMAGE_WIDTH = 128
IMAGE_HEIGHT = 128
IMAGE_SIZE = (IMAGE_WIDTH, IMAGE_HEIGHT)
IMAGE_CHANNELS = 3
TRAIN_DIRECTORY = '/kaggle/working/train/'
TEST_DIRECTORY = '/kaggle/working/test1'

def get_filenames(directory):
    filenames = os.listdir(directory)
    return filenames

def load_data(filenames, directory):
    i = 50
    i = len(filenames)
    X = []
    y = []
    for name in filenames:
        img = mpimg.imread(os.path.join(directory, name))
        X.append(cv2.resize(img, IMAGE_SIZE))
        cat = name.split('.')[0]
        if cat == 'dog':
            y.append(0)
        else:
            y.append(1)
        i -= 1
        if i <= 0:
            break
    return (X, y)

def refine_data(X, y):
    X = np.array(X)
    X = X.reshape(X.shape[0], -1)
    X = X.T
    y = np.array(y)
    y = y.reshape((1, y.shape[0]))
    return (X, y)

X, y = refine_data(X, y)
layer_dims = [X.shape[0], 20, 7, 5, 1]

def initialize_parameters(layer_dims):
    np.random.seed(3)
    parameters = {}
    L = len(layer_dims)
    for l in range(1, L):
        parameters['W' + str(l)] = np.random.randn(layer_dims[l], layer_dims[l - 1]) * 0.01
        parameters['b' + str(l)] = np.zeros((layer_dims[l], 1))
    return parameters

parameters = initialize_parameters(layer_dims)
parameters
code
17123393/cell_25
[ "text_html_output_1.png" ]
from sklearn import preprocessing import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('../input/movies_metadata.csv') df_numeric = df[['budget', 'popularity', 'revenue', 'runtime', 'vote_average', 'vote_count', 'title']] df_numeric.isnull().sum() df_numeric = df_numeric.dropna() df_numeric = df_numeric[df_numeric['vote_count'] > 30] df_numeric.shape from sklearn import preprocessing minmax_processed = preprocessing.MinMaxScaler().fit_transform(df_numeric.drop('title', axis=1)) df_numeric_scaled = pd.DataFrame(minmax_processed, index=df_numeric.index, columns=df_numeric.columns[:-1]) plt.figure(figsize=(12,7)) axis = sns.barplot(x=np.arange(0,5,1),y=df_numeric.groupby(['cluster']).count()['budget'].values) x=axis.set_xlabel("Cluster Number") x=axis.set_ylabel("Number of movies") size_array = list(df_numeric.groupby(['cluster']).count()['budget'].values) size_array df_numeric[df_numeric['cluster'] == 3].tail(5)
code
17123393/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/movies_metadata.csv') df.head(2)
code
17123393/cell_20
[ "text_html_output_1.png" ]
from sklearn import preprocessing import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('../input/movies_metadata.csv') df_numeric = df[['budget', 'popularity', 'revenue', 'runtime', 'vote_average', 'vote_count', 'title']] df_numeric.isnull().sum() df_numeric = df_numeric.dropna() df_numeric = df_numeric[df_numeric['vote_count'] > 30] df_numeric.shape from sklearn import preprocessing minmax_processed = preprocessing.MinMaxScaler().fit_transform(df_numeric.drop('title', axis=1)) df_numeric_scaled = pd.DataFrame(minmax_processed, index=df_numeric.index, columns=df_numeric.columns[:-1]) plt.figure(figsize=(12, 7)) axis = sns.barplot(x=np.arange(0, 5, 1), y=df_numeric.groupby(['cluster']).count()['budget'].values) x = axis.set_xlabel('Cluster Number') x = axis.set_ylabel('Number of movies')
code
17123393/cell_6
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/movies_metadata.csv') df_numeric = df[['budget', 'popularity', 'revenue', 'runtime', 'vote_average', 'vote_count', 'title']] df_numeric.head()
code
17123393/cell_1
[ "text_plain_output_1.png" ]
import os import numpy as np import pandas as pd import os print(os.listdir('../input'))
code
17123393/cell_7
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/movies_metadata.csv') df_numeric = df[['budget', 'popularity', 'revenue', 'runtime', 'vote_average', 'vote_count', 'title']] df_numeric.isnull().sum()
code
17123393/cell_15
[ "text_plain_output_1.png" ]
from sklearn import preprocessing from sklearn.cluster import KMeans import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/movies_metadata.csv') df_numeric = df[['budget', 'popularity', 'revenue', 'runtime', 'vote_average', 'vote_count', 'title']] df_numeric.isnull().sum() df_numeric = df_numeric.dropna() df_numeric = df_numeric[df_numeric['vote_count'] > 30] df_numeric.shape from sklearn import preprocessing minmax_processed = preprocessing.MinMaxScaler().fit_transform(df_numeric.drop('title', axis=1)) df_numeric_scaled = pd.DataFrame(minmax_processed, index=df_numeric.index, columns=df_numeric.columns[:-1]) from sklearn.cluster import KMeans kmeans = KMeans(n_clusters=5) kmeans.fit(df_numeric_scaled) len(kmeans.labels_)
code
17123393/cell_3
[ "image_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/movies_metadata.csv')
code
17123393/cell_17
[ "text_plain_output_1.png" ]
from sklearn import preprocessing import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/movies_metadata.csv') df_numeric = df[['budget', 'popularity', 'revenue', 'runtime', 'vote_average', 'vote_count', 'title']] df_numeric.isnull().sum() df_numeric = df_numeric.dropna() df_numeric = df_numeric[df_numeric['vote_count'] > 30] df_numeric.shape from sklearn import preprocessing minmax_processed = preprocessing.MinMaxScaler().fit_transform(df_numeric.drop('title', axis=1)) df_numeric_scaled = pd.DataFrame(minmax_processed, index=df_numeric.index, columns=df_numeric.columns[:-1]) df_numeric.head()
code
17123393/cell_24
[ "text_plain_output_1.png" ]
from sklearn import preprocessing import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('../input/movies_metadata.csv') df_numeric = df[['budget', 'popularity', 'revenue', 'runtime', 'vote_average', 'vote_count', 'title']] df_numeric.isnull().sum() df_numeric = df_numeric.dropna() df_numeric = df_numeric[df_numeric['vote_count'] > 30] df_numeric.shape from sklearn import preprocessing minmax_processed = preprocessing.MinMaxScaler().fit_transform(df_numeric.drop('title', axis=1)) df_numeric_scaled = pd.DataFrame(minmax_processed, index=df_numeric.index, columns=df_numeric.columns[:-1]) plt.figure(figsize=(12,7)) axis = sns.barplot(x=np.arange(0,5,1),y=df_numeric.groupby(['cluster']).count()['budget'].values) x=axis.set_xlabel("Cluster Number") x=axis.set_ylabel("Number of movies") size_array = list(df_numeric.groupby(['cluster']).count()['budget'].values) size_array df_numeric[df_numeric['cluster'] == 2].head(5)
code
17123393/cell_14
[ "text_html_output_1.png" ]
from sklearn import preprocessing from sklearn.cluster import KMeans import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/movies_metadata.csv') df_numeric = df[['budget', 'popularity', 'revenue', 'runtime', 'vote_average', 'vote_count', 'title']] df_numeric.isnull().sum() df_numeric = df_numeric.dropna() df_numeric = df_numeric[df_numeric['vote_count'] > 30] df_numeric.shape from sklearn import preprocessing minmax_processed = preprocessing.MinMaxScaler().fit_transform(df_numeric.drop('title', axis=1)) df_numeric_scaled = pd.DataFrame(minmax_processed, index=df_numeric.index, columns=df_numeric.columns[:-1]) from sklearn.cluster import KMeans kmeans = KMeans(n_clusters=5) kmeans.fit(df_numeric_scaled)
code
17123393/cell_22
[ "text_plain_output_1.png" ]
from sklearn import preprocessing import matplotlib.pyplot as plt import numpy as np import numpy as np # linear algebra import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns df = pd.read_csv('../input/movies_metadata.csv') df_numeric = df[['budget', 'popularity', 'revenue', 'runtime', 'vote_average', 'vote_count', 'title']] df_numeric.isnull().sum() df_numeric = df_numeric.dropna() df_numeric = df_numeric[df_numeric['vote_count'] > 30] df_numeric.shape from sklearn import preprocessing minmax_processed = preprocessing.MinMaxScaler().fit_transform(df_numeric.drop('title', axis=1)) df_numeric_scaled = pd.DataFrame(minmax_processed, index=df_numeric.index, columns=df_numeric.columns[:-1]) plt.figure(figsize=(12,7)) axis = sns.barplot(x=np.arange(0,5,1),y=df_numeric.groupby(['cluster']).count()['budget'].values) x=axis.set_xlabel("Cluster Number") x=axis.set_ylabel("Number of movies") size_array = list(df_numeric.groupby(['cluster']).count()['budget'].values) size_array
code
17123393/cell_10
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/movies_metadata.csv') df_numeric = df[['budget', 'popularity', 'revenue', 'runtime', 'vote_average', 'vote_count', 'title']] df_numeric.isnull().sum() df_numeric = df_numeric.dropna() df_numeric = df_numeric[df_numeric['vote_count'] > 30] df_numeric.shape
code
17123393/cell_12
[ "text_html_output_1.png" ]
from sklearn import preprocessing import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) df = pd.read_csv('../input/movies_metadata.csv') df_numeric = df[['budget', 'popularity', 'revenue', 'runtime', 'vote_average', 'vote_count', 'title']] df_numeric.isnull().sum() df_numeric = df_numeric.dropna() df_numeric = df_numeric[df_numeric['vote_count'] > 30] df_numeric.shape from sklearn import preprocessing minmax_processed = preprocessing.MinMaxScaler().fit_transform(df_numeric.drop('title', axis=1)) df_numeric_scaled = pd.DataFrame(minmax_processed, index=df_numeric.index, columns=df_numeric.columns[:-1]) df_numeric_scaled.head()
code
17123567/cell_6
[ "text_plain_output_1.png" ]
from google.cloud import bigquery client = bigquery.Client() dataset_ref = client.dataset('hacker_news', project='bigquery-public-data') dataset = client.get_dataset(dataset_ref) tables = list(client.list_tables(dataset)) for table in tables: print(table.table_id)
code
17123567/cell_8
[ "text_html_output_1.png" ]
from google.cloud import bigquery client = bigquery.Client() dataset_ref = client.dataset('hacker_news', project='bigquery-public-data') dataset = client.get_dataset(dataset_ref) tables = list(client.list_tables(dataset)) table_ref = dataset_ref.table('full') table = client.get_table(table_ref) table.schema
code
17123567/cell_3
[ "text_plain_output_1.png" ]
from google.cloud import bigquery client = bigquery.Client()
code