Dataset columns:
path: string (length 13 to 17)
screenshot_names: sequence (length 1 to 873)
code: string (length 0 to 40.4k)
cell_type: string (1 class: "code")
105195570/cell_21
[ "image_output_1.png" ]
from scipy.stats import boxcox
from sklearn import preprocessing
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/openintro-possum/possum.csv')
data.shape
data.isna().sum()
data.dropna(axis=0, inplace=True)
data.columns
data.replace(to_replace={'Vic': 'Victoria', 'm': 'male', 'f': 'female'}, inplace=True)
data = data.rename(columns={'Pop': 'pop'})
list_of_cat_unordered_features = data[['site', 'pop', 'sex']]
for feature in list_of_cat_unordered_features:
    plt.figure(figsize=(12, 6.5))
    plt.title(feature, fontsize=15, fontweight='bold', fontname='Helvetica', ha='center')
    ax = sns.countplot(x=data[feature], data=data, order=data[feature].value_counts().index)
    for container in ax.containers:
        ax.bar_label(container)
    plt.show()
list_of_num_features = data[['age', 'hdlngth', 'skullw', 'totlngth', 'taill', 'footlgth', 'earconch', 'eye', 'chest', 'belly']]
for feature in list_of_num_features:
    plt.figure(figsize=(12, 6.5))
    plt.title(feature, fontsize=15, fontweight='bold', fontname='Helvetica', ha='center')
    ax = sns.boxplot(y=data[feature], data=data)
    for container in ax.containers:
        ax.bar_label(container)
    plt.show()
# normalisation of skull width attribute
data['skullw'] = boxcox(data['skullw'])[0]
plt.figure(figsize=(12, 6.5))
plt.title('Skull size', fontsize=15, fontweight='bold', fontname='Helvetica', ha='center')
ax = sns.boxplot(y=data['skullw'], data=data)  # plot the transformed skull width, not the leftover loop variable
corr = data.corr()
cat_columns = [feature for feature in data.columns if data[feature].dtype == 'object']
encoder = preprocessing.LabelEncoder()
for col in cat_columns:
    data[col] = encoder.fit_transform(data[col])
data.head()
code
105195570/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/openintro-possum/possum.csv')
data.shape
data.isna().sum()
data.dropna(axis=0, inplace=True)
data.columns
code
105195570/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/openintro-possum/possum.csv')
data.shape
data.isna().sum()
code
105195570/cell_19
[ "image_output_5.png", "image_output_7.png", "image_output_4.png", "image_output_8.png", "image_output_6.png", "image_output_3.png", "image_output_2.png", "image_output_1.png", "image_output_10.png", "image_output_9.png" ]
from scipy.stats import boxcox
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/openintro-possum/possum.csv')
data.shape
data.isna().sum()
data.dropna(axis=0, inplace=True)
data.columns
data.replace(to_replace={'Vic': 'Victoria', 'm': 'male', 'f': 'female'}, inplace=True)
data = data.rename(columns={'Pop': 'pop'})
list_of_cat_unordered_features = data[['site', 'pop', 'sex']]
for feature in list_of_cat_unordered_features:
    plt.figure(figsize=(12, 6.5))
    plt.title(feature, fontsize=15, fontweight='bold', fontname='Helvetica', ha='center')
    ax = sns.countplot(x=data[feature], data=data, order=data[feature].value_counts().index)
    for container in ax.containers:
        ax.bar_label(container)
    plt.show()
list_of_num_features = data[['age', 'hdlngth', 'skullw', 'totlngth', 'taill', 'footlgth', 'earconch', 'eye', 'chest', 'belly']]
for feature in list_of_num_features:
    plt.figure(figsize=(12, 6.5))
    plt.title(feature, fontsize=15, fontweight='bold', fontname='Helvetica', ha='center')
    ax = sns.boxplot(y=data[feature], data=data)
    for container in ax.containers:
        ax.bar_label(container)
    plt.show()
# normalisation of skull width attribute
data['skullw'] = boxcox(data['skullw'])[0]
plt.figure(figsize=(12, 6.5))
plt.title('Skull size', fontsize=15, fontweight='bold', fontname='Helvetica', ha='center')
ax = sns.boxplot(y=data['skullw'], data=data)  # plot the transformed skull width, not the leftover loop variable
corr = data.corr()
plt.figure(figsize=(20, 12))
sns.heatmap(corr, linewidths=4, annot=True, fmt='.2f', cmap='BrBG')
plt.show()
code
105195570/cell_7
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/openintro-possum/possum.csv')
data.head()
code
105195570/cell_18
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
from scipy.stats import boxcox
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/openintro-possum/possum.csv')
data.shape
data.isna().sum()
data.dropna(axis=0, inplace=True)
data.columns
data.replace(to_replace={'Vic': 'Victoria', 'm': 'male', 'f': 'female'}, inplace=True)
data = data.rename(columns={'Pop': 'pop'})
list_of_cat_unordered_features = data[['site', 'pop', 'sex']]
for feature in list_of_cat_unordered_features:
    plt.figure(figsize=(12, 6.5))
    plt.title(feature, fontsize=15, fontweight='bold', fontname='Helvetica', ha='center')
    ax = sns.countplot(x=data[feature], data=data, order=data[feature].value_counts().index)
    for container in ax.containers:
        ax.bar_label(container)
    plt.show()
list_of_num_features = data[['age', 'hdlngth', 'skullw', 'totlngth', 'taill', 'footlgth', 'earconch', 'eye', 'chest', 'belly']]
for feature in list_of_num_features:
    plt.figure(figsize=(12, 6.5))
    plt.title(feature, fontsize=15, fontweight='bold', fontname='Helvetica', ha='center')
    ax = sns.boxplot(y=data[feature], data=data)
    for container in ax.containers:
        ax.bar_label(container)
    plt.show()
data['skullw'] = boxcox(data['skullw'])[0]
plt.figure(figsize=(12, 6.5))
plt.title('Skull size', fontsize=15, fontweight='bold', fontname='Helvetica', ha='center')
ax = sns.boxplot(y=data['skullw'], data=data)  # plot the transformed skull width, not the leftover loop variable
code
105195570/cell_8
[ "image_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/openintro-possum/possum.csv')
data.shape
code
105195570/cell_16
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/openintro-possum/possum.csv')
data.shape
data.isna().sum()
data.dropna(axis=0, inplace=True)
data.columns
data.replace(to_replace={'Vic': 'Victoria', 'm': 'male', 'f': 'female'}, inplace=True)
data = data.rename(columns={'Pop': 'pop'})
list_of_cat_unordered_features = data[['site', 'pop', 'sex']]
for feature in list_of_cat_unordered_features:
    plt.figure(figsize=(12, 6.5))
    plt.title(feature, fontsize=15, fontweight='bold', fontname='Helvetica', ha='center')
    ax = sns.countplot(x=data[feature], data=data, order=data[feature].value_counts().index)
    for container in ax.containers:
        ax.bar_label(container)
    plt.show()
code
105195570/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns

data = pd.read_csv('../input/openintro-possum/possum.csv')
data.shape
data.isna().sum()
data.dropna(axis=0, inplace=True)
data.columns
data.replace(to_replace={'Vic': 'Victoria', 'm': 'male', 'f': 'female'}, inplace=True)
data = data.rename(columns={'Pop': 'pop'})
list_of_cat_unordered_features = data[['site', 'pop', 'sex']]
for feature in list_of_cat_unordered_features:
    plt.figure(figsize=(12, 6.5))
    plt.title(feature, fontsize=15, fontweight='bold', fontname='Helvetica', ha='center')
    ax = sns.countplot(x=data[feature], data=data, order=data[feature].value_counts().index)
    for container in ax.containers:
        ax.bar_label(container)
    plt.show()
list_of_num_features = data[['age', 'hdlngth', 'skullw', 'totlngth', 'taill', 'footlgth', 'earconch', 'eye', 'chest', 'belly']]
for feature in list_of_num_features:
    plt.figure(figsize=(12, 6.5))
    plt.title(feature, fontsize=15, fontweight='bold', fontname='Helvetica', ha='center')
    ax = sns.boxplot(y=data[feature], data=data)
    for container in ax.containers:
        ax.bar_label(container)
    plt.show()
code
105195570/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/openintro-possum/possum.csv')
data.shape
data.describe()
code
105195570/cell_12
[ "text_html_output_1.png" ]
import pandas as pd

data = pd.read_csv('../input/openintro-possum/possum.csv')
data.shape
data.isna().sum()
data.dropna(axis=0, inplace=True)
data.info()
code
129021961/cell_21
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical
import numpy as np
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pickle

df = pd.read_csv('/kaggle/input/az-handwritten-alphabets-in-csv-format/A_Z Handwritten Data.csv')
df.shape
X = df.drop(['0'], axis=1)
y = df['0']
X.iloc[0].shape
imag = X.iloc[9]
import numpy as np
imagenum = np.array(imag)
finalimage = imagenum.reshape(28, 28)
final_X = X / 255
final_y = to_categorical(y, num_classes=26)
final_y.shape
model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(784,)))
model.add(Dense(32, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(26, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(final_X, final_y, epochs=10, batch_size=32)
import pickle
pickle.dump(model, open('finalalpha.pkl', 'wb'))
pickled_model = pickle.load(open('finalalpha.pkl', 'rb'))
code
129021961/cell_13
[ "text_plain_output_1.png" ]
from tensorflow.keras.utils import to_categorical
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/az-handwritten-alphabets-in-csv-format/A_Z Handwritten Data.csv')
df.shape
X = df.drop(['0'], axis=1)
y = df['0']
final_y = to_categorical(y, num_classes=26)
final_y
code
129021961/cell_9
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/az-handwritten-alphabets-in-csv-format/A_Z Handwritten Data.csv')
df.shape
X = df.drop(['0'], axis=1)
X.iloc[0].shape
imag = X.iloc[9]
import numpy as np
imagenum = np.array(imag)
finalimage = imagenum.reshape(28, 28)
import matplotlib.pyplot as plt
plt.imshow(finalimage)
code
129021961/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/az-handwritten-alphabets-in-csv-format/A_Z Handwritten Data.csv')
df.shape
code
129021961/cell_20
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical
import numpy as np
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pickle

df = pd.read_csv('/kaggle/input/az-handwritten-alphabets-in-csv-format/A_Z Handwritten Data.csv')
df.shape
X = df.drop(['0'], axis=1)
y = df['0']
X.iloc[0].shape
imag = X.iloc[9]
import numpy as np
imagenum = np.array(imag)
finalimage = imagenum.reshape(28, 28)
final_X = X / 255
final_y = to_categorical(y, num_classes=26)
final_y.shape
model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(784,)))
model.add(Dense(32, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(26, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(final_X, final_y, epochs=10, batch_size=32)
import pickle
pickle.dump(model, open('finalalpha.pkl', 'wb'))
code
129021961/cell_11
[ "text_html_output_1.png" ]
from tensorflow.keras.utils import to_categorical
code
129021961/cell_19
[ "application_vnd.jupyter.stderr_output_1.png" ]
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical
import numpy as np
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/az-handwritten-alphabets-in-csv-format/A_Z Handwritten Data.csv')
df.shape
X = df.drop(['0'], axis=1)
y = df['0']
X.iloc[0].shape
imag = X.iloc[9]
import numpy as np
imagenum = np.array(imag)
finalimage = imagenum.reshape(28, 28)
final_X = X / 255
final_y = to_categorical(y, num_classes=26)
final_y.shape
model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(784,)))
model.add(Dense(32, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(26, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(final_X, final_y, epochs=10, batch_size=32)
code
129021961/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import os

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
129021961/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/az-handwritten-alphabets-in-csv-format/A_Z Handwritten Data.csv')
df.shape
X = df.drop(['0'], axis=1)
X.iloc[0].shape
code
129021961/cell_3
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/az-handwritten-alphabets-in-csv-format/A_Z Handwritten Data.csv')
df.head(5)
code
129021961/cell_17
[ "text_plain_output_1.png", "image_output_1.png" ]
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential

model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(784,)))
model.add(Dense(32, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(26, activation='softmax'))
model.summary()
code
129021961/cell_24
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical
import cv2
import numpy as np
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import pickle

df = pd.read_csv('/kaggle/input/az-handwritten-alphabets-in-csv-format/A_Z Handwritten Data.csv')
df.shape
X = df.drop(['0'], axis=1)
y = df['0']
X.iloc[0].shape
imag = X.iloc[9]
import numpy as np
imagenum = np.array(imag)
finalimage = imagenum.reshape(28, 28)
final_X = X / 255
final_y = to_categorical(y, num_classes=26)
final_y.shape
model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(784,)))
model.add(Dense(32, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(26, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(final_X, final_y, epochs=10, batch_size=32)
import pickle
pickle.dump(model, open('finalalpha.pkl', 'wb'))
pickled_model = pickle.load(open('finalalpha.pkl', 'rb'))

def get(A):
    A = cv2.cvtColor(A, cv2.COLOR_BGR2GRAY)  # convert to grayscale
    A = cv2.resize(A, (28, 28))              # match the 28x28 training resolution
    A = A.reshape(1, 784) / 255              # flatten and scale to [0, 1]
    return pickled_model.predict(A).argmax()

get(cv2.imread('/kaggle/input/imgfileb/imbB.png'))
code
129021961/cell_14
[ "text_plain_output_1.png" ]
from tensorflow.keras.utils import to_categorical
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('/kaggle/input/az-handwritten-alphabets-in-csv-format/A_Z Handwritten Data.csv')
df.shape
X = df.drop(['0'], axis=1)
y = df['0']
final_y = to_categorical(y, num_classes=26)
final_y.shape
code
105173227/cell_21
[ "text_plain_output_1.png" ]
s = '{} is a {} company'
p = s.format('Google', 'tech')
s2 = '{Company_name} is a {Company_type} company'
p2 = s2.format(Company_type='tech', Company_name='Google')
age = {'Anshuman': 22, 'Ayushman': 13, 'Bharati': 45}
a = 'profit is {:,}'
b = a.format(1234567890)
l1 = []
for i in range(1, 6):
    l1.append(i ** 2)
l2 = [i ** 2 for i in range(1, 6)]
l3 = []
for i in range(1, 11):
    if i % 2 == 0:
        l3.append(i)
l4 = [i for i in range(1, 11) if i % 2 == 0]
s1 = {i for i in range(1, 11) if i % 2 == 0}
s2 = {i ** 2 for i in range(1, 6)}
l1 = [1, 4, 65, 24, 83, 43, 21]
l1.sort()

def s(x):
    return x[1]

def s1(x):
    return x[0] + x[1]

l2 = [[1, 2], [3, 4], [2, 6], [7, 5]]
l2.sort()
l2.sort(key=s)
l2.sort(key=s1)
l3 = [[1, 2], [3, 4], [2, 6], [7, 5]]
l3.sort(key=lambda x: x[0] + x[1])

def check(x):
    return 'even' if x % 2 == 0 else 'ODD'

l1 = [1, 2, 3, 4, 5, 6]
l2 = []
for i in l1:
    l2.append(check(i))
print(l2)
l3 = list(map(check, l1))
print(l3)
l4 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
l5 = [9, 8, 7, 6, 5, 4, 3, 2, 1]
l6 = list(map(lambda x, y: y - x, l4, l5))
print(l6)
code
105173227/cell_9
[ "text_plain_output_1.png" ]
l1 = []
for i in range(1, 6):
    l1.append(i ** 2)
print(l1)
l2 = [i ** 2 for i in range(1, 6)]
print(l2)
code
105173227/cell_4
[ "text_plain_output_1.png" ]
s = '{} is a {} company'
p = s.format('Google', 'tech')
s2 = '{Company_name} is a {Company_type} company'
p2 = s2.format(Company_type='tech', Company_name='Google')
age = {'Anshuman': 22, 'Ayushman': 13, 'Bharati': 45}
a = 'profit is {:,}'
b = a.format(1234567890)
try:
    a = int(input('a='))
    b = int(input('b='))
    c = (a + b) / 2
    print(c)
except:
    print('wrong values entered')
print('Next line')
code
105173227/cell_23
[ "text_plain_output_1.png" ]
s = '{} is a {} company'
p = s.format('Google', 'tech')
s2 = '{Company_name} is a {Company_type} company'
p2 = s2.format(Company_type='tech', Company_name='Google')
age = {'Anshuman': 22, 'Ayushman': 13, 'Bharati': 45}
a = 'profit is {:,}'
b = a.format(1234567890)
try:
    a = int(input('a='))
    b = int(input('b='))
    c = (a + b) / 2
except:
    try:
        a = int(input('a='))
        b = int(input('b='))
        c = (a + b) / 2
    except Exception as e:
        try:
            a = int(input('a='))
            b = int(input('b='))
            c = (a + b) / 2
        except Exception as e:
            pass  # no-op handler

def check1(a):
    vowles = ['a', 'e', 'i', 'o', 'u']
    if a in vowles:
        return True
    else:
        return False

l7 = ['a', 'b', 'c', 'e', 'g', 'o', 'z']
l8 = list(map(check1, l7))
l9 = list(filter(check1, l7))
print(l8)
print(l9)
code
105173227/cell_6
[ "text_plain_output_1.png" ]
s = '{} is a {} company'
p = s.format('Google', 'tech')
s2 = '{Company_name} is a {Company_type} company'
p2 = s2.format(Company_type='tech', Company_name='Google')
age = {'Anshuman': 22, 'Ayushman': 13, 'Bharati': 45}
a = 'profit is {:,}'
b = a.format(1234567890)
try:
    a = int(input('a='))
    b = int(input('b='))
    c = (a + b) / 2
except:
    try:
        a = int(input('a='))
        b = int(input('b='))
        c = (a + b) / 2
    except Exception as e:
        try:
            a = int(input('a='))
            b = int(input('b='))
            c = (a + b) / 2
            print(c)
        except Exception as e:
            print('wrong values entered')
            print(e)
        else:
            print('Next line')
code
105173227/cell_2
[ "text_plain_output_1.png" ]
s = '{} is a {} company'
p = s.format('Google', 'tech')
print(p)
s2 = '{Company_name} is a {Company_type} company'
p2 = s2.format(Company_type='tech', Company_name='Google')
print(p2)
age = {'Anshuman': 22, 'Ayushman': 13, 'Bharati': 45}
for i in age:
    print('{:<10} - {}'.format(i, age[i]))
a = 'profit is {:,}'
b = a.format(1234567890)
print(b)
code
105173227/cell_19
[ "text_plain_output_1.png" ]
s = '{} is a {} company'
p = s.format('Google', 'tech')
s2 = '{Company_name} is a {Company_type} company'
p2 = s2.format(Company_type='tech', Company_name='Google')
age = {'Anshuman': 22, 'Ayushman': 13, 'Bharati': 45}
a = 'profit is {:,}'
b = a.format(1234567890)
l1 = []
for i in range(1, 6):
    l1.append(i ** 2)
l2 = [i ** 2 for i in range(1, 6)]
l3 = []
for i in range(1, 11):
    if i % 2 == 0:
        l3.append(i)
l4 = [i for i in range(1, 11) if i % 2 == 0]
s1 = {i for i in range(1, 11) if i % 2 == 0}
s2 = {i ** 2 for i in range(1, 6)}
l1 = [1, 4, 65, 24, 83, 43, 21]
l1.sort()
print(l1)

def s(x):
    return x[1]

def s1(x):
    return x[0] + x[1]

l2 = [[1, 2], [3, 4], [2, 6], [7, 5]]
l2.sort()
print(l2)
l2.sort(key=s)
print(l2)
l2.sort(key=s1)
print(l2)
l3 = [[1, 2], [3, 4], [2, 6], [7, 5]]
l3.sort(key=lambda x: x[0] + x[1])
print(l3)
code
105173227/cell_15
[ "text_plain_output_1.png" ]
d2 = {'anshu': 45, 'ayush': 42, 'moon': 12, 'bapun': 23}
d3 = {key: 'Yes' if value > 40 else 'No' for key, value in d2.items()}
print(d3)
code
105173227/cell_17
[ "text_plain_output_1.png" ]
s = '{} is a {} company'
p = s.format('Google', 'tech')
s2 = '{Company_name} is a {Company_type} company'
p2 = s2.format(Company_type='tech', Company_name='Google')
age = {'Anshuman': 22, 'Ayushman': 13, 'Bharati': 45}
a = 'profit is {:,}'
b = a.format(1234567890)
try:
    a = int(input('a='))
    b = int(input('b='))
    c = (a + b) / 2
except:
    try:
        a = int(input('a='))
        b = int(input('b='))
        c = (a + b) / 2
    except Exception as e:
        try:
            a = int(input('a='))
            b = int(input('b='))
            c = (a + b) / 2
        except Exception as e:
            pass  # no-op handler

def add(a, b):
    return a + b

print(add(3, 4))
add1 = lambda p, q: p + q
print(add1(3, 4))

def compare(a, b):
    if a > b:
        return a
    else:
        return b

print(compare(1, 2))
compare1 = lambda p, q: p if p > q else q
print(compare1(2, 3))
print(compare1(2, 1))

def var(*a):
    print(a)

var(1, 2, 3, 4, 5)
var1 = lambda *a: print(a)
var1(1, 2, 3, 4, 5)
code
105173227/cell_14
[ "text_plain_output_1.png" ]
d1 = {i: i ** 3 for i in range(1, 11)}
print(d1)
code
105173227/cell_10
[ "text_plain_output_1.png" ]
l3 = []
for i in range(1, 11):
    if i % 2 == 0:
        l3.append(i)
print(l3)
l4 = [i for i in range(1, 11) if i % 2 == 0]
print(l4)
code
105173227/cell_12
[ "text_plain_output_1.png" ]
s = '{} is a {} company'
p = s.format('Google', 'tech')
s2 = '{Company_name} is a {Company_type} company'
p2 = s2.format(Company_type='tech', Company_name='Google')
age = {'Anshuman': 22, 'Ayushman': 13, 'Bharati': 45}
a = 'profit is {:,}'
b = a.format(1234567890)
s1 = {i for i in range(1, 11) if i % 2 == 0}
print(s1)
s2 = {i ** 2 for i in range(1, 6)}
print(s2)
code
105173227/cell_5
[ "text_plain_output_1.png" ]
s = '{} is a {} company'
p = s.format('Google', 'tech')
s2 = '{Company_name} is a {Company_type} company'
p2 = s2.format(Company_type='tech', Company_name='Google')
age = {'Anshuman': 22, 'Ayushman': 13, 'Bharati': 45}
a = 'profit is {:,}'
b = a.format(1234567890)
try:
    a = int(input('a='))
    b = int(input('b='))
    c = (a + b) / 2
except:
    try:
        a = int(input('a='))
        b = int(input('b='))
        c = (a + b) / 2
        print(c)
    except Exception as e:
        print('wrong values entered')
        print(e)
print('Next line')
code
122256158/cell_13
[ "text_plain_output_1.png" ]
from tensorflow.keras.layers import Bidirectional
from tensorflow.keras.layers import Conv1D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import MaxPooling1D
import tensorflow as tf

class BiLSTM(tf.keras.Model):

    def __init__(self, NUM_WORDS, embedding_vector_length, num_of_class):
        super(BiLSTM, self).__init__()
        self.embedding = Embedding(input_dim=NUM_WORDS, output_dim=embedding_vector_length)
        self.BiLSTMl = Bidirectional(LSTM(100, return_sequences=True, recurrent_dropout=0.5))
        self.conv = Conv1D(filters=100, kernel_size=5, padding='same', activation='relu')
        self.MaxPool = MaxPooling1D(pool_size=2)
        self.BiLSTM2 = Bidirectional(LSTM(100, return_sequences=False, recurrent_dropout=0.5))
        self.dense = Dense(num_of_class, activation='sigmoid')

    def call(self, input_tensor):
        x = self.embedding(input_tensor)
        for _ in range(3):
            x = self.BiLSTMl(x)
            x = self.conv(x)
            x = self.MaxPool(x)
        x = self.BiLSTM2(x)
        x = self.dense(x)
        return x

model = BiLSTM(NUM_WORDS=22, embedding_vector_length=100, num_of_class=2)
model.build(input_shape=(None, 100))
model.summary()
code
122256158/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd

def sequence_length_filter(df):
    """checks every protein sequence in 'Sequence' Column via for loop
    stores length of each sequence in sequence_length object
    if sequence_length is more than 6000 or less than 50
    then drops that row where that particular sequence belongs
    updates the dataframe"""
    df.dropna(inplace=True)
    row_index = 0
    for sequence in df['Sequence']:
        sequence_length = len(str(sequence))
        if sequence_length > 6000 or sequence_length < 50:
            df.drop(df.index[row_index], inplace=True)
        row_index += 1
    return df

def irregular_sequence_filter(df):
    """checks every protein sequence in 'Sequence' Column via for loop
    then enumerate through every character and index of that character in that sequence
    within same for loop uses drop method to drop the particular row from dataframe
    where sequence character 'X' and 'Z' matches
    updates the dataframe"""
    for sequence in df['Sequence']:
        for index, character in enumerate(sequence):
            if character == 'U':
                df.drop(df.loc[df['Sequence'] == sequence].index, inplace=True)
            elif character == 'X':
                df.drop(df.loc[df['Sequence'] == sequence].index, inplace=True)
            elif character == 'Z':
                df.drop(df.loc[df['Sequence'] == sequence].index, inplace=True)
    return df

df_binding = sequence_length_filter(pd.read_csv('/kaggle/input/rna-binding-and-non-bindinghuman/NON_RNA_binding_human.csv'))
df_binding = irregular_sequence_filter(df_binding)
df_binding['class'] = 'RNA binding'
df_nonBinding = sequence_length_filter(pd.read_csv('/kaggle/input/rna-binding-and-non-bindinghuman/RNA_binding_human.csv'))
df_nonBinding = irregular_sequence_filter(df_nonBinding)
df_nonBinding['class'] = 'NON-RNA binding'
df_merged = pd.concat([df_binding, df_nonBinding], ignore_index=True, sort=False)
df = df_merged.sample(frac=1).reset_index(drop=True)
df
code
122256158/cell_11
[ "text_html_output_1.png" ]
(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
code
122256158/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

def sequence_length_filter(df):
    """checks every protein sequence in 'Sequence' Column via for loop
    stores length of each sequence in sequence_length object
    if sequence_length is more than 6000 or less than 50
    then drops that row where that particular sequence belongs
    updates the dataframe"""
    df.dropna(inplace=True)
    row_index = 0
    for sequence in df['Sequence']:
        sequence_length = len(str(sequence))
        if sequence_length > 6000 or sequence_length < 50:
            df.drop(df.index[row_index], inplace=True)
        row_index += 1
    return df

def irregular_sequence_filter(df):
    """checks every protein sequence in 'Sequence' Column via for loop
    then enumerate through every character and index of that character in that sequence
    within same for loop uses drop method to drop the particular row from dataframe
    where sequence character 'X' and 'Z' matches
    updates the dataframe"""
    for sequence in df['Sequence']:
        for index, character in enumerate(sequence):
            if character == 'U':
                df.drop(df.loc[df['Sequence'] == sequence].index, inplace=True)
            elif character == 'X':
                df.drop(df.loc[df['Sequence'] == sequence].index, inplace=True)
            elif character == 'Z':
                df.drop(df.loc[df['Sequence'] == sequence].index, inplace=True)
    return df

df_binding = sequence_length_filter(pd.read_csv('/kaggle/input/rna-binding-and-non-bindinghuman/NON_RNA_binding_human.csv'))
df_binding = irregular_sequence_filter(df_binding)
df_binding['class'] = 'RNA binding'
df_nonBinding = sequence_length_filter(pd.read_csv('/kaggle/input/rna-binding-and-non-bindinghuman/RNA_binding_human.csv'))
df_nonBinding = irregular_sequence_filter(df_nonBinding)
df_nonBinding['class'] = 'NON-RNA binding'
df_merged = pd.concat([df_binding, df_nonBinding], ignore_index=True, sort=False)
df = df_merged.sample(frac=1).reset_index(drop=True)

def integer_encoding(data):
    """
    - Encodes code sequence to integer values.
    - 20 common amino acids are taken into consideration and rest 4 are categorized as 0.
    """
    encode_list = []
    for row in data['Sequence']:
        row_encode = []
        for code in row:
            row_encode.append(char_dict.get(code, 0))
        encode_list.append(row_encode)
    return encode_list

train_encode = integer_encoding(df)
code
122256158/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

def sequence_length_filter(df):
    """checks every protein sequence in 'Sequence' Column via for loop
    stores length of each sequence in sequence_length object
    if sequence_length is more than 6000 or less than 50
    then drops that row where that particular sequence belongs
    updates the dataframe"""
    df.dropna(inplace=True)
    row_index = 0
    for sequence in df['Sequence']:
        sequence_length = len(str(sequence))
        if sequence_length > 6000 or sequence_length < 50:
            df.drop(df.index[row_index], inplace=True)
        row_index += 1
    return df

def irregular_sequence_filter(df):
    """checks every protein sequence in 'Sequence' Column via for loop
    then enumerate through every character and index of that character in that sequence
    within same for loop uses drop method to drop the particular row from dataframe
    where sequence character 'X' and 'Z' matches
    updates the dataframe"""
    for sequence in df['Sequence']:
        for index, character in enumerate(sequence):
            if character == 'U':
                df.drop(df.loc[df['Sequence'] == sequence].index, inplace=True)
            elif character == 'X':
                df.drop(df.loc[df['Sequence'] == sequence].index, inplace=True)
            elif character == 'Z':
                df.drop(df.loc[df['Sequence'] == sequence].index, inplace=True)
    return df

df_binding = sequence_length_filter(pd.read_csv('/kaggle/input/rna-binding-and-non-bindinghuman/NON_RNA_binding_human.csv'))
df_binding = irregular_sequence_filter(df_binding)
df_binding['class'] = 'RNA binding'
df_nonBinding = sequence_length_filter(pd.read_csv('/kaggle/input/rna-binding-and-non-bindinghuman/RNA_binding_human.csv'))
df_nonBinding = irregular_sequence_filter(df_nonBinding)
df_nonBinding['class'] = 'NON-RNA binding'
df_merged = pd.concat([df_binding, df_nonBinding], ignore_index=True, sort=False)
df = df_merged.sample(frac=1).reset_index(drop=True)

def integer_encoding(data):
    """
    - Encodes code sequence to integer values.
    - 20 common amino acids are taken into consideration and rest 4 are categorized as 0.
    """
    encode_list = []
    for row in data['Sequence']:
        row_encode = []
        for code in row:
            row_encode.append(char_dict.get(code, 0))
        encode_list.append(row_encode)
    return encode_list

train_encode = integer_encoding(df)
for row in df['Sequence']:
    for code in row:
        print(char_dict.get(code, 0))
code
122256158/cell_12
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd

def sequence_length_filter(df):
    """checks every protein sequence in 'Sequence' Column via for loop
    stores length of each sequence in sequence_length object
    if sequence_length is more than 6000 or less than 50
    then drops that row where that particular sequence belongs
    updates the dataframe"""
    df.dropna(inplace=True)
    row_index = 0
    for sequence in df['Sequence']:
        sequence_length = len(str(sequence))
        if sequence_length > 6000 or sequence_length < 50:
            df.drop(df.index[row_index], inplace=True)
        row_index += 1
    return df

def irregular_sequence_filter(df):
    """checks every protein sequence in 'Sequence' Column via for loop
    then enumerate through every character and index of that character in that sequence
    within same for loop uses drop method to drop the particular row from dataframe
    where sequence character 'X' and 'Z' matches
    updates the dataframe"""
    for sequence in df['Sequence']:
        for index, character in enumerate(sequence):
            if character == 'U':
                df.drop(df.loc[df['Sequence'] == sequence].index, inplace=True)
            elif character == 'X':
                df.drop(df.loc[df['Sequence'] == sequence].index, inplace=True)
            elif character == 'Z':
                df.drop(df.loc[df['Sequence'] == sequence].index, inplace=True)
    return df

df_binding = sequence_length_filter(pd.read_csv('/kaggle/input/rna-binding-and-non-bindinghuman/NON_RNA_binding_human.csv'))
df_binding = irregular_sequence_filter(df_binding)
df_binding['class'] = 'RNA binding'
df_nonBinding = sequence_length_filter(pd.read_csv('/kaggle/input/rna-binding-and-non-bindinghuman/RNA_binding_human.csv'))
df_nonBinding = irregular_sequence_filter(df_nonBinding)
df_nonBinding['class'] = 'NON-RNA binding'
df_merged = pd.concat([df_binding, df_nonBinding], ignore_index=True, sort=False)
df = df_merged.sample(frac=1).reset_index(drop=True)

def integer_encoding(data):
    """
    - Encodes code sequence to integer values.
    - 20 common amino acids are taken into consideration and rest 4 are categorized as 0.
    """
    encode_list = []
    for row in data['Sequence']:
        row_encode = []
        for code in row:
            row_encode.append(char_dict.get(code, 0))
        encode_list.append(row_encode)
    return encode_list

train_encode = integer_encoding(df)
codes = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']

def create_dict(codes):
    char_dict = {}
    for index, val in enumerate(codes):
        char_dict[val] = index + 1
    return char_dict

char_dict = create_dict(codes)
print(char_dict)
code
129020042/cell_1
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd  # import needed for pd.read_csv below

data = pd.read_csv('/kaggle/input/sd2gpt2/gpt_generated_prompts.csv')
code
89132100/cell_13
[ "application_vnd.jupyter.stderr_output_1.png" ]
lookupLayersMap = dict()
for column in categorical_features:
    unique_values = list(train[column].unique())
    lookupLayersMap[column] = tf.keras.layers.StringLookup(vocabulary=unique_values)
code
89132100/cell_23
[ "text_plain_output_1.png" ]
from sklearn.model_selection import StratifiedKFold
from tensorflow import keras
import math
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
import tensorflow as tf

def fill_missing(data):
    data['HomePlanet'].fillna('None', inplace=True)
    data['CryoSleep'].fillna(False, inplace=True)
    data['Cabin'].fillna('Unknown/-1/Unknown', inplace=True)
    data['Destination'].fillna('None', inplace=True)
    data['Name'].fillna('Unknown Unknown', inplace=True)
    data['Age'].fillna(int(train['Age'].mode()), inplace=True)
    data['VIP'].fillna(False, inplace=True)
    for key in ['RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']:
        data[key].fillna(data[key].median(), inplace=True)

def feature_engineering(data):
    bool_type = ['VIP', 'CryoSleep']
    data[bool_type] = data[bool_type].astype(int)
    data['Deck'] = data['Cabin'].apply(lambda item: str(item).split('/')[0])
    data['Num'] = data['Cabin'].apply(lambda item: str(item).split('/')[1])
    data['FirstName'] = data['Name'].apply(lambda item: item.split(' ')[0])
    data['LastName'] = data['Name'].apply(lambda item: item.split(' ')[1])
    data['Side'] = data['Cabin'].apply(lambda item: str(item).split('/')[2])
    data['TotalSpend'] = data['RoomService'] + data['FoodCourt'] + data['ShoppingMall'] + data['Spa'] + data['VRDeck'] + 1e-08
    data['PctRoomService'] = data['RoomService'] / data['TotalSpend']
    data['PctFoodCourt'] = data['FoodCourt'] / data['TotalSpend']
    data['PctShoppingMall'] = data['ShoppingMall'] / data['TotalSpend']
    data['PctSpa'] = data['Spa'] / data['TotalSpend']
    data['PctVRDeck'] = data['VRDeck'] / data['TotalSpend']
    data.pop('Cabin')
    data.pop('PassengerId')
    data.pop('Name')

train = pd.read_csv('../input/spaceship-titanic/train.csv')
train_targets = train.pop('Transported').astype(int)
test = pd.read_csv('../input/spaceship-titanic/test.csv')
data = pd.concat([train, test])
fill_missing(data)
feature_engineering(data)
for column in data.columns:
    if 'int' in str(data[column].dtype):
        data[column] = data[column].astype(float)
train = data.iloc[0:len(train)]
test = data.iloc[len(train):]

def preprocess(x, y):
    return (((x[0][0], x[0][1], x[0][2], x[0][3], x[0][4], x[0][5], x[0][6]), x[1]), y)

def make_dataset(category_df, numeric_df, target, batch_size=32, mode='train'):
    dataset = tf.data.Dataset.from_tensor_slices(((category_df, numeric_df), target))
    dataset = dataset.map(preprocess)
    if mode == 'train':
        dataset = dataset.shuffle(buffer_size=batch_size)
    dataset = dataset.batch(batch_size).cache().prefetch(tf.data.AUTOTUNE)
    return dataset

categorical_features = ['HomePlanet', 'Destination', 'Deck', 'Num', 'Side', 'FirstName', 'LastName']
numerical_fetures = ['CryoSleep', 'Age', 'VIP', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck', 'TotalSpend', 'PctRoomService', 'PctFoodCourt', 'PctShoppingMall', 'PctSpa', 'PctVRDeck']
normalization_layer = keras.layers.Normalization()
with tf.device('CPU'):
    normalization_layer.adapt(train[numerical_fetures])

def get_model():
    dense_inputs = []
    dense_vectors = []
    for column in categorical_features:
        dense_input = keras.Input(shape=(1,), name=f'{column}_dense_input', dtype=tf.string)
        lookup = lookupLayersMap[column]
        vocab_size = len(lookup.get_vocabulary())
        embed_dimension = math.ceil(np.sqrt(vocab_size))
        dense_vector = lookup(dense_input)
        dense_vector = keras.layers.Embedding(vocab_size, embed_dimension, input_length=1)(dense_vector)
        dense_vector = keras.layers.Reshape((-1,))(dense_vector)
        dense_vectors.append(dense_vector)
        dense_inputs.append(dense_input)
    categorcal_vector = keras.layers.Concatenate(axis=-1)(dense_vectors)
    categorcal_vector = keras.layers.Dense(128, activation='relu')(categorcal_vector)
    categorcal_vector = keras.layers.Dropout(0.3)(categorcal_vector)
    categorcal_vector = keras.layers.BatchNormalization()(categorcal_vector)
    categorcal_vector = keras.layers.Dense(128, activation='relu')(categorcal_vector)
    categorcal_vector = keras.layers.Dropout(0.3)(categorcal_vector)
    categorcal_vector = keras.layers.BatchNormalization()(categorcal_vector)
    categorcal_vector = keras.layers.Dense(128, activation='relu')(categorcal_vector)
    categorcal_vector = keras.layers.Dropout(0.3)(categorcal_vector)
    categorcal_vector = keras.layers.BatchNormalization()(categorcal_vector)
    numeric_input = keras.Input(shape=(len(numerical_fetures),))
    numeric_vector = normalization_layer(numeric_input)
    numeric_vector = keras.layers.Dense(128, activation='relu')(numeric_vector)
    numeric_vector = keras.layers.Dropout(0.3)(numeric_vector)
    numeric_vector = keras.layers.Dense(128, activation='relu')(numeric_vector)
    numeric_vector = keras.layers.Dropout(0.3)(numeric_vector)
    numeric_vector = keras.layers.Dense(128, activation='relu')(numeric_vector)
    numeric_vector = keras.layers.Dropout(0.3)(numeric_vector)
    vector = keras.layers.Concatenate(axis=-1)([categorcal_vector, numeric_vector])
    vector = keras.layers.Dense(32, activation='relu')(vector)
    output = keras.layers.Dense(1, activation='sigmoid')(vector)
    model = keras.Model(inputs=dense_inputs + [numeric_input], outputs=output)
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

model = get_model()
model.summary()
models = []
kfold = StratifiedKFold(7, shuffle=True, random_state=2022)
for fold, (train_indices, valid_indices) in enumerate(kfold.split(train, train_targets)):
    x_train = train.iloc[train_indices]
    x_val = train.iloc[valid_indices]
    y_train = train_targets.iloc[train_indices]
    y_val = train_targets.iloc[valid_indices]
    train_ds = make_dataset(x_train[categorical_features], x_train[numerical_fetures], y_train, mode='train')
    valid_ds = make_dataset(x_val[categorical_features], x_val[numerical_fetures], y_val)
    cp = keras.callbacks.ModelCheckpoint(f'model_{fold}.tf', monitor='val_accuracy', save_best_only=True, save_weights_only=True)
    es = keras.callbacks.EarlyStopping(patience=10)
    model = get_model()
    model.fit(train_ds, epochs=30, validation_data=valid_ds, callbacks=[cp, es])
    model.load_weights(f'model_{fold}.tf')
    models.append(model)

def preprocess_test(category, numeric):
    return (((category[0], category[1], category[2], category[3], category[4], category[5], category[6]), numeric), 0)

def make_test_dataset(category_df, numeric_df, batch_size=32):
    dataset = tf.data.Dataset.from_tensor_slices((category_df, numeric_df))
    dataset = dataset.map(preprocess_test)
    dataset = dataset.batch(batch_size)
    return dataset

def inference(ds, models):
    y_pred = np.mean([model.predict(ds) for model in models], axis=0)
    y_pred = np.array(y_pred > 0.5, dtype=np.bool_)
    return y_pred

test_ds = make_test_dataset(test[categorical_features], test[numerical_fetures])
test_ds
submission = pd.read_csv('../input/spaceship-titanic/sample_submission.csv')
submission['Transported'] = inference(test_ds, models)
submission.to_csv('submission.csv', index=False)
submission.head()
code
89132100/cell_11
[ "text_html_output_1.png" ]
from tensorflow import keras
import pandas as pd
import pandas as pd
import tensorflow as tf

def fill_missing(data):
    data['HomePlanet'].fillna('None', inplace=True)
    data['CryoSleep'].fillna(False, inplace=True)
    data['Cabin'].fillna('Unknown/-1/Unknown', inplace=True)
    data['Destination'].fillna('None', inplace=True)
    data['Name'].fillna('Unknown Unknown', inplace=True)
    data['Age'].fillna(int(train['Age'].mode()), inplace=True)
    data['VIP'].fillna(False, inplace=True)
    for key in ['RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']:
        data[key].fillna(data[key].median(), inplace=True)

def feature_engineering(data):
    bool_type = ['VIP', 'CryoSleep']
    data[bool_type] = data[bool_type].astype(int)
    data['Deck'] = data['Cabin'].apply(lambda item: str(item).split('/')[0])
    data['Num'] = data['Cabin'].apply(lambda item: str(item).split('/')[1])
    data['FirstName'] = data['Name'].apply(lambda item: item.split(' ')[0])
    data['LastName'] = data['Name'].apply(lambda item: item.split(' ')[1])
    data['Side'] = data['Cabin'].apply(lambda item: str(item).split('/')[2])
    data['TotalSpend'] = data['RoomService'] + data['FoodCourt'] + data['ShoppingMall'] + data['Spa'] + data['VRDeck'] + 1e-08
    data['PctRoomService'] = data['RoomService'] / data['TotalSpend']
    data['PctFoodCourt'] = data['FoodCourt'] / data['TotalSpend']
    data['PctShoppingMall'] = data['ShoppingMall'] / data['TotalSpend']
    data['PctSpa'] = data['Spa'] / data['TotalSpend']
    data['PctVRDeck'] = data['VRDeck'] / data['TotalSpend']
    data.pop('Cabin')
    data.pop('PassengerId')
    data.pop('Name')

train = pd.read_csv('../input/spaceship-titanic/train.csv')
train_targets = train.pop('Transported').astype(int)
test = pd.read_csv('../input/spaceship-titanic/test.csv')
data = pd.concat([train, test])
fill_missing(data)
feature_engineering(data)
for column in data.columns:
    if 'int' in str(data[column].dtype):
        data[column] = data[column].astype(float)
train = data.iloc[0:len(train)]
test = data.iloc[len(train):]

def preprocess(x, y):
    return (((x[0][0], x[0][1], x[0][2], x[0][3], x[0][4], x[0][5], x[0][6]), x[1]), y)

def make_dataset(category_df, numeric_df, target, batch_size=32, mode='train'):
    dataset = tf.data.Dataset.from_tensor_slices(((category_df, numeric_df), target))
    dataset = dataset.map(preprocess)
    if mode == 'train':
        dataset = dataset.shuffle(buffer_size=batch_size)
    dataset = dataset.batch(batch_size).cache().prefetch(tf.data.AUTOTUNE)
    return dataset

categorical_features = ['HomePlanet', 'Destination', 'Deck', 'Num', 'Side', 'FirstName', 'LastName']
numerical_fetures = ['CryoSleep', 'Age', 'VIP', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck', 'TotalSpend', 'PctRoomService', 'PctFoodCourt', 'PctShoppingMall', 'PctSpa', 'PctVRDeck']
normalization_layer = keras.layers.Normalization()
with tf.device('CPU'):
    normalization_layer.adapt(train[numerical_fetures])
code
89132100/cell_19
[ "image_output_1.png" ]
from sklearn.model_selection import StratifiedKFold
from tensorflow import keras
import math
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
import tensorflow as tf

def fill_missing(data):
    data['HomePlanet'].fillna('None', inplace=True)
    data['CryoSleep'].fillna(False, inplace=True)
    data['Cabin'].fillna('Unknown/-1/Unknown', inplace=True)
    data['Destination'].fillna('None', inplace=True)
    data['Name'].fillna('Unknown Unknown', inplace=True)
    data['Age'].fillna(int(train['Age'].mode()), inplace=True)
    data['VIP'].fillna(False, inplace=True)
    for key in ['RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']:
        data[key].fillna(data[key].median(), inplace=True)

def feature_engineering(data):
    bool_type = ['VIP', 'CryoSleep']
    data[bool_type] = data[bool_type].astype(int)
    data['Deck'] = data['Cabin'].apply(lambda item: str(item).split('/')[0])
    data['Num'] = data['Cabin'].apply(lambda item: str(item).split('/')[1])
    data['FirstName'] = data['Name'].apply(lambda item: item.split(' ')[0])
    data['LastName'] = data['Name'].apply(lambda item: item.split(' ')[1])
    data['Side'] = data['Cabin'].apply(lambda item: str(item).split('/')[2])
    data['TotalSpend'] = data['RoomService'] + data['FoodCourt'] + data['ShoppingMall'] + data['Spa'] + data['VRDeck'] + 1e-08
    data['PctRoomService'] = data['RoomService'] / data['TotalSpend']
    data['PctFoodCourt'] = data['FoodCourt'] / data['TotalSpend']
    data['PctShoppingMall'] = data['ShoppingMall'] / data['TotalSpend']
    data['PctSpa'] = data['Spa'] / data['TotalSpend']
    data['PctVRDeck'] = data['VRDeck'] / data['TotalSpend']
    data.pop('Cabin')
    data.pop('PassengerId')
    data.pop('Name')

train = pd.read_csv('../input/spaceship-titanic/train.csv')
train_targets = train.pop('Transported').astype(int)
test = pd.read_csv('../input/spaceship-titanic/test.csv')
data = pd.concat([train, test])
fill_missing(data)
feature_engineering(data)
for column in data.columns:
    if 'int' in str(data[column].dtype):
        data[column] = data[column].astype(float)
train = data.iloc[0:len(train)]
test = data.iloc[len(train):]

def preprocess(x, y):
    return (((x[0][0], x[0][1], x[0][2], x[0][3], x[0][4], x[0][5], x[0][6]), x[1]), y)

def make_dataset(category_df, numeric_df, target, batch_size=32, mode='train'):
    dataset = tf.data.Dataset.from_tensor_slices(((category_df, numeric_df), target))
    dataset = dataset.map(preprocess)
    if mode == 'train':
        dataset = dataset.shuffle(buffer_size=batch_size)
    dataset = dataset.batch(batch_size).cache().prefetch(tf.data.AUTOTUNE)
    return dataset

categorical_features = ['HomePlanet', 'Destination', 'Deck', 'Num', 'Side', 'FirstName', 'LastName']
numerical_fetures = ['CryoSleep', 'Age', 'VIP', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck', 'TotalSpend', 'PctRoomService', 'PctFoodCourt', 'PctShoppingMall', 'PctSpa', 'PctVRDeck']
normalization_layer = keras.layers.Normalization()
with tf.device('CPU'):
    normalization_layer.adapt(train[numerical_fetures])

def get_model():
    dense_inputs = []
    dense_vectors = []
    for column in categorical_features:
        dense_input = keras.Input(shape=(1,), name=f'{column}_dense_input', dtype=tf.string)
        lookup = lookupLayersMap[column]
        vocab_size = len(lookup.get_vocabulary())
        embed_dimension = math.ceil(np.sqrt(vocab_size))
        dense_vector = lookup(dense_input)
        dense_vector = keras.layers.Embedding(vocab_size, embed_dimension, input_length=1)(dense_vector)
        dense_vector = keras.layers.Reshape((-1,))(dense_vector)
        dense_vectors.append(dense_vector)
        dense_inputs.append(dense_input)
    categorcal_vector = keras.layers.Concatenate(axis=-1)(dense_vectors)
    categorcal_vector = keras.layers.Dense(128, activation='relu')(categorcal_vector)
    categorcal_vector = keras.layers.Dropout(0.3)(categorcal_vector)
    categorcal_vector = keras.layers.BatchNormalization()(categorcal_vector)
    categorcal_vector = keras.layers.Dense(128, activation='relu')(categorcal_vector)
    categorcal_vector = keras.layers.Dropout(0.3)(categorcal_vector)
    categorcal_vector = keras.layers.BatchNormalization()(categorcal_vector)
    categorcal_vector = keras.layers.Dense(128, activation='relu')(categorcal_vector)
    categorcal_vector = keras.layers.Dropout(0.3)(categorcal_vector)
    categorcal_vector = keras.layers.BatchNormalization()(categorcal_vector)
    numeric_input = keras.Input(shape=(len(numerical_fetures),))
    numeric_vector = normalization_layer(numeric_input)
    numeric_vector = keras.layers.Dense(128, activation='relu')(numeric_vector)
    numeric_vector = keras.layers.Dropout(0.3)(numeric_vector)
    numeric_vector = keras.layers.Dense(128, activation='relu')(numeric_vector)
    numeric_vector = keras.layers.Dropout(0.3)(numeric_vector)
    numeric_vector = keras.layers.Dense(128, activation='relu')(numeric_vector)
    numeric_vector = keras.layers.Dropout(0.3)(numeric_vector)
    vector = keras.layers.Concatenate(axis=-1)([categorcal_vector, numeric_vector])
    vector = keras.layers.Dense(32, activation='relu')(vector)
    output = keras.layers.Dense(1, activation='sigmoid')(vector)
    model = keras.Model(inputs=dense_inputs + [numeric_input], outputs=output)
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

model = get_model()
model.summary()
models = []
kfold = StratifiedKFold(7, shuffle=True, random_state=2022)
for fold, (train_indices, valid_indices) in enumerate(kfold.split(train, train_targets)):
    x_train = train.iloc[train_indices]
    x_val = train.iloc[valid_indices]
    y_train = train_targets.iloc[train_indices]
    y_val = train_targets.iloc[valid_indices]
    train_ds = make_dataset(x_train[categorical_features], x_train[numerical_fetures], y_train, mode='train')
    valid_ds = make_dataset(x_val[categorical_features], x_val[numerical_fetures], y_val)
    cp = keras.callbacks.ModelCheckpoint(f'model_{fold}.tf', monitor='val_accuracy', save_best_only=True, save_weights_only=True)
    es = keras.callbacks.EarlyStopping(patience=10)
    model = get_model()
    model.fit(train_ds, epochs=30, validation_data=valid_ds, callbacks=[cp, es])
    model.load_weights(f'model_{fold}.tf')
    models.append(model)
code
89132100/cell_16
[ "text_plain_output_1.png" ]
from tensorflow import keras
import math
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
import tensorflow as tf

def fill_missing(data):
    data['HomePlanet'].fillna('None', inplace=True)
    data['CryoSleep'].fillna(False, inplace=True)
    data['Cabin'].fillna('Unknown/-1/Unknown', inplace=True)
    data['Destination'].fillna('None', inplace=True)
    data['Name'].fillna('Unknown Unknown', inplace=True)
    data['Age'].fillna(int(train['Age'].mode()), inplace=True)
    data['VIP'].fillna(False, inplace=True)
    for key in ['RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']:
        data[key].fillna(data[key].median(), inplace=True)

def feature_engineering(data):
    bool_type = ['VIP', 'CryoSleep']
    data[bool_type] = data[bool_type].astype(int)
    data['Deck'] = data['Cabin'].apply(lambda item: str(item).split('/')[0])
    data['Num'] = data['Cabin'].apply(lambda item: str(item).split('/')[1])
    data['FirstName'] = data['Name'].apply(lambda item: item.split(' ')[0])
    data['LastName'] = data['Name'].apply(lambda item: item.split(' ')[1])
    data['Side'] = data['Cabin'].apply(lambda item: str(item).split('/')[2])
    data['TotalSpend'] = data['RoomService'] + data['FoodCourt'] + data['ShoppingMall'] + data['Spa'] + data['VRDeck'] + 1e-08
    data['PctRoomService'] = data['RoomService'] / data['TotalSpend']
    data['PctFoodCourt'] = data['FoodCourt'] / data['TotalSpend']
    data['PctShoppingMall'] = data['ShoppingMall'] / data['TotalSpend']
    data['PctSpa'] = data['Spa'] / data['TotalSpend']
    data['PctVRDeck'] = data['VRDeck'] / data['TotalSpend']
    data.pop('Cabin')
    data.pop('PassengerId')
    data.pop('Name')

train = pd.read_csv('../input/spaceship-titanic/train.csv')
train_targets = train.pop('Transported').astype(int)
test = pd.read_csv('../input/spaceship-titanic/test.csv')
data = pd.concat([train, test])
fill_missing(data)
feature_engineering(data)
for column in data.columns:
    if 'int' in str(data[column].dtype):
        data[column] = data[column].astype(float)
train = data.iloc[0:len(train)]
test = data.iloc[len(train):]

def preprocess(x, y):
    return (((x[0][0], x[0][1], x[0][2], x[0][3], x[0][4], x[0][5], x[0][6]), x[1]), y)

def make_dataset(category_df, numeric_df, target, batch_size=32, mode='train'):
    dataset = tf.data.Dataset.from_tensor_slices(((category_df, numeric_df), target))
    dataset = dataset.map(preprocess)
    if mode == 'train':
        dataset = dataset.shuffle(buffer_size=batch_size)
    dataset = dataset.batch(batch_size).cache().prefetch(tf.data.AUTOTUNE)
    return dataset

categorical_features = ['HomePlanet', 'Destination', 'Deck', 'Num', 'Side', 'FirstName', 'LastName']
numerical_fetures = ['CryoSleep', 'Age', 'VIP', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck', 'TotalSpend', 'PctRoomService', 'PctFoodCourt', 'PctShoppingMall', 'PctSpa', 'PctVRDeck']
normalization_layer = keras.layers.Normalization()
with tf.device('CPU'):
    normalization_layer.adapt(train[numerical_fetures])

def get_model():
    dense_inputs = []
    dense_vectors = []
    for column in categorical_features:
        dense_input = keras.Input(shape=(1,), name=f'{column}_dense_input', dtype=tf.string)
        lookup = lookupLayersMap[column]
        vocab_size = len(lookup.get_vocabulary())
        embed_dimension = math.ceil(np.sqrt(vocab_size))
        dense_vector = lookup(dense_input)
        dense_vector = keras.layers.Embedding(vocab_size, embed_dimension, input_length=1)(dense_vector)
        dense_vector = keras.layers.Reshape((-1,))(dense_vector)
        dense_vectors.append(dense_vector)
        dense_inputs.append(dense_input)
    categorcal_vector = keras.layers.Concatenate(axis=-1)(dense_vectors)
    categorcal_vector = keras.layers.Dense(128, activation='relu')(categorcal_vector)
    categorcal_vector = keras.layers.Dropout(0.3)(categorcal_vector)
    categorcal_vector = keras.layers.BatchNormalization()(categorcal_vector)
    categorcal_vector = keras.layers.Dense(128, activation='relu')(categorcal_vector)
    categorcal_vector = keras.layers.Dropout(0.3)(categorcal_vector)
    categorcal_vector = keras.layers.BatchNormalization()(categorcal_vector)
    categorcal_vector = keras.layers.Dense(128, activation='relu')(categorcal_vector)
    categorcal_vector = keras.layers.Dropout(0.3)(categorcal_vector)
    categorcal_vector = keras.layers.BatchNormalization()(categorcal_vector)
    numeric_input = keras.Input(shape=(len(numerical_fetures),))
    numeric_vector = normalization_layer(numeric_input)
    numeric_vector = keras.layers.Dense(128, activation='relu')(numeric_vector)
    numeric_vector = keras.layers.Dropout(0.3)(numeric_vector)
    numeric_vector = keras.layers.Dense(128, activation='relu')(numeric_vector)
    numeric_vector = keras.layers.Dropout(0.3)(numeric_vector)
    numeric_vector = keras.layers.Dense(128, activation='relu')(numeric_vector)
    numeric_vector = keras.layers.Dropout(0.3)(numeric_vector)
    vector = keras.layers.Concatenate(axis=-1)([categorcal_vector, numeric_vector])
    vector = keras.layers.Dense(32, activation='relu')(vector)
    output = keras.layers.Dense(1, activation='sigmoid')(vector)
    model = keras.Model(inputs=dense_inputs + [numeric_input], outputs=output)
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

model = get_model()
model.summary()
code
89132100/cell_17
[ "text_plain_output_1.png" ]
from tensorflow import keras
import math
import numpy as np
import numpy as np
import pandas as pd
import pandas as pd
import tensorflow as tf

def fill_missing(data):
    data['HomePlanet'].fillna('None', inplace=True)
    data['CryoSleep'].fillna(False, inplace=True)
    data['Cabin'].fillna('Unknown/-1/Unknown', inplace=True)
    data['Destination'].fillna('None', inplace=True)
    data['Name'].fillna('Unknown Unknown', inplace=True)
    data['Age'].fillna(int(train['Age'].mode()), inplace=True)
    data['VIP'].fillna(False, inplace=True)
    for key in ['RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']:
        data[key].fillna(data[key].median(), inplace=True)

def feature_engineering(data):
    bool_type = ['VIP', 'CryoSleep']
    data[bool_type] = data[bool_type].astype(int)
    data['Deck'] = data['Cabin'].apply(lambda item: str(item).split('/')[0])
    data['Num'] = data['Cabin'].apply(lambda item: str(item).split('/')[1])
    data['FirstName'] = data['Name'].apply(lambda item: item.split(' ')[0])
    data['LastName'] = data['Name'].apply(lambda item: item.split(' ')[1])
    data['Side'] = data['Cabin'].apply(lambda item: str(item).split('/')[2])
    data['TotalSpend'] = data['RoomService'] + data['FoodCourt'] + data['ShoppingMall'] + data['Spa'] + data['VRDeck'] + 1e-08
    data['PctRoomService'] = data['RoomService'] / data['TotalSpend']
    data['PctFoodCourt'] = data['FoodCourt'] / data['TotalSpend']
    data['PctShoppingMall'] = data['ShoppingMall'] / data['TotalSpend']
    data['PctSpa'] = data['Spa'] / data['TotalSpend']
    data['PctVRDeck'] = data['VRDeck'] / data['TotalSpend']
    data.pop('Cabin')
    data.pop('PassengerId')
    data.pop('Name')

train = pd.read_csv('../input/spaceship-titanic/train.csv')
train_targets = train.pop('Transported').astype(int)
test = pd.read_csv('../input/spaceship-titanic/test.csv')
data = pd.concat([train, test])
fill_missing(data)
feature_engineering(data)
for column in data.columns:
    if 'int' in str(data[column].dtype):
        data[column] = data[column].astype(float)
train = data.iloc[0:len(train)]
test = data.iloc[len(train):]

def preprocess(x, y):
    return (((x[0][0], x[0][1], x[0][2], x[0][3], x[0][4], x[0][5], x[0][6]), x[1]), y)

def make_dataset(category_df, numeric_df, target, batch_size=32, mode='train'):
    dataset = tf.data.Dataset.from_tensor_slices(((category_df, numeric_df), target))
    dataset = dataset.map(preprocess)
    if mode == 'train':
        dataset = dataset.shuffle(buffer_size=batch_size)
    dataset = dataset.batch(batch_size).cache().prefetch(tf.data.AUTOTUNE)
    return dataset

categorical_features = ['HomePlanet', 'Destination', 'Deck', 'Num', 'Side', 'FirstName', 'LastName']
numerical_fetures = ['CryoSleep', 'Age', 'VIP', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck', 'TotalSpend', 'PctRoomService', 'PctFoodCourt', 'PctShoppingMall', 'PctSpa', 'PctVRDeck']
normalization_layer = keras.layers.Normalization()
with tf.device('CPU'):
    normalization_layer.adapt(train[numerical_fetures])

def get_model():
    dense_inputs = []
    dense_vectors = []
    for column in categorical_features:
        dense_input = keras.Input(shape=(1,), name=f'{column}_dense_input', dtype=tf.string)
        lookup = lookupLayersMap[column]
        vocab_size = len(lookup.get_vocabulary())
        embed_dimension = math.ceil(np.sqrt(vocab_size))
        dense_vector = lookup(dense_input)
        dense_vector = keras.layers.Embedding(vocab_size, embed_dimension, input_length=1)(dense_vector)
        dense_vector = keras.layers.Reshape((-1,))(dense_vector)
        dense_vectors.append(dense_vector)
        dense_inputs.append(dense_input)
    categorcal_vector = keras.layers.Concatenate(axis=-1)(dense_vectors)
    categorcal_vector = keras.layers.Dense(128, activation='relu')(categorcal_vector)
    categorcal_vector = keras.layers.Dropout(0.3)(categorcal_vector)
    categorcal_vector = keras.layers.BatchNormalization()(categorcal_vector)
    categorcal_vector = keras.layers.Dense(128, activation='relu')(categorcal_vector)
    categorcal_vector = keras.layers.Dropout(0.3)(categorcal_vector)
    categorcal_vector = keras.layers.BatchNormalization()(categorcal_vector)
    categorcal_vector = keras.layers.Dense(128, activation='relu')(categorcal_vector)
    categorcal_vector = keras.layers.Dropout(0.3)(categorcal_vector)
    categorcal_vector = keras.layers.BatchNormalization()(categorcal_vector)
    numeric_input = keras.Input(shape=(len(numerical_fetures),))
    numeric_vector = normalization_layer(numeric_input)
    numeric_vector = keras.layers.Dense(128, activation='relu')(numeric_vector)
    numeric_vector = keras.layers.Dropout(0.3)(numeric_vector)
    numeric_vector = keras.layers.Dense(128, activation='relu')(numeric_vector)
    numeric_vector = keras.layers.Dropout(0.3)(numeric_vector)
    numeric_vector = keras.layers.Dense(128, activation='relu')(numeric_vector)
    numeric_vector = keras.layers.Dropout(0.3)(numeric_vector)
    vector = keras.layers.Concatenate(axis=-1)([categorcal_vector, numeric_vector])
    vector = keras.layers.Dense(32, activation='relu')(vector)
    output = keras.layers.Dense(1, activation='sigmoid')(vector)
    model = keras.Model(inputs=dense_inputs + [numeric_input], outputs=output)
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

model = get_model()
model.summary()
keras.utils.plot_model(model, show_shapes=True)
code
89132100/cell_22
[ "text_plain_output_1.png" ]
from sklearn.model_selection import StratifiedKFold
from tensorflow import keras
import math
import numpy as np
import pandas as pd
import tensorflow as tf

def fill_missing(data):
    data['HomePlanet'].fillna('None', inplace=True)
    data['CryoSleep'].fillna(False, inplace=True)
    data['Cabin'].fillna('Unknown/-1/Unknown', inplace=True)
    data['Destination'].fillna('None', inplace=True)
    data['Name'].fillna('Unknown Unknown', inplace=True)
    data['Age'].fillna(int(train['Age'].mode()), inplace=True)
    data['VIP'].fillna(False, inplace=True)
    for key in ['RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']:
        data[key].fillna(data[key].median(), inplace=True)

def feature_engineering(data):
    bool_type = ['VIP', 'CryoSleep']
    data[bool_type] = data[bool_type].astype(int)
    data['Deck'] = data['Cabin'].apply(lambda item: str(item).split('/')[0])
    data['Num'] = data['Cabin'].apply(lambda item: str(item).split('/')[1])
    data['FirstName'] = data['Name'].apply(lambda item: item.split(' ')[0])
    data['LastName'] = data['Name'].apply(lambda item: item.split(' ')[1])
    data['Side'] = data['Cabin'].apply(lambda item: str(item).split('/')[2])
    data['TotalSpend'] = data['RoomService'] + data['FoodCourt'] + data['ShoppingMall'] + data['Spa'] + data['VRDeck'] + 1e-08
    data['PctRoomService'] = data['RoomService'] / data['TotalSpend']
    data['PctFoodCourt'] = data['FoodCourt'] / data['TotalSpend']
    data['PctShoppingMall'] = data['ShoppingMall'] / data['TotalSpend']
    data['PctSpa'] = data['Spa'] / data['TotalSpend']
    data['PctVRDeck'] = data['VRDeck'] / data['TotalSpend']
    data.pop('Cabin')
    data.pop('PassengerId')
    data.pop('Name')

train = pd.read_csv('../input/spaceship-titanic/train.csv')
train_targets = train.pop('Transported').astype(int)
test = pd.read_csv('../input/spaceship-titanic/test.csv')
data = pd.concat([train, test])
fill_missing(data)
feature_engineering(data)
for column in data.columns:
    if 'int' in str(data[column].dtype):
        data[column] = data[column].astype(float)
train = data.iloc[0:len(train)]
test = data.iloc[len(train):]

def preprocess(x, y):
    return (((x[0][0], x[0][1], x[0][2], x[0][3], x[0][4], x[0][5], x[0][6]), x[1]), y)

def make_dataset(category_df, numeric_df, target, batch_size=32, mode='train'):
    dataset = tf.data.Dataset.from_tensor_slices(((category_df, numeric_df), target))
    dataset = dataset.map(preprocess)
    if mode == 'train':
        dataset = dataset.shuffle(buffer_size=batch_size)
    dataset = dataset.batch(batch_size).cache().prefetch(tf.data.AUTOTUNE)
    return dataset

categorical_features = ['HomePlanet', 'Destination', 'Deck', 'Num', 'Side', 'FirstName', 'LastName']
numerical_fetures = ['CryoSleep', 'Age', 'VIP', 'RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck', 'TotalSpend', 'PctRoomService', 'PctFoodCourt', 'PctShoppingMall', 'PctSpa', 'PctVRDeck']
normalization_layer = keras.layers.Normalization()
with tf.device('CPU'):
    normalization_layer.adapt(train[numerical_fetures])

def get_model():
    dense_inputs = []
    dense_vectors = []
    for column in categorical_features:
        dense_input = keras.Input(shape=(1,), name=f'{column}_dense_input', dtype=tf.string)
        # NOTE: lookupLayersMap (per-column StringLookup layers) is assumed to be
        # built in an earlier notebook cell that this extract does not include.
        lookup = lookupLayersMap[column]
        vocab_size = len(lookup.get_vocabulary())
        embed_dimension = math.ceil(np.sqrt(vocab_size))
        dense_vector = lookup(dense_input)
        dense_vector = keras.layers.Embedding(vocab_size, embed_dimension, input_length=1)(dense_vector)
        dense_vector = keras.layers.Reshape((-1,))(dense_vector)
        dense_vectors.append(dense_vector)
        dense_inputs.append(dense_input)
    categorcal_vector = keras.layers.Concatenate(axis=-1)(dense_vectors)
    categorcal_vector = keras.layers.Dense(128, activation='relu')(categorcal_vector)
    categorcal_vector = keras.layers.Dropout(0.3)(categorcal_vector)
    categorcal_vector = keras.layers.BatchNormalization()(categorcal_vector)
    categorcal_vector = keras.layers.Dense(128, activation='relu')(categorcal_vector)
    categorcal_vector = keras.layers.Dropout(0.3)(categorcal_vector)
    categorcal_vector = keras.layers.BatchNormalization()(categorcal_vector)
    categorcal_vector = keras.layers.Dense(128, activation='relu')(categorcal_vector)
    categorcal_vector = keras.layers.Dropout(0.3)(categorcal_vector)
    categorcal_vector = keras.layers.BatchNormalization()(categorcal_vector)
    numeric_input = keras.Input(shape=(len(numerical_fetures),))
    numeric_vector = normalization_layer(numeric_input)
    numeric_vector = keras.layers.Dense(128, activation='relu')(numeric_vector)
    numeric_vector = keras.layers.Dropout(0.3)(numeric_vector)
    numeric_vector = keras.layers.Dense(128, activation='relu')(numeric_vector)
    numeric_vector = keras.layers.Dropout(0.3)(numeric_vector)
    numeric_vector = keras.layers.Dense(128, activation='relu')(numeric_vector)
    numeric_vector = keras.layers.Dropout(0.3)(numeric_vector)
    vector = keras.layers.Concatenate(axis=-1)([categorcal_vector, numeric_vector])
    vector = keras.layers.Dense(32, activation='relu')(vector)
    output = keras.layers.Dense(1, activation='sigmoid')(vector)
    model = keras.Model(inputs=dense_inputs + [numeric_input], outputs=output)
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

model = get_model()
model.summary()
models = []
kfold = StratifiedKFold(7, shuffle=True, random_state=2022)
for fold, (train_indices, valid_indices) in enumerate(kfold.split(train, train_targets)):
    x_train = train.iloc[train_indices]
    x_val = train.iloc[valid_indices]
    y_train = train_targets.iloc[train_indices]
    y_val = train_targets.iloc[valid_indices]
    train_ds = make_dataset(x_train[categorical_features], x_train[numerical_fetures], y_train, mode='train')
    valid_ds = make_dataset(x_val[categorical_features], x_val[numerical_fetures], y_val)
    cp = keras.callbacks.ModelCheckpoint(f'model_{fold}.tf', monitor='val_accuracy', save_best_only=True, save_weights_only=True)
    es = keras.callbacks.EarlyStopping(patience=10)
    model = get_model()
    model.fit(train_ds, epochs=30, validation_data=valid_ds, callbacks=[cp, es])
    model.load_weights(f'model_{fold}.tf')
    models.append(model)

def preprocess_test(category, numeric):
    return (((category[0], category[1], category[2], category[3], category[4], category[5], category[6]), numeric), 0)

def make_test_dataset(category_df, numeric_df, batch_size=32):
    dataset = tf.data.Dataset.from_tensor_slices((category_df, numeric_df))
    dataset = dataset.map(preprocess_test)
    dataset = dataset.batch(batch_size)
    return dataset

def inference(ds, models):
    y_pred = np.mean([model.predict(ds) for model in models], axis=0)
    y_pred = np.array(y_pred > 0.5, dtype=np.bool_)
    return y_pred

test_ds = make_test_dataset(test[categorical_features], test[numerical_fetures])
test_ds
code
89132100/cell_5
[ "text_html_output_1.png" ]
import pandas as pd

def fill_missing(data):
    data['HomePlanet'].fillna('None', inplace=True)
    data['CryoSleep'].fillna(False, inplace=True)
    data['Cabin'].fillna('Unknown/-1/Unknown', inplace=True)
    data['Destination'].fillna('None', inplace=True)
    data['Name'].fillna('Unknown Unknown', inplace=True)
    data['Age'].fillna(int(train['Age'].mode()), inplace=True)
    data['VIP'].fillna(False, inplace=True)
    for key in ['RoomService', 'FoodCourt', 'ShoppingMall', 'Spa', 'VRDeck']:
        data[key].fillna(data[key].median(), inplace=True)

def feature_engineering(data):
    bool_type = ['VIP', 'CryoSleep']
    data[bool_type] = data[bool_type].astype(int)
    data['Deck'] = data['Cabin'].apply(lambda item: str(item).split('/')[0])
    data['Num'] = data['Cabin'].apply(lambda item: str(item).split('/')[1])
    data['FirstName'] = data['Name'].apply(lambda item: item.split(' ')[0])
    data['LastName'] = data['Name'].apply(lambda item: item.split(' ')[1])
    data['Side'] = data['Cabin'].apply(lambda item: str(item).split('/')[2])
    data['TotalSpend'] = data['RoomService'] + data['FoodCourt'] + data['ShoppingMall'] + data['Spa'] + data['VRDeck'] + 1e-08
    data['PctRoomService'] = data['RoomService'] / data['TotalSpend']
    data['PctFoodCourt'] = data['FoodCourt'] / data['TotalSpend']
    data['PctShoppingMall'] = data['ShoppingMall'] / data['TotalSpend']
    data['PctSpa'] = data['Spa'] / data['TotalSpend']
    data['PctVRDeck'] = data['VRDeck'] / data['TotalSpend']
    data.pop('Cabin')
    data.pop('PassengerId')
    data.pop('Name')

train = pd.read_csv('../input/spaceship-titanic/train.csv')
train_targets = train.pop('Transported').astype(int)
test = pd.read_csv('../input/spaceship-titanic/test.csv')
data = pd.concat([train, test])
fill_missing(data)
feature_engineering(data)
for column in data.columns:
    if 'int' in str(data[column].dtype):
        data[column] = data[column].astype(float)
train = data.iloc[0:len(train)]
test = data.iloc[len(train):]
train.head()
code
50234066/cell_21
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import reverse_geocoder as rg

train_df = pd.read_csv('/kaggle/input/applai-workshop-a1/Training Data - Training Data.csv')
train_df
train_df.isna().sum()
train_df.isna().sum()
train_df['coordinates'] = list(zip(train_df.longitude, train_df.latitude))
train_df
address_dict = rg.search(train_df['coordinates'][2])
address_dict
address_list = []
address_dict = rg.search(list(train_df['coordinates']))
for key in address_dict:
    address_list.append(list(key.values())[2])
list(set(address_list))
code
50234066/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/applai-workshop-a1/Training Data - Training Data.csv')
train_df
train_df.isna().sum()
train_df.isna().sum()
print(train_df.total_rooms.unique(), len(train_df.total_rooms.unique()))
train_df.total_rooms.plot()
code
50234066/cell_23
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns

train_df = pd.read_csv('/kaggle/input/applai-workshop-a1/Training Data - Training Data.csv')
train_df
train_df.isna().sum()
train_df.isna().sum()
train_df['coordinates'] = list(zip(train_df.longitude, train_df.latitude))
train_df
house_train_ids = train_df['HouseID']
train_df = train_df.drop(['longitude', 'latitude', 'coordinates', 'HouseID'], axis=1)
corr_matrix = train_df.corr(method='pearson')
sns.heatmap(corr_matrix, vmin=-1.0, vmax=1.0, annot=True, fmt='.2f', cmap='YlGnBu', cbar=True, linewidths=0.5)
plt.title('pearson correlation')
code
50234066/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/applai-workshop-a1/Training Data - Training Data.csv')
train_df
train_df.describe()
code
50234066/cell_11
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/applai-workshop-a1/Training Data - Training Data.csv')
train_df
train_df.isna().sum()
train_df.isna().sum()
print(train_df.house_value.unique(), len(train_df.house_value.unique()))
train_df.house_value.plot()
code
50234066/cell_19
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import reverse_geocoder as rg

train_df = pd.read_csv('/kaggle/input/applai-workshop-a1/Training Data - Training Data.csv')
train_df
train_df.isna().sum()
train_df.isna().sum()
train_df['coordinates'] = list(zip(train_df.longitude, train_df.latitude))
train_df
address_dict = rg.search(train_df['coordinates'][2])
address_dict
code
50234066/cell_1
[ "text_plain_output_1.png" ]
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
!pip install reverse_geocoder
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
code
50234066/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/applai-workshop-a1/Training Data - Training Data.csv')
train_df
train_df.isna().sum()
code
50234066/cell_18
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/applai-workshop-a1/Training Data - Training Data.csv')
train_df
train_df.isna().sum()
train_df.isna().sum()
train_df['coordinates'] = list(zip(train_df.longitude, train_df.latitude))
train_df
code
50234066/cell_8
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/applai-workshop-a1/Training Data - Training Data.csv')
train_df
train_df.isna().sum()
train_df['total_bedrooms'].mean()
code
50234066/cell_15
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/applai-workshop-a1/Training Data - Training Data.csv')
train_df
train_df.isna().sum()
train_df.isna().sum()
print(train_df.population.unique(), len(train_df.population.unique()))
train_df.population.plot()
code
50234066/cell_16
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/applai-workshop-a1/Training Data - Training Data.csv')
train_df
train_df.isna().sum()
train_df.isna().sum()
train_df.households.plot()
code
50234066/cell_17
[ "text_plain_output_2.png", "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/applai-workshop-a1/Training Data - Training Data.csv')
train_df
train_df.isna().sum()
train_df.isna().sum()
train_df.house_value.plot()
code
50234066/cell_14
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/applai-workshop-a1/Training Data - Training Data.csv')
train_df
train_df.isna().sum()
train_df.isna().sum()
print(train_df.total_bedrooms.unique(), len(train_df.total_bedrooms.unique()))
train_df.total_bedrooms.plot()
code
50234066/cell_10
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/applai-workshop-a1/Training Data - Training Data.csv')
train_df
train_df.isna().sum()
train_df.isna().sum()
code
50234066/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/applai-workshop-a1/Training Data - Training Data.csv')
train_df
train_df.isna().sum()
train_df.isna().sum()
print(train_df.housing_median_age.unique(), len(train_df.housing_median_age.unique()))
train_df.housing_median_age.plot()
code
50234066/cell_5
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train_df = pd.read_csv('/kaggle/input/applai-workshop-a1/Training Data - Training Data.csv')
train_df
code
105179122/cell_4
[ "text_plain_output_1.png" ]
b = 2.3456
print(b)
code
105179122/cell_6
[ "text_plain_output_1.png" ]
c = 'world'
print(c)
code
105179122/cell_2
[ "text_plain_output_1.png" ]
a = 10
print(a)
code
105179122/cell_7
[ "text_plain_output_1.png" ]
c = 'world'
type(c)
code
105179122/cell_3
[ "text_plain_output_1.png" ]
a = 10
type(a)
code
105179122/cell_5
[ "text_plain_output_1.png" ]
b = 2.3456
type(b)
code
34120028/cell_4
[ "application_vnd.jupyter.stderr_output_1.png" ]
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import numpy as np  # linear algebra

# NOTE: `imdb` is assumed to be loaded in an earlier cell not captured in this
# extract, e.g. via tensorflow_datasets (tfds.load('imdb_reviews', as_supervised=True)).
train_data, test_data = (imdb['train'], imdb['test'])
training_sentences = []
training_labels = []
testing_sentences = []
testing_labels = []
for s, l in train_data:
    training_sentences.append(str(s.numpy()))
    training_labels.append(l.numpy())
for s, l in test_data:
    testing_sentences.append(str(s.numpy()))
    testing_labels.append(l.numpy())
training_labels_final = np.array(training_labels)
testing_labels_final = np.array(testing_labels)
vocab_size = 10000
embedding_dim = 16
max_length = 120
trunc_type = 'post'
oov_tok = '<OOV>'
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
sequences = tokenizer.texts_to_sequences(training_sentences)
padded = pad_sequences(sequences, maxlen=max_length, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length)
code
34120028/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import numpy as np  # linear algebra
import tensorflow as tf  # needed for tf.keras below; missing from the extracted cell

# NOTE: `imdb` is assumed to be loaded in an earlier cell not captured in this
# extract, e.g. via tensorflow_datasets (tfds.load('imdb_reviews', as_supervised=True)).
train_data, test_data = (imdb['train'], imdb['test'])
training_sentences = []
training_labels = []
testing_sentences = []
testing_labels = []
for s, l in train_data:
    training_sentences.append(str(s.numpy()))
    training_labels.append(l.numpy())
for s, l in test_data:
    testing_sentences.append(str(s.numpy()))
    testing_labels.append(l.numpy())
training_labels_final = np.array(training_labels)
testing_labels_final = np.array(testing_labels)
vocab_size = 10000
embedding_dim = 16
max_length = 120
trunc_type = 'post'
oov_tok = '<OOV>'
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
sequences = tokenizer.texts_to_sequences(training_sentences)
padded = pad_sequences(sequences, maxlen=max_length, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length)
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(6, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
code
34120028/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
34120028/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import numpy as np  # linear algebra
import tensorflow as tf  # needed for tf.keras below; missing from the extracted cell

# NOTE: `imdb` is assumed to be loaded in an earlier cell not captured in this
# extract, e.g. via tensorflow_datasets (tfds.load('imdb_reviews', as_supervised=True)).
train_data, test_data = (imdb['train'], imdb['test'])
training_sentences = []
training_labels = []
testing_sentences = []
testing_labels = []
for s, l in train_data:
    training_sentences.append(str(s.numpy()))
    training_labels.append(l.numpy())
for s, l in test_data:
    testing_sentences.append(str(s.numpy()))
    testing_labels.append(l.numpy())
training_labels_final = np.array(training_labels)
testing_labels_final = np.array(testing_labels)
vocab_size = 10000
embedding_dim = 16
max_length = 120
trunc_type = 'post'
oov_tok = '<OOV>'
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
sequences = tokenizer.texts_to_sequences(training_sentences)
padded = pad_sequences(sequences, maxlen=max_length, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length)
model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(6, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
num_epochs = 10
model.fit(padded, training_labels_final, epochs=num_epochs, validation_data=(testing_padded, testing_labels_final))
code
34120028/cell_3
[ "application_vnd.jupyter.stderr_output_1.png" ]
import numpy as np  # linear algebra

# NOTE: `imdb` is assumed to be loaded in an earlier cell not captured in this
# extract, e.g. via tensorflow_datasets (tfds.load('imdb_reviews', as_supervised=True)).
train_data, test_data = (imdb['train'], imdb['test'])
training_sentences = []
training_labels = []
testing_sentences = []
testing_labels = []
for s, l in train_data:
    training_sentences.append(str(s.numpy()))
    training_labels.append(l.numpy())
for s, l in test_data:
    testing_sentences.append(str(s.numpy()))
    testing_labels.append(l.numpy())
training_labels_final = np.array(training_labels)
testing_labels_final = np.array(testing_labels)
code
34120028/cell_5
[ "application_vnd.jupyter.stderr_output_1.png" ]
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
import numpy as np  # linear algebra

# NOTE: `imdb` is assumed to be loaded in an earlier cell not captured in this
# extract, e.g. via tensorflow_datasets (tfds.load('imdb_reviews', as_supervised=True)).
train_data, test_data = (imdb['train'], imdb['test'])
training_sentences = []
training_labels = []
testing_sentences = []
testing_labels = []
for s, l in train_data:
    training_sentences.append(str(s.numpy()))
    training_labels.append(l.numpy())
for s, l in test_data:
    testing_sentences.append(str(s.numpy()))
    testing_labels.append(l.numpy())
training_labels_final = np.array(training_labels)
testing_labels_final = np.array(testing_labels)
vocab_size = 10000
embedding_dim = 16
max_length = 120
trunc_type = 'post'
oov_tok = '<OOV>'
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
sequences = tokenizer.texts_to_sequences(training_sentences)
padded = pad_sequences(sequences, maxlen=max_length, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length)
reverse_word_index = dict([(value, key) for key, value in word_index.items()])

def decode_review(text):
    return ' '.join([reverse_word_index.get(i, '?') for i in text])

print(decode_review(padded[3]))
print(training_sentences[3])
code
73067347/cell_4
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('train.csv')
df.info()
df.isnull().sum()
code
73067347/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('train.csv')
df.isnull().sum()
cats = df.dtypes == 'object'
object_cols = list(cats[cats].index)
print('Categorical Columns')
print(object_cols)
code
73067347/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd

os.chdir('/kaggle/input/30-days-of-ml')
os.listdir()
code
73067347/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('train.csv')
df.isnull().sum()
cats = df.dtypes == 'object'
object_cols = list(cats[cats].index)
cat_features = [cat_val for cat_val in df.columns if 'cat' in cat_val]
print(cat_features)
num_cols = [col for col in df.columns if 'cont' in col]
print(num_cols)
code
73067347/cell_3
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('train.csv')
df
code
73067347/cell_5
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

df = pd.read_csv('train.csv')
df.isnull().sum()
df.describe(include='all')
code
18147692/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
data_age1.reset_index(inplace=True)
sns.boxplot(data_age1.age)
code
18147692/cell_4
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
code
18147692/cell_33
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
datacount = dataset.groupby('age').count()
datacount = datacount.reset_index()
data1000 = datacount[datacount['tenure'] >= 1000]
dataset_gender_male = dataset[dataset['gender'] == 'male']
dataset_gender_female = dataset[dataset['gender'] == 'female']
dataset_gender_female.shape
code
18147692/cell_20
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
data_age1.reset_index(inplace=True)
datacount = dataset.groupby('age').count()
datacount = datacount.reset_index()
data1000 = datacount[datacount['tenure'] >= 1000]
dataset_1k = data_age1.loc[data_age1['age'].isin(data1000['age'])]
dataset_1k[dataset_1k['tenure'] == dataset_1k['tenure'].max()]
code
18147692/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd
import pandas_profiling as pp
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
pp.ProfileReport(dataset)
code
18147692/cell_40
[ "text_plain_output_1.png", "image_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
data_age1.reset_index(inplace=True)
datacount = dataset.groupby('age').count()
datacount = datacount.reset_index()
data1000 = datacount[datacount['tenure'] >= 1000]
dataset_1k = data_age1.loc[data_age1['age'].isin(data1000['age'])]
dataset_1k[dataset_1k['tenure'] == dataset_1k['tenure'].max()]
data2000 = datacount[datacount['tenure'] >= 2000]
dataset_2k = data_age1.loc[data_age1['age'].isin(data2000['age'])]
dataset_2k[dataset_2k['tenure'] == dataset_2k['tenure'].max()]
data_age1['mobile_surfing'] = data_age1.mobile_likes + data_age1.mobile_likes_received
data_age1['web_surfing'] = data_age1.www_likes + data_age1.www_likes_received
data_age1[data_age1.mobile_surfing > data_age1.web_surfing].shape
data_age1[data_age1['mobile_surfing'] == data_age1['mobile_surfing'].max()]
code
18147692/cell_29
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
datacount = dataset.groupby('age').count()
datacount = datacount.reset_index()
data1000 = datacount[datacount['tenure'] >= 1000]
dataset_18 = dataset[dataset['age'] == 18]
dataset_18_M = dataset_18[dataset_18['gender'] == 'male']
dataset_18_F = dataset_18[dataset_18['gender'] == 'female']
print(dataset_18_F.shape)
print(dataset_18_M.shape)
code
18147692/cell_26
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
data_age1.reset_index(inplace=True)
datacount = dataset.groupby('age').count()
datacount = datacount.reset_index()
data1000 = datacount[datacount['tenure'] >= 1000]
dataset_1k = data_age1.loc[data_age1['age'].isin(data1000['age'])]
dataset_1k[dataset_1k['tenure'] == dataset_1k['tenure'].max()]
data2000 = datacount[datacount['tenure'] >= 2000]
dataset_2k = data_age1.loc[data_age1['age'].isin(data2000['age'])]
dataset_2k[dataset_2k['tenure'] == dataset_2k['tenure'].max()]
dataset_2k[dataset_2k['friend_count'] == dataset_2k['friend_count'].max()]
code
18147692/cell_48
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
datacount = dataset.groupby('age').count()
datacount = datacount.reset_index()
data1000 = datacount[datacount['tenure'] >= 1000]
dataset_gender_male = dataset[dataset['gender'] == 'male']
dataset_gender_female = dataset[dataset['gender'] == 'female']
dataset_gender_male.shape
dataset_gender_female.shape
print(dataset_gender_male.mobile_likes.sum())
print(dataset_gender_male.www_likes.sum())
print(dataset_gender_female.mobile_likes.sum())
print(dataset_gender_female.www_likes.sum())
code
18147692/cell_2
[ "text_plain_output_1.png" ]
import os

print(os.listdir('../input'))
code
18147692/cell_11
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
dataset['age'].value_counts().head()
code
18147692/cell_19
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
datacount = dataset.groupby('age').count()
datacount = datacount.reset_index()
data1000 = datacount[datacount['tenure'] >= 1000]
data1000.loc[:, ['age', 'tenure']].head()
code
18147692/cell_50
[ "text_html_output_1.png" ]
import pandas as pd
import numpy as np

dataset = pd.read_csv('../input/pseudo_facebook.tsv', sep='\t')
dataset.shape
data_age1 = dataset.groupby('age').mean()
datacount = dataset.groupby('age').count()
datacount = datacount.reset_index()
data1000 = datacount[datacount['tenure'] >= 1000]
dataset_gender_male = dataset[dataset['gender'] == 'male']
dataset_gender_female = dataset[dataset['gender'] == 'female']
dataset_gender_male.shape
dataset_gender_female.shape
print(dataset_gender_male.likes_received.sum())
print(dataset_gender_female.likes_received.sum())
code