path (stringlengths 13-17) | screenshot_names (sequencelengths 1-873) | code (stringlengths 0-40.4k) | cell_type (stringclasses 1 value) |
---|---|---|---|
128003343/cell_41 | [
"text_plain_output_1.png"
] | import pandas as pd
SHEET_ID = '10DM_ZqwV6CvOryD6u3pYHb5scjEfiwfkitUGTCG8l7Y'
SHEET_NAME = 'AAPL'
url = f'https://docs.google.com/spreadsheets/d/{SHEET_ID}/gviz/tq?tqx=out:csv&sheet={SHEET_NAME}'
df = pd.read_csv(url, decimal=',')
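# Aggregate by A/B variant: number of users, mean day-1 and day-7 retention, and total game rounds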
df_retention_ab = df.groupby('version').agg({'userid': 'count', 'retention_1': 'mean', 'retention_7': 'mean', 'sum_gamerounds': 'sum'})
df_retention_ab
df_c = df[df['version'] == 'gate_30']
df_t = df[df['version'] == 'gate_40']
# calculation of the difference in retention between the 2 groups
ret1_dif = (df_c.describe(include='all').loc['mean']['retention_1'] - df_t.describe(include='all').loc['mean']['retention_1']) / df_t.describe(include='all').loc['mean']['retention_1']
ret7_dif = (df_c.describe(include='all').loc['mean']['retention_7'] - df_t.describe(include='all').loc['mean']['retention_7']) / df_t.describe(include='all').loc['mean']['retention_7']
ret1_dif, ret7_dif
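# Number of users retained at day 1 in the treatment (gate_40) and control (gate_30) groups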
k1 = df_t['retention_1'].sum()
k2 = df_c['retention_1'].sum()
(k1, k2)
n1 = df_t.shape[0]
n2 = df_c.shape[0]
(n1, n2)
k1 = df_t['retention_7'].sum()
k2 = df_c['retention_7'].sum()
(k1, k2) | code |
128003343/cell_52 | [
"text_plain_output_1.png"
] | from statsmodels.stats import proportion
import math
import numpy as np
import pandas as pd
SHEET_ID = '10DM_ZqwV6CvOryD6u3pYHb5scjEfiwfkitUGTCG8l7Y'
SHEET_NAME = 'AAPL'
url = f'https://docs.google.com/spreadsheets/d/{SHEET_ID}/gviz/tq?tqx=out:csv&sheet={SHEET_NAME}'
df = pd.read_csv(url, decimal=',')
df_retention_ab = df.groupby('version').agg({'userid': 'count', 'retention_1': 'mean', 'retention_7': 'mean', 'sum_gamerounds': 'sum'})
df_retention_ab
df_c = df[df['version'] == 'gate_30']
df_t = df[df['version'] == 'gate_40']
# calculation of the difference in retention between the 2 groups
ret1_dif = (df_c.describe(include='all').loc['mean']['retention_1'] - df_t.describe(include='all').loc['mean']['retention_1']) / df_t.describe(include='all').loc['mean']['retention_1']
ret7_dif = (df_c.describe(include='all').loc['mean']['retention_7'] - df_t.describe(include='all').loc['mean']['retention_7']) / df_t.describe(include='all').loc['mean']['retention_7']
ret1_dif, ret7_dif
k1 = df_t['retention_1'].sum()
k2 = df_c['retention_1'].sum()
(k1, k2)
n1 = df_t.shape[0]
n2 = df_c.shape[0]
(n1, n2)
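# Two-proportion z-test and chi-square test for the day-1 retention rates of the two groups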
z_score, z_pvalue = proportion.proportions_ztest(np.array([k1, k2]), np.array([n1, n2]))
chisq, pvalue, table = proportion.proportions_chisquare(np.array([k1, k2]), np.array([n1, n2]))
k1 = df_t['retention_7'].sum()
k2 = df_c['retention_7'].sum()
(k1, k2)
n1 = df_t.shape[0]
n2 = df_c.shape[0]
(n1, n2)
z_score, z_pvalue = proportion.proportions_ztest(np.array([k1, k2]), np.array([n1, n2]))
# Proportion test (for clicks, conversions)
alpha = 0.05  # significance level
power = 0.9  # statistical power
n = df_retention_ab['userid'].min()  # number of observations (size of the smaller group)
p_x = df_t.describe(include='all').loc['mean']['retention_7']  # conversion rates (day-7 retention)
p_y = df_c.describe(include='all').loc['mean']['retention_7']
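# Cohen's h: effect size for the difference between two proportions (arcsine transformation)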
h = 2 * math.asin(np.sqrt(p_x)) - 2 * math.asin(np.sqrt(p_y))
h | code |
128003343/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
SHEET_ID = '10DM_ZqwV6CvOryD6u3pYHb5scjEfiwfkitUGTCG8l7Y'
SHEET_NAME = 'AAPL'
url = f'https://docs.google.com/spreadsheets/d/{SHEET_ID}/gviz/tq?tqx=out:csv&sheet={SHEET_NAME}'
df = pd.read_csv(url, decimal=',')
df.info() | code |
128003343/cell_18 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
SHEET_ID = '10DM_ZqwV6CvOryD6u3pYHb5scjEfiwfkitUGTCG8l7Y'
SHEET_NAME = 'AAPL'
url = f'https://docs.google.com/spreadsheets/d/{SHEET_ID}/gviz/tq?tqx=out:csv&sheet={SHEET_NAME}'
df = pd.read_csv(url, decimal=',')
df_retention_ab = df.groupby('version').agg({'userid': 'count', 'retention_1': 'mean', 'retention_7': 'mean', 'sum_gamerounds': 'sum'})
df_retention_ab
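# Box plot of game rounds per version; showfliers=False hides extreme outliers for readability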
sns.boxplot(x=df['version'], y=df['sum_gamerounds'], showfliers=False)
plt.title('Distribution of game rounds in the two groups') | code |
128003343/cell_32 | [
"image_output_1.png"
] | import pandas as pd
SHEET_ID = '10DM_ZqwV6CvOryD6u3pYHb5scjEfiwfkitUGTCG8l7Y'
SHEET_NAME = 'AAPL'
url = f'https://docs.google.com/spreadsheets/d/{SHEET_ID}/gviz/tq?tqx=out:csv&sheet={SHEET_NAME}'
df = pd.read_csv(url, decimal=',')
df_retention_ab = df.groupby('version').agg({'userid': 'count', 'retention_1': 'mean', 'retention_7': 'mean', 'sum_gamerounds': 'sum'})
df_retention_ab
df_c = df[df['version'] == 'gate_30']
df_t = df[df['version'] == 'gate_40']
# calculation of the difference in retention between the 2 groups
ret1_dif = (df_c.describe(include='all').loc['mean']['retention_1'] - df_t.describe(include='all').loc['mean']['retention_1']) / df_t.describe(include='all').loc['mean']['retention_1']
ret7_dif = (df_c.describe(include='all').loc['mean']['retention_7'] - df_t.describe(include='all').loc['mean']['retention_7']) / df_t.describe(include='all').loc['mean']['retention_7']
ret1_dif, ret7_dif
n1 = df_t.shape[0]
n2 = df_c.shape[0]
(n1, n2) | code |
128003343/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd
SHEET_ID = '10DM_ZqwV6CvOryD6u3pYHb5scjEfiwfkitUGTCG8l7Y'
SHEET_NAME = 'AAPL'
url = f'https://docs.google.com/spreadsheets/d/{SHEET_ID}/gviz/tq?tqx=out:csv&sheet={SHEET_NAME}'
df = pd.read_csv(url, decimal=',')
df.describe() | code |
128003343/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd
SHEET_ID = '10DM_ZqwV6CvOryD6u3pYHb5scjEfiwfkitUGTCG8l7Y'
SHEET_NAME = 'AAPL'
url = f'https://docs.google.com/spreadsheets/d/{SHEET_ID}/gviz/tq?tqx=out:csv&sheet={SHEET_NAME}'
df = pd.read_csv(url, decimal=',')
df_retention_ab = df.groupby('version').agg({'userid': 'count', 'retention_1': 'mean', 'retention_7': 'mean', 'sum_gamerounds': 'sum'})
df_retention_ab | code |
128003343/cell_47 | [
"text_plain_output_1.png"
] | import pandas as pd
SHEET_ID = '10DM_ZqwV6CvOryD6u3pYHb5scjEfiwfkitUGTCG8l7Y'
SHEET_NAME = 'AAPL'
url = f'https://docs.google.com/spreadsheets/d/{SHEET_ID}/gviz/tq?tqx=out:csv&sheet={SHEET_NAME}'
df = pd.read_csv(url, decimal=',')
df_retention_ab = df.groupby('version').agg({'userid': 'count', 'retention_1': 'mean', 'retention_7': 'mean', 'sum_gamerounds': 'sum'})
df_retention_ab
df[(df['retention_1'] == 0) & (df['retention_7'] == 1)] | code |
128003343/cell_17 | [
"text_plain_output_1.png"
] | import pandas as pd
SHEET_ID = '10DM_ZqwV6CvOryD6u3pYHb5scjEfiwfkitUGTCG8l7Y'
SHEET_NAME = 'AAPL'
url = f'https://docs.google.com/spreadsheets/d/{SHEET_ID}/gviz/tq?tqx=out:csv&sheet={SHEET_NAME}'
df = pd.read_csv(url, decimal=',')
df_retention_ab = df.groupby('version').agg({'userid': 'count', 'retention_1': 'mean', 'retention_7': 'mean', 'sum_gamerounds': 'sum'})
df_retention_ab
df_c = df[df['version'] == 'gate_30']
df_t = df[df['version'] == 'gate_40']
ret1_dif = (df_c.describe(include='all').loc['mean']['retention_1'] - df_t.describe(include='all').loc['mean']['retention_1']) / df_t.describe(include='all').loc['mean']['retention_1']
ret7_dif = (df_c.describe(include='all').loc['mean']['retention_7'] - df_t.describe(include='all').loc['mean']['retention_7']) / df_t.describe(include='all').loc['mean']['retention_7']
(ret1_dif, ret7_dif) | code |
128003343/cell_43 | [
"text_plain_output_1.png"
] | from statsmodels.stats import proportion
import numpy as np
import pandas as pd
SHEET_ID = '10DM_ZqwV6CvOryD6u3pYHb5scjEfiwfkitUGTCG8l7Y'
SHEET_NAME = 'AAPL'
url = f'https://docs.google.com/spreadsheets/d/{SHEET_ID}/gviz/tq?tqx=out:csv&sheet={SHEET_NAME}'
df = pd.read_csv(url, decimal=',')
df_retention_ab = df.groupby('version').agg({'userid': 'count', 'retention_1': 'mean', 'retention_7': 'mean', 'sum_gamerounds': 'sum'})
df_retention_ab
df_c = df[df['version'] == 'gate_30']
df_t = df[df['version'] == 'gate_40']
# calculation of the difference in retention between the 2 groups
ret1_dif = (df_c.describe(include='all').loc['mean']['retention_1'] - df_t.describe(include='all').loc['mean']['retention_1']) / df_t.describe(include='all').loc['mean']['retention_1']
ret7_dif = (df_c.describe(include='all').loc['mean']['retention_7'] - df_t.describe(include='all').loc['mean']['retention_7']) / df_t.describe(include='all').loc['mean']['retention_7']
ret1_dif, ret7_dif
k1 = df_t['retention_1'].sum()
k2 = df_c['retention_1'].sum()
(k1, k2)
n1 = df_t.shape[0]
n2 = df_c.shape[0]
(n1, n2)
z_score, z_pvalue = proportion.proportions_ztest(np.array([k1, k2]), np.array([n1, n2]))
chisq, pvalue, table = proportion.proportions_chisquare(np.array([k1, k2]), np.array([n1, n2]))
k1 = df_t['retention_7'].sum()
k2 = df_c['retention_7'].sum()
(k1, k2)
n1 = df_t.shape[0]
n2 = df_c.shape[0]
(n1, n2)
z_score, z_pvalue = proportion.proportions_ztest(np.array([k1, k2]), np.array([n1, n2]))
print('Results are ', 'z_score =%.3f, pvalue = %.3f' % (z_score, z_pvalue)) | code |
128003343/cell_31 | [
"text_plain_output_1.png"
] | import pandas as pd
SHEET_ID = '10DM_ZqwV6CvOryD6u3pYHb5scjEfiwfkitUGTCG8l7Y'
SHEET_NAME = 'AAPL'
url = f'https://docs.google.com/spreadsheets/d/{SHEET_ID}/gviz/tq?tqx=out:csv&sheet={SHEET_NAME}'
df = pd.read_csv(url, decimal=',')
df_retention_ab = df.groupby('version').agg({'userid': 'count', 'retention_1': 'mean', 'retention_7': 'mean', 'sum_gamerounds': 'sum'})
df_retention_ab
df_c = df[df['version'] == 'gate_30']
df_t = df[df['version'] == 'gate_40']
# calculation of the difference in retention between the 2 groups
ret1_dif = (df_c.describe(include='all').loc['mean']['retention_1'] - df_t.describe(include='all').loc['mean']['retention_1']) / df_t.describe(include='all').loc['mean']['retention_1']
ret7_dif = (df_c.describe(include='all').loc['mean']['retention_7'] - df_t.describe(include='all').loc['mean']['retention_7']) / df_t.describe(include='all').loc['mean']['retention_7']
ret1_dif, ret7_dif
k1 = df_t['retention_1'].sum()
k2 = df_c['retention_1'].sum()
(k1, k2) | code |
128003343/cell_24 | [
"text_plain_output_1.png"
] | from scipy.stats import ttest_1samp, mannwhitneyu, shapiro, norm, t, kstest, shapiro
import pandas as pd
SHEET_ID = '10DM_ZqwV6CvOryD6u3pYHb5scjEfiwfkitUGTCG8l7Y'
SHEET_NAME = 'AAPL'
url = f'https://docs.google.com/spreadsheets/d/{SHEET_ID}/gviz/tq?tqx=out:csv&sheet={SHEET_NAME}'
df = pd.read_csv(url, decimal=',')
df_retention_ab = df.groupby('version').agg({'userid': 'count', 'retention_1': 'mean', 'retention_7': 'mean', 'sum_gamerounds': 'sum'})
df_retention_ab
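# Kolmogorov-Smirnov test of sum_gamerounds against the standard normal distribution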
print(kstest(df['sum_gamerounds'], 'norm')) | code |
128003343/cell_27 | [
"text_html_output_1.png"
] | from scipy.stats import ttest_1samp, mannwhitneyu, shapiro, norm, t, kstest, shapiro
import pandas as pd
SHEET_ID = '10DM_ZqwV6CvOryD6u3pYHb5scjEfiwfkitUGTCG8l7Y'
SHEET_NAME = 'AAPL'
url = f'https://docs.google.com/spreadsheets/d/{SHEET_ID}/gviz/tq?tqx=out:csv&sheet={SHEET_NAME}'
df = pd.read_csv(url, decimal=',')
df_retention_ab = df.groupby('version').agg({'userid': 'count', 'retention_1': 'mean', 'retention_7': 'mean', 'sum_gamerounds': 'sum'})
df_retention_ab
df_c = df[df['version'] == 'gate_30']
df_t = df[df['version'] == 'gate_40']
# calculation of the difference in retention between the 2 groups
ret1_dif = (df_c.describe(include='all').loc['mean']['retention_1'] - df_t.describe(include='all').loc['mean']['retention_1']) / df_t.describe(include='all').loc['mean']['retention_1']
ret7_dif = (df_c.describe(include='all').loc['mean']['retention_7'] - df_t.describe(include='all').loc['mean']['retention_7']) / df_t.describe(include='all').loc['mean']['retention_7']
ret1_dif, ret7_dif
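# Mann-Whitney U test: non-parametric comparison of sum_gamerounds between control and treatment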
mannwhitneyu(x=df_c['sum_gamerounds'].values, y=df_t['sum_gamerounds'].values) | code |
128003343/cell_37 | [
"text_plain_output_1.png"
] | if abs(pvalue) < 0.05:
print('We may reject the null hypothesis!')
else:
print('We have failed to reject the null hypothesis') | code |
128003343/cell_12 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
SHEET_ID = '10DM_ZqwV6CvOryD6u3pYHb5scjEfiwfkitUGTCG8l7Y'
SHEET_NAME = 'AAPL'
url = f'https://docs.google.com/spreadsheets/d/{SHEET_ID}/gviz/tq?tqx=out:csv&sheet={SHEET_NAME}'
df = pd.read_csv(url, decimal=',')
df['userid'].nunique() == df['userid'].count() | code |
128003343/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
SHEET_ID = '10DM_ZqwV6CvOryD6u3pYHb5scjEfiwfkitUGTCG8l7Y'
SHEET_NAME = 'AAPL'
url = f'https://docs.google.com/spreadsheets/d/{SHEET_ID}/gviz/tq?tqx=out:csv&sheet={SHEET_NAME}'
df = pd.read_csv(url, decimal=',')
df.head(10) | code |
128003343/cell_36 | [
"text_plain_output_1.png"
] | from statsmodels.stats import proportion
import numpy as np
import pandas as pd
SHEET_ID = '10DM_ZqwV6CvOryD6u3pYHb5scjEfiwfkitUGTCG8l7Y'
SHEET_NAME = 'AAPL'
url = f'https://docs.google.com/spreadsheets/d/{SHEET_ID}/gviz/tq?tqx=out:csv&sheet={SHEET_NAME}'
df = pd.read_csv(url, decimal=',')
df_retention_ab = df.groupby('version').agg({'userid': 'count', 'retention_1': 'mean', 'retention_7': 'mean', 'sum_gamerounds': 'sum'})
df_retention_ab
df_c = df[df['version'] == 'gate_30']
df_t = df[df['version'] == 'gate_40']
# calculation of the difference in retention between the 2 groups
ret1_dif = (df_c.describe(include='all').loc['mean']['retention_1'] - df_t.describe(include='all').loc['mean']['retention_1']) / df_t.describe(include='all').loc['mean']['retention_1']
ret7_dif = (df_c.describe(include='all').loc['mean']['retention_7'] - df_t.describe(include='all').loc['mean']['retention_7']) / df_t.describe(include='all').loc['mean']['retention_7']
ret1_dif, ret7_dif
k1 = df_t['retention_1'].sum()
k2 = df_c['retention_1'].sum()
(k1, k2)
n1 = df_t.shape[0]
n2 = df_c.shape[0]
(n1, n2)
z_score, z_pvalue = proportion.proportions_ztest(np.array([k1, k2]), np.array([n1, n2]))
chisq, pvalue, table = proportion.proportions_chisquare(np.array([k1, k2]), np.array([n1, n2]))
print('Results are ', 'chisq =%.3f, pvalue = %.3f' % (chisq, pvalue)) | code |
330371/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
df['Parch'].value_counts(sort=False) | code |
330371/cell_25 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
df['Sex'].value_counts() | code |
330371/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.head() | code |
330371/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
df['Fare'].value_counts().head(20) | code |
330371/cell_6 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.info() | code |
330371/cell_40 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
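# Mean and standard deviation of Age per Title, broken down by Sex and Pclass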
ages_mean = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='mean')
ages_mean
ages_std = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='std')
ages_std | code |
330371/cell_29 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
df['Cabin'].value_counts(dropna=False)[:20] | code |
330371/cell_39 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
ages_mean = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='mean')
ages_mean | code |
330371/cell_41 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
ages_mean = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='mean')
ages_mean
ages_std = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='std')
ages_std
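# Impute a missing age by drawing a random integer within one standard deviation of the mean age for the passenger's Title, Sex and Pclass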
def age_guesser(person):
gender = person['Sex']
mean_age = ages_mean[gender].loc[person['Title'], person['Pclass']]
std = ages_std[gender].loc[person['Title'], person['Pclass']]
persons_age = np.random.randint(mean_age - std, mean_age + std)
return persons_age
unknown_age = df['Age'].isnull()
people_w_unknown_age = df.loc[unknown_age, ['Age', 'Title', 'Sex', 'Pclass']]
people_w_unknown_age['Age'] = people_w_unknown_age.apply(age_guesser, axis=1)
people_w_unknown_age.head(10) | code |
330371/cell_54 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
ages_mean = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='mean')
ages_mean
ages_std = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='std')
ages_std
def age_guesser(person):
gender = person['Sex']
mean_age = ages_mean[gender].loc[person['Title'], person['Pclass']]
std = ages_std[gender].loc[person['Title'], person['Pclass']]
persons_age = np.random.randint(mean_age - std, mean_age + std)
return persons_age
unknown_age = df['Age'].isnull()
people_w_unknown_age = df.loc[unknown_age, ['Age', 'Title', 'Sex', 'Pclass']]
people_w_unknown_age['Age'] = people_w_unknown_age.apply(age_guesser, axis=1)
known_age = df['Age'].notnull()
people_w_known_age = df.loc[known_age, ['Age', 'Title', 'Sex', 'Pclass']]
df['new_age'] = pd.concat([people_w_known_age['Age'], people_w_unknown_age['Age']])
df['parent'] = 0
df.loc[(df.Parch > 0) & (df.new_age >= 18), 'parent'] = 1
df['child'] = 0
df.loc[(df.Parch > 0) & (df.new_age < 18), 'child'] = 1
df['family'] = df['SibSp'] + df['Parch']
df.pivot_table('Survived', index=['Sex', 'Pclass'], columns=['family'], margins=True) | code |
330371/cell_19 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
df['SibSp'].value_counts(sort=False) | code |
330371/cell_50 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
ages_mean = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='mean')
ages_mean
ages_std = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='std')
ages_std
def age_guesser(person):
gender = person['Sex']
mean_age = ages_mean[gender].loc[person['Title'], person['Pclass']]
std = ages_std[gender].loc[person['Title'], person['Pclass']]
persons_age = np.random.randint(mean_age - std, mean_age + std)
return persons_age
unknown_age = df['Age'].isnull()
people_w_unknown_age = df.loc[unknown_age, ['Age', 'Title', 'Sex', 'Pclass']]
people_w_unknown_age['Age'] = people_w_unknown_age.apply(age_guesser, axis=1)
known_age = df['Age'].notnull()
people_w_known_age = df.loc[known_age, ['Age', 'Title', 'Sex', 'Pclass']]
df['new_age'] = pd.concat([people_w_known_age['Age'], people_w_unknown_age['Age']])
df['parent'] = 0
df.loc[(df.Parch > 0) & (df.new_age >= 18), 'parent'] = 1
df['child'] = 0
df.loc[(df.Parch > 0) & (df.new_age < 18), 'child'] = 1
df['family'] = df['SibSp'] + df['Parch']
df['family'].value_counts() | code |
330371/cell_52 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
ages_mean = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='mean')
ages_mean
ages_std = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='std')
ages_std
def age_guesser(person):
gender = person['Sex']
mean_age = ages_mean[gender].loc[person['Title'], person['Pclass']]
std = ages_std[gender].loc[person['Title'], person['Pclass']]
persons_age = np.random.randint(mean_age - std, mean_age + std)
return persons_age
unknown_age = df['Age'].isnull()
people_w_unknown_age = df.loc[unknown_age, ['Age', 'Title', 'Sex', 'Pclass']]
people_w_unknown_age['Age'] = people_w_unknown_age.apply(age_guesser, axis=1)
known_age = df['Age'].notnull()
people_w_known_age = df.loc[known_age, ['Age', 'Title', 'Sex', 'Pclass']]
df['new_age'] = pd.concat([people_w_known_age['Age'], people_w_unknown_age['Age']])
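# Compare the age distribution per passenger class before (Age) and after (new_age) imputation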
for pclass in [1, 2, 3]:
df[df['Pclass'] == pclass]['Age'].plot.kde(figsize=(12, 10))
df[df['Pclass'] == pclass]['new_age'].plot.kde()
plt.xlim(-10, 90)
plt.ylim(0, 0.05)
plt.xlim(-10, 90)
plt.ylim(0, 0.05)
plt.xlim(0, None)
df['parent'] = 0
df.loc[(df.Parch > 0) & (df.new_age >= 18), 'parent'] = 1
df['child'] = 0
df.loc[(df.Parch > 0) & (df.new_age < 18), 'child'] = 1
df['family'] = df['SibSp'] + df['Parch']
sns.factorplot(x='Sex', y='Survived', data=df, kind='bar', size=5, ci=None, hue='family')
plt.title('Survival Rate by Gender and Family Size') | code |
330371/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe() | code |
330371/cell_45 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
ages_mean = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='mean')
ages_mean
ages_std = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='std')
ages_std
def age_guesser(person):
gender = person['Sex']
mean_age = ages_mean[gender].loc[person['Title'], person['Pclass']]
std = ages_std[gender].loc[person['Title'], person['Pclass']]
persons_age = np.random.randint(mean_age - std, mean_age + std)
return persons_age
unknown_age = df['Age'].isnull()
people_w_unknown_age = df.loc[unknown_age, ['Age', 'Title', 'Sex', 'Pclass']]
people_w_unknown_age['Age'] = people_w_unknown_age.apply(age_guesser, axis=1)
known_age = df['Age'].notnull()
people_w_known_age = df.loc[known_age, ['Age', 'Title', 'Sex', 'Pclass']]
df['new_age'] = pd.concat([people_w_known_age['Age'], people_w_unknown_age['Age']])
for pclass in [1, 2, 3]:
plt.subplot(211)
df[df['Pclass'] == pclass]['Age'].plot.kde(figsize=(12, 10))
plt.subplot(212)
df[df['Pclass'] == pclass]['new_age'].plot.kde()
plt.suptitle('Age Density by Passenger Class', size=12)
plt.subplot(211)
plt.xlabel('Age - before filling missing values')
plt.legend(('1st Class', '2nd Class', '3rd Class'))
plt.xlim(-10, 90)
plt.ylim(0, 0.05)
plt.subplot(212)
plt.xlabel('Age - values filled')
plt.legend(('1st Class', '2nd Class', '3rd Class'))
plt.xlim(-10, 90)
plt.ylim(0, 0.05) | code |
330371/cell_49 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
ages_mean = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='mean')
ages_mean
ages_std = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='std')
ages_std
def age_guesser(person):
gender = person['Sex']
mean_age = ages_mean[gender].loc[person['Title'], person['Pclass']]
std = ages_std[gender].loc[person['Title'], person['Pclass']]
persons_age = np.random.randint(mean_age - std, mean_age + std)
return persons_age
unknown_age = df['Age'].isnull()
people_w_unknown_age = df.loc[unknown_age, ['Age', 'Title', 'Sex', 'Pclass']]
people_w_unknown_age['Age'] = people_w_unknown_age.apply(age_guesser, axis=1)
known_age = df['Age'].notnull()
people_w_known_age = df.loc[known_age, ['Age', 'Title', 'Sex', 'Pclass']]
df['new_age'] = pd.concat([people_w_known_age['Age'], people_w_unknown_age['Age']])
df['parent'] = 0
df.loc[(df.Parch > 0) & (df.new_age >= 18), 'parent'] = 1
df['child'] = 0
df.loc[(df.Parch > 0) & (df.new_age < 18), 'child'] = 1
df.tail(5) | code |
330371/cell_51 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
ages_mean = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='mean')
ages_mean
ages_std = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='std')
ages_std
def age_guesser(person):
gender = person['Sex']
mean_age = ages_mean[gender].loc[person['Title'], person['Pclass']]
std = ages_std[gender].loc[person['Title'], person['Pclass']]
persons_age = np.random.randint(mean_age - std, mean_age + std)
return persons_age
unknown_age = df['Age'].isnull()
people_w_unknown_age = df.loc[unknown_age, ['Age', 'Title', 'Sex', 'Pclass']]
people_w_unknown_age['Age'] = people_w_unknown_age.apply(age_guesser, axis=1)
known_age = df['Age'].notnull()
people_w_known_age = df.loc[known_age, ['Age', 'Title', 'Sex', 'Pclass']]
df['new_age'] = pd.concat([people_w_known_age['Age'], people_w_unknown_age['Age']])
for pclass in [1, 2, 3]:
df[df['Pclass'] == pclass]['Age'].plot.kde(figsize=(12, 10))
df[df['Pclass'] == pclass]['new_age'].plot.kde()
plt.xlim(-10, 90)
plt.ylim(0, 0.05)
plt.xlim(-10, 90)
plt.ylim(0, 0.05)
plt.xlim(0, None)
df['parent'] = 0
df.loc[(df.Parch > 0) & (df.new_age >= 18), 'parent'] = 1
df['child'] = 0
df.loc[(df.Parch > 0) & (df.new_age < 18), 'child'] = 1
sns.factorplot(x='family', y='Survived', data=df, kind='bar', size=5, ci=None)
plt.title('Survival Rate by Family Size') | code |
330371/cell_8 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
df.select_dtypes(include=['object']).describe() | code |
330371/cell_16 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
df['Age'].value_counts(dropna=False)[:20] | code |
330371/cell_47 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
ages_mean = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='mean')
ages_mean
ages_std = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='std')
ages_std
def age_guesser(person):
gender = person['Sex']
mean_age = ages_mean[gender].loc[person['Title'], person['Pclass']]
std = ages_std[gender].loc[person['Title'], person['Pclass']]
persons_age = np.random.randint(mean_age - std, mean_age + std)
return persons_age
unknown_age = df['Age'].isnull()
people_w_unknown_age = df.loc[unknown_age, ['Age', 'Title', 'Sex', 'Pclass']]
people_w_unknown_age['Age'] = people_w_unknown_age.apply(age_guesser, axis=1)
known_age = df['Age'].notnull()
people_w_known_age = df.loc[known_age, ['Age', 'Title', 'Sex', 'Pclass']]
df['new_age'] = pd.concat([people_w_known_age['Age'], people_w_unknown_age['Age']])
for pclass in [1, 2, 3]:
df[df['Pclass'] == pclass]['Age'].plot.kde(figsize=(12, 10))
df[df['Pclass'] == pclass]['new_age'].plot.kde()
plt.xlim(-10, 90)
plt.ylim(0, 0.05)
plt.xlim(-10, 90)
plt.ylim(0, 0.05)
sns.regplot(x='new_age', y='Survived', data=df, x_bins=50, x_ci=None)
plt.xlim(0, None)
plt.title('Survival Rate by Age Group') | code |
330371/cell_17 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
df['Age'].hist(bins=20) | code |
330371/cell_35 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
sns.factorplot(x='Sex', y='Survived', data=df, kind='bar', size=5, ci=None)
plt.title('Survival Rate by Gender') | code |
330371/cell_43 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
ages_mean = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='mean')
ages_mean
ages_std = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='std')
ages_std
def age_guesser(person):
gender = person['Sex']
mean_age = ages_mean[gender].loc[person['Title'], person['Pclass']]
std = ages_std[gender].loc[person['Title'], person['Pclass']]
persons_age = np.random.randint(mean_age - std, mean_age + std)
return persons_age
unknown_age = df['Age'].isnull()
people_w_unknown_age = df.loc[unknown_age, ['Age', 'Title', 'Sex', 'Pclass']]
people_w_unknown_age['Age'] = people_w_unknown_age.apply(age_guesser, axis=1)
known_age = df['Age'].notnull()
people_w_known_age = df.loc[known_age, ['Age', 'Title', 'Sex', 'Pclass']]
df['new_age'] = pd.concat([people_w_known_age['Age'], people_w_unknown_age['Age']])
df.head(7) | code |
330371/cell_31 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
df['Embarked'].value_counts(dropna=False) | code |
330371/cell_53 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
ages_mean = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='mean')
ages_mean
ages_std = df.pivot_table('Age', index=['Title'], columns=['Sex', 'Pclass'], aggfunc='std')
ages_std
def age_guesser(person):
gender = person['Sex']
mean_age = ages_mean[gender].loc[person['Title'], person['Pclass']]
std = ages_std[gender].loc[person['Title'], person['Pclass']]
persons_age = np.random.randint(mean_age - std, mean_age + std)
return persons_age
unknown_age = df['Age'].isnull()
people_w_unknown_age = df.loc[unknown_age, ['Age', 'Title', 'Sex', 'Pclass']]
people_w_unknown_age['Age'] = people_w_unknown_age.apply(age_guesser, axis=1)
known_age = df['Age'].notnull()
people_w_known_age = df.loc[known_age, ['Age', 'Title', 'Sex', 'Pclass']]
df['new_age'] = pd.concat([people_w_known_age['Age'], people_w_unknown_age['Age']])
for pclass in [1, 2, 3]:
df[df['Pclass'] == pclass]['Age'].plot.kde(figsize=(12, 10))
df[df['Pclass'] == pclass]['new_age'].plot.kde()
plt.xlim(-10, 90)
plt.ylim(0, 0.05)
plt.xlim(-10, 90)
plt.ylim(0, 0.05)
plt.xlim(0, None)
df['parent'] = 0
df.loc[(df.Parch > 0) & (df.new_age >= 18), 'parent'] = 1
df['child'] = 0
df.loc[(df.Parch > 0) & (df.new_age < 18), 'child'] = 1
df['family'] = df['SibSp'] + df['Parch']
sns.factorplot(x='Pclass', y='Survived', data=df, kind='bar', size=5, aspect=1.5, ci=None, hue='family')
plt.title('Survival Rate by Class and Family Size') | code |
330371/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
df['Survived'].value_counts() | code |
330371/cell_27 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
df['Ticket'].value_counts()[:20] | code |
330371/cell_37 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
sns.factorplot(x='Pclass', y='Survived', hue='Sex', data=df, kind='bar', size=5, aspect=1.5, ci=None)
plt.title('Survival Rate by Class and Gender') | code |
330371/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
df['Pclass'].value_counts(sort=False) | code |
330371/cell_36 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('../input/train.csv', index_col='PassengerId')
df.dropna().describe()
sns.factorplot(x='Pclass', y='Survived', data=df, kind='bar', size=5, ci=None)
plt.title('Survival Rate by Class') | code |
1007016/cell_13 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
d = pd.read_csv('../input/train.csv')
t = pd.read_csv('../input/test.csv')
X_t = t.loc[:, ['Class1', 'Class2', 'Male', 'Age', 'SibSp', 'Parch', 'Fare']] | code |
1007016/cell_4 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
d = pd.read_csv('../input/train.csv')
d['Male'] = d['Sex'] == 'male'
n = d['Age'].mean()
d['Class1'] = d['Pclass'] == 1
d['Class2'] = d['Pclass'] == 2
d['Age'].fillna(n, inplace=True) | code |
1007016/cell_2 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
d = pd.read_csv('../input/train.csv')
d.head() | code |
1007016/cell_11 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
d = pd.read_csv('../input/train.csv')
t = pd.read_csv('../input/test.csv') | code |
1007016/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
d = pd.read_csv('../input/train.csv') | code |
1007016/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
from sklearn.tree import DecisionTreeClassifier
d = pd.read_csv('../input/train.csv')
X = d.loc[:, ['Class1', 'Class2', 'Male', 'Age', 'SibSp', 'Parch', 'Fare']]
y = d['Survived']
thisclf = DecisionTreeClassifier()
thisclf.fit(X, y) | code |
1007016/cell_8 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
from sklearn.tree import DecisionTreeClassifier
d = pd.read_csv('../input/train.csv')
X = d.loc[:, ['Class1', 'Class2', 'Male', 'Age', 'SibSp', 'Parch', 'Fare']]
y = d['Survived']
thisclf = DecisionTreeClassifier()
thisclf.fit(X, y)
d['predicted'] = thisclf.predict(X) | code |
1007016/cell_15 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
d = pd.read_csv('../input/train.csv')
t = pd.read_csv('../input/test.csv')
X_t = t.loc[:, ['Class1', 'Class2', 'Male', 'Age', 'SibSp', 'Parch', 'Fare']]
t_out = t.loc[:, ['PassengerId', 'Survived']] | code |
1007016/cell_16 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
d = pd.read_csv('../input/train.csv')
t = pd.read_csv('../input/test.csv')
X_t = t.loc[:, ['Class1', 'Class2', 'Male', 'Age', 'SibSp', 'Parch', 'Fare']]
t_out = t.loc[:, ['PassengerId', 'Survived']]
t_out.to_csv('out.csv') | code |
1007016/cell_14 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
from sklearn.tree import DecisionTreeClassifier
d = pd.read_csv('../input/train.csv')
X = d.loc[:, ['Class1', 'Class2', 'Male', 'Age', 'SibSp', 'Parch', 'Fare']]
y = d['Survived']
thisclf = DecisionTreeClassifier()
thisclf.fit(X, y)
d['predicted'] = thisclf.predict(X)
t = pd.read_csv('../input/test.csv')
X_t = t.loc[:, ['Class1', 'Class2', 'Male', 'Age', 'SibSp', 'Parch', 'Fare']]
t['Survived'] = thisclf.predict(X_t) | code |
1007016/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
from sklearn.metrics import accuracy_score
d = pd.read_csv('../input/train.csv')
X = d.loc[:, ['Class1', 'Class2', 'Male', 'Age', 'SibSp', 'Parch', 'Fare']]
y = d['Survived']
accuracy_score(y, d['predicted']) | code |
1007016/cell_12 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
d = pd.read_csv('../input/train.csv')
t = pd.read_csv('../input/test.csv')
t['Male'] = t['Sex'] == 'male'
nn = t['Age'].mean()
t['Class1'] = t['Pclass'] == 1
t['Class2'] = t['Pclass'] == 2
t['Age'].fillna(nn, inplace=True)
f = t['Fare'].mean()
t['Fare'].fillna(f, inplace=True) | code |
1007016/cell_5 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
d = pd.read_csv('../input/train.csv')
X = d.loc[:, ['Class1', 'Class2', 'Male', 'Age', 'SibSp', 'Parch', 'Fare']]
y = d['Survived'] | code |
73084725/cell_13 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_train.isna().sum()
df_train.hist() | code |
73084725/cell_23 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_train.isna().sum()
y = df_train['y'].copy()
df_train.drop(['y'], axis='columns', inplace=True)
df_train['y'] = y
df_train.columns | code |
73084725/cell_33 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_test = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/test.csv')
df_test
df_train.isna().sum()
df_test.isna().sum()
table = pd.crosstab(df_train['Holiday'], df_train['Functioning Day'])
y = df_train['y'].copy()
df_train.drop(['y'], axis='columns', inplace=True)
df_train['y'] = y
df_train.columns
df_test.columns
data_all = pd.concat((df_train, df_test), ignore_index=True)
data_all.isna().sum()
data_all.columns | code |
73084725/cell_44 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_test = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/test.csv')
df_test
df_train.isna().sum()
df_test.isna().sum()
table = pd.crosstab(df_train['Holiday'], df_train['Functioning Day'])
y = df_train['y'].copy()
df_train.drop(['y'], axis='columns', inplace=True)
df_train['y'] = y
df_train.columns
df_test.columns
data_all = pd.concat((df_train, df_test), ignore_index=True)
data_all.isna().sum()
data_all.columns
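# Target (mean) encoding: replace each Seasons and month category with the mean of y for that category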
Mean_encoded_Seasons = data_all.groupby('Seasons')['y'].mean().to_dict()
data_all['Seasons'] = data_all['Seasons'].map(Mean_encoded_Seasons)
Mean_encoded_dew = data_all.groupby('month')['y'].mean().to_dict()
data_all['month'] = data_all['month'].map(Mean_encoded_dew)
data_all['Seasons'].value_counts() | code |
73084725/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_train.describe() | code |
73084725/cell_40 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_test = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/test.csv')
df_test
df_train.isna().sum()
df_test.isna().sum()
table = pd.crosstab(df_train['Holiday'], df_train['Functioning Day'])
y = df_train['y'].copy()
df_train.drop(['y'], axis='columns', inplace=True)
df_train['y'] = y
df_train.columns
df_test.columns
data_all = pd.concat((df_train, df_test), ignore_index=True)
data_all.isna().sum()
data_all.columns
Mean_encoded_Seasons = data_all.groupby('Seasons')['y'].mean().to_dict()
data_all['Seasons'] = data_all['Seasons'].map(Mean_encoded_Seasons)
Mean_encoded_dew = data_all.groupby('month')['y'].mean().to_dict()
data_all['month'] = data_all['month'].map(Mean_encoded_dew)
data_all | code |
73084725/cell_29 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_test = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/test.csv')
df_test
df_train.isna().sum()
df_test.isna().sum()
table = pd.crosstab(df_train['Holiday'], df_train['Functioning Day'])
y = df_train['y'].copy()
df_train.drop(['y'], axis='columns', inplace=True)
df_train['y'] = y
df_train.columns
df_test.columns
data_all = pd.concat((df_train, df_test), ignore_index=True)
data_all | code |
73084725/cell_26 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_test = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/test.csv')
df_test
df_train.isna().sum()
df_test.isna().sum()
table = pd.crosstab(df_train['Holiday'], df_train['Functioning Day'])
y = df_train['y'].copy()
df_train.drop(['y'], axis='columns', inplace=True)
df_train['y'] = y
df_train.columns
df_test.columns
data_all = pd.concat((df_train, df_test), ignore_index=True)
data_all | code |
73084725/cell_48 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_test = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/test.csv')
df_test
df_train.isna().sum()
df_test.isna().sum()
table = pd.crosstab(df_train['Holiday'], df_train['Functioning Day'])
y = df_train['y'].copy()
df_train.drop(['y'], axis='columns', inplace=True)
df_train['y'] = y
df_train.columns
df_test.columns
data_all = pd.concat((df_train, df_test), ignore_index=True)
data_all['Date'] = pd.to_datetime(data_all['Date'], format='%d/%m/%Y')
data_all[['Date']]
data_all.isna().sum()
data_all.columns
Mean_encoded_Seasons = data_all.groupby('Seasons')['y'].mean().to_dict()
data_all['Seasons'] = data_all['Seasons'].map(Mean_encoded_Seasons)
Mean_encoded_dew = data_all.groupby('month')['y'].mean().to_dict()
data_all['month'] = data_all['month'].map(Mean_encoded_dew)
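# One-hot encode the remaining categorical columns; drop_first=True drops one level per feature to avoid redundancy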
data_with_dummies = pd.get_dummies(data_all, columns=['Functioning Day', 'day', 'Holiday'], drop_first=True)
data_with_dummies | code |
73084725/cell_54 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_test = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/test.csv')
df_test
df_train.isna().sum()
df_test.isna().sum()
table = pd.crosstab(df_train['Holiday'], df_train['Functioning Day'])
y = df_train['y'].copy()
df_train.drop(['y'], axis='columns', inplace=True)
df_train['y'] = y
df_train.columns
df_test.columns
data_all = pd.concat((df_train, df_test), ignore_index=True)
data_all['Date'] = pd.to_datetime(data_all['Date'], format='%d/%m/%Y')
data_all[['Date']]
data_all.isna().sum()
data_all.columns
Mean_encoded_Seasons = data_all.groupby('Seasons')['y'].mean().to_dict()
data_all['Seasons'] = data_all['Seasons'].map(Mean_encoded_Seasons)
Mean_encoded_dew = data_all.groupby('month')['y'].mean().to_dict()
data_all['month'] = data_all['month'].map(Mean_encoded_dew)
data_with_dummies = pd.get_dummies(data_all, columns=['Functioning Day', 'day', 'Holiday'], drop_first=True)
data_with_dummies
data_with_dummies.drop(['ID', 'Date', 'Dew point temperature(°C)', 'Snowfall (cm)', 'y'], axis='columns', inplace=True)
data_with_dummies['y'] = y
train_data = data_with_dummies[data_with_dummies['y'].notna()]
test_data = data_with_dummies[data_with_dummies['y'].isna()]
train_data.drop(['y'], axis='columns', inplace=True)
test_data.drop(['y'], axis=1, inplace=True) | code |
73084725/cell_11 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_train.isna().sum() | code |
73084725/cell_7 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_train.describe() | code |
73084725/cell_45 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_test = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/test.csv')
df_test
df_train.isna().sum()
df_test.isna().sum()
table = pd.crosstab(df_train['Holiday'], df_train['Functioning Day'])
y = df_train['y'].copy()
df_train.drop(['y'], axis='columns', inplace=True)
df_train['y'] = y
df_train.columns
df_test.columns
data_all = pd.concat((df_train, df_test), ignore_index=True)
data_all.isna().sum()
data_all.columns
Mean_encoded_Seasons = data_all.groupby('Seasons')['y'].mean().to_dict()
data_all['Seasons'] = data_all['Seasons'].map(Mean_encoded_Seasons)
Mean_encoded_dew = data_all.groupby('month')['y'].mean().to_dict()
data_all['month'] = data_all['month'].map(Mean_encoded_dew)
data_all['Holiday'].value_counts() | code |
73084725/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_train.isna().sum()
df_train.describe() | code |
73084725/cell_32 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_test = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/test.csv')
df_test
df_train.isna().sum()
df_test.isna().sum()
table = pd.crosstab(df_train['Holiday'], df_train['Functioning Day'])
y = df_train['y'].copy()
df_train.drop(['y'], axis='columns', inplace=True)
df_train['y'] = y
df_train.columns
df_test.columns
data_all = pd.concat((df_train, df_test), ignore_index=True)
data_all.isna().sum() | code |
73084725/cell_8 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_test = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/test.csv')
df_test | code |
73084725/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_test = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/test.csv')
df_test
df_train.isna().sum()
table = pd.crosstab(df_train['Holiday'], df_train['Functioning Day'])
table | code |
73084725/cell_47 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_test = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/test.csv')
df_test
df_train.isna().sum()
df_test.isna().sum()
table = pd.crosstab(df_train['Holiday'], df_train['Functioning Day'])
y = df_train['y'].copy()
df_train.drop(['y'], axis='columns', inplace=True)
df_train['y'] = y
df_train.columns
df_test.columns
data_all = pd.concat((df_train, df_test), ignore_index=True)
data_all.isna().sum()
data_all.columns
Mean_encoded_Seasons = data_all.groupby('Seasons')['y'].mean().to_dict()
data_all['Seasons'] = data_all['Seasons'].map(Mean_encoded_Seasons)
Mean_encoded_dew = data_all.groupby('month')['y'].mean().to_dict()
data_all['month'] = data_all['month'].map(Mean_encoded_dew)
data_all['day'].value_counts() | code |
73084725/cell_31 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_test = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/test.csv')
df_test
df_train.isna().sum()
df_test.isna().sum()
table = pd.crosstab(df_train['Holiday'], df_train['Functioning Day'])
y = df_train['y'].copy()
df_train.drop(['y'], axis='columns', inplace=True)
df_train['y'] = y
df_train.columns
df_test.columns
data_all = pd.concat((df_train, df_test), ignore_index=True)
data_all | code |
73084725/cell_46 | [
"text_plain_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_test = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/test.csv')
df_test
df_train.isna().sum()
df_test.isna().sum()
table = pd.crosstab(df_train['Holiday'], df_train['Functioning Day'])
y = df_train['y'].copy()
df_train.drop(['y'], axis='columns', inplace=True)
df_train['y'] = y
df_train.columns
df_test.columns
data_all = pd.concat((df_train, df_test), ignore_index=True)
data_all.isna().sum()
data_all.columns
Mean_encoded_Seasons = data_all.groupby('Seasons')['y'].mean().to_dict()
data_all['Seasons'] = data_all['Seasons'].map(Mean_encoded_Seasons)
Mean_encoded_dew = data_all.groupby('month')['y'].mean().to_dict()
data_all['month'] = data_all['month'].map(Mean_encoded_dew)
data_all['Functioning Day'].value_counts() | code |
73084725/cell_24 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_test = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/test.csv')
df_test
df_test.isna().sum()
df_test.columns | code |
73084725/cell_27 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_test = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/test.csv')
df_test
df_train.isna().sum()
df_test.isna().sum()
table = pd.crosstab(df_train['Holiday'], df_train['Functioning Day'])
y = df_train['y'].copy()
df_train.drop(['y'], axis='columns', inplace=True)
df_train['y'] = y
df_train.columns
df_test.columns
data_all = pd.concat((df_train, df_test), ignore_index=True)
data_all['Date'] = pd.to_datetime(data_all['Date'], format='%d/%m/%Y')
data_all[['Date']] | code |
73084725/cell_12 | [
"text_html_output_1.png"
] | import pandas as pd
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv')
df_test = pd.read_csv('/kaggle/input/seoul-bike-rental-ai-pro-iti/test.csv')
df_test
df_test.isna().sum() | code |
73084725/cell_5 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
128039471/cell_21 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/students-performance-in-exams/StudentsPerformance.csv')
df.count()
plt.hist(df[(df['test preparation course'] == 'none') & (df['gender'] == 'male')]['math score'], label='Male-None')
plt.hist(df[(df['test preparation course'] == 'completed') & (df['gender'] == 'male')]['math score'], label='Male-Completed')
plt.legend()
plt.show() | code |
128039471/cell_13 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/students-performance-in-exams/StudentsPerformance.csv')
df.count()
plt.hist(df[df['gender'] == 'male']['math score'], label='male')
plt.legend()
plt.show() | code |
128039471/cell_20 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/students-performance-in-exams/StudentsPerformance.csv')
df.count()
plt.hist(df[df['test preparation course'] == 'none']['math score'], label='All-None')
plt.hist(df[df['test preparation course'] == 'completed']['math score'], label='ALL-completed')
plt.legend()
plt.show() | code |
128039471/cell_6 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/students-performance-in-exams/StudentsPerformance.csv')
df.count()
total_count = len(df)
total_count | code |
128039471/cell_7 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/students-performance-in-exams/StudentsPerformance.csv')
df.count()
men_count = len(df[df['gender'] == 'male'])
female_count = len(df[df['gender'] == 'female'])
print('men_count:', men_count)
print('female_count:', female_count) | code |
128039471/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/students-performance-in-exams/StudentsPerformance.csv')
df.count()
avg_all_test_completed = df[df['test preparation course'] == 'completed']['math score'].mean()
avg_all_test_none = df[df['test preparation course'] == 'none']['math score'].mean()
print('avg_all_test_completed:', avg_all_test_completed)
print('avg_all_test_none:', avg_all_test_none)
avg_men_test_completed = df[(df['test preparation course'] == 'completed') & (df['gender'] == 'male')]['math score'].mean()
avg_men_test_none = df[(df['test preparation course'] == 'none') & (df['gender'] == 'male')]['math score'].mean()
print('avg_men_test_completed:', avg_men_test_completed)
print('avg_men_test_none:', avg_men_test_none)
avg_female_test_completed = df[(df['test preparation course'] == 'completed') & (df['gender'] == 'female')]['math score'].mean()
avg_female_test_none = df[(df['test preparation course'] == 'none') & (df['gender'] == 'female')]['math score'].mean()
print('avg_female_test_completed:', avg_female_test_completed)
print('avg_female_test_none:', avg_female_test_none) | code |
128039471/cell_8 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/students-performance-in-exams/StudentsPerformance.csv')
df.count()
total_count = len(df)
total_count
men_count = len(df[df['gender'] == 'male'])
female_count = len(df[df['gender'] == 'female'])
proportion_men = men_count / total_count
proportion_female = female_count / total_count
print('proportion_men:', proportion_men)
print('proportion_female:', proportion_female) | code |
128039471/cell_16 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/students-performance-in-exams/StudentsPerformance.csv')
df.count()
std_ms_all = df['math score'].std()
std_ms_men = df[df['gender'] == 'male']['math score'].std()
std_ms_female = df[df['gender'] == 'female']['math score'].std()
print('std_ms_all:', std_ms_all)
print('std_ms_men:', std_ms_men)
print('std_ms_female:', std_ms_female) | code |
128039471/cell_3 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/students-performance-in-exams/StudentsPerformance.csv')
df.head() | code |
128039471/cell_14 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/students-performance-in-exams/StudentsPerformance.csv')
df.count()
plt.hist(df[df['gender'] == 'female']['math score'], label='female')
plt.legend()
plt.show() | code |
128039471/cell_22 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/students-performance-in-exams/StudentsPerformance.csv')
df.count()
plt.hist(df[(df['test preparation course'] == 'none') & (df['gender'] == 'female')]['math score'], label='Female-None')
plt.hist(df[(df['test preparation course'] == 'completed') & (df['gender'] == 'female')]['math score'], label='Female-Completed')
plt.legend()
plt.show() | code |
128039471/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/students-performance-in-exams/StudentsPerformance.csv')
df.count()
avg_ms_all = df['math score'].mean()
avg_ms_men = df[df['gender'] == 'male']['math score'].mean()
avg_ms_female = df[df['gender'] == 'female']['math score'].mean()
print('avg_ms_all:', avg_ms_all)
print('avg_ms_men:', avg_ms_men)
print('avg_ms_female:', avg_ms_female) | code |
128039471/cell_12 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/kaggle/input/students-performance-in-exams/StudentsPerformance.csv')
df.count()
plt.hist(df['math score'], label='ALL')
plt.legend()
plt.show() | code |
128039471/cell_5 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/students-performance-in-exams/StudentsPerformance.csv')
df.count() | code |
34150890/cell_2 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import keras
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras import regularizers
from sklearn.model_selection import train_test_split
import cv2
import matplotlib.pyplot as plt
import seaborn as sns
from keras.applications import VGG16
from keras import models
from keras import layers
from keras import optimizers
print(os.listdir('../input')) | code |
34150890/cell_11 | [
"text_plain_output_1.png"
] | import os
import cv2
import keras
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from keras import layers, models, optimizers, regularizers
from keras.applications import VGG16
from keras.layers import Conv2D, MaxPool2D, Flatten, Dense, Dropout, BatchNormalization
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
os.listdir('../input/isl-dataset-double-handed')
train_dir = '../input/isl-dataset-double-handed/ISL_Dataset'
def load_unique():
size_img = 224,224
images_for_plot = []
labels_for_plot = []
for folder in os.listdir(train_dir):
for file in os.listdir(train_dir + '/' + folder):
filepath = train_dir + '/' + folder + '/' + file
image = cv2.imread(filepath)
final_img = cv2.resize(image, size_img)
final_img = cv2.cvtColor(final_img, cv2.COLOR_BGR2RGB)
images_for_plot.append(final_img)
labels_for_plot.append(folder)
break
return images_for_plot, labels_for_plot
images_for_plot, labels_for_plot = load_unique()
print("unique_labels = ", labels_for_plot)
fig = plt.figure(figsize = (15,15))
def plot_images(fig, image, label, row, col, index):
fig.add_subplot(row, col, index)
plt.axis('off')
plt.imshow(image)
plt.title(label)
return
image_index = 0
row = 4
col = 6
for i in range(1,25):
plot_images(fig, images_for_plot[image_index], labels_for_plot[image_index], row, col, i)
image_index = image_index + 1
plt.show()
l1 = []
def load_data():
"""
Loads the images, preprocesses them and returns train and test splits along with the labels.
"""
images = []
labels = []
size = (224, 224)
for folder in os.listdir(train_dir):
for image in os.listdir(train_dir + '/' + folder):
temp_img = cv2.imread(train_dir + '/' + folder + '/' + image)
temp_img = cv2.resize(temp_img, size)
images.append(temp_img)
labels.append(ord(folder) - 97)
images = np.array(images)
images = images.astype('float32') / 255  # cast to float before scaling so the values are not truncated back to uint8
l1 = labels
labels = keras.utils.to_categorical(labels)
X_train, X_test, Y_train, Y_test = train_test_split(images, labels, test_size=0.25)
return (X_train, X_test, Y_train, Y_test, l1)
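# Transfer learning: load VGG16 with ImageNet weights, freeze all but its last 4 layers, and add a new dense classifier head for 26 classes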
def create_model1():
vgg_conv = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
for layer in vgg_conv.layers[:-4]:
layer.trainable = False
model = models.Sequential()
model.add(vgg_conv)
model.add(layers.Flatten())
model.add(layers.Dense(1024, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(26, activation='softmax'))
model.compile(optimizer='adam', loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])
model.summary()
return model
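# Train for 8 epochs with batch size 64, using 15% of the training data for validation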
def fit_model():
model_hist = model.fit(X_train, Y_train, batch_size=64, epochs=8, validation_split=0.15)
return model_hist
model = create_model1()
curr_model_hist = fit_model()
plt.plot(curr_model_hist.history['accuracy'])
plt.plot(curr_model_hist.history['val_accuracy'])
plt.legend(['train', 'validation'], loc='lower right')
plt.title('accuracy plot - train vs validation')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.show()
plt.plot(curr_model_hist.history['loss'])
plt.plot(curr_model_hist.history['val_loss'])
plt.legend(['training loss', 'validation loss'], loc='upper right')
plt.title('loss plot - training vs validation')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show() | code |