path | screenshot_names | code | cell_type |
---|---|---|---|
49124799/cell_7 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/hackerearth-love-in-the-time-of-screens/data.csv')
df.drop(columns=['user_id', 'username'], inplace=True)
for i in df:
print(i, df[i].isna().sum()) | code |
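For reference, the per-column null counts computed by the loop above are available from a single vectorized call; a minimal equivalent, assuming the same CSV:

```python
import pandas as pd

df = pd.read_csv('../input/hackerearth-love-in-the-time-of-screens/data.csv')
df.drop(columns=['user_id', 'username'], inplace=True)
# Series of NaN counts indexed by column name, same information as the loop
print(df.isna().sum())
```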
49124799/cell_18 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/hackerearth-love-in-the-time-of-screens/data.csv')
df.drop(columns=['user_id', 'username'], inplace=True)
df.shape
df.drop(columns=['language', 'bio'], inplace=True)
df.shape
a = df.dtypes
for i in a:
    print(i) | code |
49124799/cell_8 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/hackerearth-love-in-the-time-of-screens/data.csv')
df.drop(columns=['user_id', 'username'], inplace=True)
len(df['language'].value_counts()) | code |
49124799/cell_15 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/hackerearth-love-in-the-time-of-screens/data.csv')
df.drop(columns=['user_id', 'username'], inplace=True)
df.shape
df.drop(columns=['language', 'bio'], inplace=True)
df.shape | code |
49124799/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/hackerearth-love-in-the-time-of-screens/data.csv')
df.head() | code |
49124799/cell_35 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/hackerearth-love-in-the-time-of-screens/data.csv')
data = pd.DataFrame()
data.shape | code |
49124799/cell_31 | [
"text_html_output_1.png"
] | from sklearn.metrics.pairwise import cosine_similarity
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/hackerearth-love-in-the-time-of-screens/data.csv')
df.drop(columns=['user_id', 'username'], inplace=True)
df.shape
lan = []
for i in df['language']:
l = i.split(',')
for j in l:
if j not in lan:
lan.append(j)
XX = {}
for i in lan:
l = []
for j in df['language']:
if i in j.split(','):
l.append(1)
else:
l.append(0)
XX[i] = l
df.drop(columns=['language', 'bio'], inplace=True)
df.shape
a = df.dtypes
df.shape
X = df.values
X.shape
ans = []
for i in X:
l = []
for j in X:
l.append(cosine_similarity([i], [j]) * 100)
ans.append(l)
a = []
for i in ans:
l = []
for j in i:
l.append(j[0][0])
a.append(l)
a[:2] | code |
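The nested loops above call `cosine_similarity` once per pair of rows, which is quadratic in Python-level calls; sklearn can return the whole matrix at once. A sketch, assuming the same feature matrix `X` built in that cell (the `best_match` step is an illustrative extra, not part of the original):

```python
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

sim = cosine_similarity(X) * 100             # full n x n similarity matrix in one call
best_match = np.argsort(-sim, axis=1)[:, 1]  # column 0 is each user matched with itself
```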
49124799/cell_22 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/hackerearth-love-in-the-time-of-screens/data.csv')
df.drop(columns=['user_id', 'username'], inplace=True)
df.shape
df.drop(columns=['language', 'bio'], inplace=True)
df.shape
a = df.dtypes
df.head() | code |
49124799/cell_37 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv('../input/hackerearth-love-in-the-time-of-screens/data.csv')
user_id = df['user_id']
data = pd.DataFrame()
data.shape
data.set_index(user_id, inplace=True)
data.head() | code |
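As dumped, this cell cannot run alone: `data` is created empty, yet `set_index` is handed the full `user_id` column. The cell that populates `data` was evidently dropped by the dump; a purely hypothetical reconstruction, assuming `a` is the similarity matrix from cell_31:

```python
# Hypothetical: rebuild `data` from the pairwise-similarity lists of cell_31
data = pd.DataFrame(a)
data.set_index(user_id, inplace=True)
```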
2000829/cell_9 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = train.drop(['PassengerId', 'Parch', 'Ticket', 'Cabin', 'Embarked'], axis=1)
test = test.drop(['Parch', 'Ticket', 'Cabin', 'Embarked'], axis=1)
combine = [train, test]
for dataset in combine:
dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
for dataset in combine:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace(['Countess', 'Sir'], 'Royal')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
title_mapping = {'Mr': 1, 'Miss': 2, 'Mrs': 3, 'Master': 4, 'Royal': 5, 'Rare': 6}
for dataset in combine:
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
train['Age'] = train['Age'].fillna(-0.5)
test['Age'] = test['Age'].fillna(-0.5)
bins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf]
labels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
train['AgeGroup'] = pd.cut(train['Age'], bins, labels=labels)
test['AgeGroup'] = pd.cut(test['Age'], bins, labels=labels)
mr_age = train[train['Title'] == 1]['AgeGroup'].mode()
miss_age = train[train['Title'] == 2]['AgeGroup'].mode()
mrs_age = train[train['Title'] == 3]['AgeGroup'].mode()
master_age = train[train['Title'] == 4]['AgeGroup'].mode()
royal_age = train[train['Title'] == 5]['AgeGroup'].mode()
rare_age = train[train['Title'] == 6]['AgeGroup'].mode()
age_title_mapping = {1: 'Young Adult', 2: 'Student', 3: 'Adult', 4: 'Baby', 5: 'Adult', 6: 'Adult'}
for x in range(len(train['AgeGroup'])):
    if train.loc[x, 'AgeGroup'] == 'Unknown':
        train.loc[x, 'AgeGroup'] = age_title_mapping[train.loc[x, 'Title']]
for x in range(len(test['AgeGroup'])):
    if test.loc[x, 'AgeGroup'] == 'Unknown':
        test.loc[x, 'AgeGroup'] = age_title_mapping[test.loc[x, 'Title']]
age_mapping = {'Baby': 1, 'Child': 2, 'Teenager': 3, 'Student': 4, 'Young Adult': 5, 'Adult': 6, 'Senior': 7}
train['AgeGroup'] = train['AgeGroup'].map(age_mapping)
test['AgeGroup'] = test['AgeGroup'].map(age_mapping)
train = train.drop(['Age'], axis=1)
test = test.drop(['Age'], axis=1)
for x in range(len(test['Fare'])):
    if pd.isnull(test.loc[x, 'Fare']):
        pclass = test.loc[x, 'Pclass']
        test.loc[x, 'Fare'] = round(train[train['Pclass'] == pclass]['Fare'].mean(), 4)
train['FareBand'] = pd.qcut(train['Fare'], 4, labels=[1, 2, 3, 4])
test['FareBand'] = pd.qcut(test['Fare'], 4, labels=[1, 2, 3, 4])
train = train.drop(['Fare'], axis=1)
test = test.drop(['Fare'], axis=1)
train = train.drop(['Title', 'Name'], axis=1)
test = test.drop(['Title', 'Name'], axis=1)
sex_mapping = {'male': 0, 'female': 1}
train['Sex'] = train['Sex'].map(sex_mapping)
test['Sex'] = test['Sex'].map(sex_mapping)
train.head() | code |
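The element-wise `Unknown` back-fill in the cell above can also be written as a vectorized assignment; a sketch assuming the same frames and `age_title_mapping`:

```python
# Vectorized equivalent of the AgeGroup back-fill loops
mask = train['AgeGroup'] == 'Unknown'
train.loc[mask, 'AgeGroup'] = train.loc[mask, 'Title'].map(age_title_mapping)
mask = test['AgeGroup'] == 'Unknown'
test.loc[mask, 'AgeGroup'] = test.loc[mask, 'Title'].map(age_title_mapping)
```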
2000829/cell_4 | [
"text_plain_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = train.drop(['PassengerId', 'Parch', 'Ticket', 'Cabin', 'Embarked'], axis=1)
test = test.drop(['Parch', 'Ticket', 'Cabin', 'Embarked'], axis=1)
combine = [train, test]
for dataset in combine:
dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
for dataset in combine:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace(['Countess', 'Sir'], 'Royal')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
title_mapping = {'Mr': 1, 'Miss': 2, 'Mrs': 3, 'Master': 4, 'Royal': 5, 'Rare': 6}
for dataset in combine:
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
train.describe(include='all') | code |
2000829/cell_1 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import warnings
import tensorflow as tf
import numpy as np
import pandas as pd
import seaborn as sns
warnings.filterwarnings('ignore') | code |
2000829/cell_7 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = train.drop(['PassengerId', 'Parch', 'Ticket', 'Cabin', 'Embarked'], axis=1)
test = test.drop(['Parch', 'Ticket', 'Cabin', 'Embarked'], axis=1)
combine = [train, test]
for dataset in combine:
dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
for dataset in combine:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace(['Countess', 'Sir'], 'Royal')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
title_mapping = {'Mr': 1, 'Miss': 2, 'Mrs': 3, 'Master': 4, 'Royal': 5, 'Rare': 6}
for dataset in combine:
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
train['Age'] = train['Age'].fillna(-0.5)
test['Age'] = test['Age'].fillna(-0.5)
bins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf]
labels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
train['AgeGroup'] = pd.cut(train['Age'], bins, labels=labels)
test['AgeGroup'] = pd.cut(test['Age'], bins, labels=labels)
mr_age = train[train['Title'] == 1]['AgeGroup'].mode()
miss_age = train[train['Title'] == 2]['AgeGroup'].mode()
mrs_age = train[train['Title'] == 3]['AgeGroup'].mode()
master_age = train[train['Title'] == 4]['AgeGroup'].mode()
royal_age = train[train['Title'] == 5]['AgeGroup'].mode()
rare_age = train[train['Title'] == 6]['AgeGroup'].mode()
age_title_mapping = {1: 'Young Adult', 2: 'Student', 3: 'Adult', 4: 'Baby', 5: 'Adult', 6: 'Adult'}
for x in range(len(train['AgeGroup'])):
    if train.loc[x, 'AgeGroup'] == 'Unknown':
        train.loc[x, 'AgeGroup'] = age_title_mapping[train.loc[x, 'Title']]
for x in range(len(test['AgeGroup'])):
    if test.loc[x, 'AgeGroup'] == 'Unknown':
        test.loc[x, 'AgeGroup'] = age_title_mapping[test.loc[x, 'Title']]
age_mapping = {'Baby': 1, 'Child': 2, 'Teenager': 3, 'Student': 4, 'Young Adult': 5, 'Adult': 6, 'Senior': 7}
train['AgeGroup'] = train['AgeGroup'].map(age_mapping)
test['AgeGroup'] = test['AgeGroup'].map(age_mapping)
train = train.drop(['Age'], axis=1)
test = test.drop(['Age'], axis=1)
for x in range(len(test['Fare'])):
    if pd.isnull(test.loc[x, 'Fare']):
        pclass = test.loc[x, 'Pclass']
        test.loc[x, 'Fare'] = round(train[train['Pclass'] == pclass]['Fare'].mean(), 4)
train['FareBand'] = pd.qcut(train['Fare'], 4, labels=[1, 2, 3, 4])
test['FareBand'] = pd.qcut(test['Fare'], 4, labels=[1, 2, 3, 4])
train = train.drop(['Fare'], axis=1)
test = test.drop(['Fare'], axis=1)
test.describe(include='all') | code |
2000829/cell_15 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import tensorflow as tf
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = train.drop(['PassengerId', 'Parch', 'Ticket', 'Cabin', 'Embarked'], axis=1)
test = test.drop(['Parch', 'Ticket', 'Cabin', 'Embarked'], axis=1)
combine = [train, test]
for dataset in combine:
dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
for dataset in combine:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace(['Countess', 'Sir'], 'Royal')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
title_mapping = {'Mr': 1, 'Miss': 2, 'Mrs': 3, 'Master': 4, 'Royal': 5, 'Rare': 6}
for dataset in combine:
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
train['Age'] = train['Age'].fillna(-0.5)
test['Age'] = test['Age'].fillna(-0.5)
bins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf]
labels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
train['AgeGroup'] = pd.cut(train['Age'], bins, labels=labels)
test['AgeGroup'] = pd.cut(test['Age'], bins, labels=labels)
mr_age = train[train['Title'] == 1]['AgeGroup'].mode()
miss_age = train[train['Title'] == 2]['AgeGroup'].mode()
mrs_age = train[train['Title'] == 3]['AgeGroup'].mode()
master_age = train[train['Title'] == 4]['AgeGroup'].mode()
royal_age = train[train['Title'] == 5]['AgeGroup'].mode()
rare_age = train[train['Title'] == 6]['AgeGroup'].mode()
age_title_mapping = {1: 'Young Adult', 2: 'Student', 3: 'Adult', 4: 'Baby', 5: 'Adult', 6: 'Adult'}
for x in range(len(train['AgeGroup'])):
    if train.loc[x, 'AgeGroup'] == 'Unknown':
        train.loc[x, 'AgeGroup'] = age_title_mapping[train.loc[x, 'Title']]
for x in range(len(test['AgeGroup'])):
    if test.loc[x, 'AgeGroup'] == 'Unknown':
        test.loc[x, 'AgeGroup'] = age_title_mapping[test.loc[x, 'Title']]
age_mapping = {'Baby': 1, 'Child': 2, 'Teenager': 3, 'Student': 4, 'Young Adult': 5, 'Adult': 6, 'Senior': 7}
train['AgeGroup'] = train['AgeGroup'].map(age_mapping)
test['AgeGroup'] = test['AgeGroup'].map(age_mapping)
train = train.drop(['Age'], axis=1)
test = test.drop(['Age'], axis=1)
for x in range(len(test['Fare'])):
    if pd.isnull(test.loc[x, 'Fare']):
        pclass = test.loc[x, 'Pclass']
        test.loc[x, 'Fare'] = round(train[train['Pclass'] == pclass]['Fare'].mean(), 4)
train['FareBand'] = pd.qcut(train['Fare'], 4, labels=[1, 2, 3, 4])
test['FareBand'] = pd.qcut(test['Fare'], 4, labels=[1, 2, 3, 4])
train = train.drop(['Fare'], axis=1)
test = test.drop(['Fare'], axis=1)
train = train.drop(['Title', 'Name'], axis=1)
test = test.drop(['Title', 'Name'], axis=1)
sex_mapping = {'male': 0, 'female': 1}
train['Sex'] = train['Sex'].map(sex_mapping)
test['Sex'] = test['Sex'].map(sex_mapping)
X = tf.placeholder(np.float32, [None, 5])
Y = tf.placeholder(np.float32, [None, 1])
def xavier_init(n_inputs, n_outputs, uniform=True):
if uniform:
init_range = tf.sqrt(6.0 / (n_inputs + n_outputs))
return tf.random_uniform_initializer(-init_range, init_range)
else:
stddev = tf.sqrt(3.0 / (n_inputs + n_outputs))
return tf.truncated_normal_initializer(stddev=stddev)
W1 = tf.get_variable('W1', shape=[5, 40], initializer=xavier_init(5, 40))
W2 = tf.get_variable('W2', shape=[40, 40], initializer=xavier_init(40, 40))
W3 = tf.get_variable('W3', shape=[40, 1], initializer=xavier_init(40, 1))
B1 = tf.Variable(tf.random_normal([40]))
B2 = tf.Variable(tf.random_normal([40]))
B3 = tf.Variable(tf.random_normal([1]))
keep_prob = tf.placeholder(tf.float32)
L1 = tf.nn.relu(tf.add(tf.matmul(X, W1), B1))
L1 = tf.nn.dropout(L1, keep_prob=keep_prob)
L2 = tf.nn.relu(tf.add(tf.matmul(L1, W2), B2))
L2 = tf.nn.dropout(L2, keep_prob=keep_prob)
hypothesis = tf.add(tf.matmul(L2, W3), B3)
trainY = pd.DataFrame(train['Survived'])
train = train.drop(['Survived'], axis=1)
trainx = np.array(train, dtype=np.float32)
trainy = np.array(trainY, dtype=np.float32)
print(trainy.dtype) | code |
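The dump stops after building the graph and the numpy arrays; the training step itself is not shown. A hypothetical TF1-style loop, assuming the `cost` and `optimizer` ops defined in cell_12 of this notebook:

```python
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(1000):
        # feed the whole training set each step; keep_prob 0.7 is illustrative
        _, c = sess.run([optimizer, cost],
                        feed_dict={X: trainx, Y: trainy, keep_prob: 0.7})
        if epoch % 100 == 0:
            print(epoch, c)
```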
2000829/cell_3 | [
"text_html_output_1.png"
] | import pandas as pd
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = train.drop(['PassengerId', 'Parch', 'Ticket', 'Cabin', 'Embarked'], axis=1)
test = test.drop(['Parch', 'Ticket', 'Cabin', 'Embarked'], axis=1)
test.describe(include='all') | code |
2000829/cell_12 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
import tensorflow as tf
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = train.drop(['PassengerId', 'Parch', 'Ticket', 'Cabin', 'Embarked'], axis=1)
test = test.drop(['Parch', 'Ticket', 'Cabin', 'Embarked'], axis=1)
combine = [train, test]
for dataset in combine:
dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\\.', expand=False)
for dataset in combine:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Jonkheer', 'Dona'], 'Rare')
    dataset['Title'] = dataset['Title'].replace(['Countess', 'Sir'], 'Royal')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss')
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')
title_mapping = {'Mr': 1, 'Miss': 2, 'Mrs': 3, 'Master': 4, 'Royal': 5, 'Rare': 6}
for dataset in combine:
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
train['Age'] = train['Age'].fillna(-0.5)
test['Age'] = test['Age'].fillna(-0.5)
bins = [-1, 0, 5, 12, 18, 24, 35, 60, np.inf]
labels = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
train['AgeGroup'] = pd.cut(train['Age'], bins, labels=labels)
test['AgeGroup'] = pd.cut(test['Age'], bins, labels=labels)
mr_age = train[train['Title'] == 1]['AgeGroup'].mode()
miss_age = train[train['Title'] == 2]['AgeGroup'].mode()
mrs_age = train[train['Title'] == 3]['AgeGroup'].mode()
master_age = train[train['Title'] == 4]['AgeGroup'].mode()
royal_age = train[train['Title'] == 5]['AgeGroup'].mode()
rare_age = train[train['Title'] == 6]['AgeGroup'].mode()
age_title_mapping = {1: 'Young Adult', 2: 'Student', 3: 'Adult', 4: 'Baby', 5: 'Adult', 6: 'Adult'}
for x in range(len(train['AgeGroup'])):
    if train.loc[x, 'AgeGroup'] == 'Unknown':
        train.loc[x, 'AgeGroup'] = age_title_mapping[train.loc[x, 'Title']]
for x in range(len(test['AgeGroup'])):
    if test.loc[x, 'AgeGroup'] == 'Unknown':
        test.loc[x, 'AgeGroup'] = age_title_mapping[test.loc[x, 'Title']]
age_mapping = {'Baby': 1, 'Child': 2, 'Teenager': 3, 'Student': 4, 'Young Adult': 5, 'Adult': 6, 'Senior': 7}
train['AgeGroup'] = train['AgeGroup'].map(age_mapping)
test['AgeGroup'] = test['AgeGroup'].map(age_mapping)
train = train.drop(['Age'], axis=1)
test = test.drop(['Age'], axis=1)
X = tf.placeholder(np.float32, [None, 5])
Y = tf.placeholder(np.float32, [None, 1])
def xavier_init(n_inputs, n_outputs, uniform=True):
if uniform:
init_range = tf.sqrt(6.0 / (n_inputs + n_outputs))
return tf.random_uniform_initializer(-init_range, init_range)
else:
stddev = tf.sqrt(3.0 / (n_inputs + n_outputs))
return tf.truncated_normal_initializer(stddev=stddev)
W1 = tf.get_variable('W1', shape=[5, 40], initializer=xavier_init(5, 40))
W2 = tf.get_variable('W2', shape=[40, 40], initializer=xavier_init(40, 40))
W3 = tf.get_variable('W3', shape=[40, 1], initializer=xavier_init(40, 1))
B1 = tf.Variable(tf.random_normal([40]))
B2 = tf.Variable(tf.random_normal([40]))
B3 = tf.Variable(tf.random_normal([1]))
keep_prob = tf.placeholder(tf.float32)
L1 = tf.nn.relu(tf.add(tf.matmul(X, W1), B1))
L1 = tf.nn.dropout(L1, keep_prob=keep_prob)
L2 = tf.nn.relu(tf.add(tf.matmul(L1, W2), B2))
L2 = tf.nn.dropout(L2, keep_prob=keep_prob)
hypothesis = tf.add(tf.matmul(L2, W3), B3)
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=hypothesis, labels=Y))
learning_rate = 0.0005
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
init = tf.global_variables_initializer() | code |
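The cell above uses the TF1 graph API. For readers on TF2, a minimal Keras sketch of the same 5-40-40-1 logit network; the fixed dropout rate is an assumption, since the original feeds `keep_prob` at run time:

```python
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(40, activation='relu', input_shape=(5,)),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(40, activation='relu'),
    tf.keras.layers.Dropout(0.3),
    tf.keras.layers.Dense(1),  # raw logit, matching `hypothesis`
])
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0005),
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True))
```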
128021577/cell_13 | [
"text_html_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/email-spam-classification-dataset-csv/emails.csv')
df = df.drop(columns=['Email No.'])
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
sns.countplot(data=df, x='Prediction', color=sns.color_palette()[0])
plt.show() | code |
128021577/cell_9 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/email-spam-classification-dataset-csv/emails.csv')
df = df.drop(columns=['Email No.'])
df.shape
df.isna().sum() | code |
128021577/cell_34 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/email-spam-classification-dataset-csv/emails.csv')
df = df.drop(columns=['Email No.'])
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
results = {}
plt.figure(figsize=(10, 5))
plt.bar(results.keys(), [result['accuracy'] for result in results.values()])
plt.title('Accuracy of Different Models')
plt.xlabel('Models')
plt.ylabel('Accuracy')
plt.ylim(0, 1)
plt.show() | code |
128021577/cell_33 | [
"text_plain_output_1.png"
] | results = {}
results | code |
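`results` is empty as dumped, yet the plotting cells read `result['accuracy']`, `result['precision']`, and `result['recall']` from it; the populating cell was omitted. A hypothetical sketch of the structure those cells assume, using the split and predictions from cell_26:

```python
from sklearn.metrics import accuracy_score, precision_score, recall_score

# Hypothetical: one entry per model, each a dict of test-set metrics
results['LogisticRegression'] = {
    'accuracy': accuracy_score(y_test, LR_y_pred),
    'precision': precision_score(y_test, LR_y_pred),
    'recall': recall_score(y_test, LR_y_pred),
}
```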
128021577/cell_26 | [
"text_plain_output_1.png"
] | from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LogisticRegression
LR = LogisticRegression()
LR.fit(x_train, y_train)
LR_y_pred = LR.predict(x_test) | code |
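`x_train`, `y_train`, and `x_test` come from a split cell this dump omits. A hypothetical reconstruction, assuming `Prediction` is the label column (as cell_13 suggests):

```python
from sklearn.model_selection import train_test_split

X = df.drop(columns=['Prediction'])
y = df['Prediction']
# test_size and random_state are illustrative choices
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
```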
128021577/cell_7 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/email-spam-classification-dataset-csv/emails.csv')
df = df.drop(columns=['Email No.'])
df.shape | code |
128021577/cell_8 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/email-spam-classification-dataset-csv/emails.csv')
df = df.drop(columns=['Email No.'])
df.shape
df.describe() | code |
128021577/cell_35 | [
"image_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/email-spam-classification-dataset-csv/emails.csv')
df = df.drop(columns=['Email No.'])
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
results = {}
plt.ylim(0, 1)
plt.figure(figsize=(10, 5))
plt.bar(results.keys(), [result['precision'] for result in results.values()])
plt.title('Precision of Different Models')
plt.xlabel('Models')
plt.ylabel('Precision')
plt.ylim(0, 1)
plt.show() | code |
128021577/cell_10 | [
"text_html_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/email-spam-classification-dataset-csv/emails.csv')
df = df.drop(columns=['Email No.'])
df.shape
df.isna().sum()
df.duplicated().sum() | code |
128021577/cell_12 | [
"text_plain_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/email-spam-classification-dataset-csv/emails.csv')
df = df.drop(columns=['Email No.'])
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum() | code |
128021577/cell_5 | [
"image_output_1.png"
] | import pandas as pd
df = pd.read_csv('/kaggle/input/email-spam-classification-dataset-csv/emails.csv')
df.head() | code |
128021577/cell_36 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df = pd.read_csv('/kaggle/input/email-spam-classification-dataset-csv/emails.csv')
df = df.drop(columns=['Email No.'])
df.shape
df.isna().sum()
df.duplicated().sum()
df = df.drop_duplicates()
df.duplicated().sum()
results = {}
plt.ylim(0, 1)
plt.ylim(0, 1)
plt.figure(figsize=(10, 5))
plt.bar(results.keys(), [result['recall'] for result in results.values()])
plt.title('Recall of Different Models')
plt.xlabel('Models')
plt.ylabel('Recall')
plt.ylim(0, 1)
plt.show() | code |
1007017/cell_6 | [
"text_plain_output_1.png"
] | import pandas
TRAIN_PATH = '../input/train.csv'
TEST_PATH = '../input/test.csv'
train = pandas.read_csv(TRAIN_PATH)
test = pandas.read_csv(TEST_PATH)
train.isnull().any() | code |
1007017/cell_26 | [
"text_plain_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.metrics import mean_absolute_error
age_rf = RandomForestRegressor()
age_rf.fit(age_train[['Pclass', 'encodedTitle', 'SibSpGroup1', 'SibSpGroup2', 'SibSpGroup3', 'ParChGT2']], age_train['Age'])
age_validation = age_validation.assign(rf_age=age_rf.predict(age_validation[['Pclass', 'encodedTitle', 'SibSpGroup1', 'SibSpGroup2', 'SibSpGroup3', 'ParChGT2']]))
mean_absolute_error(age_validation['Age'], age_validation['rf_age'], sample_weight=None, multioutput='uniform_average') | code |
1007017/cell_7 | [
"text_plain_output_1.png"
] | import pandas
TRAIN_PATH = '../input/train.csv'
TEST_PATH = '../input/test.csv'
train = pandas.read_csv(TRAIN_PATH)
test = pandas.read_csv(TEST_PATH)
test.isnull().any() | code |
1007017/cell_18 | [
"text_plain_output_1.png"
] | from sklearn import preprocessing
import pandas
import re
TRAIN_PATH = '../input/train.csv'
TEST_PATH = '../input/test.csv'
train = pandas.read_csv(TRAIN_PATH)
test = pandas.read_csv(TEST_PATH)
train.isnull().any()
test.isnull().any()
def deriveTitles(s):
    title = re.search('(?:\\S )(?P<title>\\w*)', s).group('title')
    # collapse the raw honorific into a coarse social-status group
    title_groups = {'Mr': 'adult', 'Don': 'gentry', 'Dona': 'gentry', 'Miss': 'miss', 'Col': 'military', 'Rev': 'other', 'Lady': 'gentry', 'Master': 'child', 'Mme': 'adult', 'Captain': 'military', 'Dr': 'other', 'Mrs': 'adult', 'Sir': 'gentry', 'Jonkheer': 'gentry', 'Mlle': 'miss', 'Major': 'military', 'Ms': 'miss', 'the Countess': 'gentry'}
    return title_groups.get(title, 'other')
train['title'] = train.Name.apply(deriveTitles)
test['title'] = test.Name.apply(deriveTitles)
le = preprocessing.LabelEncoder()
titles = ['adult', 'gentry', 'miss', 'military', 'other', 'child']
le.fit(titles)
train['encodedTitle'] = le.transform(train['title']).astype('int')
test['encodedTitle'] = le.transform(test['title']).astype('int')
train['Embarked'] = train['Embarked'].fillna('S')
combined = pandas.concat([train, test])
combined['ParChCategories'] = combined.Parch > 2
combined.boxplot(column='Age', by='Pclass') | code |
1007017/cell_28 | [
"text_html_output_1.png",
"text_plain_output_1.png"
] | from sklearn import linear_model
from sklearn import preprocessing
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
import pandas
import re
TRAIN_PATH = '../input/train.csv'
TEST_PATH = '../input/test.csv'
train = pandas.read_csv(TRAIN_PATH)
test = pandas.read_csv(TEST_PATH)
train.isnull().any()
test.isnull().any()
def deriveTitles(s):
    title = re.search('(?:\\S )(?P<title>\\w*)', s).group('title')
    # collapse the raw honorific into a coarse social-status group
    title_groups = {'Mr': 'adult', 'Don': 'gentry', 'Dona': 'gentry', 'Miss': 'miss', 'Col': 'military', 'Rev': 'other', 'Lady': 'gentry', 'Master': 'child', 'Mme': 'adult', 'Captain': 'military', 'Dr': 'other', 'Mrs': 'adult', 'Sir': 'gentry', 'Jonkheer': 'gentry', 'Mlle': 'miss', 'Major': 'military', 'Ms': 'miss', 'the Countess': 'gentry'}
    return title_groups.get(title, 'other')
train['title'] = train.Name.apply(deriveTitles)
test['title'] = test.Name.apply(deriveTitles)
le = preprocessing.LabelEncoder()
titles = ['adult', 'gentry', 'miss', 'military', 'other', 'child']
le.fit(titles)
train['encodedTitle'] = le.transform(train['title']).astype('int')
test['encodedTitle'] = le.transform(test['title']).astype('int')
train['Embarked'] = train['Embarked'].fillna('S')
combined = pandas.concat([train, test])
combined['ParChCategories'] = combined.Parch > 2
combined = combined.assign(SibSpGroup1=combined['SibSp'] < 2)
combined = combined.assign(SibSpGroup2=combined['SibSp'].between(2, 3, inclusive='both'))
combined = combined.assign(SibSpGroup3=combined['SibSp'] > 2)
combined = combined.assign(ParChGT2=combined['Parch'] > 2)
age_train, age_validation = train_test_split(combined[combined.Age.notnull()], test_size=0.2)
age_learn = combined[combined.Age.isnull()]
age_rf = RandomForestRegressor()
age_rf.fit(age_train[['Pclass', 'encodedTitle', 'SibSpGroup1', 'SibSpGroup2', 'SibSpGroup3', 'ParChGT2']], age_train['Age'])
age_validation = age_validation.assign(rf_age=age_rf.predict(age_validation[['Pclass', 'encodedTitle', 'SibSpGroup1', 'SibSpGroup2', 'SibSpGroup3', 'ParChGT2']]))
mean_absolute_error(age_validation['Age'], age_validation['rf_age'], sample_weight=None, multioutput='uniform_average')
age_encoder = preprocessing.OneHotEncoder().fit(combined[['Pclass', 'encodedTitle', 'SibSpGroup1', 'SibSpGroup2', 'SibSpGroup3', 'ParChGT2']])
age_training_encoded = age_encoder.transform(age_train[['Pclass', 'encodedTitle', 'SibSpGroup1', 'SibSpGroup2', 'SibSpGroup3', 'ParChGT2']])
age_validation_encoded = age_encoder.transform(age_validation[['Pclass', 'encodedTitle', 'SibSpGroup1', 'SibSpGroup2', 'SibSpGroup3', 'ParChGT2']])
age_model = linear_model.RidgeCV(alphas=[0.1, 0.2, 0.3, 0.4, 0.5])
age_estimator = age_model.fit(age_training_encoded, age_train['Age'])
linear_age_predictions = age_estimator.predict(age_validation_encoded)
mean_absolute_error(age_validation['Age'], linear_age_predictions, sample_weight=None, multioutput='uniform_average') | code |
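`age_learn` (the rows with missing `Age`) is built but never used in the dumped cells. A hypothetical next step consistent with the setup above, imputing those ages with the fitted ridge model:

```python
# Hypothetical: predict the missing ages and write them back into `combined`
feature_cols = ['Pclass', 'encodedTitle', 'SibSpGroup1', 'SibSpGroup2', 'SibSpGroup3', 'ParChGT2']
age_learn_encoded = age_encoder.transform(age_learn[feature_cols])
combined.loc[combined.Age.isnull(), 'Age'] = age_estimator.predict(age_learn_encoded)
```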
2014045/cell_21 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.concat((df16, df17))
df2 = df[['teamAbbr', 'teamPTS', 'teamPTS1', 'teamPTS2', 'opptPTS', 'opptPTS1', 'opptPTS2']].copy()
df2.loc[:, 'teamPTSH1'] = df2['teamPTS1'] + df['teamPTS2']
df2.loc[:, 'opptPTSH1'] = df2['opptPTS1'] + df['opptPTS2']
df2.loc[:, 'ptdiffH1'] = df2['teamPTSH1'] - df2['opptPTSH1']
df2.loc[:, 'ptdiff'] = df2['teamPTS'] - df2['opptPTS']
df2
def make_point_diff_mat(df):
point_diff_df = df[['ptdiffH1', 'ptdiff']]
    point_diff = point_diff_df.to_numpy()
return point_diff
def make_bool_point_diff_mat(df):
point_diff = make_point_diff_mat(df)
bool_point_diff = np.copy(point_diff)
bool_point_diff[bool_point_diff > 0] = 1
bool_point_diff[bool_point_diff < 0] = -1
return bool_point_diff
def prob_of_winning_given(bool_point_diff, event):
return np.mean((bool_point_diff[bool_point_diff[:, 0] == event][:, 1] + 1) / 2)
point_diff = make_point_diff_mat(df2)
np.corrcoef(point_diff.T)
bool_point_diff = make_bool_point_diff_mat(df2)
np.corrcoef(bool_point_diff.T)
max_prob_winning_DOWN_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
df_team = df2[df.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, DOWN_AT_HALF)
if prob > max_prob_winning_DOWN_at_half:
max_prob_winning_DOWN_at_half = prob
max_team = abbr
max_prob_winning_UP_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
df_team = df2[df.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, UP_AT_HALF)
if prob > max_prob_winning_UP_at_half:
max_prob_winning_UP_at_half = prob
max_team = abbr
df_cavs = df2[df2['teamAbbr'] == 'CLE']
point_diff_cavs = make_point_diff_mat(df_cavs)
np.corrcoef(point_diff_cavs.T)
bool_point_diff_cavs = make_bool_point_diff_mat(df_cavs)
np.corrcoef(bool_point_diff_cavs.T)
prob_of_winning_given(bool_point_diff_cavs, DOWN_AT_HALF) | code |
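`DOWN_AT_HALF` and `UP_AT_HALF` are defined in a cell this dump omits; given the ±1 encoding produced by `make_bool_point_diff_mat`, they are presumably -1 and 1. A toy worked example of `prob_of_winning_given` under that assumption:

```python
import numpy as np

DOWN_AT_HALF, UP_AT_HALF = -1, 1  # assumed values, matching the +/-1 encoding

# column 0: sign of the halftime differential, column 1: sign of the final one
toy = np.array([[-1, 1], [-1, -1], [-1, 1], [1, 1]])
trailing = toy[toy[:, 0] == DOWN_AT_HALF]   # games trailing at the half
print(np.mean((trailing[:, 1] + 1) / 2))    # -1/1 mapped to 0/1: 2 wins of 3 ~ 0.667
```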
2014045/cell_13 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.concat((df16, df17))
df2 = df[['teamAbbr', 'teamPTS', 'teamPTS1', 'teamPTS2', 'opptPTS', 'opptPTS1', 'opptPTS2']].copy()
df2.loc[:, 'teamPTSH1'] = df2['teamPTS1'] + df['teamPTS2']
df2.loc[:, 'opptPTSH1'] = df2['opptPTS1'] + df['opptPTS2']
df2.loc[:, 'ptdiffH1'] = df2['teamPTSH1'] - df2['opptPTSH1']
df2.loc[:, 'ptdiff'] = df2['teamPTS'] - df2['opptPTS']
df2
def make_point_diff_mat(df):
point_diff_df = df[['ptdiffH1', 'ptdiff']]
    point_diff = point_diff_df.to_numpy()
return point_diff
def make_bool_point_diff_mat(df):
point_diff = make_point_diff_mat(df)
bool_point_diff = np.copy(point_diff)
bool_point_diff[bool_point_diff > 0] = 1
bool_point_diff[bool_point_diff < 0] = -1
return bool_point_diff
def prob_of_winning_given(bool_point_diff, event):
return np.mean((bool_point_diff[bool_point_diff[:, 0] == event][:, 1] + 1) / 2)
max_prob_winning_DOWN_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
df_team = df2[df.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, DOWN_AT_HALF)
if prob > max_prob_winning_DOWN_at_half:
max_prob_winning_DOWN_at_half = prob
max_team = abbr
print(max_team)
print(max_prob_winning_DOWN_at_half) | code |
2014045/cell_9 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.concat((df16, df17))
df2 = df[['teamAbbr', 'teamPTS', 'teamPTS1', 'teamPTS2', 'opptPTS', 'opptPTS1', 'opptPTS2']].copy()
df2.loc[:, 'teamPTSH1'] = df2['teamPTS1'] + df['teamPTS2']
df2.loc[:, 'opptPTSH1'] = df2['opptPTS1'] + df['opptPTS2']
df2.loc[:, 'ptdiffH1'] = df2['teamPTSH1'] - df2['opptPTSH1']
df2.loc[:, 'ptdiff'] = df2['teamPTS'] - df2['opptPTS']
df2
def make_point_diff_mat(df):
point_diff_df = df[['ptdiffH1', 'ptdiff']]
    point_diff = point_diff_df.to_numpy()
return point_diff
def make_bool_point_diff_mat(df):
point_diff = make_point_diff_mat(df)
bool_point_diff = np.copy(point_diff)
bool_point_diff[bool_point_diff > 0] = 1
bool_point_diff[bool_point_diff < 0] = -1
return bool_point_diff
def prob_of_winning_given(bool_point_diff, event):
return np.mean((bool_point_diff[bool_point_diff[:, 0] == event][:, 1] + 1) / 2)
point_diff = make_point_diff_mat(df2)
np.corrcoef(point_diff.T)
bool_point_diff = make_bool_point_diff_mat(df2)
np.corrcoef(bool_point_diff.T) | code |
2014045/cell_25 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.concat((df16, df17))
df2 = df[['teamAbbr', 'teamPTS', 'teamPTS1', 'teamPTS2', 'opptPTS', 'opptPTS1', 'opptPTS2']].copy()
df2.loc[:, 'teamPTSH1'] = df2['teamPTS1'] + df['teamPTS2']
df2.loc[:, 'opptPTSH1'] = df2['opptPTS1'] + df['opptPTS2']
df2.loc[:, 'ptdiffH1'] = df2['teamPTSH1'] - df2['opptPTSH1']
df2.loc[:, 'ptdiff'] = df2['teamPTS'] - df2['opptPTS']
df2
def make_point_diff_mat(df):
point_diff_df = df[['ptdiffH1', 'ptdiff']]
    point_diff = point_diff_df.to_numpy()
return point_diff
def make_bool_point_diff_mat(df):
point_diff = make_point_diff_mat(df)
bool_point_diff = np.copy(point_diff)
bool_point_diff[bool_point_diff > 0] = 1
bool_point_diff[bool_point_diff < 0] = -1
return bool_point_diff
def prob_of_winning_given(bool_point_diff, event):
return np.mean((bool_point_diff[bool_point_diff[:, 0] == event][:, 1] + 1) / 2)
point_diff = make_point_diff_mat(df2)
np.corrcoef(point_diff.T)
bool_point_diff = make_bool_point_diff_mat(df2)
np.corrcoef(bool_point_diff.T)
max_prob_winning_DOWN_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
df_team = df2[df.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, DOWN_AT_HALF)
if prob > max_prob_winning_DOWN_at_half:
max_prob_winning_DOWN_at_half = prob
max_team = abbr
max_prob_winning_UP_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
df_team = df2[df.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, UP_AT_HALF)
if prob > max_prob_winning_UP_at_half:
max_prob_winning_UP_at_half = prob
max_team = abbr
df_cavs = df2[df2['teamAbbr'] == 'CLE']
point_diff_cavs = make_point_diff_mat(df_cavs)
np.corrcoef(point_diff_cavs.T)
bool_point_diff_cavs = make_bool_point_diff_mat(df_cavs)
np.corrcoef(bool_point_diff_cavs.T)
df_warr = df2[df2.teamAbbr == 'GS']
point_diff_warr = make_point_diff_mat(df_warr)
np.corrcoef(point_diff_warr.T) | code |
2014045/cell_4 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd
df = pd.concat((df16, df17))
df2 = df[['teamAbbr', 'teamPTS', 'teamPTS1', 'teamPTS2', 'opptPTS', 'opptPTS1', 'opptPTS2']].copy()
df2.loc[:, 'teamPTSH1'] = df2['teamPTS1'] + df['teamPTS2']
df2.loc[:, 'opptPTSH1'] = df2['opptPTS1'] + df['opptPTS2']
df2.loc[:, 'ptdiffH1'] = df2['teamPTSH1'] - df2['opptPTSH1']
df2.loc[:, 'ptdiff'] = df2['teamPTS'] - df2['opptPTS']
df2 | code |
2014045/cell_20 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.concat((df16, df17))
df2 = df[['teamAbbr', 'teamPTS', 'teamPTS1', 'teamPTS2', 'opptPTS', 'opptPTS1', 'opptPTS2']].copy()
df2.loc[:, 'teamPTSH1'] = df2['teamPTS1'] + df['teamPTS2']
df2.loc[:, 'opptPTSH1'] = df2['opptPTS1'] + df['opptPTS2']
df2.loc[:, 'ptdiffH1'] = df2['teamPTSH1'] - df2['opptPTSH1']
df2.loc[:, 'ptdiff'] = df2['teamPTS'] - df2['opptPTS']
df2
def make_point_diff_mat(df):
point_diff_df = df[['ptdiffH1', 'ptdiff']]
    point_diff = point_diff_df.to_numpy()
return point_diff
def make_bool_point_diff_mat(df):
point_diff = make_point_diff_mat(df)
bool_point_diff = np.copy(point_diff)
bool_point_diff[bool_point_diff > 0] = 1
bool_point_diff[bool_point_diff < 0] = -1
return bool_point_diff
def prob_of_winning_given(bool_point_diff, event):
return np.mean((bool_point_diff[bool_point_diff[:, 0] == event][:, 1] + 1) / 2)
point_diff = make_point_diff_mat(df2)
np.corrcoef(point_diff.T)
bool_point_diff = make_bool_point_diff_mat(df2)
np.corrcoef(bool_point_diff.T)
max_prob_winning_DOWN_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
df_team = df2[df.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, DOWN_AT_HALF)
if prob > max_prob_winning_DOWN_at_half:
max_prob_winning_DOWN_at_half = prob
max_team = abbr
max_prob_winning_UP_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
df_team = df2[df.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, UP_AT_HALF)
if prob > max_prob_winning_UP_at_half:
max_prob_winning_UP_at_half = prob
max_team = abbr
df_cavs = df2[df2['teamAbbr'] == 'CLE']
point_diff_cavs = make_point_diff_mat(df_cavs)
np.corrcoef(point_diff_cavs.T)
bool_point_diff_cavs = make_bool_point_diff_mat(df_cavs)
np.corrcoef(bool_point_diff_cavs.T) | code |
2014045/cell_29 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.concat((df16, df17))
df2 = df[['teamAbbr', 'teamPTS', 'teamPTS1', 'teamPTS2', 'opptPTS', 'opptPTS1', 'opptPTS2']].copy()
df2.loc[:, 'teamPTSH1'] = df2['teamPTS1'] + df['teamPTS2']
df2.loc[:, 'opptPTSH1'] = df2['opptPTS1'] + df['opptPTS2']
df2.loc[:, 'ptdiffH1'] = df2['teamPTSH1'] - df2['opptPTSH1']
df2.loc[:, 'ptdiff'] = df2['teamPTS'] - df2['opptPTS']
df2
def make_point_diff_mat(df):
point_diff_df = df[['ptdiffH1', 'ptdiff']]
    point_diff = point_diff_df.to_numpy()
return point_diff
def make_bool_point_diff_mat(df):
point_diff = make_point_diff_mat(df)
bool_point_diff = np.copy(point_diff)
bool_point_diff[bool_point_diff > 0] = 1
bool_point_diff[bool_point_diff < 0] = -1
return bool_point_diff
def prob_of_winning_given(bool_point_diff, event):
return np.mean((bool_point_diff[bool_point_diff[:, 0] == event][:, 1] + 1) / 2)
point_diff = make_point_diff_mat(df2)
np.corrcoef(point_diff.T)
bool_point_diff = make_bool_point_diff_mat(df2)
np.corrcoef(bool_point_diff.T)
max_prob_winning_DOWN_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
df_team = df2[df.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, DOWN_AT_HALF)
if prob > max_prob_winning_DOWN_at_half:
max_prob_winning_DOWN_at_half = prob
max_team = abbr
max_prob_winning_UP_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
df_team = df2[df.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, UP_AT_HALF)
if prob > max_prob_winning_UP_at_half:
max_prob_winning_UP_at_half = prob
max_team = abbr
df_cavs = df2[df2['teamAbbr'] == 'CLE']
point_diff_cavs = make_point_diff_mat(df_cavs)
np.corrcoef(point_diff_cavs.T)
bool_point_diff_cavs = make_bool_point_diff_mat(df_cavs)
np.corrcoef(bool_point_diff_cavs.T)
df_warr = df2[df2.teamAbbr == 'GS']
point_diff_warr = make_point_diff_mat(df_warr)
np.corrcoef(point_diff_warr.T)
bool_point_diff_warr = make_bool_point_diff_mat(df_warr)
np.corrcoef(bool_point_diff_warr.T)
prob_of_winning_given(bool_point_diff_warr, UP_AT_HALF) | code |
2014045/cell_26 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.concat((df16, df17))
df2 = df[['teamAbbr', 'teamPTS', 'teamPTS1', 'teamPTS2', 'opptPTS', 'opptPTS1', 'opptPTS2']].copy()
df2.loc[:, 'teamPTSH1'] = df2['teamPTS1'] + df['teamPTS2']
df2.loc[:, 'opptPTSH1'] = df2['opptPTS1'] + df['opptPTS2']
df2.loc[:, 'ptdiffH1'] = df2['teamPTSH1'] - df2['opptPTSH1']
df2.loc[:, 'ptdiff'] = df2['teamPTS'] - df2['opptPTS']
df2
def make_point_diff_mat(df):
point_diff_df = df[['ptdiffH1', 'ptdiff']]
    point_diff = point_diff_df.to_numpy()
return point_diff
def make_bool_point_diff_mat(df):
point_diff = make_point_diff_mat(df)
bool_point_diff = np.copy(point_diff)
bool_point_diff[bool_point_diff > 0] = 1
bool_point_diff[bool_point_diff < 0] = -1
return bool_point_diff
def prob_of_winning_given(bool_point_diff, event):
return np.mean((bool_point_diff[bool_point_diff[:, 0] == event][:, 1] + 1) / 2)
point_diff = make_point_diff_mat(df2)
np.corrcoef(point_diff.T)
bool_point_diff = make_bool_point_diff_mat(df2)
np.corrcoef(bool_point_diff.T)
max_prob_winning_DOWN_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
df_team = df2[df.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, DOWN_AT_HALF)
if prob > max_prob_winning_DOWN_at_half:
max_prob_winning_DOWN_at_half = prob
max_team = abbr
max_prob_winning_UP_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
df_team = df2[df.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, UP_AT_HALF)
if prob > max_prob_winning_UP_at_half:
max_prob_winning_UP_at_half = prob
max_team = abbr
df_cavs = df2[df2['teamAbbr'] == 'CLE']
point_diff_cavs = make_point_diff_mat(df_cavs)
np.corrcoef(point_diff_cavs.T)
bool_point_diff_cavs = make_bool_point_diff_mat(df_cavs)
np.corrcoef(bool_point_diff_cavs.T)
df_warr = df2[df2.teamAbbr == 'GS']
point_diff_warr = make_point_diff_mat(df_warr)
np.corrcoef(point_diff_warr.T)
plt.scatter(point_diff_warr[:, 0], point_diff_warr[:, 1])
plt.ylabel('point differential: end of game')
plt.xlabel('point differential: end of first half') | code |
2014045/cell_11 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.concat((df16, df17))
df2 = df[['teamAbbr', 'teamPTS', 'teamPTS1', 'teamPTS2', 'opptPTS', 'opptPTS1', 'opptPTS2']].copy()
df2.loc[:, 'teamPTSH1'] = df2['teamPTS1'] + df['teamPTS2']
df2.loc[:, 'opptPTSH1'] = df2['opptPTS1'] + df['opptPTS2']
df2.loc[:, 'ptdiffH1'] = df2['teamPTSH1'] - df2['opptPTSH1']
df2.loc[:, 'ptdiff'] = df2['teamPTS'] - df2['opptPTS']
df2
def make_point_diff_mat(df):
point_diff_df = df[['ptdiffH1', 'ptdiff']]
    point_diff = point_diff_df.to_numpy()
return point_diff
def make_bool_point_diff_mat(df):
point_diff = make_point_diff_mat(df)
bool_point_diff = np.copy(point_diff)
bool_point_diff[bool_point_diff > 0] = 1
bool_point_diff[bool_point_diff < 0] = -1
return bool_point_diff
def prob_of_winning_given(bool_point_diff, event):
return np.mean((bool_point_diff[bool_point_diff[:, 0] == event][:, 1] + 1) / 2)
point_diff = make_point_diff_mat(df2)
np.corrcoef(point_diff.T)
bool_point_diff = make_bool_point_diff_mat(df2)
np.corrcoef(bool_point_diff.T)
prob_of_winning_given(bool_point_diff, UP_AT_HALF) | code |
2014045/cell_19 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.concat((df16, df17))
df2 = df[['teamAbbr', 'teamPTS', 'teamPTS1', 'teamPTS2', 'opptPTS', 'opptPTS1', 'opptPTS2']].copy()
df2.loc[:, 'teamPTSH1'] = df2['teamPTS1'] + df['teamPTS2']
df2.loc[:, 'opptPTSH1'] = df2['opptPTS1'] + df['opptPTS2']
df2.loc[:, 'ptdiffH1'] = df2['teamPTSH1'] - df2['opptPTSH1']
df2.loc[:, 'ptdiff'] = df2['teamPTS'] - df2['opptPTS']
df2
def make_point_diff_mat(df):
point_diff_df = df[['ptdiffH1', 'ptdiff']]
    point_diff = point_diff_df.to_numpy()
return point_diff
def make_bool_point_diff_mat(df):
point_diff = make_point_diff_mat(df)
bool_point_diff = np.copy(point_diff)
bool_point_diff[bool_point_diff > 0] = 1
bool_point_diff[bool_point_diff < 0] = -1
return bool_point_diff
def prob_of_winning_given(bool_point_diff, event):
return np.mean((bool_point_diff[bool_point_diff[:, 0] == event][:, 1] + 1) / 2)
point_diff = make_point_diff_mat(df2)
np.corrcoef(point_diff.T)
bool_point_diff = make_bool_point_diff_mat(df2)
np.corrcoef(bool_point_diff.T)
max_prob_winning_DOWN_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
df_team = df2[df.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, DOWN_AT_HALF)
if prob > max_prob_winning_DOWN_at_half:
max_prob_winning_DOWN_at_half = prob
max_team = abbr
max_prob_winning_UP_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
df_team = df2[df.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, UP_AT_HALF)
if prob > max_prob_winning_UP_at_half:
max_prob_winning_UP_at_half = prob
max_team = abbr
df_cavs = df2[df2['teamAbbr'] == 'CLE']
point_diff_cavs = make_point_diff_mat(df_cavs)
np.corrcoef(point_diff_cavs.T)
plt.scatter(point_diff_cavs[:, 0], point_diff_cavs[:, 1])
plt.ylabel('point differential: end of game')
plt.xlabel('point differential: end of first half') | code |
2014045/cell_7 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.concat((df16, df17))
df2 = df[['teamAbbr', 'teamPTS', 'teamPTS1', 'teamPTS2', 'opptPTS', 'opptPTS1', 'opptPTS2']].copy()
df2.loc[:, 'teamPTSH1'] = df2['teamPTS1'] + df['teamPTS2']
df2.loc[:, 'opptPTSH1'] = df2['opptPTS1'] + df['opptPTS2']
df2.loc[:, 'ptdiffH1'] = df2['teamPTSH1'] - df2['opptPTSH1']
df2.loc[:, 'ptdiff'] = df2['teamPTS'] - df2['opptPTS']
df2
def make_point_diff_mat(df):
point_diff_df = df[['ptdiffH1', 'ptdiff']]
    point_diff = point_diff_df.to_numpy()
return point_diff
def make_bool_point_diff_mat(df):
point_diff = make_point_diff_mat(df)
bool_point_diff = np.copy(point_diff)
bool_point_diff[bool_point_diff > 0] = 1
bool_point_diff[bool_point_diff < 0] = -1
return bool_point_diff
def prob_of_winning_given(bool_point_diff, event):
return np.mean((bool_point_diff[bool_point_diff[:, 0] == event][:, 1] + 1) / 2)
point_diff = make_point_diff_mat(df2)
np.corrcoef(point_diff.T) | code |
2014045/cell_18 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.concat((df16, df17))
df2 = df[['teamAbbr', 'teamPTS', 'teamPTS1', 'teamPTS2', 'opptPTS', 'opptPTS1', 'opptPTS2']].copy()
df2.loc[:, 'teamPTSH1'] = df2['teamPTS1'] + df['teamPTS2']
df2.loc[:, 'opptPTSH1'] = df2['opptPTS1'] + df['opptPTS2']
df2.loc[:, 'ptdiffH1'] = df2['teamPTSH1'] - df2['opptPTSH1']
df2.loc[:, 'ptdiff'] = df2['teamPTS'] - df2['opptPTS']
df2
def make_point_diff_mat(df):
point_diff_df = df[['ptdiffH1', 'ptdiff']]
    point_diff = point_diff_df.to_numpy()
return point_diff
def make_bool_point_diff_mat(df):
point_diff = make_point_diff_mat(df)
bool_point_diff = np.copy(point_diff)
bool_point_diff[bool_point_diff > 0] = 1
bool_point_diff[bool_point_diff < 0] = -1
return bool_point_diff
def prob_of_winning_given(bool_point_diff, event):
return np.mean((bool_point_diff[bool_point_diff[:, 0] == event][:, 1] + 1) / 2)
point_diff = make_point_diff_mat(df2)
np.corrcoef(point_diff.T)
bool_point_diff = make_bool_point_diff_mat(df2)
np.corrcoef(bool_point_diff.T)
max_prob_winning_DOWN_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
df_team = df2[df.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, DOWN_AT_HALF)
if prob > max_prob_winning_DOWN_at_half:
max_prob_winning_DOWN_at_half = prob
max_team = abbr
max_prob_winning_UP_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
df_team = df2[df.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, UP_AT_HALF)
if prob > max_prob_winning_UP_at_half:
max_prob_winning_UP_at_half = prob
max_team = abbr
df_cavs = df2[df2['teamAbbr'] == 'CLE']
point_diff_cavs = make_point_diff_mat(df_cavs)
np.corrcoef(point_diff_cavs.T) | code |
2014045/cell_28 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.concat((df16, df17))
df2 = df[['teamAbbr', 'teamPTS', 'teamPTS1', 'teamPTS2', 'opptPTS', 'opptPTS1', 'opptPTS2']].copy()
df2.loc[:, 'teamPTSH1'] = df2['teamPTS1'] + df['teamPTS2']
df2.loc[:, 'opptPTSH1'] = df2['opptPTS1'] + df['opptPTS2']
df2.loc[:, 'ptdiffH1'] = df2['teamPTSH1'] - df2['opptPTSH1']
df2.loc[:, 'ptdiff'] = df2['teamPTS'] - df2['opptPTS']
df2
def make_point_diff_mat(df):
point_diff_df = df[['ptdiffH1', 'ptdiff']]
    point_diff = point_diff_df.to_numpy()
return point_diff
def make_bool_point_diff_mat(df):
point_diff = make_point_diff_mat(df)
bool_point_diff = np.copy(point_diff)
bool_point_diff[bool_point_diff > 0] = 1
bool_point_diff[bool_point_diff < 0] = -1
return bool_point_diff
def prob_of_winning_given(bool_point_diff, event):
return np.mean((bool_point_diff[bool_point_diff[:, 0] == event][:, 1] + 1) / 2)
point_diff = make_point_diff_mat(df2)
np.corrcoef(point_diff.T)
bool_point_diff = make_bool_point_diff_mat(df2)
np.corrcoef(bool_point_diff.T)
max_prob_winning_DOWN_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
df_team = df2[df.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, DOWN_AT_HALF)
if prob > max_prob_winning_DOWN_at_half:
max_prob_winning_DOWN_at_half = prob
max_team = abbr
max_prob_winning_UP_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
df_team = df2[df.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, UP_AT_HALF)
if prob > max_prob_winning_UP_at_half:
max_prob_winning_UP_at_half = prob
max_team = abbr
df_cavs = df2[df2['teamAbbr'] == 'CLE']
point_diff_cavs = make_point_diff_mat(df_cavs)
np.corrcoef(point_diff_cavs.T)
bool_point_diff_cavs = make_bool_point_diff_mat(df_cavs)
np.corrcoef(bool_point_diff_cavs.T)
df_warr = df2[df2.teamAbbr == 'GS']
point_diff_warr = make_point_diff_mat(df_warr)
np.corrcoef(point_diff_warr.T)
bool_point_diff_warr = make_bool_point_diff_mat(df_warr)
np.corrcoef(bool_point_diff_warr.T)
prob_of_winning_given(bool_point_diff_warr, DOWN_AT_HALF) | code |
2014045/cell_8 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.concat((df16, df17))
df2 = df[['teamAbbr', 'teamPTS', 'teamPTS1', 'teamPTS2', 'opptPTS', 'opptPTS1', 'opptPTS2']].copy()
df2.loc[:, 'teamPTSH1'] = df2['teamPTS1'] + df['teamPTS2']
df2.loc[:, 'opptPTSH1'] = df2['opptPTS1'] + df['opptPTS2']
df2.loc[:, 'ptdiffH1'] = df2['teamPTSH1'] - df2['opptPTSH1']
df2.loc[:, 'ptdiff'] = df2['teamPTS'] - df2['opptPTS']
df2
def make_point_diff_mat(df):
point_diff_df = df[['ptdiffH1', 'ptdiff']]
    point_diff = point_diff_df.to_numpy()
return point_diff
def make_bool_point_diff_mat(df):
point_diff = make_point_diff_mat(df)
bool_point_diff = np.copy(point_diff)
bool_point_diff[bool_point_diff > 0] = 1
bool_point_diff[bool_point_diff < 0] = -1
return bool_point_diff
def prob_of_winning_given(bool_point_diff, event):
return np.mean((bool_point_diff[bool_point_diff[:, 0] == event][:, 1] + 1) / 2)
point_diff = make_point_diff_mat(df2)
np.corrcoef(point_diff.T)
plt.scatter(point_diff[:, 0], point_diff[:, 1])
plt.ylabel('point differential: end of game')
plt.xlabel('point differential: end of first half') | code |
2014045/cell_15 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.concat((df16, df17))
df2 = df[['teamAbbr', 'teamPTS', 'teamPTS1', 'teamPTS2', 'opptPTS', 'opptPTS1', 'opptPTS2']].copy()
df2.loc[:, 'teamPTSH1'] = df2['teamPTS1'] + df['teamPTS2']
df2.loc[:, 'opptPTSH1'] = df2['opptPTS1'] + df['opptPTS2']
df2.loc[:, 'ptdiffH1'] = df2['teamPTSH1'] - df2['opptPTSH1']
df2.loc[:, 'ptdiff'] = df2['teamPTS'] - df2['opptPTS']
df2
def make_point_diff_mat(df):
point_diff_df = df[['ptdiffH1', 'ptdiff']]
    point_diff = point_diff_df.to_numpy()
return point_diff
def make_bool_point_diff_mat(df):
point_diff = make_point_diff_mat(df)
bool_point_diff = np.copy(point_diff)
bool_point_diff[bool_point_diff > 0] = 1
bool_point_diff[bool_point_diff < 0] = -1
return bool_point_diff
def prob_of_winning_given(bool_point_diff, event):
return np.mean((bool_point_diff[bool_point_diff[:, 0] == event][:, 1] + 1) / 2)
max_prob_winning_DOWN_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
df_team = df2[df.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, DOWN_AT_HALF)
if prob > max_prob_winning_DOWN_at_half:
max_prob_winning_DOWN_at_half = prob
max_team = abbr
max_prob_winning_UP_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
df_team = df2[df.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, UP_AT_HALF)
if prob > max_prob_winning_UP_at_half:
max_prob_winning_UP_at_half = prob
max_team = abbr
print(max_team)
print(max_prob_winning_UP_at_half) | code |
2014045/cell_22 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.concat((df16, df17))
df2 = df[['teamAbbr', 'teamPTS', 'teamPTS1', 'teamPTS2', 'opptPTS', 'opptPTS1', 'opptPTS2']].copy()
df2.loc[:, 'teamPTSH1'] = df2['teamPTS1'] + df['teamPTS2']
df2.loc[:, 'opptPTSH1'] = df2['opptPTS1'] + df['opptPTS2']
df2.loc[:, 'ptdiffH1'] = df2['teamPTSH1'] - df2['opptPTSH1']
df2.loc[:, 'ptdiff'] = df2['teamPTS'] - df2['opptPTS']
df2
def make_point_diff_mat(df):
point_diff_df = df[['ptdiffH1', 'ptdiff']]
    point_diff = point_diff_df.to_numpy()
return point_diff
def make_bool_point_diff_mat(df):
point_diff = make_point_diff_mat(df)
bool_point_diff = np.copy(point_diff)
bool_point_diff[bool_point_diff > 0] = 1
bool_point_diff[bool_point_diff < 0] = -1
return bool_point_diff
def prob_of_winning_given(bool_point_diff, event):
return np.mean((bool_point_diff[bool_point_diff[:, 0] == event][:, 1] + 1) / 2)
point_diff = make_point_diff_mat(df2)
np.corrcoef(point_diff.T)
bool_point_diff = make_bool_point_diff_mat(df2)
np.corrcoef(bool_point_diff.T)
max_prob_winning_DOWN_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
df_team = df2[df.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, DOWN_AT_HALF)
if prob > max_prob_winning_DOWN_at_half:
max_prob_winning_DOWN_at_half = prob
max_team = abbr
max_prob_winning_UP_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
df_team = df2[df.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, UP_AT_HALF)
if prob > max_prob_winning_UP_at_half:
max_prob_winning_UP_at_half = prob
max_team = abbr
df_cavs = df2[df2['teamAbbr'] == 'CLE']
point_diff_cavs = make_point_diff_mat(df_cavs)
np.corrcoef(point_diff_cavs.T)
bool_point_diff_cavs = make_bool_point_diff_mat(df_cavs)
np.corrcoef(bool_point_diff_cavs.T)
prob_of_winning_given(bool_point_diff_cavs, UP_AT_HALF) | code |
2014045/cell_10 | [
"text_html_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.concat((df16, df17))
df2 = df[['teamAbbr', 'teamPTS', 'teamPTS1', 'teamPTS2', 'opptPTS', 'opptPTS1', 'opptPTS2']].copy()
df2.loc[:, 'teamPTSH1'] = df2['teamPTS1'] + df['teamPTS2']
df2.loc[:, 'opptPTSH1'] = df2['opptPTS1'] + df['opptPTS2']
df2.loc[:, 'ptdiffH1'] = df2['teamPTSH1'] - df2['opptPTSH1']
df2.loc[:, 'ptdiff'] = df2['teamPTS'] - df2['opptPTS']
df2
def make_point_diff_mat(df):
point_diff_df = df[['ptdiffH1', 'ptdiff']]
    point_diff = point_diff_df.to_numpy()
return point_diff
def make_bool_point_diff_mat(df):
point_diff = make_point_diff_mat(df)
bool_point_diff = np.copy(point_diff)
bool_point_diff[bool_point_diff > 0] = 1
bool_point_diff[bool_point_diff < 0] = -1
return bool_point_diff
def prob_of_winning_given(bool_point_diff, event):
return np.mean((bool_point_diff[bool_point_diff[:, 0] == event][:, 1] + 1) / 2)
point_diff = make_point_diff_mat(df2)
np.corrcoef(point_diff.T)
bool_point_diff = make_bool_point_diff_mat(df2)
np.corrcoef(bool_point_diff.T)
prob_of_winning_given(bool_point_diff, DOWN_AT_HALF) | code |
2014045/cell_27 | [
"text_plain_output_1.png"
] | import numpy as np
import pandas as pd
df = pd.concat((df16, df17))
df2 = df[['teamAbbr', 'teamPTS', 'teamPTS1', 'teamPTS2', 'opptPTS', 'opptPTS1', 'opptPTS2']].copy()
df2.loc[:, 'teamPTSH1'] = df2['teamPTS1'] + df['teamPTS2']
df2.loc[:, 'opptPTSH1'] = df2['opptPTS1'] + df['opptPTS2']
df2.loc[:, 'ptdiffH1'] = df2['teamPTSH1'] - df2['opptPTSH1']
df2.loc[:, 'ptdiff'] = df2['teamPTS'] - df2['opptPTS']
df2
def make_point_diff_mat(df):
point_diff_df = df[['ptdiffH1', 'ptdiff']]
    point_diff = point_diff_df.to_numpy()
return point_diff
def make_bool_point_diff_mat(df):
point_diff = make_point_diff_mat(df)
bool_point_diff = np.copy(point_diff)
bool_point_diff[bool_point_diff > 0] = 1
bool_point_diff[bool_point_diff < 0] = -1
return bool_point_diff
def prob_of_winning_given(bool_point_diff, event):
return np.mean((bool_point_diff[bool_point_diff[:, 0] == event][:, 1] + 1) / 2)
point_diff = make_point_diff_mat(df2)
np.corrcoef(point_diff.T)
bool_point_diff = make_bool_point_diff_mat(df2)
np.corrcoef(bool_point_diff.T)
max_prob_winning_DOWN_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
    df_team = df2[df2.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, DOWN_AT_HALF)
if prob > max_prob_winning_DOWN_at_half:
max_prob_winning_DOWN_at_half = prob
max_team = abbr
max_prob_winning_UP_at_half = 0
max_team = None
for abbr in df2.teamAbbr.unique():
    df_team = df2[df2.teamAbbr == abbr]
bool_point_diff_team = make_bool_point_diff_mat(df_team)
prob = prob_of_winning_given(bool_point_diff_team, UP_AT_HALF)
if prob > max_prob_winning_UP_at_half:
max_prob_winning_UP_at_half = prob
max_team = abbr
df_cavs = df2[df2['teamAbbr'] == 'CLE']
point_diff_cavs = make_point_diff_mat(df_cavs)
np.corrcoef(point_diff_cavs.T)
bool_point_diff_cavs = make_bool_point_diff_mat(df_cavs)
np.corrcoef(bool_point_diff_cavs.T)
df_warr = df2[df2.teamAbbr == 'GS']
point_diff_warr = make_point_diff_mat(df_warr)
np.corrcoef(point_diff_warr.T)
bool_point_diff_warr = make_bool_point_diff_mat(df_warr)
np.corrcoef(bool_point_diff_warr.T) | code |
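The two loops above recompute the per-team conditional probabilities one team at a time; the same numbers fall out of a single groupby, sketched here (the helper scores a tied final as 0.5, while this version counts it as a loss, which is moot for NBA games):
probs = (
    df2.assign(won=(df2['ptdiff'] > 0).astype(int),
               down_h1=df2['ptdiffH1'] < 0)
       .query('down_h1')
       .groupby('teamAbbr')['won'].mean()
)
print(probs.idxmax(), probs.max())  # best comeback team and its P(win | down at half)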
2012289/cell_1 | [
"text_html_output_1.png"
] | import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from scipy.stats import skew
from scipy.stats.stats import pearsonr
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
train = train.drop(['Name'], axis=1)
test = test.drop(['Name'], axis=1)
train.head() | code |
2012289/cell_3 | [
"text_plain_output_1.png"
] | import matplotlib
import numpy as np
import pandas as pd
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
matplotlib.rcParams['figure.figsize'] = (14.0, 7.0)
prices = pd.DataFrame({'Fare': train['Fare'], 'log(Fare + 1)': np.log1p(train['Fare'])})
prices.hist() | code |
2012289/cell_10 | [
"text_plain_output_1.png",
"image_output_1.png"
] | from scipy.stats import skew
from sklearn.linear_model import LogisticRegression
import matplotlib
import numpy as np
import pandas as pd
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
matplotlib.rcParams['figure.figsize'] = (14.0, 7.0)
prices = pd.DataFrame({'Fare': train['Fare'], 'log(Fare + 1)': np.log1p(train['Fare'])})
numeric_feats = all_data.dtypes[all_data.dtypes != 'object'].index
skewed_feats = train[numeric_feats].apply(lambda X: skew(X.dropna()))
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
all_data[skewed_feats] = np.log1p(all_data[skewed_feats])
all_data = pd.get_dummies(all_data)
all_data = all_data.fillna(all_data.mean())
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.Survived
logreg = LogisticRegression()
logreg.fit(X_train, y)
accuracy = round(logreg.score(X_train, y) * 100, 2)
print(accuracy)
logreg_preds = logreg.predict(X_test) | code |
2012289/cell_12 | [
"text_plain_output_1.png"
] | from scipy.stats import skew
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
import matplotlib
import numpy as np
import pandas as pd
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
matplotlib.rcParams['figure.figsize'] = (14.0, 7.0)
prices = pd.DataFrame({'Fare': train['Fare'], 'log(Fare + 1)': np.log1p(train['Fare'])})
numeric_feats = all_data.dtypes[all_data.dtypes != 'object'].index
skewed_feats = train[numeric_feats].apply(lambda X: skew(X.dropna()))
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
all_data[skewed_feats] = np.log1p(all_data[skewed_feats])
all_data = pd.get_dummies(all_data)
all_data = all_data.fillna(all_data.mean())
X_train = all_data[:train.shape[0]]
X_test = all_data[train.shape[0]:]
y = train.Survived
logreg = LogisticRegression()
logreg.fit(X_train, y)
accuracy = round(logreg.score(X_train, y) * 100, 2)
logreg_preds = logreg.predict(X_test)
from sklearn.ensemble import RandomForestClassifier
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, y)
random_forest_preds = random_forest.predict(X_test)
random_forest.score(X_train, y)
accuracy = round(random_forest.score(X_train, y) * 100, 2)
print(accuracy) | code |
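Both accuracies above are measured on the training set, which flatters the random forest in particular; a held-out estimate via 5-fold cross-validation is a quick sanity check (a sketch, not part of the original notebook):
from sklearn.model_selection import cross_val_score
# mean accuracy over 5 held-out folds for each fitted estimator
print(cross_val_score(logreg, X_train, y, cv=5).mean())
print(cross_val_score(random_forest, X_train, y, cv=5).mean())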
72112616/cell_9 | [
"text_html_output_1.png"
] | X_valid | code |
72112616/cell_4 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
df_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
cat_columns = [col for col in df_train.columns if df_train[col].dtype == 'object']
cat_columns | code |
72112616/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
df_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
cat_columns = [col for col in df_train.columns if df_train[col].dtype == 'object']
low_cordinal_cols = [col for col in cat_columns if df_train[col].nunique() < 10]
low_cordinal_cols | code |
72112616/cell_1 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
72112616/cell_7 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
df_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
cat_columns = [col for col in df_train.columns if df_train[col].dtype == 'object']
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
y = df_train.target
df_train.drop(['target'], axis=1, inplace=True)
df_train | code |
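Later cells in this notebook use X_train, X_valid, y_train and y_valid, but the cell that creates them is not among these records; a minimal sketch of the presumed step, with the 80/20 ratio and random_state as assumptions:
# assumed split producing the names the later cells rely on
X_train, X_valid, y_train, y_valid = train_test_split(
    df_train, y, train_size=0.8, test_size=0.2, random_state=0)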
72112616/cell_18 | [
"text_html_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
df_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
cat_columns = [col for col in df_train.columns if df_train[col].dtype == 'object']
low_cordinal_cols = [col for col in cat_columns if df_train[col].nunique() < 10]
low_cordinal_cols
OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_encoder_Train = pd.DataFrame(OH_encoder.fit_transform(X_train[low_cordinal_cols]))
OH_encoder_valid = pd.DataFrame(OH_encoder.transform(X_valid[low_cordinal_cols]))
OH_encoder_Train.index = X_train.index
OH_encoder_valid.index = X_valid.index
X_train_drop = X_train.drop(cat_columns, axis=1)
X_valid_drop = X_valid.drop(cat_columns, axis=1)
OH_encoder_Train = pd.concat([OH_encoder_Train, X_train_drop], axis=1)
OH_encoder_valid = pd.concat([OH_encoder_valid, X_valid_drop], axis=1)
sample_submission = pd.read_csv('/kaggle/input/30-days-of-ml/sample_submission.csv')
sample_submission.head() | code |
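The records never show the test set being scored; a sketch of the missing step, assuming the fitted model_randm_forest from cell_17 and that sample_submission carries 'id' and 'target' columns (all of which are assumptions):
# encode the test set the same way as the training data
OH_test = pd.DataFrame(OH_encoder.transform(df_test[low_cordinal_cols]))
OH_test.index = df_test.index
OH_test = pd.concat([OH_test, df_test.drop(cat_columns, axis=1)], axis=1)
sample_submission['target'] = model_randm_forest.predict(OH_test)
sample_submission.to_csv('submission.csv', index=False)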
72112616/cell_16 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
df_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
cat_columns = [col for col in df_train.columns if df_train[col].dtype == 'object']
low_cordinal_cols = [col for col in cat_columns if df_train[col].nunique() < 10]
low_cordinal_cols
OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_encoder_Train = pd.DataFrame(OH_encoder.fit_transform(X_train[low_cordinal_cols]))
OH_encoder_valid = pd.DataFrame(OH_encoder.transform(X_valid[low_cordinal_cols]))
OH_encoder_Train.index = X_train.index
OH_encoder_valid.index = X_valid.index
X_train_drop = X_train.drop(cat_columns, axis=1)
X_valid_drop = X_valid.drop(cat_columns, axis=1)
OH_encoder_Train = pd.concat([OH_encoder_Train, X_train_drop], axis=1)
OH_encoder_valid = pd.concat([OH_encoder_valid, X_valid_drop], axis=1)
OH_encoder_valid | code |
72112616/cell_17 | [
"text_html_output_1.png"
] | from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.preprocessing import OneHotEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
df_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
cat_columns = [col for col in df_train.columns if df_train[col].dtype == 'object']
low_cordinal_cols = [col for col in cat_columns if df_train[col].nunique() < 10]
low_cordinal_cols
OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_encoder_Train = pd.DataFrame(OH_encoder.fit_transform(X_train[low_cordinal_cols]))
OH_encoder_valid = pd.DataFrame(OH_encoder.transform(X_valid[low_cordinal_cols]))
OH_encoder_Train.index = X_train.index
OH_encoder_valid.index = X_valid.index
X_train_drop = X_train.drop(cat_columns, axis=1)
X_valid_drop = X_valid.drop(cat_columns, axis=1)
OH_encoder_Train = pd.concat([OH_encoder_Train, X_train_drop], axis=1)
OH_encoder_valid = pd.concat([OH_encoder_valid, X_valid_drop], axis=1)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
model_randm_forest = RandomForestRegressor(n_estimators=100, random_state=0)
model_randm_forest.fit(OH_encoder_Train, y_train)
valid_pred = model_randm_forest.predict(OH_encoder_valid)
print(mean_absolute_error(y_valid, valid_pred)) | code |
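The MAE above comes from a single 100-tree forest; a small sweep shows how sensitive the validation error is to forest size (a sketch reusing the encoded frames above):
for n in (50, 100, 200):
    m = RandomForestRegressor(n_estimators=n, random_state=0)
    m.fit(OH_encoder_Train, y_train)
    print(n, mean_absolute_error(y_valid, m.predict(OH_encoder_valid)))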
72112616/cell_14 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
df_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
cat_columns = [col for col in df_train.columns if df_train[col].dtype == 'object']
low_cordinal_cols = [col for col in cat_columns if df_train[col].nunique() < 10]
low_cordinal_cols
OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_encoder_Train = pd.DataFrame(OH_encoder.fit_transform(X_train[low_cordinal_cols]))
OH_encoder_valid = pd.DataFrame(OH_encoder.transform(X_valid[low_cordinal_cols]))
OH_encoder_Train.index = X_train.index
OH_encoder_valid.index = X_valid.index
X_train_drop = X_train.drop(cat_columns, axis=1)
X_valid_drop = X_valid.drop(cat_columns, axis=1)
X_train_drop | code |
72112616/cell_12 | [
"text_plain_output_1.png"
] | from sklearn.preprocessing import OneHotEncoder
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
df_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
cat_columns = [col for col in df_train.columns if df_train[col].dtype == 'object']
low_cordinal_cols = [col for col in cat_columns if df_train[col].nunique() < 10]
low_cordinal_cols
OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_encoder_Train = pd.DataFrame(OH_encoder.fit_transform(X_train[low_cordinal_cols]))
OH_encoder_valid = pd.DataFrame(OH_encoder.transform(X_valid[low_cordinal_cols]))
OH_encoder_Train.index = X_train.index
OH_encoder_valid.index = X_valid.index
OH_encoder_valid.head() | code |
72112616/cell_5 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df_train = pd.read_csv('/kaggle/input/30-days-of-ml/train.csv')
df_test = pd.read_csv('/kaggle/input/30-days-of-ml/test.csv')
cat_columns = [col for col in df_train.columns if df_train[col].dtype == 'object']
df_train[cat_columns].nunique() | code |
129039870/cell_6 | [
"text_plain_output_1.png"
] | !octave -W myinstall.m | code |
129039870/cell_2 | [
"text_plain_output_1.png"
] | !apt-get update
!apt --yes install octave
!apt-get install --yes liboctave-dev | code |
129039870/cell_8 | [
"text_plain_output_1.png"
] | !octave -W main.m | code |
34130462/cell_21 | [
"text_plain_output_1.png"
] | import pandas as pd
metadata_df = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv', index_col='cord_uid')
example_df = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/target_tables/2_relevant_factors/How does temperature and humidity affect the transmission of 2019-nCoV.csv')
example_shas = []
example_uids = []
for index, row in example_df.iterrows():
study_title = row['Study']
study_metadata = metadata_df[metadata_df['title'] == study_title]
if len(study_metadata) != 0:
sha = study_metadata.iloc[0]['sha']
uid = study_metadata.iloc[0].name
if str(sha) != 'nan':
example_shas.append(sha)
example_uids.append(uid)
unique_example_uids = set(example_uids)
len(unique_example_uids)
embeddings_df = pd.read_csv('../input/CORD-19-research-challenge/cord_19_embeddings_4_24/cord_19_embeddings_4_24.csv', header=None, index_col=0)
available_uids = unique_example_uids.intersection(embeddings_df.index)
example_embeddings_df = embeddings_df.loc[available_uids]
example_embeddings_df | code |
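With the example embeddings isolated, a natural next step is to rank every paper by cosine similarity to their centroid; a sketch using scikit-learn, not part of the original notebook:
from sklearn.metrics.pairwise import cosine_similarity
# centroid of the known-relevant papers as a 1 x d query vector
query = example_embeddings_df.mean(axis=0).to_numpy().reshape(1, -1)
scores = cosine_similarity(embeddings_df.to_numpy(), query).ravel()
print(pd.Series(scores, index=embeddings_df.index).nlargest(10))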
34130462/cell_23 | [
"text_plain_output_1.png"
] | import matplotlib.pyplot as plt
import pandas as pd
metadata_df = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv', index_col='cord_uid')
example_df = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/target_tables/2_relevant_factors/How does temperature and humidity affect the transmission of 2019-nCoV.csv')
example_shas = []
example_uids = []
for index, row in example_df.iterrows():
study_title = row['Study']
study_metadata = metadata_df[metadata_df['title'] == study_title]
if len(study_metadata) != 0:
sha = study_metadata.iloc[0]['sha']
uid = study_metadata.iloc[0].name
if str(sha) != 'nan':
example_shas.append(sha)
example_uids.append(uid)
unique_example_uids = set(example_uids)
len(unique_example_uids)
embeddings_df = pd.read_csv('../input/CORD-19-research-challenge/cord_19_embeddings_4_24/cord_19_embeddings_4_24.csv', header=None, index_col=0)
available_uids = unique_example_uids.intersection(embeddings_df.index)
example_embeddings_df = embeddings_df.loc[available_uids]
for i in range(1, len(embeddings_df.columns), 2):
plt.scatter(embeddings_df[i], embeddings_df[i + 1])
plt.scatter(example_embeddings_df[i], example_embeddings_df[i + 1])
plt.show() | code |
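The loop above emits one figure per pair of raw embedding dimensions (hundreds of plots); a single 2-D PCA projection is usually easier to read. A sketch, assuming scikit-learn is available:
from sklearn.decomposition import PCA
pca = PCA(n_components=2).fit(embeddings_df.to_numpy())
all_2d = pca.transform(embeddings_df.to_numpy())
ex_2d = pca.transform(example_embeddings_df.to_numpy())
plt.scatter(all_2d[:, 0], all_2d[:, 1], s=2, alpha=0.3, label='all papers')
plt.scatter(ex_2d[:, 0], ex_2d[:, 1], color='red', label='examples')
plt.legend()
plt.show()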
34130462/cell_6 | [
"text_html_output_1.png"
] | import pandas as pd
metadata_df = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv', index_col='cord_uid') | code |
34130462/cell_7 | [
"image_output_11.png",
"image_output_239.png",
"image_output_98.png",
"image_output_337.png",
"image_output_121.png",
"image_output_180.png",
"image_output_331.png",
"image_output_379.png",
"image_output_384.png",
"image_output_303.png",
"image_output_157.png",
"image_output_74.png",
"image_output_279.png",
"image_output_181.png",
"image_output_290.png",
"image_output_156.png",
"image_output_310.png",
"image_output_204.png",
"image_output_299.png",
"image_output_330.png",
"image_output_174.png",
"image_output_244.png",
"image_output_82.png",
"image_output_305.png",
"image_output_173.png",
"image_output_24.png",
"image_output_209.png",
"image_output_380.png",
"image_output_159.png",
"image_output_139.png",
"image_output_104.png",
"image_output_220.png",
"image_output_46.png",
"image_output_207.png",
"image_output_327.png",
"image_output_295.png",
"image_output_251.png",
"image_output_232.png",
"image_output_208.png",
"image_output_106.png",
"image_output_85.png",
"image_output_349.png",
"image_output_368.png",
"image_output_373.png",
"image_output_149.png",
"image_output_108.png",
"image_output_270.png",
"image_output_150.png",
"image_output_383.png",
"image_output_25.png",
"image_output_266.png",
"image_output_190.png",
"image_output_228.png",
"image_output_183.png",
"image_output_202.png",
"image_output_275.png",
"image_output_312.png",
"image_output_77.png",
"image_output_179.png",
"image_output_319.png",
"image_output_148.png",
"image_output_47.png",
"image_output_344.png",
"image_output_141.png",
"image_output_233.png",
"image_output_229.png",
"image_output_316.png",
"image_output_242.png",
"image_output_171.png",
"image_output_78.png",
"image_output_219.png",
"image_output_227.png",
"image_output_170.png",
"image_output_17.png",
"image_output_30.png",
"image_output_257.png",
"image_output_73.png",
"image_output_309.png",
"image_output_221.png",
"image_output_355.png",
"image_output_72.png",
"image_output_356.png",
"image_output_336.png",
"image_output_14.png",
"image_output_59.png",
"image_output_351.png",
"image_output_39.png",
"image_output_97.png",
"image_output_378.png",
"image_output_247.png",
"image_output_357.png",
"image_output_361.png",
"image_output_28.png",
"image_output_86.png",
"image_output_137.png",
"image_output_160.png",
"image_output_234.png",
"image_output_84.png",
"image_output_125.png",
"image_output_81.png",
"image_output_300.png",
"image_output_165.png",
"image_output_194.png",
"image_output_342.png",
"image_output_273.png",
"image_output_23.png",
"image_output_136.png",
"image_output_367.png",
"image_output_34.png",
"image_output_308.png",
"image_output_64.png",
"image_output_282.png",
"image_output_119.png",
"image_output_360.png",
"image_output_237.png",
"image_output_225.png",
"image_output_131.png",
"image_output_134.png",
"image_output_178.png",
"image_output_177.png",
"image_output_377.png",
"image_output_188.png",
"image_output_144.png",
"image_output_335.png",
"image_output_252.png",
"image_output_13.png",
"image_output_128.png",
"image_output_184.png",
"image_output_155.png",
"image_output_40.png",
"image_output_224.png",
"image_output_5.png",
"image_output_48.png",
"image_output_114.png",
"image_output_146.png",
"image_output_68.png",
"image_output_195.png",
"image_output_142.png",
"image_output_280.png",
"image_output_109.png",
"image_output_75.png",
"image_output_18.png",
"image_output_127.png",
"image_output_143.png",
"image_output_324.png",
"image_output_314.png",
"image_output_313.png",
"image_output_283.png",
"image_output_58.png",
"image_output_245.png",
"image_output_118.png",
"image_output_145.png",
"image_output_254.png",
"image_output_269.png",
"image_output_296.png",
"image_output_110.png",
"image_output_116.png",
"image_output_286.png",
"image_output_277.png",
"image_output_169.png",
"image_output_271.png",
"image_output_374.png",
"image_output_107.png",
"image_output_92.png",
"image_output_21.png",
"image_output_372.png",
"image_output_248.png",
"image_output_120.png",
"image_output_276.png",
"image_output_256.png",
"image_output_332.png",
"image_output_105.png",
"image_output_52.png",
"image_output_288.png",
"image_output_362.png",
"image_output_307.png",
"image_output_292.png",
"image_output_60.png",
"image_output_7.png",
"image_output_343.png",
"image_output_62.png",
"image_output_96.png",
"image_output_186.png",
"image_output_182.png",
"image_output_152.png",
"image_output_322.png",
"image_output_185.png",
"image_output_235.png",
"image_output_167.png",
"image_output_56.png",
"image_output_196.png",
"image_output_346.png",
"image_output_31.png",
"image_output_65.png",
"image_output_115.png",
"image_output_291.png",
"image_output_20.png",
"image_output_359.png",
"image_output_69.png",
"image_output_298.png",
"image_output_369.png",
"image_output_241.png",
"image_output_32.png",
"image_output_53.png",
"image_output_230.png",
"image_output_352.png",
"image_output_4.png",
"image_output_304.png",
"image_output_51.png",
"image_output_274.png",
"image_output_338.png",
"image_output_318.png",
"image_output_162.png",
"image_output_210.png",
"image_output_103.png",
"image_output_348.png",
"image_output_226.png",
"image_output_201.png",
"image_output_253.png",
"image_output_341.png",
"image_output_117.png",
"image_output_217.png",
"image_output_339.png",
"image_output_83.png",
"image_output_382.png",
"image_output_317.png",
"image_output_213.png",
"image_output_172.png",
"image_output_42.png",
"image_output_306.png",
"image_output_381.png",
"image_output_240.png",
"image_output_35.png",
"image_output_263.png",
"image_output_311.png",
"image_output_90.png",
"image_output_302.png",
"image_output_41.png",
"image_output_57.png",
"image_output_260.png",
"image_output_222.png",
"image_output_329.png",
"image_output_36.png",
"image_output_265.png",
"image_output_8.png",
"image_output_37.png",
"image_output_66.png",
"image_output_16.png",
"image_output_192.png",
"image_output_211.png",
"image_output_163.png",
"image_output_91.png",
"image_output_70.png",
"image_output_138.png",
"image_output_158.png",
"image_output_285.png",
"image_output_67.png",
"image_output_27.png",
"image_output_353.png",
"image_output_354.png",
"image_output_287.png",
"image_output_261.png",
"image_output_333.png",
"image_output_122.png",
"image_output_54.png",
"image_output_297.png",
"image_output_323.png",
"image_output_189.png",
"image_output_363.png",
"image_output_6.png",
"image_output_301.png",
"image_output_45.png",
"image_output_246.png",
"image_output_365.png",
"image_output_250.png",
"image_output_63.png",
"image_output_71.png",
"image_output_153.png",
"image_output_126.png",
"image_output_281.png",
"image_output_80.png",
"image_output_289.png",
"image_output_112.png",
"image_output_164.png",
"image_output_293.png",
"image_output_326.png",
"image_output_347.png",
"image_output_95.png",
"image_output_123.png",
"image_output_147.png",
"image_output_198.png",
"image_output_370.png",
"image_output_212.png",
"image_output_278.png",
"image_output_364.png",
"image_output_340.png",
"image_output_243.png",
"image_output_93.png",
"image_output_205.png",
"image_output_206.png",
"image_output_214.png",
"image_output_12.png",
"image_output_284.png",
"image_output_161.png",
"image_output_231.png",
"image_output_22.png",
"image_output_132.png",
"image_output_328.png",
"image_output_320.png",
"image_output_89.png",
"image_output_315.png",
"image_output_268.png",
"image_output_55.png",
"image_output_133.png",
"image_output_216.png",
"image_output_218.png",
"image_output_191.png",
"image_output_151.png",
"image_output_200.png",
"image_output_294.png",
"image_output_94.png",
"image_output_3.png",
"image_output_111.png",
"image_output_101.png",
"image_output_366.png",
"image_output_249.png",
"image_output_135.png",
"image_output_29.png",
"image_output_238.png",
"image_output_325.png",
"image_output_193.png",
"image_output_187.png",
"image_output_44.png",
"image_output_199.png",
"image_output_130.png",
"image_output_43.png",
"image_output_2.png",
"image_output_375.png",
"image_output_262.png",
"image_output_1.png",
"image_output_350.png",
"image_output_10.png",
"image_output_259.png",
"image_output_168.png",
"image_output_258.png",
"image_output_236.png",
"image_output_154.png",
"image_output_102.png",
"image_output_176.png",
"image_output_321.png",
"image_output_175.png",
"image_output_124.png",
"image_output_88.png",
"image_output_272.png",
"image_output_33.png",
"image_output_140.png",
"image_output_345.png",
"image_output_358.png",
"image_output_87.png",
"image_output_255.png",
"image_output_50.png",
"image_output_15.png",
"image_output_267.png",
"image_output_99.png",
"image_output_49.png",
"image_output_197.png",
"image_output_100.png",
"image_output_129.png",
"image_output_166.png",
"image_output_76.png",
"image_output_223.png",
"image_output_9.png",
"image_output_19.png",
"image_output_371.png",
"image_output_79.png",
"image_output_215.png",
"image_output_61.png",
"image_output_203.png",
"image_output_38.png",
"image_output_334.png",
"image_output_113.png",
"image_output_26.png",
"image_output_376.png",
"image_output_264.png"
] | import pandas as pd
metadata_df = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv', index_col='cord_uid')
metadata_df | code |
34130462/cell_15 | [
"text_html_output_1.png"
] | import pandas as pd
metadata_df = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv', index_col='cord_uid')
example_df = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/target_tables/2_relevant_factors/How does temperature and humidity affect the transmission of 2019-nCoV.csv')
example_shas = []
example_uids = []
for index, row in example_df.iterrows():
study_title = row['Study']
study_metadata = metadata_df[metadata_df['title'] == study_title]
if len(study_metadata) != 0:
sha = study_metadata.iloc[0]['sha']
uid = study_metadata.iloc[0].name
if str(sha) != 'nan':
example_shas.append(sha)
example_uids.append(uid)
example_uids | code |
34130462/cell_17 | [
"text_html_output_1.png"
] | import pandas as pd
metadata_df = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv', index_col='cord_uid')
example_df = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/target_tables/2_relevant_factors/How does temperature and humidity affect the transmission of 2019-nCoV.csv')
example_shas = []
example_uids = []
for index, row in example_df.iterrows():
study_title = row['Study']
study_metadata = metadata_df[metadata_df['title'] == study_title]
if len(study_metadata) != 0:
sha = study_metadata.iloc[0]['sha']
uid = study_metadata.iloc[0].name
if str(sha) != 'nan':
example_shas.append(sha)
example_uids.append(uid)
unique_example_uids = set(example_uids)
len(unique_example_uids) | code |
34130462/cell_10 | [
"application_vnd.jupyter.stderr_output_1.png"
] | import pandas as pd
metadata_df = pd.read_csv('../input/CORD-19-research-challenge/metadata.csv', index_col='cord_uid')
example_df = pd.read_csv('../input/CORD-19-research-challenge/Kaggle/target_tables/2_relevant_factors/How does temperature and humidity affect the transmission of 2019-nCoV.csv')
example_df | code |
32068850/cell_42 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbc.index
sbc.columns
sbc.shape[0]
sbc.shape[1]
sbc.loc['Harvey Mudd College']
sbc.loc[['Cooper Union', 'Harvey Mudd College', 'Amherst College', 'Auburn University']]
sbc.iloc[100:-100]
sbc[sbc['Mid-Career Median Salary'] > 100000] | code |
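The same selection reads a little cleaner with DataFrame.query, which accepts backticked column names containing spaces (supported since pandas 0.25):
sbc.query('`Mid-Career Median Salary` > 100000')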
32068850/cell_81 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbr.groupby('Region')['Starting Median Salary'].mean()
sbr.groupby('Region')['Starting Median Salary'].size() | code |
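mean and size can be computed in one pass with .agg instead of two separate groupbys:
sbr.groupby('Region')['Starting Median Salary'].agg(['mean', 'size'])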
32068850/cell_83 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbr.groupby('Region')['Starting Median Salary'].mean()
sbr.groupby('Region')['Starting Median Salary'].size()
sbr['Region'].value_counts() | code |
32068850/cell_57 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbc.index
sbc.columns
sbc.shape[0]
sbc.shape[1]
sbc.loc['Harvey Mudd College']
sbc.loc[['Cooper Union', 'Harvey Mudd College', 'Amherst College', 'Auburn University']]
sbc.iloc[100:-100]
round(sbc['Mid-Career Median Salary'].std(), 2) | code |
32068850/cell_33 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbc.index
sbc.columns
sbc.shape[0]
sbc.shape[1]
sbc.loc['Harvey Mudd College'] | code |
32068850/cell_87 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbc.index
sbc.columns
sbc.shape[0]
sbc.shape[1]
sbc.loc['Harvey Mudd College']
sbc.loc[['Cooper Union', 'Harvey Mudd College', 'Amherst College', 'Auburn University']]
sbc.iloc[100:-100]
sbc = sbc.sort_values('School Name')
sbc.groupby('School Type')['Starting Median Salary'].mean().plot.bar() | code |
32068850/cell_55 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
round(sbm['Mid-Career Median Salary'].std(), 2) | code |
32068850/cell_6 | [
"text_plain_output_1.png"
] | import os
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename)) | code |
32068850/cell_76 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbr.groupby('Region')['Starting Median Salary'].mean() | code |
32068850/cell_39 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbm[sbm['Mid-Career Median Salary'] > 100000] | code |
32068850/cell_26 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbc.index
sbc.columns
sbc.shape[0] | code |
32068850/cell_91 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbc.index
sbc.columns
sbc.shape[0]
sbc.shape[1]
sbc.loc['Harvey Mudd College']
sbc.loc[['Cooper Union', 'Harvey Mudd College', 'Amherst College', 'Auburn University']]
sbc.iloc[100:-100]
sbc = sbc.sort_values('School Name')
sbc[sbc['School Type'] == 'Ivy League'].sort_values('Starting Median Salary')['Starting Median Salary'].plot.barh() | code |
32068850/cell_65 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbc.index
sbc.columns
sbc.shape[0]
sbc.shape[1]
sbc.loc['Harvey Mudd College']
sbc.loc[['Cooper Union', 'Harvey Mudd College', 'Amherst College', 'Auburn University']]
sbc.iloc[100:-100]
sbc = sbc.sort_values('School Name')
sbc.head() | code |
32068850/cell_48 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
round(sbm['Mid-Career Median Salary'].mean(), 2) | code |
32068850/cell_73 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbr.head() | code |
32068850/cell_67 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbm = sbm.sort_values('Mid-Career Median Salary', ascending=False)
sbm['Starting Median Salary'].max() | code |
32068850/cell_19 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbc.tail() | code |
32068850/cell_69 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbm = sbm.sort_values('Mid-Career Median Salary', ascending=False)
sbm['Starting Median Salary'].idxmax() | code |
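idxmax returns the index label (the major); chaining .loc pulls the full row for it:
sbm.loc[sbm['Starting Median Salary'].idxmax()]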
32068850/cell_52 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbc.index
sbc.columns
sbc.shape[0]
sbc.shape[1]
sbc.loc['Harvey Mudd College']
sbc.loc[['Cooper Union', 'Harvey Mudd College', 'Amherst College', 'Auburn University']]
sbc.iloc[100:-100]
round(sbc['Mid-Career Median Salary'].median(), 2) | code |
32068850/cell_49 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
round(sbm['Mid-Career Median Salary'].median(), 2) | code |
32068850/cell_18 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbc.head() | code |
32068850/cell_89 | [
"text_plain_output_1.png",
"image_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbc.index
sbc.columns
sbc.shape[0]
sbc.shape[1]
sbc.loc['Harvey Mudd College']
sbc.loc[['Cooper Union', 'Harvey Mudd College', 'Amherst College', 'Auburn University']]
sbc.iloc[100:-100]
sbc = sbc.sort_values('School Name')
sbc['School Type'].value_counts().plot.pie() | code |
32068850/cell_51 | [
"text_plain_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbc.index
sbc.columns
sbc.shape[0]
sbc.shape[1]
sbc.loc['Harvey Mudd College']
sbc.loc[['Cooper Union', 'Harvey Mudd College', 'Amherst College', 'Auburn University']]
sbc.iloc[100:-100]
round(sbc['Mid-Career Median Salary'].mean(), 2) | code |
32068850/cell_62 | [
"text_html_output_1.png"
] | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
sbm = pd.read_csv('/kaggle/input/college-salaries/degrees-that-pay-back.csv').set_index('Undergraduate Major')
sbc = pd.read_csv('/kaggle/input/college-salaries/salaries-by-college-type.csv').set_index('School Name')
sbr = pd.read_csv('/kaggle/input/college-salaries/salaries-by-region.csv').set_index('School Name')
sbm = sbm.sort_values('Mid-Career Median Salary', ascending=False)
sbm.head() | code |